code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import unittest
from copy import deepcopy
from tensorly.decomposition import partial_tucker
from palmnet.core.layer_replacer_tucker import LayerReplacerTucker
from palmnet.data import Mnist
import numpy as np
from tensorly.tenalg.n_mode_product import multi_mode_dot
class TestLayerReplacerTucker(unittest.TestCase):
    """Tests for LayerReplacerTucker: rank estimation and Tucker-2 decomposition of 4-D conv kernels."""

    def setUp(self) -> None:
        # Base Keras model used by all tests.
        # NOTE(review): loaded through the Mnist helper although the name says cifar100 — confirm intended.
        self.base_model = Mnist.load_model("cifar100_vgg19_2048x2048")

    def test_simple(self):
        # Smoke test: fit_transform must run both with the default and an explicit dense rank percentage.
        model_transformer = LayerReplacerTucker(keep_last_layer=True)
        new_model = model_transformer.fit_transform(deepcopy(self.base_model))
        model_transformer = LayerReplacerTucker(rank_percentage_dense=0.5, keep_last_layer=True)
        new_model = model_transformer.fit_transform(deepcopy(self.base_model))

    def test_tucker_decomposition(self):
        import tensorly
        # Build a random (h, w, c, f) kernel, then construct a tensor that is *exactly*
        # low rank (ranks c_prim / f_prim on modes 2 and 3) from its HOSVD factors.
        h, w, c, f = 3, 3, 64, 128
        c_prim, f_prim = 16, 32
        base_tensor = np.random.rand(h, w, c, f)
        lst_fac = []
        for k in [2, 3]:
            # Left singular vectors of the mode-k unfolding = mode-k factor of the HOSVD.
            mod_k_unfold = tensorly.base.unfold(base_tensor, k)
            U, _, _ = np.linalg.svd(mod_k_unfold)
            lst_fac.append(U)
        # real_in_fac, real_out_fac = lst_fac[0][:, :c_prim], lst_fac[1][:, :f_prim]
        real_in_fac, real_out_fac = lst_fac[0], lst_fac[1]
        real_core = multi_mode_dot(base_tensor, [real_in_fac.T, real_out_fac.T], modes=(2,3))
        del base_tensor # no need of it anymore
        # Truncate core and factors to the target ranks, then rebuild the exactly-low-rank tensor.
        real_core = real_core[:,:,:c_prim,:f_prim]
        real_in_fac = real_in_fac[:, :c_prim]
        real_out_fac = real_out_fac[:, :f_prim]
        base_tensor_low_rank = multi_mode_dot(real_core, [real_in_fac, real_out_fac], modes=(2,3))
        # The replacer must recover the true (input, output) ranks from the low-rank tensor.
        in_rank, out_rank = LayerReplacerTucker.get_rank_layer(base_tensor_low_rank)
        assert in_rank == c_prim and out_rank == f_prim, f"{in_rank}!={c_prim} or {out_rank} != {f_prim}" # in_rank=16, out_rank=32 -> it works!
        decomposition = LayerReplacerTucker.get_tucker_decomposition(base_tensor_low_rank, in_rank, out_rank)
        # core_tilde, (in_fac_tilde, out_fac_tilde) = partial_tucker(base_tensor, modes=(2, 3), ranks=(in_rank, out_rank), init='svd')
        in_fac_tilde, core_tilde, out_fac_tilde = decomposition
        base_tensor_tilde = multi_mode_dot(core_tilde, [in_fac_tilde, out_fac_tilde], modes=(2,3))
        # Only the reconstruction is unique; individual factors match the originals only up to
        # rotation, hence the commented-out factor-wise comparisons below.
        assert np.allclose(base_tensor_tilde, base_tensor_low_rank)
        print(np.linalg.norm(in_fac_tilde - real_in_fac) / np.linalg.norm(real_in_fac))
        # assert np.allclose(in_fac_tilde, real_in_fac)
        # assert np.allclose(core_tilde, core)
        # assert np.allclose(out_fac_tilde, out_fac)

    def test_stack_overflow(self):
        import tensorly
        import numpy as np
        h, w, c, f = 3, 3, 64, 128
        c_prim, f_prim = 16, 32
        base_tensor = np.random.rand(h, w, c, f)
        # compute tucker decomposition by hand using higher order svd describred here: https://www.alexejgossmann.com/tensor_decomposition_tucker/.
        lst_fac = []
        for k in [2, 3]:
            mod_k_unfold = tensorly.base.unfold(base_tensor, k)
            U, _, _ = np.linalg.svd(mod_k_unfold)
            lst_fac.append(U)
        real_in_fac, real_out_fac = lst_fac[0], lst_fac[1]
        real_core = multi_mode_dot(base_tensor, [real_in_fac.T, real_out_fac.T], modes=(2, 3))
        del base_tensor # no need of it anymore
        # what i call the "low rank tucker decomposition"
        real_core = real_core[:, :, :c_prim, :f_prim]
        real_in_fac = real_in_fac[:, :c_prim]
        real_out_fac = real_out_fac[:, :f_prim]
        # low rank approximation
        base_tensor_low_rank = multi_mode_dot(real_core, [real_in_fac, real_out_fac], modes=(2, 3))
        in_rank, out_rank = c_prim, f_prim
        core_tilde, (in_fac_tilde, out_fac_tilde) = partial_tucker(base_tensor_low_rank, modes=(2, 3), ranks=(in_rank, out_rank), init='svd')
        base_tensor_tilde = multi_mode_dot(core_tilde, [in_fac_tilde, out_fac_tilde], modes=(2, 3))
        assert np.allclose(base_tensor_tilde, base_tensor_low_rank) # this is OK
        # Expected: Tucker factors are not unique, so comparing them element-wise fails
        # even when the reconstruction is perfect.
        assert np.allclose(in_fac_tilde, real_in_fac) # this fails
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"tensorly.tenalg.n_mode_product.multi_mode_dot",
"numpy.allclose",
"tensorly.base.unfold",
"numpy.random.rand",
"palmnet.data.Mnist.load_model",
"palmnet.core.layer_replacer_tucker.LayerReplacerTucker.get_tucker_decomposition",
"numpy.linalg.norm",
"numpy.linalg.svd",
"palmnet.core.layer_replacer_tu... | [((4196, 4211), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4209, 4211), False, 'import unittest\n'), ((377, 421), 'palmnet.data.Mnist.load_model', 'Mnist.load_model', (['"""cifar100_vgg19_2048x2048"""'], {}), "('cifar100_vgg19_2048x2048')\n", (393, 421), False, 'from palmnet.data import Mnist\n'), ((478, 519), 'palmnet.core.layer_replacer_tucker.LayerReplacerTucker', 'LayerReplacerTucker', ([], {'keep_last_layer': '(True)'}), '(keep_last_layer=True)\n', (497, 519), False, 'from palmnet.core.layer_replacer_tucker import LayerReplacerTucker\n'), ((628, 696), 'palmnet.core.layer_replacer_tucker.LayerReplacerTucker', 'LayerReplacerTucker', ([], {'rank_percentage_dense': '(0.5)', 'keep_last_layer': '(True)'}), '(rank_percentage_dense=0.5, keep_last_layer=True)\n', (647, 696), False, 'from palmnet.core.layer_replacer_tucker import LayerReplacerTucker\n'), ((932, 958), 'numpy.random.rand', 'np.random.rand', (['h', 'w', 'c', 'f'], {}), '(h, w, c, f)\n', (946, 958), True, 'import numpy as np\n'), ((1315, 1389), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', (['base_tensor', '[real_in_fac.T, real_out_fac.T]'], {'modes': '(2, 3)'}), '(base_tensor, [real_in_fac.T, real_out_fac.T], modes=(2, 3))\n', (1329, 1389), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((1616, 1684), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', (['real_core', '[real_in_fac, real_out_fac]'], {'modes': '(2, 3)'}), '(real_core, [real_in_fac, real_out_fac], modes=(2, 3))\n', (1630, 1684), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((1714, 1770), 'palmnet.core.layer_replacer_tucker.LayerReplacerTucker.get_rank_layer', 'LayerReplacerTucker.get_rank_layer', (['base_tensor_low_rank'], {}), '(base_tensor_low_rank)\n', (1748, 1770), False, 'from palmnet.core.layer_replacer_tucker import LayerReplacerTucker\n'), ((1942, 2031), 
'palmnet.core.layer_replacer_tucker.LayerReplacerTucker.get_tucker_decomposition', 'LayerReplacerTucker.get_tucker_decomposition', (['base_tensor_low_rank', 'in_rank', 'out_rank'], {}), '(base_tensor_low_rank, in_rank,\n out_rank)\n', (1986, 2031), False, 'from palmnet.core.layer_replacer_tucker import LayerReplacerTucker\n'), ((2255, 2326), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', (['core_tilde', '[in_fac_tilde, out_fac_tilde]'], {'modes': '(2, 3)'}), '(core_tilde, [in_fac_tilde, out_fac_tilde], modes=(2, 3))\n', (2269, 2326), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((2341, 2393), 'numpy.allclose', 'np.allclose', (['base_tensor_tilde', 'base_tensor_low_rank'], {}), '(base_tensor_tilde, base_tensor_low_rank)\n', (2352, 2393), True, 'import numpy as np\n'), ((2816, 2842), 'numpy.random.rand', 'np.random.rand', (['h', 'w', 'c', 'f'], {}), '(h, w, c, f)\n', (2830, 2842), True, 'import numpy as np\n'), ((3262, 3336), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', (['base_tensor', '[real_in_fac.T, real_out_fac.T]'], {'modes': '(2, 3)'}), '(base_tensor, [real_in_fac.T, real_out_fac.T], modes=(2, 3))\n', (3276, 3336), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((3658, 3726), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', (['real_core', '[real_in_fac, real_out_fac]'], {'modes': '(2, 3)'}), '(real_core, [real_in_fac, real_out_fac], modes=(2, 3))\n', (3672, 3726), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((3822, 3916), 'tensorly.decomposition.partial_tucker', 'partial_tucker', (['base_tensor_low_rank'], {'modes': '(2, 3)', 'ranks': '(in_rank, out_rank)', 'init': '"""svd"""'}), "(base_tensor_low_rank, modes=(2, 3), ranks=(in_rank, out_rank\n ), init='svd')\n", (3836, 3916), False, 'from tensorly.decomposition import partial_tucker\n'), ((3940, 4011), 'tensorly.tenalg.n_mode_product.multi_mode_dot', 'multi_mode_dot', 
(['core_tilde', '[in_fac_tilde, out_fac_tilde]'], {'modes': '(2, 3)'}), '(core_tilde, [in_fac_tilde, out_fac_tilde], modes=(2, 3))\n', (3954, 4011), False, 'from tensorly.tenalg.n_mode_product import multi_mode_dot\n'), ((4027, 4079), 'numpy.allclose', 'np.allclose', (['base_tensor_tilde', 'base_tensor_low_rank'], {}), '(base_tensor_tilde, base_tensor_low_rank)\n', (4038, 4079), True, 'import numpy as np\n'), ((4110, 4148), 'numpy.allclose', 'np.allclose', (['in_fac_tilde', 'real_in_fac'], {}), '(in_fac_tilde, real_in_fac)\n', (4121, 4148), True, 'import numpy as np\n'), ((572, 597), 'copy.deepcopy', 'deepcopy', (['self.base_model'], {}), '(self.base_model)\n', (580, 597), False, 'from copy import deepcopy\n'), ((749, 774), 'copy.deepcopy', 'deepcopy', (['self.base_model'], {}), '(self.base_model)\n', (757, 774), False, 'from copy import deepcopy\n'), ((1033, 1069), 'tensorly.base.unfold', 'tensorly.base.unfold', (['base_tensor', 'k'], {}), '(base_tensor, k)\n', (1053, 1069), False, 'import tensorly\n'), ((1092, 1119), 'numpy.linalg.svd', 'np.linalg.svd', (['mod_k_unfold'], {}), '(mod_k_unfold)\n', (1105, 1119), True, 'import numpy as np\n'), ((3065, 3101), 'tensorly.base.unfold', 'tensorly.base.unfold', (['base_tensor', 'k'], {}), '(base_tensor, k)\n', (3085, 3101), False, 'import tensorly\n'), ((3124, 3151), 'numpy.linalg.svd', 'np.linalg.svd', (['mod_k_unfold'], {}), '(mod_k_unfold)\n', (3137, 3151), True, 'import numpy as np\n'), ((2408, 2450), 'numpy.linalg.norm', 'np.linalg.norm', (['(in_fac_tilde - real_in_fac)'], {}), '(in_fac_tilde - real_in_fac)\n', (2422, 2450), True, 'import numpy as np\n'), ((2453, 2480), 'numpy.linalg.norm', 'np.linalg.norm', (['real_in_fac'], {}), '(real_in_fac)\n', (2467, 2480), True, 'import numpy as np\n')] |
import sys
import os
# Root of the local Mask_RCNN checkout; added to sys.path so `mrcnn` resolves.
ROOT_DIR = os.path.abspath("Mask_RCNN")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
import brats
import h5py
import numpy as np
import random
from numpy import genfromtxt
import matplotlib.pyplot as plt
import math
def dice(im1, im2):
    """
    Computes the Dice coefficient, a measure of set similarity.

    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.

    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0

    Raises
    ------
    ValueError
        If `im1` and `im2` do not have the same shape.

    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the correct dtype alias.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    # Compute Dice coefficient: 2|A∩B| / (|A| + |B|)
    intersection = np.logical_and(im1, im2)
    return 2. * intersection.sum() / (im1.sum() + im2.sum())
# Prefer the dedicated inference config; fall back to the training config if absent.
# NOTE(review): a bare `except:` also swallows KeyboardInterrupt/SystemExit — narrow it if possible.
try:
    inference_config = brats.InferenceConfig()
except:
    inference_config=brats.BratsConfig()

# Dataset locations (hard-coded to a local machine).
test_dir = "C:/Users/flohr/PythonProjects/MaskRCNN/Mask_RCNN/datasets/brats/test"
data_dir = "C:/Users/flohr/PythonProjects/MaskRCNN/Mask_RCNN/datasets/brats/train"
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
#this could be a potential error source 'mrimath2018063...'
#result_data = genfromtxt(MODEL_DIR +'/brats20180922T1937/model_loss_log.csv', delimiter=',')
# NOTE(review): result_data is an h5py.File (a weights checkpoint) but is indexed below like a
# 2-D array (result_data[:,0]); that likely fails at runtime — the commented-out genfromtxt line
# above was probably the intended data source. Verify.
result_data = h5py.File(MODEL_DIR + '/brats20180922T1937/mask_rcnn_brats_0003.h5', 'r')
plt.figure()
#plt.plot(result_data[:,0], result_data[:,1],label="total loss")
plt.plot(result_data[:,0], result_data[:,4], label = "loss")
plt.plot(result_data[:,0], result_data[:,10], label = "val loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(loc='upper right')
plt.show()

# Load and prepare the test dataset.
dataset = brats.FlairDataset()
dataset.load_images(test_dir)
dataset.prepare()
print("Testing on " + str(len(dataset.image_info)) + " images")
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                  config=inference_config,
                  model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Test on a random image
"""
here starts the diversion
"""
# Pick one random validation image, run detection, and visualise prediction vs ground truth.
image_id = random.choice(dataset.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =modellib.load_image_gt(dataset, inference_config, image_id, use_mini_mask=False)
print(dataset.image_info[image_id])
results = model.detect([original_image], verbose=0)
r = results[0]
visualize.display_differences(original_image, gt_bbox, gt_class_id, gt_mask,r["rois"], r["class_ids"], r["scores"], r['masks'],dataset.class_names, show_box=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
#image_ids = np.random.choice(dataset.image_ids, 1000)
#print(image_ids)
#print(dataset.image_ids)
APs = []
precision = {}
precision[1] = []
recall = {}
recall[1] = []
dices = []
# Evaluate the best-matching-mask Dice score over the whole test set.
for image_id in dataset.image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset, inference_config,
                               image_id, use_mini_mask=False)
    molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)  # NOTE(review): unused
    # Run object detection
    results = model.detect([image], verbose=0)
    r = results[0]
    if ((r['masks'] > 0.5).size <= 0 or (gt_mask > 0.5).size <= 0):
        continue
    # BUGFIX: the original used `is 256` — an identity comparison on an int, which is
    # implementation-defined and a SyntaxWarning on Python >= 3.8. Use `==` instead.
    if r['masks'].shape[0] == 256:
        # Keep the best Dice score over all predicted mask instances for this image.
        score = -math.inf
        for i in range(0, r['masks'].shape[2]):
            current = dice(r['masks'][:, :, i:i + 1], gt_mask)  # compute once, not twice
            if current > score:
                score = current
        dices.append(score)
    """
    AP, precisions, recalls, overlaps =\
        utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
            r["rois"], r["class_ids"], r["scores"], r['masks'], iou_threshold=0.5)
    """
    #print(jaccards[label])
    #print(AP)
    #APs.append(AP)
print("Dice Coefficient: " + str(np.mean(np.asarray(dices))))
# move the testing data back
"""
list_imgs = os.listdir(test_dir)
for sub_dir in list_imgs:
dir_to_move = os.path.join(test_dir, sub_dir)
shutil.move(dir_to_move, data_dir)
"""
| [
"mrcnn.model.MaskRCNN",
"matplotlib.pyplot.ylabel",
"mrcnn.model.log",
"sys.path.append",
"brats.FlairDataset",
"mrcnn.model.mold_image",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"brats.BratsConfig",
"numpy.asarray",
"random.choice",
"h5py.File",
"mrcnn.visualize.display_differe... | [((32, 60), 'os.path.abspath', 'os.path.abspath', (['"""Mask_RCNN"""'], {}), "('Mask_RCNN')\n", (47, 60), False, 'import os\n'), ((81, 106), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (96, 106), False, 'import sys\n'), ((1726, 1756), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1738, 1756), False, 'import os\n'), ((1985, 2058), 'h5py.File', 'h5py.File', (["(MODEL_DIR + '/brats20180922T1937/mask_rcnn_brats_0003.h5')", '"""r"""'], {}), "(MODEL_DIR + '/brats20180922T1937/mask_rcnn_brats_0003.h5', 'r')\n", (1994, 2058), False, 'import h5py\n'), ((2059, 2071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2198), 'matplotlib.pyplot.plot', 'plt.plot', (['result_data[:, 0]', 'result_data[:, 4]'], {'label': '"""loss"""'}), "(result_data[:, 0], result_data[:, 4], label='loss')\n", (2146, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2264), 'matplotlib.pyplot.plot', 'plt.plot', (['result_data[:, 0]', 'result_data[:, 10]'], {'label': '"""val loss"""'}), "(result_data[:, 0], result_data[:, 10], label='val loss')\n", (2207, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2286), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2276, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2306), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2298, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2337), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2318, 2337), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2346, 2348), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2380), 'brats.FlairDataset', 'brats.FlairDataset', ([], {}), '()\n', (2378, 2380), False, 
'import brats\n'), ((2543, 2629), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (2560, 2629), True, 'import mrcnn.model as modellib\n'), ((3082, 3114), 'random.choice', 'random.choice', (['dataset.image_ids'], {}), '(dataset.image_ids)\n', (3095, 3114), False, 'import random\n'), ((3174, 3259), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'inference_config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset, inference_config, image_id, use_mini_mask=False\n )\n', (3196, 3259), True, 'import mrcnn.model as modellib\n'), ((3367, 3540), 'mrcnn.visualize.display_differences', 'visualize.display_differences', (['original_image', 'gt_bbox', 'gt_class_id', 'gt_mask', "r['rois']", "r['class_ids']", "r['scores']", "r['masks']", 'dataset.class_names'], {'show_box': '(False)'}), "(original_image, gt_bbox, gt_class_id, gt_mask,\n r['rois'], r['class_ids'], r['scores'], r['masks'], dataset.class_names,\n show_box=False)\n", (3396, 3540), False, 'from mrcnn import visualize\n'), ((3537, 3574), 'mrcnn.model.log', 'log', (['"""original_image"""', 'original_image'], {}), "('original_image', original_image)\n", (3540, 3574), False, 'from mrcnn.model import log\n'), ((3575, 3604), 'mrcnn.model.log', 'log', (['"""image_meta"""', 'image_meta'], {}), "('image_meta', image_meta)\n", (3578, 3604), False, 'from mrcnn.model import log\n'), ((3605, 3636), 'mrcnn.model.log', 'log', (['"""gt_class_id"""', 'gt_class_id'], {}), "('gt_class_id', gt_class_id)\n", (3608, 3636), False, 'from mrcnn.model import log\n'), ((3637, 3660), 'mrcnn.model.log', 'log', (['"""gt_bbox"""', 'gt_bbox'], {}), "('gt_bbox', gt_bbox)\n", (3640, 3660), False, 'from mrcnn.model import log\n'), ((3661, 3684), 'mrcnn.model.log', 'log', (['"""gt_mask"""', 'gt_mask'], {}), "('gt_mask', gt_mask)\n", (3664, 3684), False, 'from mrcnn.model 
import log\n'), ((1304, 1328), 'numpy.logical_and', 'np.logical_and', (['im1', 'im2'], {}), '(im1, im2)\n', (1318, 1328), True, 'import numpy as np\n'), ((1420, 1443), 'brats.InferenceConfig', 'brats.InferenceConfig', ([], {}), '()\n', (1441, 1443), False, 'import brats\n'), ((4103, 4188), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset', 'inference_config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset, inference_config, image_id, use_mini_mask=False\n )\n', (4125, 4188), True, 'import mrcnn.model as modellib\n'), ((1478, 1497), 'brats.BratsConfig', 'brats.BratsConfig', ([], {}), '()\n', (1495, 1497), False, 'import brats\n'), ((4250, 4294), 'mrcnn.model.mold_image', 'modellib.mold_image', (['image', 'inference_config'], {}), '(image, inference_config)\n', (4269, 4294), True, 'import mrcnn.model as modellib\n'), ((1065, 1080), 'numpy.asarray', 'np.asarray', (['im1'], {}), '(im1)\n', (1075, 1080), True, 'import numpy as np\n'), ((1107, 1122), 'numpy.asarray', 'np.asarray', (['im2'], {}), '(im2)\n', (1117, 1122), True, 'import numpy as np\n'), ((5079, 5096), 'numpy.asarray', 'np.asarray', (['dices'], {}), '(dices)\n', (5089, 5096), True, 'import numpy as np\n')] |
import sc2
from sc2.position import Point2
from sc2.units import Units
from typing import List, Callable
import py_trees
from py_trees.composites import Sequence, Selector
from py_trees.idioms import eternal_guard
from py_trees.behaviour import Behaviour
from unit_ai_data import UnitAiOrderType, UnitAiOrder, UnitAiController
import numpy as np
import random
class Army:
    """
    Helper class holding the data used by the behaviour-tree nodes that control the player's army.
    """

    def __init__(self, bot: sc2.BotAI, get_unit_ai: Callable[[int], UnitAiController]):
        # Reference to the bot instance (map/unit access).
        self.bot: sc2.BotAI = bot
        # Tags of the units that belong to this army.
        self.units: List[int] = []
        # Mean spread (distance from centre) below which the army counts as clustered.
        self.army_cluster_size: float = 3.
        # Current estimate of the enemy army strength (total ground dps seen so far).
        self.enemy_strength: float = 0.
        # Callback resolving a unit tag to its per-unit AI controller.
        self.get_unit_ai: Callable[[int], UnitAiController] = get_unit_ai
class IsArmyStrongEnough(Behaviour):
    """
    Node checking whether the bot's army is strong enough to fight the enemy. Army strength is
    the total damage per second (ground dps) dealt by all units in the army, compared against
    the same measure computed from the enemy units seen so far. Finishes with *SUCCESS* when
    the bot's army has the advantage, *FAILURE* otherwise.
    """

    def __init__(self, name: str, army: Army):
        super().__init__(name)
        self.army: Army = army

    def get_army_strength(self) -> float:
        # Total ground dps over every unit whose tag belongs to the army.
        own_units = self.army.bot.units.tags_in(self.army.units)
        strength = 0
        for combat_unit in own_units:
            strength += combat_unit.ground_dps
        return strength

    def update(self):
        # A 25% margin over the remembered enemy strength counts as "strong enough".
        strong_enough = self.get_army_strength() * 1.25 >= self.army.enemy_strength
        return py_trees.common.Status.SUCCESS if strong_enough else py_trees.common.Status.FAILURE
class AreEnemiesVisible(Behaviour):
    """
    Node checking whether any unit of the army has, within its sight range, an enemy unit it can
    actually see (i.e. not cloaked or burrowed). If so, the node finishes with success; otherwise
    it finishes with failure.
    """

    def __init__(self, name: str, army: Army):
        super().__init__(name)
        self.army: Army = army

    def update(self):
        bot = self.army.bot
        own_units = bot.units.tags_in(self.army.units)
        # Candidate targets are all known enemy units and structures.
        potential_targets = bot.enemy_units + bot.enemy_structures
        for own_unit in own_units:
            in_sight = potential_targets.filter(
                lambda enemy: enemy.distance_to(own_unit) <= own_unit.sight_range and enemy.can_be_attacked
            )
            if in_sight.exists:
                return py_trees.common.Status.SUCCESS
        return py_trees.common.Status.FAILURE
class SeekEnemies(Behaviour):
    """
    Node responsible for searching for enemy units or buildings at start locations, at places
    containing mineral or gas deposits, and at locations where enemy buildings were seen but are
    now covered by the fog of war.

    Before the node runs for the first time it is initialised, generating the list of places the
    army should visit while looking for the enemy player. Then, on each subsequent tick, the army
    visits the next places and the node returns *RUNNING* — the search task is still in progress.
    Once all places have been visited, the node finishes with *SUCCESS*. If the army has no units
    (it is empty), the node finishes with *FAILURE*.
    """

    def __init__(self, name: str, army: Army):
        super().__init__(name)
        self.army: Army = army
        self.locations_to_check: List[Point2] = []

    def initialise(self):
        # Find places containing resources.
        expansions = self.army.bot.expansion_locations_list
        random.shuffle(expansions)
        # Find places where enemy buildings have been seen (snapshots under fog of war).
        snapshot_buildings = self.army.bot.enemy_structures.filter(lambda enemy: enemy.is_snapshot)
        buildings_locations = [building.position for building in snapshot_buildings]
        self.locations_to_check = buildings_locations + self.army.bot.enemy_start_locations + expansions

    def update(self):
        # Take all of the bot's units whose tags are on the stored list.
        units = self.army.bot.units.tags_in(self.army.units)
        if units.empty:
            return py_trees.common.Status.FAILURE
        # If the units reached the target place, remove it from the list of places to visit and
        # continue the exploration.
        if len(self.locations_to_check) > 0:
            if (units.center - self.locations_to_check[0]).length < 5:
                self.locations_to_check.pop(0)
        else:
            # All places visited — the search is complete.
            return py_trees.common.Status.SUCCESS
        # Order the units to go to the first place on the list of places to visit. If the units
        # are spread too far apart, order them to gather into a tighter group instead.
        mean_distance = np.mean([(unit.position - units.center).length for unit in units])
        for unit in units:
            unit_ai = self.army.get_unit_ai(unit.tag)
            if mean_distance < self.army.army_cluster_size:
                unit_ai.order = UnitAiOrder(UnitAiOrderType.Move, target=self.locations_to_check[0])
            else:
                unit_ai.order = UnitAiOrder(UnitAiOrderType.Move, target=units.center)
        return py_trees.common.Status.RUNNING
class StayInBase(Behaviour):
    """
    Node ordering the army units to return to the base and attack enemies only once they are
    inside it. The node always finishes with *SUCCESS*.
    """

    def __init__(self, name: str, army: Army):
        super().__init__(name)
        self.army: Army = army

    def update(self):
        bot = self.army.bot
        # Rally at the centre of the buildings near the start location, or the start location
        # itself when no such buildings remain.
        buildings_near_start = bot.structures.in_distance_between(bot.start_location, 0, 25)
        rally_point = bot.start_location if buildings_near_start.empty else buildings_near_start.center
        for own_unit in bot.units.tags_in(self.army.units):
            controller = self.army.get_unit_ai(own_unit.tag)
            controller.order = UnitAiOrder(UnitAiOrderType.DefendLocation, target=rally_point)
        return py_trees.common.Status.SUCCESS
class Attack(Behaviour):
    """
    Node whose task is to check whether any army unit has within its sight range an enemy it can
    see (i.e. not burrowed or cloaked). In that case the army orders all of its units to attack
    near the spotted enemy. The node always finishes with *SUCCESS*.
    """

    def __init__(self, name: str, army: Army):
        super().__init__(name)
        self.army: Army = army

    def update(self):
        units = self.army.bot.units.tags_in(self.army.units)
        potential_targets = self.army.bot.enemy_units + self.army.bot.enemy_structures
        # BUGFIX: the original reused the name `unit` for the inner loop, shadowing the outer
        # loop variable; the scout and the commanded unit are now distinct names.
        for scout in units:
            visible_enemies = potential_targets.filter(
                lambda enemy: enemy.distance_to(scout) <= scout.sight_range and enemy.can_be_attacked
            )
            if visible_enemies.exists:
                target = visible_enemies.closest_to(units.center).position
                for unit in units:
                    unit_ai = self.army.get_unit_ai(unit.tag)
                    unit_ai.order = UnitAiOrder(UnitAiOrderType.MoveAttack, target=target)
                return py_trees.common.Status.SUCCESS
        # No enemies in sight — still report success (attacking is best-effort).
        return py_trees.common.Status.SUCCESS
class ArmyBht:
    """
    Class implementing the behaviour of an army consisting of many combat units. The army keeps a
    helper object of type *Army* which contains information about the unit composition and other
    additional data. The class also stores the behaviour tree which, based on the data in the Army
    object, can make decisions for the whole group of units.

    In its current form the army AI searches for enemies on the map and, if it believes it is
    strong enough, fights them. If it spots enemies and concludes they are weaker, it attacks them
    as a whole group. Otherwise it retreats to the base. After retreating, the army waits a while
    before marching out to fight again.

    Note the *seek_enemies* node. Its execution takes a very long time (it returns the *RUNNING*
    status many times) and must be interrupted should the enemy turn out to have a stronger army
    (because e.g. a large number of strong units was spotted in another part of the map). To do
    that, a node combination built with the *eternal guard* idiom is used, which allows the
    *seek_enemies* node to be interrupted.
    """

    def construct_behavior_tree(self) -> Behaviour:
        """
        Method constructing the behaviour tree from the previously defined nodes.

        Returns
        -------
        out : Behaviour
            behaviour tree instance controlling the behaviour of the bot's army.
        """
        is_army_strong_enough = IsArmyStrongEnough(name="Is army strong enough?", army=self.army)
        are_enemies_visible = AreEnemiesVisible(name="Are enemies visible?", army=self.army)
        stay_in_base = StayInBase(name="Stay in base", army=self.army)
        seek_enemies = SeekEnemies(name="Seek enemies", army=self.army)
        attack_visible_enemies = Sequence(name="Attack visible enemies")
        attack = Attack(name="Attack", army=self.army)
        attack_visible_enemies.add_children([are_enemies_visible, attack])
        root = Selector(name="Army behavior")
        move_out = Selector(name="Move out")
        move_out.add_children([attack_visible_enemies, seek_enemies])
        # Eternal guard: aborts the (long-running) move_out subtree whenever the strength check fails.
        move_out_guard = eternal_guard(name="Move out guard", subtree=move_out, conditions=[is_army_strong_enough])
        root.add_children([move_out_guard, stay_in_base])
        return root

    def __init__(self, bot: sc2.BotAI,
                 get_unit_ai: Callable[[int], UnitAiController],
                 delta_time: Callable[[], float]):
        self.army: Army = Army(bot, get_unit_ai)
        self.delta_time: Callable[[], float] = delta_time
        self.behavior_tree: Behaviour = self.construct_behavior_tree()
        # Rate at which the remembered enemy strength decays per unit of time.
        self.forget_rate: float = 0.1

    def calculate_units_strength(self, units: Units) -> float:
        """
        Computes the strength of a group of units based on the damage dealt per second (dps).

        Parameters
        ----------
        units : Units
            group of units whose strength should be computed.

        Returns
        -------
        out : float
            computed strength of the group of units.
        """
        return sum(unit.ground_dps for unit in units)

    def update(self):
        # Estimate the enemy army strength from the units it owns (those seen so far).
        # Over time the enemy strength decays (down to 0), so that the army, if it retreated,
        # can attack once more after a while.
        self.army.enemy_strength = max(0.0,
                                       self.army.enemy_strength - self.delta_time() * self.forget_rate,
                                       self.calculate_units_strength(self.army.bot.enemy_units))
        self.behavior_tree.tick_once()
| [
"numpy.mean",
"unit_ai_data.UnitAiOrder",
"random.shuffle",
"py_trees.idioms.eternal_guard",
"py_trees.composites.Sequence",
"py_trees.composites.Selector"
] | [((3850, 3876), 'random.shuffle', 'random.shuffle', (['expansions'], {}), '(expansions)\n', (3864, 3876), False, 'import random\n'), ((5044, 5110), 'numpy.mean', 'np.mean', (['[(unit.position - units.center).length for unit in units]'], {}), '([(unit.position - units.center).length for unit in units])\n', (5051, 5110), True, 'import numpy as np\n'), ((9490, 9529), 'py_trees.composites.Sequence', 'Sequence', ([], {'name': '"""Attack visible enemies"""'}), "(name='Attack visible enemies')\n", (9498, 9529), False, 'from py_trees.composites import Sequence, Selector\n'), ((9676, 9706), 'py_trees.composites.Selector', 'Selector', ([], {'name': '"""Army behavior"""'}), "(name='Army behavior')\n", (9684, 9706), False, 'from py_trees.composites import Sequence, Selector\n'), ((9726, 9751), 'py_trees.composites.Selector', 'Selector', ([], {'name': '"""Move out"""'}), "(name='Move out')\n", (9734, 9751), False, 'from py_trees.composites import Sequence, Selector\n'), ((9847, 9942), 'py_trees.idioms.eternal_guard', 'eternal_guard', ([], {'name': '"""Move out guard"""', 'subtree': 'move_out', 'conditions': '[is_army_strong_enough]'}), "(name='Move out guard', subtree=move_out, conditions=[\n is_army_strong_enough])\n", (9860, 9942), False, 'from py_trees.idioms import eternal_guard\n'), ((6295, 6362), 'unit_ai_data.UnitAiOrder', 'UnitAiOrder', (['UnitAiOrderType.DefendLocation'], {'target': 'target_location'}), '(UnitAiOrderType.DefendLocation, target=target_location)\n', (6306, 6362), False, 'from unit_ai_data import UnitAiOrderType, UnitAiOrder, UnitAiController\n'), ((5284, 5352), 'unit_ai_data.UnitAiOrder', 'UnitAiOrder', (['UnitAiOrderType.Move'], {'target': 'self.locations_to_check[0]'}), '(UnitAiOrderType.Move, target=self.locations_to_check[0])\n', (5295, 5352), False, 'from unit_ai_data import UnitAiOrderType, UnitAiOrder, UnitAiController\n'), ((5403, 5457), 'unit_ai_data.UnitAiOrder', 'UnitAiOrder', (['UnitAiOrderType.Move'], {'target': 'units.center'}), 
'(UnitAiOrderType.Move, target=units.center)\n', (5414, 5457), False, 'from unit_ai_data import UnitAiOrderType, UnitAiOrder, UnitAiController\n')] |
import math
import os
import argparse
import numpy as np
import cv2
import oneflow as flow
import fresnet100
import oneflow.typing as tp
from typing import Tuple
from scipy.spatial import distance
def get_val_args(argv=None):
    """Build and parse the command-line flags for validation.

    Args:
        argv: Optional list of argument strings. When ``None`` (the default),
            ``argparse`` falls back to ``sys.argv[1:]``, preserving the
            original CLI behavior; passing an explicit list makes this
            function callable from tests and other Python code.

    Returns:
        argparse.Namespace with the parsed validation settings.
    """
    val_parser = argparse.ArgumentParser(description="flags for validation")
    val_parser.add_argument(
        "--val_img_dir",
        type=str,
        default="./woman.jpeg",
        help="validation dataset dir",
    )
    # distribution config
    val_parser.add_argument(
        "--device_num_per_node",
        type=int,
        default=1,
        required=False,
    )
    val_parser.add_argument(
        "--num_nodes",
        type=int,
        default=1,
        help="node/machine number for training",
    )
    val_parser.add_argument(
        "--val_batch_size",
        default=1,
        type=int,
        help="validation batch size totally",
    )
    # model and log
    val_parser.add_argument(
        "--log_dir", type=str, default="./log", help="log info save"
    )
    val_parser.add_argument(
        "--model_load_dir", default="/insightface_nhwc", help="path to load model."
    )
    return val_parser.parse_args(argv)
def load_image(image_path):
    """Read an image file and preprocess it into a (1, 112, 112, 3) float32 NHWC batch."""
    target_size = (112, 112)
    rgb_mean = [127.5, 127.5, 127.5]
    rgb_scale = [128.0, 128.0, 128.0]
    img = cv2.imread(image_path)
    img = cv2.resize(img, target_size, interpolation=cv2.INTER_AREA)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Normalize to roughly [-1, 1].
    img = (img - rgb_mean) / rgb_scale
    # HWC -> CHW, add the batch axis, then back to NHWC.
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    img = np.transpose(img, (0, 2, 3, 1))
    print("image size: ", img.shape)
    return np.ascontiguousarray(img, 'float32')
def get_cambricon_config():
    """Build the oneflow function config used for inference on the Cambricon device."""
    cfg = flow.function_config()
    cfg.default_logical_view(flow.scope.consistent_view())
    cfg.default_data_type(flow.float)
    cfg.default_placement_scope(flow.scope.placement("cambricon", "0:0"))
    return cfg
def validation_job(images, config):
    """Define and return a oneflow predict job for a single 112x112 RGB image batch."""

    @flow.global_function(type="predict", function_config=config)
    def get_symbol_val_job(
        images: flow.typing.Numpy.Placeholder(
            (1, 112, 112, 3)
        )
    ):
        print("val batch data: ", images.shape)
        # Forward pass through the ResNet-100 backbone to get the embedding.
        return fresnet100.get_symbol(images)

    return get_symbol_val_job
def do_validation(images, val_job, name_suffix):
    """Run the prediction job on *images* and return the fetched embedding.

    Args:
        images: Preprocessed NHWC float32 batch (see ``load_image``).
        val_job: A callable oneflow job whose lazy result exposes ``.get()``.
        name_suffix: Unused; kept so existing call sites stay valid.

    Returns:
        The embedding result returned by ``val_job(images).get()``.
    """
    # Removed the unused ``batch_size`` / ``total_images_num`` locals — they
    # were never read.
    print("Validation starts...")
    return val_job(images).get()
def load_checkpoint(model_load_dir):
    """Load model weights from *model_load_dir* into the default oneflow session."""
    banner = "=" * 20
    print(banner + " model load begin " + banner)
    checkpoint = flow.train.CheckPoint()
    checkpoint.load(model_load_dir)
    print(banner + " model load end " + banner)
def main():
    """Entry point: parse flags, load one image, and run a single MLU prediction."""
    args = get_val_args()
    flow.env.init()
    flow.env.log_dir(args.log_dir)
    # validation
    print("args: ", args)
    output_list = []
    # Guard clause: bail out early when the image path is missing.
    if not os.path.exists(args.val_img_dir):
        raise ValueError ("Image path for validation does NOT exist!")
    print("=" * 20 + " image load begin " + "=" * 20)
    images = load_image(args.val_img_dir)
    print("=" * 20 + " image load end " + "=" * 20)
    flow.config.enable_legacy_model_io(True)
    val_job = validation_job(images, get_cambricon_config())
    load_checkpoint(args.model_load_dir)
    print("=" * 20 + " Prediction begins " + "=" * 20)
    mlu_res = do_validation(images, val_job, "mlu")
    print("=" * 20 + " Prediction ends " + "=" * 20)
    flow.clear_default_session()
# Script entry point: run the full validation pipeline.
if __name__ == "__main__":
    main()
| [
"oneflow.env.init",
"oneflow.scope.consistent_view",
"oneflow.config.enable_legacy_model_io",
"numpy.ascontiguousarray",
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"os.path.exists",
"argparse.ArgumentParser",
"fresnet100.get_symbol",
"oneflow.env.log_dir",
"oneflow.train.Chec... | [((236, 295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for validation"""'}), "(description='flags for validation')\n", (259, 295), False, 'import argparse\n'), ((1229, 1251), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1239, 1251), False, 'import cv2\n'), ((1365, 1416), 'cv2.resize', 'cv2.resize', (['im', 'dsize'], {'interpolation': 'cv2.INTER_AREA'}), '(im, dsize, interpolation=cv2.INTER_AREA)\n', (1375, 1416), False, 'import cv2\n'), ((1428, 1463), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1440, 1463), False, 'import cv2\n'), ((1511, 1538), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (1523, 1538), True, 'import numpy as np\n'), ((1548, 1574), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (1562, 1574), True, 'import numpy as np\n'), ((1584, 1614), 'numpy.transpose', 'np.transpose', (['im', '(0, 2, 3, 1)'], {}), '(im, (0, 2, 3, 1))\n', (1596, 1614), True, 'import numpy as np\n'), ((1662, 1697), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (1682, 1697), True, 'import numpy as np\n'), ((1744, 1766), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (1764, 1766), True, 'import oneflow as flow\n'), ((2023, 2083), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'config'}), "(type='predict', function_config=config)\n", (2043, 2083), True, 'import oneflow as flow\n'), ((2789, 2804), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (2802, 2804), True, 'import oneflow as flow\n'), ((2809, 2839), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (2825, 2839), True, 'import oneflow as flow\n'), ((2912, 2944), 'os.path.exists', 'os.path.exists', (['args.val_img_dir'], {}), 
'(args.val_img_dir)\n', (2926, 2944), False, 'import os\n'), ((3192, 3232), 'oneflow.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (3226, 3232), True, 'import oneflow as flow\n'), ((3502, 3530), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3528, 3530), True, 'import oneflow as flow\n'), ((1803, 1831), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1829, 1831), True, 'import oneflow as flow\n'), ((1917, 1957), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cambricon"""', '"""0:0"""'], {}), "('cambricon', '0:0')\n", (1937, 1957), True, 'import oneflow as flow\n'), ((2289, 2318), 'fresnet100.get_symbol', 'fresnet100.get_symbol', (['images'], {}), '(images)\n', (2310, 2318), False, 'import fresnet100\n'), ((2132, 2179), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['(1, 112, 112, 3)'], {}), '((1, 112, 112, 3))\n', (2161, 2179), True, 'import oneflow as flow\n'), ((2648, 2671), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2669, 2671), True, 'import oneflow as flow\n')] |
import unittest
from numpy.testing import assert_allclose
import numpy as np
from sklearn.datasets import load_iris, make_multilabel_classification, load_diabetes
from skelm import ELMRegressor, ELMClassifier
import warnings
from sklearn.exceptions import DataDimensionalityWarning, DataConversionWarning
class TestAcceptance(unittest.TestCase):
    """End-to-end acceptance checks for ELMRegressor / ELMClassifier."""

    def setUp(self) -> None:
        # Shared datasets for every test in this class.
        self.data_class = load_iris(return_X_y=True)
        self.data_ml = make_multilabel_classification()
        self.data_reg = load_diabetes(return_X_y=True)
        # Silence sklearn's dimensionality / conversion warnings.
        warnings.simplefilter("ignore", DataDimensionalityWarning)
        warnings.simplefilter("ignore", DataConversionWarning)

    def test_SineWave_Solves(self):
        """ELM should fit a strongly noisy, highly non-linear regression target."""
        inputs = np.linspace(-1, 1, num=1000)[:, None]
        targets = np.sin(16 * inputs) * inputs + 0.2 * np.random.randn(1000)[:, None]
        model = ELMRegressor(random_state=0)
        model.fit(inputs, targets)
        predictions = model.predict(inputs)
        mse = np.mean((targets - predictions) ** 2)
        self.assertLess(mse, 0.3)

    def test_Xor_OneNeuron_Solved(self):
        """ELM should be able to solve the XOR problem."""
        inputs = np.array([[0, 0],
                           [1, 1],
                           [1, 0],
                           [0, 1]])
        targets = np.array([1, 1, -1, -1])
        model = ELMClassifier(n_neurons=3, random_state=0)
        model.fit(inputs, targets)
        raw_scores = model.predict(inputs)
        # First two samples are the positive class, last two the negative one.
        self.assertGreater(raw_scores[0], 0)
        self.assertGreater(raw_scores[1], 0)
        self.assertLess(raw_scores[2], 0)
        self.assertLess(raw_scores[3], 0)

    def test_ELMClassifier_ReportedScore_ActuallyIsClassificationScore(self):
        # A classifier must match a regressor trained on the one-hot targets.
        X, Y = self.data_class
        one_hot = np.vstack((Y == 0, Y == 1, Y == 2)).T
        clf = ELMClassifier(random_state=0).fit(X, Y)
        reg = ELMRegressor(random_state=0).fit(X, one_hot)
        clf_labels = clf.predict(X)
        reg_labels = reg.predict(X).argmax(1)
        assert_allclose(clf_labels, reg_labels)

    def test_ELMClassifier_MultilabelClassification_Works(self):
        # Multilabel prediction equals thresholding the raw regression output.
        X, Y = self.data_ml
        clf = ELMClassifier(random_state=0).fit(X, Y)
        reg = ELMRegressor(random_state=0).fit(X, Y)
        clf_labels = clf.predict(X)
        reg_labels = (reg.predict(X) >= 0.5).astype(int)
        assert_allclose(clf_labels, reg_labels)

    def test_RegularizationL2_DifferentValue_ChangesPrediction(self):
        # Very different alphas must produce visibly different predictions.
        X, Y = self.data_reg
        weakly_regularized = ELMRegressor(alpha=1e-7, random_state=0).fit(X, Y).predict(X)
        strongly_regularized = ELMRegressor(alpha=1e+3, random_state=0).fit(X, Y).predict(X)
        self.assertFalse(np.allclose(weakly_regularized, strongly_regularized))

    def test_Default_SetNumberOfNeurons(self):
        # More hidden neurons should give a noticeably better fit.
        X, y = self.data_reg
        small_model = ELMRegressor(n_neurons=5, random_state=0).fit(X, y)
        large_model = ELMRegressor(n_neurons=50, random_state=0).fit(X, y)
        small_score = small_model.score(X, y)
        large_score = large_model.score(X, y)
        self.assertGreater(large_score, small_score)
        self.assertGreater(large_score, 0.33)
| [
"sklearn.datasets.load_iris",
"numpy.mean",
"numpy.allclose",
"sklearn.datasets.make_multilabel_classification",
"skelm.ELMRegressor",
"numpy.testing.assert_allclose",
"numpy.sin",
"skelm.ELMClassifier",
"numpy.array",
"numpy.linspace",
"sklearn.datasets.load_diabetes",
"numpy.vstack",
"warn... | [((405, 431), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (414, 431), False, 'from sklearn.datasets import load_iris, make_multilabel_classification, load_diabetes\n'), ((455, 487), 'sklearn.datasets.make_multilabel_classification', 'make_multilabel_classification', ([], {}), '()\n', (485, 487), False, 'from sklearn.datasets import load_iris, make_multilabel_classification, load_diabetes\n'), ((512, 542), 'sklearn.datasets.load_diabetes', 'load_diabetes', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (525, 542), False, 'from sklearn.datasets import load_iris, make_multilabel_classification, load_diabetes\n'), ((551, 609), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'DataDimensionalityWarning'], {}), "('ignore', DataDimensionalityWarning)\n", (572, 609), False, 'import warnings\n'), ((618, 672), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'DataConversionWarning'], {}), "('ignore', DataConversionWarning)\n", (639, 672), False, 'import warnings\n'), ((931, 959), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (943, 959), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((1025, 1047), 'numpy.mean', 'np.mean', (['((Y - Yt) ** 2)'], {}), '((Y - Yt) ** 2)\n', (1032, 1047), True, 'import numpy as np\n'), ((1200, 1242), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 0], [0, 1]]'], {}), '([[0, 0], [1, 1], [1, 0], [0, 1]])\n', (1208, 1242), True, 'import numpy as np\n'), ((1321, 1345), 'numpy.array', 'np.array', (['[1, 1, -1, -1]'], {}), '([1, 1, -1, -1])\n', (1329, 1345), True, 'import numpy as np\n'), ((1361, 1403), 'skelm.ELMClassifier', 'ELMClassifier', ([], {'n_neurons': '(3)', 'random_state': '(0)'}), '(n_neurons=3, random_state=0)\n', (1374, 1403), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((1958, 1989), 'numpy.testing.assert_allclose', 'assert_allclose', (['Yc_hat', 
'Yr_hat'], {}), '(Yc_hat, Yr_hat)\n', (1973, 1989), False, 'from numpy.testing import assert_allclose\n'), ((2294, 2325), 'numpy.testing.assert_allclose', 'assert_allclose', (['Yc_hat', 'Yr_hat'], {}), '(Yc_hat, Yr_hat)\n', (2309, 2325), False, 'from numpy.testing import assert_allclose\n'), ((810, 838), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(1000)'}), '(-1, 1, num=1000)\n', (821, 838), True, 'import numpy as np\n'), ((1719, 1754), 'numpy.vstack', 'np.vstack', (['(Y == 0, Y == 1, Y == 2)'], {}), '((Y == 0, Y == 1, Y == 2))\n', (1728, 1754), True, 'import numpy as np\n'), ((2606, 2629), 'numpy.allclose', 'np.allclose', (['Yh_1', 'Yh_2'], {}), '(Yh_1, Yh_2)\n', (2617, 2629), True, 'import numpy as np\n'), ((860, 874), 'numpy.sin', 'np.sin', (['(16 * X)'], {}), '(16 * X)\n', (866, 874), True, 'import numpy as np\n'), ((1774, 1803), 'skelm.ELMClassifier', 'ELMClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1787, 1803), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((1830, 1858), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1842, 1858), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((2100, 2129), 'skelm.ELMClassifier', 'ELMClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2113, 2129), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((2156, 2184), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2168, 2184), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((2723, 2764), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'n_neurons': '(5)', 'random_state': '(0)'}), '(n_neurons=5, random_state=0)\n', (2735, 2764), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((2791, 2833), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'n_neurons': '(50)', 'random_state': '(0)'}), '(n_neurons=50, random_state=0)\n', (2803, 2833), False, 'from skelm import ELMRegressor, ELMClassifier\n'), 
((885, 906), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (900, 906), True, 'import numpy as np\n'), ((2441, 2482), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'alpha': '(1e-07)', 'random_state': '(0)'}), '(alpha=1e-07, random_state=0)\n', (2453, 2482), False, 'from skelm import ELMRegressor, ELMClassifier\n'), ((2518, 2560), 'skelm.ELMRegressor', 'ELMRegressor', ([], {'alpha': '(1000.0)', 'random_state': '(0)'}), '(alpha=1000.0, random_state=0)\n', (2530, 2560), False, 'from skelm import ELMRegressor, ELMClassifier\n')] |
import concurrent.futures
from functools import partial
import time
import numpy.testing
import numpy as np
import pytest
from cara.apps.calculator import make_app
from cara.apps.calculator.report_generator import ReportGenerator, readable_minutes
import cara.apps.calculator.report_generator as rep_gen
def test_generate_report(baseline_form):
    """Building a report for the baseline form must succeed within a time bound.

    This mirrors exactly what the cara calculator does in production, so the
    wall-clock limit is a smoke check rather than a strict benchmark.
    """
    time_limit: float = 20.0  # seconds
    started = time.perf_counter()
    generator: ReportGenerator = make_app().settings['report_generator']
    executor_factory = partial(
        concurrent.futures.ThreadPoolExecutor, 1,
    )
    report = generator.build_report("", baseline_form, executor_factory)
    elapsed = time.perf_counter() - started
    assert report != ""
    assert elapsed < time_limit
@pytest.mark.parametrize(
    ["test_input", "expected"],
    [
        (1, '1 minute'),
        (2, '2 minutes'),
        (60, '1 hour'),
        (120, '2 hours'),
        (150, '150 minutes'),
    ],
)
def test_readable_minutes(test_input, expected):
    # Exact hours are collapsed; everything else is reported in minutes.
    assert readable_minutes(test_input) == expected
def test_fill_big_gaps():
    # The 2->4 gap must be subdivided so that no step exceeds 0.75.
    filled = rep_gen.fill_big_gaps([1, 2, 4], gap_size=0.75)
    assert filled == [1, 1.75, 2, 2.75, 3.5, 4]
def test_fill_big_gaps__float_tolerance():
    """The gap-size comparison must tolerate tiny floating point excess."""
    # A 1e-15 overshoot of the gap size is within tolerance: nothing inserted.
    assert rep_gen.fill_big_gaps([0, 2 + 1e-15, 4], gap_size=2) == [0, 2 + 1e-15, 4]
    # A 1e-14 overshoot is beyond tolerance: the gap gets subdivided.
    assert rep_gen.fill_big_gaps([0, 2 + 1e-14, 4], gap_size=2) == [0, 2, 2 + 1e-14, 4]
def test_non_temp_transition_times(baseline_exposure_model):
    # Only the model's own state-change times should remain.
    times = rep_gen.non_temp_transition_times(baseline_exposure_model)
    assert times == [0.0, 4.0, 5.0, 8.0]
def test_interesting_times_many(baseline_exposure_model):
    times = rep_gen.interesting_times(baseline_exposure_model, approx_n_pts=100)
    # Roughly 100 points, and no step larger than ~1% of the 8.1h span.
    assert 100 <= len(times) <= 120
    assert np.abs(np.diff(times)).max() < 8.1 / 100.
def test_interesting_times_small(baseline_exposure_model):
    # Ask for more data than there is in the transition times.
    times = rep_gen.interesting_times(baseline_exposure_model, approx_n_pts=10)
    expected = [0.0, 0.8, 1.6, 2.4, 3.2, 4.0, 4.8, 5.0, 5.8, 6.6, 7.4, 8.0]
    np.testing.assert_allclose(times, expected, atol=1e-04)
def test_interesting_times_w_temp(exposure_model_w_outside_temp_changes):
    """State-change times must be returned without the temperature changes."""
    # With n_points=1 only the state-change times are returned.
    times = rep_gen.interesting_times(exposure_model_w_outside_temp_changes, approx_n_pts=1)
    np.testing.assert_allclose(times, [0., 1.8, 2.2, 4., 4.4, 5., 6.2, 6.6, 8.])
    # Requesting more points interpolates between the state changes.
    times = rep_gen.interesting_times(exposure_model_w_outside_temp_changes, approx_n_pts=20)
    expected = [
        0., 0.4, 0.8, 1.2, 1.6, 1.8, 2.2, 2.6, 3., 3.4, 3.8, 4., 4.4, 4.8,
        5., 5.4, 5.8, 6.2, 6.6, 7., 7.4, 7.8, 8.
    ]
    np.testing.assert_allclose(times, expected)
| [
"cara.apps.calculator.report_generator.fill_big_gaps",
"cara.apps.calculator.make_app",
"numpy.testing.assert_allclose",
"time.perf_counter",
"numpy.diff",
"pytest.mark.parametrize",
"cara.apps.calculator.report_generator.readable_minutes",
"functools.partial",
"cara.apps.calculator.report_generator... | [((960, 1108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['test_input', 'expected']", "[[1, '1 minute'], [2, '2 minutes'], [60, '1 hour'], [120, '2 hours'], [150,\n '150 minutes']]"], {}), "(['test_input', 'expected'], [[1, '1 minute'], [2,\n '2 minutes'], [60, '1 hour'], [120, '2 hours'], [150, '150 minutes']])\n", (983, 1108), False, 'import pytest\n'), ((652, 671), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (669, 671), False, 'import time\n'), ((877, 896), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (894, 896), False, 'import time\n'), ((1806, 1864), 'cara.apps.calculator.report_generator.non_temp_transition_times', 'rep_gen.non_temp_transition_times', (['baseline_exposure_model'], {}), '(baseline_exposure_model)\n', (1839, 1864), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((1968, 2036), 'cara.apps.calculator.report_generator.interesting_times', 'rep_gen.interesting_times', (['baseline_exposure_model'], {'approx_n_pts': '(100)'}), '(baseline_exposure_model, approx_n_pts=100)\n', (1993, 2036), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((2339, 2406), 'cara.apps.calculator.report_generator.interesting_times', 'rep_gen.interesting_times', (['baseline_exposure_model'], {'approx_n_pts': '(10)'}), '(baseline_exposure_model, approx_n_pts=10)\n', (2364, 2406), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((2412, 2469), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {'atol': '(0.0001)'}), '(result, expected, atol=0.0001)\n', (2438, 2469), True, 'import numpy as np\n'), ((2676, 2761), 'cara.apps.calculator.report_generator.interesting_times', 'rep_gen.interesting_times', (['exposure_model_w_outside_temp_changes'], {'approx_n_pts': '(1)'}), '(exposure_model_w_outside_temp_changes, approx_n_pts=1\n )\n', (2701, 2761), True, 'import cara.apps.calculator.report_generator as 
rep_gen\n'), ((2818, 2862), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2844, 2862), True, 'import numpy as np\n'), ((2929, 3014), 'cara.apps.calculator.report_generator.interesting_times', 'rep_gen.interesting_times', (['exposure_model_w_outside_temp_changes'], {'approx_n_pts': '(20)'}), '(exposure_model_w_outside_temp_changes,\n approx_n_pts=20)\n', (2954, 3014), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((3162, 3206), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3188, 3206), True, 'import numpy as np\n'), ((801, 850), 'functools.partial', 'partial', (['concurrent.futures.ThreadPoolExecutor', '(1)'], {}), '(concurrent.futures.ThreadPoolExecutor, 1)\n', (808, 850), False, 'from functools import partial\n'), ((1223, 1251), 'cara.apps.calculator.report_generator.readable_minutes', 'readable_minutes', (['test_input'], {}), '(test_input)\n', (1239, 1251), False, 'from cara.apps.calculator.report_generator import ReportGenerator, readable_minutes\n'), ((1345, 1392), 'cara.apps.calculator.report_generator.fill_big_gaps', 'rep_gen.fill_big_gaps', (['[1, 2, 4]'], {'gap_size': '(0.75)'}), '([1, 2, 4], gap_size=0.75)\n', (1366, 1392), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((1532, 1584), 'cara.apps.calculator.report_generator.fill_big_gaps', 'rep_gen.fill_big_gaps', (['[0, 2 + 1e-15, 4]'], {'gap_size': '(2)'}), '([0, 2 + 1e-15, 4], gap_size=2)\n', (1553, 1584), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((1617, 1669), 'cara.apps.calculator.report_generator.fill_big_gaps', 'rep_gen.fill_big_gaps', (['[0, 2 + 1e-14, 4]'], {'gap_size': '(2)'}), '([0, 2 + 1e-14, 4], gap_size=2)\n', (1638, 1669), True, 'import cara.apps.calculator.report_generator as rep_gen\n'), ((706, 716), 'cara.apps.calculator.make_app', 'make_app', ([], {}), '()\n', (714, 716), False, 
'from cara.apps.calculator import make_app\n'), ((2092, 2107), 'numpy.diff', 'np.diff', (['result'], {}), '(result)\n', (2099, 2107), True, 'import numpy as np\n')] |
import argparse
import json
import os
import random
import matplotlib.pyplot as plt
import numpy as np
from igibson.object_states.utils import sample_kinematics
from igibson.objects.articulated_object import URDFObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import get_ig_avg_category_specs
def main(args):
    """Populate iGibson scene(s) with objects sampled from a CSV specification.

    Reads ``args.csv_name`` (rows of ``category,count``), then for each
    requested scene loads only furniture categories that can support other
    objects, and kinematically samples each requested object onto/into a
    supporting surface chosen from the object's placement-probability rules.
    Optionally saves debug renders and a modified scene URDF.
    """
    # All interactive scenes shipped with iGibson; "all" processes every one.
    scene_names = [
        "Beechwood_0_int",
        "Beechwood_1_int",
        "Benevolence_1_int",
        "Ihlen_0_int",
        "Merom_0_int",
        "Pomaria_0_int",
        "Pomaria_2_int",
        "Wainscott_0_int",
        "Benevolence_0_int",
        "Benevolence_2_int",
        "Ihlen_1_int",
        "Merom_1_int",
        "Pomaria_1_int",
        "Rs_int",
        "Wainscott_1_int",
    ]
    if args.scene_name not in scene_names and args.scene_name != "all":
        print("%s is not a valid scene name" % args.scene_name)
        return
    # Parse the CSV: one (category, count) pair per line, plus per-category
    # lists of available model ids found in the dataset directory.
    objects_to_sample = []
    object_id_dict = {}
    object_cat_dirs = {}
    with open(args.csv_name, "r") as f:
        for line in f:
            parts = line.split(",")
            cat = parts[0]
            count = int(parts[1])
            object_cat_dir = "data/ig_dataset/objects/%s" % (cat)
            if not os.path.isdir(object_cat_dir):
                print("%s is not a valid object" % (cat))
                return
            object_cat_dirs[cat] = object_cat_dir
            objects_to_sample.append((cat, count))
            object_ids = os.listdir(object_cat_dir)
            object_id_dict[cat] = object_ids
    settings = MeshRendererSettings(enable_shadow=False, msaa=False, enable_pbr=True)
    if args.scene_name == "all":
        all_scene_names = scene_names
    else:
        all_scene_names = [args.scene_name]
    for scene_name in all_scene_names:
        s = Simulator(mode="headless", image_width=800, image_height=800, rendering_settings=settings)
        # Only these categories are loaded, since they are the ones that can
        # support sampled objects.  NOTE(review): "coffee_table" is listed twice.
        support_categories = [
            "dining_table",
            "desk",
            "pedestal_table",
            "gaming_table",
            "stand",
            "console_table",
            "coffee_table",
            "fridge",
            "countertop",
            "top_cabinet",
            "bookshelf",
            "bottom_cabinet",
            "bottom_cabinet_no_top",
            "coffee_table",
            "carpet",
        ]
        simulator = s
        scene = InteractiveIndoorScene(
            scene_name,
            texture_randomization=False,
            object_randomization=False,
            load_object_categories=support_categories,
        )
        s.import_scene(scene)
        renderer = s.renderer
        # Index supporting objects by (coarse category, room type) and set up
        # the per-object / per-room placement counters.
        category_supporting_objects = {}
        obj_counts = {}
        placements_counts = {}
        room_placements_counts = {}
        for obj_name in scene.objects_by_name:
            obj = scene.objects_by_name[obj_name]
            if not obj.supporting_surfaces:
                continue
            obj_counts[obj] = 0
            cat = obj.category
            # Collapse fine-grained furniture categories into coarse buckets
            # matching the placement-rule vocabulary.
            if "table" in cat or "stand" in cat:
                cat = "table"
            if "shelf" in cat:
                cat = "shelf"
            if "counter" in cat:
                cat = "counter"
            # Strip the trailing instance suffix (e.g. "_0") to get the room
            # type — presumably room names end in "_<index>"; confirm.
            room = obj.in_rooms[0][:-2]
            if (cat, room) not in category_supporting_objects:
                category_supporting_objects[(cat, room)] = []
            category_supporting_objects[(cat, room)].append(obj)
            if room not in room_placements_counts:
                room_placements_counts[room] = 0
            if (cat, room) not in placements_counts:
                placements_counts[(cat, room)] = 0
        placement_count = 0
        avg_category_spec = get_ig_avg_category_specs()
        random.shuffle(objects_to_sample)
        for category, count in objects_to_sample:
            # NOTE(review): initialized as a list here but reassigned to a dict
            # inside the attempt loop; the emptiness checks work for both.
            valid_placement_rules = []
            for i in range(count):
                # Draw a model id without replacement (keeping the last one if
                # the pool would otherwise empty out).
                object_id = random.choice(object_id_dict[category])
                if len(object_id_dict[category]) > 1:
                    object_id_dict[category].remove(object_id)
                urdf_path = "%s/%s/%s.urdf" % (object_cat_dirs[category], object_id, object_id)
                while not os.path.isfile(urdf_path):
                    object_id = random.choice(object_id_dict[category])
                    if len(object_id_dict[category]) > 1:
                        object_id_dict[category].remove(object_id)
                    else:
                        break
                    urdf_path = "%s/%s/%s.urdf" % (object_cat_dirs[category], object_id, object_id)
                if not os.path.isfile(urdf_path):
                    break
                name = "%s|%s|%d" % (category, object_id, i)
                urdf_object = URDFObject(
                    urdf_path,
                    name=name,
                    category=category,
                    overwrite_inertial=True,
                    avg_obj_dims=avg_category_spec.get(category),
                    fit_avg_dim_volume=True,
                )
                simulator.import_object(urdf_object)
                for attempt in range(args.num_attempts):
                    urdf_path = "%s/%s/%s.urdf" % (object_cat_dirs[category], object_id, object_id)
                    placement_rules_path = os.path.join(urdf_object.model_path, "misc", "placement_probs.json")
                    if not os.path.isfile(placement_rules_path):
                        break
                    with open(placement_rules_path, "r") as f:
                        placement_rules = json.load(f)
                    # Keep only rules whose "<support_cat>-<room>-<predicate>"
                    # key refers to a supporting object present in this scene.
                    valid_placement_rules = {}
                    for placement_rule in placement_rules.keys():
                        support_obj_cat, room, predicate = placement_rule.split("-")
                        if (support_obj_cat, room) in category_supporting_objects:
                            valid_placement_rules[placement_rule] = placement_rules[placement_rule]
                    if len(valid_placement_rules) == 0:
                        print("No valid rules for %s" % category)
                        print(placement_rules)
                        break
                    # Sample a rule weighted by its stored probability.
                    placement_rule = random.choices(
                        list(valid_placement_rules.keys()), weights=valid_placement_rules.values(), k=1
                    )[0]
                    support_obj_cat, room, predicate = placement_rule.split("-")
                    if predicate == "ontop":
                        predicate = "onTop"
                    support_objs = category_supporting_objects[(support_obj_cat, room)]
                    # Prefer the least-used supporting object for the first two
                    # attempts, then fall back to a random one.
                    min_obj = None
                    min_obj_count = None
                    for obj in support_objs:
                        if min_obj is None or obj_counts[obj] < min_obj_count:
                            min_obj = obj
                            min_obj_count = obj_counts[obj]
                    if attempt < 2:
                        chosen_support_obj = min_obj
                    else:
                        chosen_support_obj = random.choice(support_objs)
                    print("Sampling %s %s %s %s in %s" % (category, object_id, predicate, support_obj_cat, room))
                    result = sample_kinematics(predicate, urdf_object, chosen_support_obj, True)
                    if not result:
                        print("Failed kinematic sampling! Attempt %d" % attempt)
                        continue
                    # Let physics settle for a few steps.
                    # NOTE(review): the inner loops below reuse ``i``, shadowing
                    # the per-object index of the enclosing loop.
                    for i in range(10):
                        s.step()
                    obj_counts[chosen_support_obj] += 1
                    placement_count += 1
                    room_placements_counts[room] += 1
                    placements_counts[(support_obj_cat, room)] += 1
                    if args.save_images:
                        # Render the placement from four sides for debugging.
                        simulator.sync()
                        scene.open_one_obj(chosen_support_obj.body_ids[chosen_support_obj.main_body], "max")
                        pos = urdf_object.get_position()
                        offsets = [[-0.6, 0], [0.0, -0.6], [0.6, 0.0], [0.0, 0.6]]
                        for i in range(4):
                            camera_pos = np.array([pos[0] - offsets[i][0], pos[1] - offsets[i][1], pos[2] + 0.1])
                            renderer.set_camera(camera_pos, pos, [0, 0, 1])
                            frame = renderer.render(modes=("rgb"))[0]
                            plt.imshow(frame)
                            plt.savefig("placement_imgs/%s_placement_%d_%d.png" % (scene_name, placement_count, i))
                            plt.close()
                        scene.open_one_obj(chosen_support_obj.body_ids[chosen_support_obj.main_body], "zero")
                    urdf_object.in_rooms = chosen_support_obj.in_rooms
                    break
            if len(valid_placement_rules) == 0:
                break
        print("Total %d objects placed" % placement_count)
        if args.urdf_name:
            scene.save_modified_urdf("%s_%s" % (scene_name, args.urdf_name))
        if args.save_placement_txt:
            # Write a per-room summary of how many placements each supporting
            # category received.
            with open("%s_placements.txt" % scene_name, "w") as f:
                for room in room_placements_counts:
                    f.write("%s: %d\n" % (room, room_placements_counts[room]))
                    for support_cat in [cat for cat, r in placements_counts if r == room]:
                        f.write("\t%s: %d\n" % (support_cat, placements_counts[(support_cat, room)]))
        s.disconnect()
if __name__ == "__main__":
    # Command-line entry point.  Positional argument order matters here.
    cli = argparse.ArgumentParser(
        description="Configure which surfaces and containers in a scene an object might go in."
    )
    cli.add_argument("scene_name", type=str)
    cli.add_argument("csv_name", type=str)
    cli.add_argument("--urdf_name", type=str)
    cli.add_argument("--num_attempts", type=int, default=10)
    cli.add_argument("--save_images", action="store_true", default=False)
    cli.add_argument("--save_placement_txt", action="store_true", default=False)
    main(cli.parse_args())
| [
"matplotlib.pyplot.imshow",
"os.listdir",
"random.choice",
"random.shuffle",
"argparse.ArgumentParser",
"igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene",
"matplotlib.pyplot.savefig",
"igibson.render.mesh_renderer.mesh_renderer_settings.MeshRendererSettings",
"os.path.join",
"os.path.is... | [((1708, 1778), 'igibson.render.mesh_renderer.mesh_renderer_settings.MeshRendererSettings', 'MeshRendererSettings', ([], {'enable_shadow': '(False)', 'msaa': '(False)', 'enable_pbr': '(True)'}), '(enable_shadow=False, msaa=False, enable_pbr=True)\n', (1728, 1778), False, 'from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings\n'), ((9651, 9773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Configure which surfaces and containers in a scene an object might go in."""'}), "(description=\n 'Configure which surfaces and containers in a scene an object might go in.'\n )\n", (9674, 9773), False, 'import argparse\n'), ((1955, 2049), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'image_width': '(800)', 'image_height': '(800)', 'rendering_settings': 'settings'}), "(mode='headless', image_width=800, image_height=800,\n rendering_settings=settings)\n", (1964, 2049), False, 'from igibson.simulator import Simulator\n'), ((2526, 2664), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['scene_name'], {'texture_randomization': '(False)', 'object_randomization': '(False)', 'load_object_categories': 'support_categories'}), '(scene_name, texture_randomization=False,\n object_randomization=False, load_object_categories=support_categories)\n', (2548, 2664), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((3838, 3865), 'igibson.utils.assets_utils.get_ig_avg_category_specs', 'get_ig_avg_category_specs', ([], {}), '()\n', (3863, 3865), False, 'from igibson.utils.assets_utils import get_ig_avg_category_specs\n'), ((3874, 3907), 'random.shuffle', 'random.shuffle', (['objects_to_sample'], {}), '(objects_to_sample)\n', (3888, 3907), False, 'import random\n'), ((1620, 1646), 'os.listdir', 'os.listdir', (['object_cat_dir'], {}), '(object_cat_dir)\n', (1630, 1646), False, 'import os\n'), ((1380, 1409), 
'os.path.isdir', 'os.path.isdir', (['object_cat_dir'], {}), '(object_cat_dir)\n', (1393, 1409), False, 'import os\n'), ((4060, 4099), 'random.choice', 'random.choice', (['object_id_dict[category]'], {}), '(object_id_dict[category])\n', (4073, 4099), False, 'import random\n'), ((4339, 4364), 'os.path.isfile', 'os.path.isfile', (['urdf_path'], {}), '(urdf_path)\n', (4353, 4364), False, 'import os\n'), ((4398, 4437), 'random.choice', 'random.choice', (['object_id_dict[category]'], {}), '(object_id_dict[category])\n', (4411, 4437), False, 'import random\n'), ((4742, 4767), 'os.path.isfile', 'os.path.isfile', (['urdf_path'], {}), '(urdf_path)\n', (4756, 4767), False, 'import os\n'), ((5427, 5495), 'os.path.join', 'os.path.join', (['urdf_object.model_path', '"""misc"""', '"""placement_probs.json"""'], {}), "(urdf_object.model_path, 'misc', 'placement_probs.json')\n", (5439, 5495), False, 'import os\n'), ((7362, 7429), 'igibson.object_states.utils.sample_kinematics', 'sample_kinematics', (['predicate', 'urdf_object', 'chosen_support_obj', '(True)'], {}), '(predicate, urdf_object, chosen_support_obj, True)\n', (7379, 7429), False, 'from igibson.object_states.utils import sample_kinematics\n'), ((5523, 5559), 'os.path.isfile', 'os.path.isfile', (['placement_rules_path'], {}), '(placement_rules_path)\n', (5537, 5559), False, 'import os\n'), ((5696, 5708), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5705, 5708), False, 'import json\n'), ((7191, 7218), 'random.choice', 'random.choice', (['support_objs'], {}), '(support_objs)\n', (7204, 7218), False, 'import random\n'), ((8287, 8359), 'numpy.array', 'np.array', (['[pos[0] - offsets[i][0], pos[1] - offsets[i][1], pos[2] + 0.1]'], {}), '([pos[0] - offsets[i][0], pos[1] - offsets[i][1], pos[2] + 0.1])\n', (8295, 8359), True, 'import numpy as np\n'), ((8534, 8551), 'matplotlib.pyplot.imshow', 'plt.imshow', (['frame'], {}), '(frame)\n', (8544, 8551), True, 'import matplotlib.pyplot as plt\n'), ((8580, 8671), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["('placement_imgs/%s_placement_%d_%d.png' % (scene_name, placement_count, i))"], {}), "('placement_imgs/%s_placement_%d_%d.png' % (scene_name,\n placement_count, i))\n", (8591, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8696, 8707), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8705, 8707), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import cv2
from skimage.measure import compare_ssim
def check_YCbCr_YUV():
    """Stream webcam frames and display every channel of the YCrCb and
    YUV colour-space conversions in separate windows until 'q' is pressed.
    """
    capture = cv2.VideoCapture(0)
    # Prime the capture once so a missing/busy device is noticed early.
    if capture.isOpened():
        grabbed, _ = capture.read()
    else:
        grabbed = False
    while True:
        grabbed, bgr = capture.read()
        ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb)
        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
        # One window per channel of each conversion.
        for channel in range(3):
            cv2.imshow('YCrCb[%d]' % channel, ycrcb[:, :, channel])
            cv2.imshow('YUV[%d]' % channel, yuv[:, :, channel])
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def thresholded():
    """Show an Otsu-thresholded SSIM difference between consecutive
    webcam frames in a window named 'frame' until 'q' is pressed.
    """
    video = cv2.VideoCapture(0)
    _, previous = video.read()
    while True:
        prev_gray = cv2.cvtColor(previous, cv2.COLOR_BGR2GRAY)
        _, current = video.read()
        curr_gray = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)
        _, diff = compare_ssim(prev_gray, curr_gray, full=True)
        # Collapse everything above the similarity cut before 8-bit scaling.
        diff[diff > 0.2] = 1
        diff = (diff * 255).astype("uint8")
        # Otsu picks the threshold; INV renders changed regions as white.
        mask = cv2.threshold(diff, 0, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cv2.imshow('frame', mask)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        previous = np.copy(current)
    video.release()
    cv2.destroyAllWindows()
def test_colors():
    """Show per-channel (red/green/blue) Otsu-thresholded SSIM differences
    between consecutive webcam frames until 'q' is pressed.

    Bug fix: the original called ``cv2.threshold(diff, 0, 50, ...)`` on a
    name ``diff`` that is never defined in this function, raising a
    NameError on the first loop iteration. Its result (``thresh``) was
    never used, so the leftover line is removed.
    """
    cap = cv2.VideoCapture(0)
    ret, frame_old = cap.read()
    while True:
        # OpenCV stores frames as BGR: index 2 = red, 1 = green, 0 = blue.
        frame_old_r = frame_old[:, :, 2]
        frame_old_g = frame_old[:, :, 1]
        frame_old_b = frame_old[:, :, 0]
        ret, frame = cap.read()
        frame_r = frame[:, :, 2]
        frame_g = frame[:, :, 1]
        frame_b = frame[:, :, 0]
        # Full SSIM maps per channel; the scalar scores are unused.
        (score, diff1) = compare_ssim(frame_old_r, frame_r, full=True)
        (score, diff2) = compare_ssim(frame_old_g, frame_g, full=True)
        (score, diff3) = compare_ssim(frame_old_b, frame_b, full=True)
        diff1 = (diff1 * 255).astype("uint8")
        diff2 = (diff2 * 255).astype("uint8")
        diff3 = (diff3 * 255).astype("uint8")
        # Otsu thresholding; INV renders changed regions as white.
        thresh1 = cv2.threshold(diff1, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        thresh2 = cv2.threshold(diff2, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        thresh3 = cv2.threshold(diff3, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cv2.imshow('red', thresh1)
        cv2.imshow('green', thresh2)
        cv2.imshow('blue', thresh3)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        frame_old = np.copy(frame)
    cap.release()
    cv2.destroyAllWindows()
# NOTE(review): this loop runs at module import time (no __main__ guard)
# and duplicates the red-channel path of test_colors(); presumably a
# scratch experiment — consider wrapping it in a function.
cap = cv2.VideoCapture(0)
ret, frame_old = cap.read()
while True:
    # Red channel of the previous frame (OpenCV stores frames as BGR).
    frame_old_r =frame_old[:,:,2]
    ret, frame = cap.read()
    frame_r = frame[:,:,2]
    # Full SSIM map between consecutive red channels; the score is unused.
    (score, diff1) = compare_ssim(frame_old_r, frame_r, full=True)
    # Collapse strongly dissimilar pixels before scaling to 8-bit.
    diff1[diff1 < 0.8] = 1
    diff1 = (diff1 * 255).astype("uint8")
    # Otsu picks the threshold; INV renders changed regions as white.
    thresh1 = cv2.threshold(diff1, 0, 255,
                          cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cv2.imshow('red',thresh1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    frame_old = np.copy(frame)
cap.release()
cv2.destroyAllWindows()
| [
"numpy.copy",
"cv2.threshold",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"skimage.measure.compare_ssim",
"cv2.waitKey"
] | [((3512, 3531), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3528, 3531), False, 'import cv2\n'), ((4114, 4137), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4135, 4137), False, 'import cv2\n'), ((109, 128), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (125, 128), False, 'import cv2\n'), ((1062, 1085), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1083, 1085), False, 'import cv2\n'), ((1119, 1138), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1135, 1138), False, 'import cv2\n'), ((1952, 1975), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1973, 1975), False, 'import cv2\n'), ((2009, 2028), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2025, 2028), False, 'import cv2\n'), ((3475, 3498), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3496, 3498), False, 'import cv2\n'), ((3710, 3755), 'skimage.measure.compare_ssim', 'compare_ssim', (['frame_old_r', 'frame_r'], {'full': '(True)'}), '(frame_old_r, frame_r, full=True)\n', (3722, 3755), False, 'from skimage.measure import compare_ssim\n'), ((3964, 3990), 'cv2.imshow', 'cv2.imshow', (['"""red"""', 'thresh1'], {}), "('red', thresh1)\n", (3974, 3990), False, 'import cv2\n'), ((4077, 4091), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (4084, 4091), True, 'import numpy as np\n'), ((563, 603), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2YCrCb'], {}), '(frame, cv2.COLOR_BGR2YCrCb)\n', (575, 603), False, 'import cv2\n'), ((623, 661), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2YUV'], {}), '(frame, cv2.COLOR_BGR2YUV)\n', (635, 661), False, 'import cv2\n'), ((682, 721), 'cv2.imshow', 'cv2.imshow', (['"""YCrCb[0]"""', 'frame1[:, :, 0]'], {}), "('YCrCb[0]', frame1[:, :, 0])\n", (692, 721), False, 'import cv2\n'), ((728, 767), 'cv2.imshow', 'cv2.imshow', (['"""YCrCb[1]"""', 'frame1[:, :, 1]'], {}), "('YCrCb[1]', frame1[:, :, 
1])\n", (738, 767), False, 'import cv2\n'), ((774, 813), 'cv2.imshow', 'cv2.imshow', (['"""YCrCb[2]"""', 'frame1[:, :, 2]'], {}), "('YCrCb[2]', frame1[:, :, 2])\n", (784, 813), False, 'import cv2\n'), ((830, 867), 'cv2.imshow', 'cv2.imshow', (['"""YUV[0]"""', 'frame2[:, :, 0]'], {}), "('YUV[0]', frame2[:, :, 0])\n", (840, 867), False, 'import cv2\n'), ((874, 911), 'cv2.imshow', 'cv2.imshow', (['"""YUV[1]"""', 'frame2[:, :, 1]'], {}), "('YUV[1]', frame2[:, :, 1])\n", (884, 911), False, 'import cv2\n'), ((918, 955), 'cv2.imshow', 'cv2.imshow', (['"""YUV[2]"""', 'frame2[:, :, 2]'], {}), "('YUV[2]', frame2[:, :, 2])\n", (928, 955), False, 'import cv2\n'), ((1231, 1274), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_old', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_old, cv2.COLOR_BGR2GRAY)\n', (1243, 1274), False, 'import cv2\n'), ((1340, 1379), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1352, 1379), False, 'import cv2\n'), ((1415, 1466), 'skimage.measure.compare_ssim', 'compare_ssim', (['frame_old_gray', 'frame_gray'], {'full': '(True)'}), '(frame_old_gray, frame_gray, full=True)\n', (1427, 1466), False, 'from skimage.measure import compare_ssim\n'), ((1769, 1796), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'thresh'], {}), "('frame', thresh)\n", (1779, 1796), False, 'import cv2\n'), ((1903, 1917), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (1910, 1917), True, 'import numpy as np\n'), ((2386, 2431), 'skimage.measure.compare_ssim', 'compare_ssim', (['frame_old_r', 'frame_r'], {'full': '(True)'}), '(frame_old_r, frame_r, full=True)\n', (2398, 2431), False, 'from skimage.measure import compare_ssim\n'), ((2458, 2503), 'skimage.measure.compare_ssim', 'compare_ssim', (['frame_old_g', 'frame_g'], {'full': '(True)'}), '(frame_old_g, frame_g, full=True)\n', (2470, 2503), False, 'from skimage.measure import compare_ssim\n'), ((2530, 2575), 'skimage.measure.compare_ssim', 'compare_ssim', (['frame_old_b', 'frame_b'], 
{'full': '(True)'}), '(frame_old_b, frame_b, full=True)\n', (2542, 2575), False, 'from skimage.measure import compare_ssim\n'), ((2791, 2836), 'cv2.threshold', 'cv2.threshold', (['diff', '(0)', '(50)', 'cv2.THRESH_BINARY'], {}), '(diff, 0, 50, cv2.THRESH_BINARY)\n', (2804, 2836), False, 'import cv2\n'), ((3220, 3246), 'cv2.imshow', 'cv2.imshow', (['"""red"""', 'thresh1'], {}), "('red', thresh1)\n", (3230, 3246), False, 'import cv2\n'), ((3255, 3283), 'cv2.imshow', 'cv2.imshow', (['"""green"""', 'thresh2'], {}), "('green', thresh2)\n", (3265, 3283), False, 'import cv2\n'), ((3292, 3319), 'cv2.imshow', 'cv2.imshow', (['"""blue"""', 'thresh3'], {}), "('blue', thresh3)\n", (3302, 3319), False, 'import cv2\n'), ((3426, 3440), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (3433, 3440), True, 'import numpy as np\n'), ((3854, 3923), 'cv2.threshold', 'cv2.threshold', (['diff1', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff1, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (3867, 3923), False, 'import cv2\n'), ((1648, 1716), 'cv2.threshold', 'cv2.threshold', (['diff', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (1661, 1716), False, 'import cv2\n'), ((2854, 2923), 'cv2.threshold', 'cv2.threshold', (['diff1', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff1, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (2867, 2923), False, 'import cv2\n'), ((2976, 3045), 'cv2.threshold', 'cv2.threshold', (['diff2', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff2, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (2989, 3045), False, 'import cv2\n'), ((3098, 3167), 'cv2.threshold', 'cv2.threshold', (['diff3', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff3, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (3111, 3167), False, 'import cv2\n'), ((4004, 4018), 'cv2.waitKey', 
'cv2.waitKey', (['(1)'], {}), '(1)\n', (4015, 4018), False, 'import cv2\n'), ((975, 989), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (986, 989), False, 'import cv2\n'), ((1818, 1832), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1829, 1832), False, 'import cv2\n'), ((3341, 3355), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3352, 3355), False, 'import cv2\n')] |
"""
Module for image classification default handler
"""
import io
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from .vision_handler import VisionHandler
class ImageClassifier(VisionHandler):
    """
    ImageClassifier handler class. This handler takes an image
    and returns the name of object in that image.
    """

    def __init__(self):
        super(ImageClassifier, self).__init__()

    def preprocess(self, data):
        """
        Scales, crops, and normalizes a PIL image for a PyTorch model.
        Returns the transformed image as a torch tensor.
        """
        # The request payload may arrive under either "data" or "body".
        image = data[0].get("data")
        if image is None:
            image = data[0].get("body")

        my_preprocess = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            # Standard ImageNet normalization constants.
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        image = Image.open(io.BytesIO(image))
        image = my_preprocess(image)
        return image

    def inference(self, data):
        '''Predict the class (or classes) of an image using a trained deep learning model.
        '''
        # Number of top predictions to return.
        topk = 5
        # Add the batch dimension the model expects.
        data = np.expand_dims(data, 0)
        data = torch.from_numpy(data)

        inputs = Variable(data).to(self.device)
        outputs = self.model.forward(inputs)

        ps = F.softmax(outputs, dim=1)
        topk = getattr(ps, self.device.type)().topk(topk)

        probs, classes = (e.cpu().data.numpy().squeeze().tolist() for e in topk)

        results = []
        for index, elem in enumerate(probs):
            if self.mapping:
                tmp = dict()
                if isinstance(self.mapping, dict) and isinstance(list(self.mapping.values())[0], list):
                    tmp[self.mapping[str(classes[index])][1]] = elem
                elif isinstance(self.mapping, dict) and isinstance(list(self.mapping.values())[0], str):
                    tmp[self.mapping[str(classes[index])]] = elem
                else:
                    raise Exception('index_to_name mapping should be in "class":"label" json format')
                results.append(tmp)
            else:
                # Bug fix: the original indexed with an undefined name ``i``
                # (NameError whenever no mapping file is configured).
                results.append({str(classes[index]): str(elem)})

        return [results]

    def postprocess(self, data):
        # Predictions are already JSON-serializable; pass through unchanged.
        return data
# Module-level singleton used by the TorchServe-style entry point below.
_service = ImageClassifier()


def handle(data, context):
    """
    Entry point for image classifier default handler
    """
    try:
        if not _service.initialized:
            _service.initialize(context)

        if data is None:
            return None

        data = _service.preprocess(data)
        data = _service.inference(data)
        data = _service.postprocess(data)

        return data
    except Exception as e:
        # Bug fix: the original concatenated a str with the Exception object
        # ("..." + e), which raises TypeError and hides the real error.
        raise Exception(
            "Please provide a custom handler in the model archive. " + str(e)
        ) from e
| [
"torchvision.transforms.CenterCrop",
"io.BytesIO",
"torch.from_numpy",
"numpy.expand_dims",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torch.nn.functional.softmax"
] | [((1372, 1395), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (1386, 1395), True, 'import numpy as np\n'), ((1411, 1433), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (1427, 1433), False, 'import torch\n'), ((1542, 1567), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (1551, 1567), True, 'import torch.nn.functional as F\n'), ((1087, 1104), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (1097, 1104), False, 'import io\n'), ((829, 851), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (846, 851), False, 'from torchvision import transforms\n'), ((865, 891), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (886, 891), False, 'from torchvision import transforms\n'), ((905, 926), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (924, 926), False, 'from torchvision import transforms\n'), ((940, 1015), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (960, 1015), False, 'from torchvision import transforms\n'), ((1452, 1466), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (1460, 1466), False, 'from torch.autograd import Variable\n')] |
#!/usr/bin/env python3
from pylab import *
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
def load_var(dir, t, var, nx, ny, nz):
    """Read variable ``var`` at time ``t`` from directory ``dir``.

    Expects ``<dir>/<var>_<t:.3f>.dat`` with four whitespace-separated
    columns (x, y, n, value); returns the value column reshaped to
    (nx, ny, nz) with singleton axes squeezed out.
    """
    path = '{}/{}_{:.3f}.dat'.format(dir, var, t)
    _x, _y, _n, raw = np.loadtxt(path, unpack=True)
    return squeeze(np.reshape(raw, (nx, ny, nz)))
def forceAspect(ax, aspect=1):
    """Force the displayed aspect ratio of an image axes to ``aspect``.

    Converts the requested ratio into the data-coordinate aspect expected
    by ``set_aspect`` using the extent of the first image on ``ax``.
    """
    first_image = ax.get_images()[0]
    x0, x1, y0, y1 = first_image.get_extent()
    data_ratio = abs((x1 - x0) / (y1 - y0))
    ax.set_aspect(data_ratio / aspect)
def make_plots(dir, ifig, var, varstr, titleTxt, cmap, vmin, vmax):
    """Render ``var`` as an x-vs-tau heat map and save it to
    ``<dir>/<varstr>.pdf``.

    NOTE(review): relies on the module-level names ``xmax``, ``t0`` and
    ``tf`` that are only assigned inside the ``__main__`` block, so it is
    callable only after that setup has run.
    """
    labelSize = 16
    fig = plt.figure(ifig)
    #ax = fig.add_subplot(111)
    # extent maps columns to x in [-xmax, xmax] and rows to tau in [t0, tf].
    im = plt.imshow(var, vmin=vmin, vmax=vmax, extent=[-xmax, xmax, t0, tf], cmap=cmap, origin='lower')
    plt.ylim(ymax = tf, ymin = t0)
    plt.tick_params(top='off', right='off')
    plt.title(titleTxt, fontsize=22, y=1.01)
    plt.xlabel(r'$x\,[\mathrm{fm}]$', fontsize=labelSize)
    plt.ylabel(r'$\tau\,[\mathrm{fm/c}]$', fontsize=labelSize)
    # Attach a colorbar axis of matching height to the right of the image.
    divider = make_axes_locatable(plt.gca())
    cax = divider.append_axes("right", "5%", pad="3%")
    plt.colorbar(im, cax=cax)
    #############################################
    #forceAspect(ax,aspect=1)
    ############################################
    plt.tight_layout()
    # savefig comes from the `pylab` star import at the top of the file.
    savefig(dir + '/' + varstr + '.pdf', bbox_inches='tight')
if __name__ == '__main__':
    # Input locations for the two hydro codes and output plot directories.
    root = '/media/bazow/Data/fluid_dynamic_output_for_thesis/'
    type_vh = 'gpu-vh/'
    type_vah = 'cpu-vah/'
    tag = 'Glb_1d_shear_isotropicIC_conformalEOS_etaOverS0p2_reg1-10'
    dataDir_vh = root + type_vh + tag
    tag = 'Glb_1d_shear_withPimunu_WTz_isotropicIC_conformalEOS_etaOverS0p2_reg1-10'
    dataDir_vah = root + type_vah + tag
    plotDir_vh = 'tests/figs/qgp/Glb/vh'
    plotDir_vah = 'tests/figs/qgp/Glb/vah'

    # Grid geometry (1D in x; y and z are singleton directions).
    nx = 201
    ny = 1
    nz = 1
    dx = 0.1
    dy = 0.1
    dz = 0.1
    xmax = (nx - 1) / 2 * dx
    x = np.linspace(-xmax, xmax, num=nx)

    # Proper-time sampling of the output files.
    t0 = 0.5
    tf = 10.0
    dt = 0.05
    nt = int((tf - t0) / dt) + 1
    t = np.linspace(t0, tf, num=nt)

    hbarc = 0.197327  # hbar*c [GeV fm]; kept for reference, unused below

    # Viscous hydro
    KnTaupi_VH = np.zeros((nt, nx))
    e_VH = np.zeros((nt, nx))
    p_VH = np.zeros((nt, nx))
    t2pinn_VH = np.zeros((nt, nx))
    reg_VH = np.zeros((nt, nx))
    # Anisotropic hydro
    KnTaupi_VAH = np.zeros((nt, nx))
    e_VAH = np.zeros((nt, nx))
    pl_VAH = np.zeros((nt, nx))
    fTSol_VAH = np.zeros((nt, nx))
    reg_VAH = np.zeros((nt, nx))

    # Load every time slice for both runs.
    for i in range(0, nt):
        ti = t[i]
        # Viscous hydro
        KnTaupi_VH[i, :] = load_var(dataDir_vh, ti, 'KnTaupi', nx, ny, nz)
        e_VH[i, :] = load_var(dataDir_vh, ti, 'e', nx, ny, nz)
        p_VH[i, :] = load_var(dataDir_vh, ti, 'p', nx, ny, nz)
        t2pinn_VH[i, :] = ti*ti*load_var(dataDir_vh, ti, 'pinn', nx, ny, nz)
        reg_VH[i, :] = load_var(dataDir_vh, ti, 'regulations', nx, ny, nz)
        # Anisotropic hydro
        KnTaupi_VAH[i, :] = load_var(dataDir_vah, ti, 'knTaupi', nx, ny, nz)
        e_VAH[i, :] = load_var(dataDir_vah, ti, 'e', nx, ny, nz)
        pl_VAH[i, :] = load_var(dataDir_vah, ti, 'pl', nx, ny, nz)
        fTSol_VAH[i, :] = load_var(dataDir_vah, ti, 'fTSol_2', nx, ny, nz)
        reg_VAH[i, :] = load_var(dataDir_vah, ti, 'regulations', nx, ny, nz)

    # Shared colour scale taken from the viscous-hydro energy density
    # (the triple-nested np.min/np.max of the original was redundant).
    vmin_e = np.min(e_VH)
    vmax_e = np.max(e_VH)

    cmap = 'jet'
    cmap2 = 'jet_r'

    plt.rcParams['image.interpolation'] = 'none'
    #####################################################################################################
    # Plots
    #####################################################################################################
    # Viscous hydro
    make_plots(plotDir_vh, 0, KnTaupi_VH, 'KnTaupi', r'$\mathrm{Kn}_{\theta\pi}\equiv\theta\tau_{\pi}$', cmap, 0, 1)
    make_plots(plotDir_vh, 1, e_VH, 'e', r'$\mathcal{E}$', cmap, vmin_e, vmax_e)
    make_plots(plotDir_vh, 2, np.divide(p_VH+t2pinn_VH,p_VH-t2pinn_VH/2), 'pLpT', r'$\mathcal{P}_{L}/\mathcal{P}_{\perp}$', cmap, 0, 1)
    # Bug fix: the regulation title was missing its opening '$'; matplotlib's
    # mathtext parser fails on an unmatched '$' delimiter.
    make_plots(plotDir_vh, 3, reg_VH, 'reg', r'$\mathrm{tanh}\rho/\rho$', cmap2, 0, 1)
    # Anisotropic hydro
    make_plots(plotDir_vah, 4, KnTaupi_VAH, 'KnTaupi', r'$\mathrm{Kn}_{\theta\pi}\equiv\theta\tau_{\pi}$', cmap, 0, 1)
    make_plots(plotDir_vah, 5, e_VAH, 'e', r'$\mathcal{E}$', cmap, vmin_e, vmax_e)
    make_plots(plotDir_vah, 6, np.divide(pl_VAH,(e_VAH-pl_VAH)/2), 'pLpT', r'$\mathcal{P}_{L}/\mathcal{P}_{\perp}$', cmap, 0, 1)
    make_plots(plotDir_vah, 7, fTSol_VAH, 'fTSol', r'$f_\perp(u_\perp)\neq 0\,\forall u_\perp\geq 0$', cmap, 0, 1)

    plt.show()
| [
"numpy.reshape",
"numpy.max",
"numpy.linspace",
"numpy.zeros",
"numpy.min",
"numpy.divide"
] | [((269, 302), 'numpy.reshape', 'np.reshape', (['dataRaw', '(nx, ny, nz)'], {}), '(dataRaw, (nx, ny, nz))\n', (279, 302), True, 'import numpy as np\n'), ((1902, 1934), 'numpy.linspace', 'np.linspace', (['(-xmax)', 'xmax'], {'num': 'nx'}), '(-xmax, xmax, num=nx)\n', (1913, 1934), True, 'import numpy as np\n'), ((2012, 2039), 'numpy.linspace', 'np.linspace', (['t0', 'tf'], {'num': 'nt'}), '(t0, tf, num=nt)\n', (2023, 2039), True, 'import numpy as np\n'), ((2099, 2117), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2107, 2117), True, 'import numpy as np\n'), ((2128, 2146), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2136, 2146), True, 'import numpy as np\n'), ((2157, 2175), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2165, 2175), True, 'import numpy as np\n'), ((2191, 2209), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2199, 2209), True, 'import numpy as np\n'), ((2222, 2240), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2230, 2240), True, 'import numpy as np\n'), ((2282, 2300), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2290, 2300), True, 'import numpy as np\n'), ((2312, 2330), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2320, 2330), True, 'import numpy as np\n'), ((2343, 2361), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2351, 2361), True, 'import numpy as np\n'), ((2377, 2395), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2385, 2395), True, 'import numpy as np\n'), ((2409, 2427), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (2417, 2427), True, 'import numpy as np\n'), ((3887, 3936), 'numpy.divide', 'np.divide', (['(p_VH + t2pinn_VH)', '(p_VH - t2pinn_VH / 2)'], {}), '(p_VH + t2pinn_VH, p_VH - t2pinn_VH / 2)\n', (3896, 3936), True, 'import numpy as np\n'), ((4336, 4375), 'numpy.divide', 'np.divide', (['pl_VAH', '((e_VAH - pl_VAH) / 2)'], {}), '(pl_VAH, (e_VAH - pl_VAH) / 
2)\n', (4345, 4375), True, 'import numpy as np\n'), ((3269, 3281), 'numpy.min', 'np.min', (['e_VH'], {}), '(e_VH)\n', (3275, 3281), True, 'import numpy as np\n'), ((3311, 3323), 'numpy.max', 'np.max', (['e_VH'], {}), '(e_VH)\n', (3317, 3323), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Created: 14/10/2021 8:50 AM
"""
import pandas as pd
import numpy as np
def make_cumuliative_annual(inseries, start_day, start_month):
    """
    Cumulative sum of ``inseries`` that restarts every year on the given
    day/month (e.g. 1 Jan for calendar years, 1 Jul for a water year).

    :param inseries: pd.Series with a DatetimeIndex
    :param start_day: int, day of month the annual window starts on (1-31)
    :param start_month: int, month the annual window starts in (1-12)
    :return: pd.Series aligned with ``inseries`` holding within-year
             cumulative sums
    """
    assert isinstance(start_day, int)
    assert isinstance(start_month, int)
    assert isinstance(inseries, pd.Series)
    # Explicit float dtype avoids the empty-data object-dtype pitfall.
    out_data = pd.Series(index=inseries.index, dtype=float)
    years = pd.unique(inseries.index.year)
    # Start one year early so a window beginning mid-year still covers the
    # earliest observations; one extra window covers the tail end.
    start_date = pd.to_datetime(f'{years.min()-1}-{start_month:02d}-{start_day:02d}')
    # Bug fix: compare on normalized timestamps instead of ``.date`` objects —
    # ordering comparisons between Timestamp and datetime.date raise
    # TypeError in recent pandas.
    days = inseries.index.normalize()
    for _ in range(len(years) + 1):
        end_date = start_date + pd.DateOffset(years=1, days=-1)
        idx = (days >= start_date) & (days <= end_date)
        out_data.loc[idx] = inseries.loc[idx].cumsum()
        start_date = end_date + pd.DateOffset(days=1)
    return out_data
if __name__ == '__main__':
    # Quick manual check: random daily flows, July-start water year.
    np.random.seed(55)
    demo = pd.DataFrame(index=pd.date_range('2002-01-01', '2005-08-25'))
    demo.loc[:, 'test_flow'] = np.random.randint(0, 5, len(demo))
    demo.loc[:, 'cum_sum'] = make_cumuliative_annual(demo.loc[:, 'test_flow'], 1, 7)
"pandas.Series",
"pandas.DateOffset",
"numpy.random.seed",
"pandas.date_range",
"pandas.unique"
] | [((551, 582), 'pandas.Series', 'pd.Series', ([], {'index': 'inseries.index'}), '(index=inseries.index)\n', (560, 582), True, 'import pandas as pd\n'), ((596, 626), 'pandas.unique', 'pd.unique', (['inseries.index.year'], {}), '(inseries.index.year)\n', (605, 626), True, 'import pandas as pd\n'), ((1059, 1077), 'numpy.random.seed', 'np.random.seed', (['(55)'], {}), '(55)\n', (1073, 1077), True, 'import numpy as np\n'), ((779, 810), 'pandas.DateOffset', 'pd.DateOffset', ([], {'years': '(1)', 'days': '(-1)'}), '(years=1, days=-1)\n', (792, 810), True, 'import pandas as pd\n'), ((985, 1006), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (998, 1006), True, 'import pandas as pd\n'), ((1113, 1154), 'pandas.date_range', 'pd.date_range', (['"""2002-01-01"""', '"""2005-08-25"""'], {}), "('2002-01-01', '2005-08-25')\n", (1126, 1154), True, 'import pandas as pd\n')] |
# Copyright (C) 2017-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import argparse
from bench import (
parse_args, measure_function_time, load_data, print_output, getFPType
)
import numpy as np
from daal4py import kmeans
# Command-line options layered on top of the shared bench arguments.
parser = argparse.ArgumentParser(description='daal4py K-Means clustering '
                                            'benchmark')
parser.add_argument('-i', '--filei', '--fileI', '--init',
                    type=str, help='Initial clusters')
parser.add_argument('-t', '--tol', default=0., type=float,
                    help='Absolute threshold')
parser.add_argument('--maxiter', type=int, default=100,
                    help='Maximum number of iterations')
parser.add_argument('--n-clusters', type=int, help='Number of clusters')
params = parse_args(parser, prefix='daal4py')

# Load generated data
X_train, X_test, _, _ = load_data(params, add_dtype=True)

# Load initial centroids from specified path
if params.filei is not None:
    X_init = np.load(params.filei).astype(params.dtype)
    # The number of clusters is implied by the provided centroid file.
    params.n_clusters = X_init.shape[0]
# or choose random centroids from training data
else:
    np.random.seed(params.seed)
    centroids_idx = np.random.randint(0, X_train.shape[0],
                                      size=params.n_clusters)
    # X_train may be a pandas DataFrame or a bare numpy array.
    if hasattr(X_train, "iloc"):
        X_init = X_train.iloc[centroids_idx].values
    else:
        X_init = X_train[centroids_idx]
# Define functions to time
def test_fit(X, X_init):
    """Run daal4py K-Means training on X from the given initial centroids."""
    clusterer = kmeans(fptype=getFPType(X),
                       nClusters=params.n_clusters,
                       maxIterations=params.maxiter,
                       assignFlag=True,
                       accuracyThreshold=params.tol)
    return clusterer.compute(X, X_init)
def test_predict(X, X_init):
    """Assign X to the given centroids (zero training iterations)."""
    labeler = kmeans(fptype=getFPType(X),
                    nClusters=params.n_clusters,
                    maxIterations=0,
                    assignFlag=True,
                    accuracyThreshold=0.0)
    return labeler.compute(X, X_init)
# Column headers for the benchmark report emitted below.
columns = ('batch', 'arch', 'prefix', 'function', 'threads', 'dtype', 'size',
           'n_clusters', 'time')

# Time fit
fit_time, res = measure_function_time(test_fit, X_train, X_init, params=params)
# Final K-Means objective (inertia) on the training data.
train_inertia = float(res.objectiveFunction[0, 0])

# Time predict
predict_time, res = measure_function_time(
    test_predict, X_test, X_init, params=params)
# Objective evaluated on the held-out data with fixed centroids.
test_inertia = float(res.objectiveFunction[0, 0])

print_output(library='daal4py', algorithm='kmeans',
             stages=['training', 'prediction'], columns=columns,
             params=params, functions=['KMeans.fit', 'KMeans.predict'],
             times=[fit_time, predict_time], accuracy_type='inertia',
             accuracies=[train_inertia, test_inertia], data=[X_train, X_test])
| [
"bench.load_data",
"argparse.ArgumentParser",
"bench.print_output",
"bench.parse_args",
"numpy.random.randint",
"numpy.random.seed",
"bench.measure_function_time",
"bench.getFPType",
"numpy.load"
] | [((247, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""daal4py K-Means clustering benchmark"""'}), "(description='daal4py K-Means clustering benchmark')\n", (270, 322), False, 'import argparse\n'), ((785, 821), 'bench.parse_args', 'parse_args', (['parser'], {'prefix': '"""daal4py"""'}), "(parser, prefix='daal4py')\n", (795, 821), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((869, 902), 'bench.load_data', 'load_data', (['params'], {'add_dtype': '(True)'}), '(params, add_dtype=True)\n', (878, 902), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((2094, 2157), 'bench.measure_function_time', 'measure_function_time', (['test_fit', 'X_train', 'X_init'], {'params': 'params'}), '(test_fit, X_train, X_init, params=params)\n', (2115, 2157), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((2245, 2311), 'bench.measure_function_time', 'measure_function_time', (['test_predict', 'X_test', 'X_init'], {'params': 'params'}), '(test_predict, X_test, X_init, params=params)\n', (2266, 2311), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((2368, 2671), 'bench.print_output', 'print_output', ([], {'library': '"""daal4py"""', 'algorithm': '"""kmeans"""', 'stages': "['training', 'prediction']", 'columns': 'columns', 'params': 'params', 'functions': "['KMeans.fit', 'KMeans.predict']", 'times': '[fit_time, predict_time]', 'accuracy_type': '"""inertia"""', 'accuracies': '[train_inertia, test_inertia]', 'data': '[X_train, X_test]'}), "(library='daal4py', algorithm='kmeans', stages=['training',\n 'prediction'], columns=columns, params=params, functions=['KMeans.fit',\n 'KMeans.predict'], times=[fit_time, predict_time], accuracy_type=\n 'inertia', accuracies=[train_inertia, test_inertia], data=[X_train, X_test]\n )\n", (2380, 2671), False, 
'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((1132, 1159), 'numpy.random.seed', 'np.random.seed', (['params.seed'], {}), '(params.seed)\n', (1146, 1159), True, 'import numpy as np\n'), ((1180, 1242), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'params.n_clusters'}), '(0, X_train.shape[0], size=params.n_clusters)\n', (1197, 1242), True, 'import numpy as np\n'), ((991, 1012), 'numpy.load', 'np.load', (['params.filei'], {}), '(params.filei)\n', (998, 1012), True, 'import numpy as np\n'), ((1509, 1521), 'bench.getFPType', 'getFPType', (['X'], {}), '(X)\n', (1518, 1521), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n'), ((1776, 1788), 'bench.getFPType', 'getFPType', (['X'], {}), '(X)\n', (1785, 1788), False, 'from bench import parse_args, measure_function_time, load_data, print_output, getFPType\n')] |
################
#Import dependencies
################
from matplotlib import figure, style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
import calendar
from scipy import stats, mean
#Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect, asc , desc, and_
#Flask modules
from flask import Flask, jsonify, render_template
#Local modules
import helpers
################
#Database Setup
################
#SQL Use Tool
# NOTE(review): everything below runs at import time and immediately
# queries the SQLite file, so the app fails fast if the DB is missing.
engine = create_engine("sqlite:///data/hawaii.sqlite")
#Reflect database into new model
Base = automap_base()
#Reflect the tables and pass in the engine
Base.prepare(engine, reflect=True)
#Label tables from classes
Station = Base.classes.station
Measurement = Base.classes.measurement
#Create a session and bind it to the engine
session = Session(engine)
#App.py prcp: Find the most recent date (dates are 'YYYY-MM-DD' strings)
recent = (session
        .query(Measurement.date)
        .order_by(Measurement.date.desc()).first())
#App.py prcp: Make most recent date a variable
date_parts = [int(date_part) for date_part in recent[0].split("-")]
#App.py prcp: Find the date from 1 year/12 months/365 days ago
year = datetime.date(*date_parts) - datetime.timedelta(days=365)
#Stop query session
session.close()
################
#Flask Setup
################
#Create an app for Flask setup
app = Flask(__name__)
################
#Flask Routes
################
#List all available api routes
@app.route("/")
def welcome():
    """Landing page: render the static index template that lists the
    available API routes."""
    print("Server received request for 'Welcome' page...")
    return render_template('index.html')
#API Route for Precipitation Data
@app.route("/api/v1.0/precipitation")
def precipitation():
    """JSON mapping of date -> average precipitation for the final year
    of data."""
    print("Return one year date from most recent in data as json")
    db = Session(engine)
    # Daily average precipitation since `year`, skipping NULL readings.
    rows = (db
            .query(Measurement.date, func.avg(Measurement.prcp))
            .filter(Measurement.date >= year, Measurement.prcp != None)
            .group_by(Measurement.date)
            .all())
    db.close()
    return jsonify({day: rain for day, rain in rows})
#API Route for Station Data
@app.route("/api/v1.0/stations")
def stations():
    """JSON list of every station id in the station table."""
    print("Return station data as json")
    db = Session(engine)
    station_rows = db.query(Station.station).all()
    db.close()
    # Each row is a 1-tuple; flatten into a plain list of ids.
    flattened = np.ravel(station_rows, order='K')
    return jsonify(list(flattened))
#API Route for tobs Data
@app.route("/api/v1.0/tobs")
def temp_monthly():
    """JSON temperature-observation summary for the most recent year
    of data."""
    print("Return tobs data as json")
    db = Session(engine)
    # Window runs from one year back (`year`) to the newest recorded date.
    newest = datetime.date(*date_parts)
    summary = helpers.trip_tobs(year, newest, db, Measurement, Station)
    payload = summary._asdict()
    db.close()
    return jsonify(payload)
#API Route for trip dates Data
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
    """JSON temperature statistics between trip dates.

    ``start``/``end`` come from the URL; when ``end`` is omitted the
    window runs through the newest date in the data set.

    Improvement: the two original branches differed only in the end date,
    so the duplicated query/serialisation code is collapsed into one path.
    """
    print("""Return tobs data as json by trip dates""")
    #Create a session and bind it to the engine
    session = Session(engine)
    # Default the end of the window to the newest date on record.
    end_date = end if end is not None else datetime.date(*date_parts)
    date_tobs = helpers.trip_total(start, end_date, session, Measurement, Station)
    #Unravel results from id array to list
    selected_list = [result._asdict() for result in date_tobs]
    #Stop query session
    session.close()
    return jsonify(selected_list)
#Define main behavior
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True) | [
"flask.render_template",
"helpers.trip_total",
"flask.Flask",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"matplotlib.style.use",
"datetime.date",
"sqlalchemy.func.avg",
"numpy.ravel",
"datetime.timedelta",
"flask.jsonify"
] | [((93, 121), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (102, 121), False, 'from matplotlib import figure, style\n'), ((649, 694), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///data/hawaii.sqlite"""'], {}), "('sqlite:///data/hawaii.sqlite')\n", (662, 694), False, 'from sqlalchemy import create_engine, func, inspect, asc, desc, and_\n'), ((736, 750), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (748, 750), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((983, 998), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (990, 998), False, 'from sqlalchemy.orm import Session\n'), ((1520, 1535), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1525, 1535), False, 'from flask import Flask, jsonify, render_template\n'), ((1338, 1364), 'datetime.date', 'datetime.date', (['*date_parts'], {}), '(*date_parts)\n', (1351, 1364), False, 'import datetime\n'), ((1367, 1395), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1385, 1395), False, 'import datetime\n'), ((1719, 1748), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1734, 1748), False, 'from flask import Flask, jsonify, render_template\n'), ((2292, 2307), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (2299, 2307), False, 'from sqlalchemy.orm import Session\n'), ((2839, 2867), 'flask.jsonify', 'jsonify', (['results_precitation'], {}), '(results_precitation)\n', (2846, 2867), False, 'from flask import Flask, jsonify, render_template\n'), ((3059, 3074), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (3066, 3074), False, 'from sqlalchemy.orm import Session\n'), ((3337, 3372), 'numpy.ravel', 'np.ravel', (['locations_list'], {'order': '"""K"""'}), "(locations_list, order='K')\n", (3345, 3372), True, 'import numpy as np\n'), ((3602, 3617), 'sqlalchemy.orm.Session', 
'Session', (['engine'], {}), '(engine)\n', (3609, 3617), False, 'from sqlalchemy.orm import Session\n'), ((3929, 3950), 'flask.jsonify', 'jsonify', (['results_tobs'], {}), '(results_tobs)\n', (3936, 3950), False, 'from flask import Flask, jsonify, render_template\n'), ((4220, 4235), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (4227, 4235), False, 'from sqlalchemy.orm import Session\n'), ((4849, 4871), 'flask.jsonify', 'jsonify', (['selected_list'], {}), '(selected_list)\n', (4856, 4871), False, 'from flask import Flask, jsonify, render_template\n'), ((3710, 3736), 'datetime.date', 'datetime.date', (['*date_parts'], {}), '(*date_parts)\n', (3723, 3736), False, 'import datetime\n'), ((4331, 4392), 'helpers.trip_total', 'helpers.trip_total', (['start', 'end', 'session', 'Measurement', 'Station'], {}), '(start, end, session, Measurement, Station)\n', (4349, 4392), False, 'import helpers\n'), ((4610, 4636), 'datetime.date', 'datetime.date', (['*date_parts'], {}), '(*date_parts)\n', (4623, 4636), False, 'import datetime\n'), ((2446, 2472), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.prcp'], {}), '(Measurement.prcp)\n', (2454, 2472), False, 'from sqlalchemy import create_engine, func, inspect, asc, desc, and_\n')] |
import ast
import hashlib
import importlib
import numpy as np
import pandas as pd
class Utils:
    """
    Utils functions: hashing, dynamic imports, meshgrid construction and
    dict <-> JSON-safe dict conversion helpers.
    """
    @classmethod
    def md5_file(cls, filename):
        """
        Calculate the md5 of a file
        thanks <NAME> https://www.pythoncentral.io/hashing-files-with-python/
        Raise FileNotFoundError if the file does not exist
        :param filename: path of the file to hash
        :return: hex digest string of the file content
        """
        blocksize = 65536
        hasher = hashlib.md5()
        with open(filename, 'rb') as afile:
            buf = afile.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(blocksize)
        return hasher.hexdigest()

    @classmethod
    def import_from_dotted_path(cls, dotted_names):
        """ import_from_dotted_path('foo.bar') -> from foo import bar; return bar
        Raise ImportError/AttributeError if the module or attribute is missing.
        """
        module_name, class_name = dotted_names.rsplit('.', 1)
        module = importlib.import_module(module_name)
        handler_class = getattr(module, class_name)
        return handler_class

    @classmethod
    def make_meshgrid(cls, x, y, h=.02):
        """
        Create a mesh of points to plot in
        (src, thanks : https://scikit-learn.org/stable/auto_examples/svm/plot_iris.html)
        :param x: data to base x-axis meshgrid on (type numpy.ndarray)
        :param y: data to base y-axis meshgrid on (type numpy.ndarray)
        :param h: stepsize for meshgrid, optional
        :return: xx, yy : ndarray
        """
        # Pad the range by 1 on both sides so the mesh surrounds the data.
        x_min, x_max = x.min() - 1, x.max() + 1
        y_min, y_max = y.min() - 1, y.max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        return xx, yy

    @classmethod
    def transform_to_dict(cls, dictionary: dict, tuple_to_string=False):
        """
        Transform a dictionary containing dictionary such as
        { "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
        to dictionary containing the real type (tuple)
        :param dictionary: dictionary containing __tuple__ values
        :param tuple_to_string: if True the tuple identified with "__type__": "__tuple__" are store as string in the
        dictionary. If False, the tuple is converted to a tuple type
        :return dictionary containing the real type
        """
        def change_one_dict_element(value):
            # Convert one {"__type__": "__tuple__", ...} wrapper (or recurse).
            if '__type__' in value:
                if value['__type__'] == '__tuple__':
                    if tuple_to_string:
                        result_element = value['__value__']
                    else:
                        result_element = ast.literal_eval(value['__value__'])
                        if not isinstance(result_element, tuple):
                            # BUG FIX: previously raised with the enclosing loop
                            # variable 'v' instead of the 'value' being converted,
                            # which produced a wrong (or crashing) error message
                            # for elements nested inside lists.
                            raise TypeError(value['__value__'] + " is not a tuple")
                else:
                    result_element = Utils.transform_to_dict(value, tuple_to_string)
            else:
                result_element = Utils.transform_to_dict(value, tuple_to_string)
            return result_element
        result = dictionary.copy()
        for k, v in result.items():
            if isinstance(v, dict):
                result[k] = change_one_dict_element(v)
            if isinstance(v, list):
                result[k] = []
                for e in v:
                    if isinstance(e, dict):
                        result[k].append(change_one_dict_element(e))
                    else:
                        result[k].append(e)
        return result

    @classmethod
    def transform_to_json(cls, dictionary):
        """
        Transform a dictionary containing tuple to dictionary
        such as { "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
        :param dictionary: dictionary containing tuple
        :return dictionary containing __tuple__ values
        """
        def change_one_dict_element(value):
            # Wrap one tuple in its JSON-safe marker dict.
            result_element = {'__type__': '__tuple__', '__value__': value.__str__()}
            return result_element
        result = dictionary.copy()
        for k, v in result.items():
            if isinstance(v, tuple):
                result[k] = change_one_dict_element(v)
            if isinstance(v, dict):
                result[k] = Utils.transform_to_json(v)
            if isinstance(v, list):
                result[k] = []
                for e in v:
                    if isinstance(e, tuple):
                        result[k].append(change_one_dict_element(e))
                    else:
                        if isinstance(e, dict):
                            result[k].append(Utils.transform_to_json(e))
                        else:
                            result[k].append(e)
        return result

    @classmethod
    def check_dict_python_ready(cls, dictionary):
        """
        Check if a dictionary (and nested) does not contains a __type__ key,
        which means is not ready to be handle by python
        :param dictionary: the dictionary to check
        :return: False if the dictionary contains one __type__ key, True otherwise
        """
        result = True
        for k, v in dictionary.items():
            if not isinstance(v, list):
                v = [v]
            for e in v:
                if isinstance(e, dict):
                    if '__type__' in e:
                        result = False
                    else:
                        result = result & Utils.check_dict_python_ready(e)
        return result

    @classmethod
    def flatten_dict(cls, dictionary, separator='_', prefix=''):
        """SRC : https://www.geeksforgeeks.org/python-convert-nested-dictionary-into-flattened-dictionary/
        Flatten nested dicts, joining keys with `separator`.
        """
        result = {prefix + separator + k if prefix else k: v
                  for kk, vv in dictionary.items()
                  for k, v in Utils.flatten_dict(vv, separator, kk).items()
                  } if isinstance(dictionary, dict) else {prefix: dictionary}
        return result

    @classmethod
    def func_create_dataframe(cls, storage):
        """ return the function that create a DataFrame from an array
        (returns None for an unknown storage backend)"""
        if storage == 'Pandas':
            return pd.DataFrame

    @classmethod
    def is_dataframe_empty(cls, df):
        # Non-DataFrame inputs are treated as "empty" (returns True).
        result = True
        if isinstance(df, pd.DataFrame):
            result = df.empty
        return result

    @classmethod
    def str2bool(cls, v: str):
        return v.lower() in ("yes", "true", "t", "1")
| [
"importlib.import_module",
"ast.literal_eval",
"hashlib.md5",
"numpy.arange"
] | [((425, 438), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (436, 438), False, 'import hashlib\n'), ((911, 947), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (934, 947), False, 'import importlib\n'), ((1595, 1621), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (1604, 1621), True, 'import numpy as np\n'), ((1652, 1678), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (1661, 1678), True, 'import numpy as np\n'), ((2602, 2638), 'ast.literal_eval', 'ast.literal_eval', (["value['__value__']"], {}), "(value['__value__'])\n", (2618, 2638), False, 'import ast\n')] |
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# $Id$
"""Toolbox for images from the Cornell SLAC Pixel Array Detector
(CSpad).
XXX Better named cspad_common?
XXX Read out detector temperature (see Hart et al., 2012)?
"""
from __future__ import absolute_import, division, print_function
from six.moves import range
import math
import numpy
import os
import time
from libtbx import easy_pickle
from scitbx.array_family import flex
from xfel.cxi.cspad_ana.parse_calib import Section
import six
from six.moves import zip
__version__ = "$Revision$"
# The CAMP and CSpad counters are both 14 bits wide (Strüder et al
# 2010; Philipp et al., 2007), which means the physical limit is 2**14 - 1.
# However, in practice, when the pixels are in the low gain mode, after
# correcting by a gain value of around 6.87, the pixels tend to saturate
# around 90000. See xpp experiment xppe0314, run 184 as evidence.
cspad_saturated_value = 90000
# The dark average for the CSPAD detector is around 1100-1500. A pixel
# histogram of a minimum projection of an uncorrected (raw) light run shows
# a mostly flat tail up to ~800 ADU with a few bumps in the tail which
# represent true underloads. Assume a dark average of 1200 ADU. After dark
# subtraction, 800 - 1200 gives a minimum trusted value of -400. Reject
# pixels less than this.
cspad_min_trusted_value = -400
# As long as the mask value is outside of the trusted range, the pixel should
# be ignored by any downstream software.
cspad_mask_value = -100000
# The side length of a square quadrant from the old XtcExplorer code.
# XXX This should be obsoleted!
npix_quad = 850
# The pixel size in mm. The pixel size is fixed and square, with side
# length of 110 µm (Philipp et al., 2007). XXX Should really clarify
# this with Sol and Chris.
#
# XXX Andor: 13.5 µm square, CAMP: 75 µm, square (Strüder et al.,
# 2010)
pixel_size = 110e-3
# origin of section in quad coordinate system. x-position
# correspond to column number. XXX Note/reference the source!
# XXX This should be obsoleted!
# Tables below are indexed [quadrant][section]; values are in pixels.
xpos_sec2x1 = [[ 414, 626, 0, 0, 213, 1, 418, 419], # 2:5 were not measured
               [ 421, 634, 0, 0, 213, 1, 424, 425],
               [ 417, 630, 0, 1, 212, 0, 425, 426],
               [ 416, 630, 0, 0, 213, 1, 420, 421]] # 2:5 were not measured
# y-position correspond to maxrows - row number
ypos_sec2x1 = [[ 0, 0, 214, 1, 425, 425, 615, 402], # 2:5 were not measured
               [ 0, 0, 214, 1, 425, 425, 615, 402],
               [ 0, 0, 215, 3, 431, 431, 616, 403],
               [ 0, 0, 214, 1, 425, 425, 615, 403]] # 2:5 were not measured
def address_split(address, env=None):
  """The address_split() function splits an address into its four
  components.  Address strings are on the form
  detector-detectorID|device-deviceID, where the detectors must be in
  dir(xtc.DetInfo.Detector) and device must be in
  (xtc.DetInfo.Device).
  @param address Full data source address of the DAQ device
  @param env     Optional env to dereference an alias into an address
  @return        Four-tuple of detector name, detector ID, device, and
                 device ID (all None if nothing matched)
  """
  import re
  # Try the three known address spellings in turn: pyana, psana, and
  # the psana DetInfo string representation.
  patterns = (
    r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$",
    r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$",
    r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$")
  for pattern in patterns:
    match = re.match(pattern, address)
    if match is not None:
      return (match.group('det'), match.group('det_id'),
              match.group('dev'), match.group('dev_id'))
  if env is not None:
    # Maybe this is a detector alias; if so, dereference it.  Code from
    # psana's Detector/PyDetector.py.
    amap = env.aliasMap()
    alias_src = amap.src(address)  # string --> DAQ-style psana.Src
    # If it is an alias, look up the full name and re-parse it.
    if amap.alias(alias_src) != '':
      return address_split(str(alias_src))
  return (None, None, None, None)
def cbcaa(config, sections):
  """The cbcaa() function uses on-disk calibration data to estimate
  the beam centre and the active detector areas.  The beam centre is
  approximated as the average of the four ASIC corners closest to the
  detector centre.  That is the first corner of the section 1 in every
  quadrant.  Note that first corner index is vertical coordinate,
  second index is the horizontal coordinate.  XXX Construct the active
  areas in "spotfinder format", i.e. opposing corners.  XXX This is a
  really bad function name!  XXX The beam centre may be extracted from
  the ebeam object?
  @param config   CSPAD configuration object (provides roiMask()/quadMask())
  @param sections Calibration sections, indexed [quadrant][section], or
                  None to fall back to the hard-coded geometry tables
  @return         Tuple of 2D beam centre, and active areas in
                  "spotfinder format" (UL_slow, UL_fast, LR_slow, LR_fast)
  """
  aa = flex.int()
  if (sections is None):
    # No calibration available: derive the active areas from the
    # hard-coded xpos_sec2x1/ypos_sec2x1 tables instead.
    #
    # The active areas of the detector, (UL_slow, UL_fast, LR_slow,
    # LR_fast) A two-by-one is 185-by-392 pixels with a 4-pixel gap.
    # An ASIC is 185-by-194 pixels. XXX Still need to sort out the
    # inclusive/exclusive detail. Upper-left corner is inclusive,
    # lower-right corner is exclusive. XXX Should subtract one from
    # x, y on lower-right corner and verify with zoom. XXX All this
    # should probably go by now
    for q in range(4): # loop over quadrants
      for i in range(8): # loop over two-by-one:s XXX variable name!
        # Skip this two-by-one if it is missing.
        if not (config.roiMask(q) & 0x1 << i):
          continue
        # XXX Note the different coordinate systems in use here!
        xpos = xpos_sec2x1[q][i] # x-value of lower, left corner
        ypos = 850 - ypos_sec2x1[q][i] # y-value of lower, left corner
        # Sections 0, 1, 4, 5 stand "upright"; 2, 3, 6, 7 lie "sideways",
        # hence the two corner layouts below.
        if (i == 0 or i == 1 or i == 4 or i == 5):
          UL1_x = xpos
          UL2_x = xpos
          UL1_y = ypos - 194 - 4 - 194
          UL2_y = ypos - 194
          LR1_x = UL1_x + 185
          LR2_x = UL2_x + 185
          LR1_y = UL1_y + 194
          LR2_y = UL2_y + 194
        elif (i == 2 or i == 3 or i == 6 or i == 7):
          UL1_x = xpos
          UL2_x = xpos + 194 + 4
          UL1_y = ypos - 185
          UL2_y = ypos - 185
          LR1_x = UL1_x + 194
          LR2_x = UL2_x + 194
          LR1_y = UL1_y + 185
          LR2_y = UL2_y + 185
        # Quadrant rotations, counter-clockwise.  Zeroth quadrant
        # needs no special action.
        if (q == 0):
          pass
        elif (q == 1):
          UL1_x, UL1_y = 850 + 850 - UL1_y, UL1_x
          LR1_x, LR1_y = 850 + 850 - LR1_y, LR1_x
          UL2_x, UL2_y = 850 + 850 - UL2_y, UL2_x
          LR2_x, LR2_y = 850 + 850 - LR2_y, LR2_x
          UL1_x, LR1_x = LR1_x, UL1_x
          UL2_x, LR2_x = LR2_x, UL2_x
        elif (q == 2):
          UL1_x, UL1_y = 850 + 850 - UL1_x, 850 + 850 - UL1_y
          LR1_x, LR1_y = 850 + 850 - LR1_x, 850 + 850 - LR1_y
          UL2_x, UL2_y = 850 + 850 - UL2_x, 850 + 850 - UL2_y
          LR2_x, LR2_y = 850 + 850 - LR2_x, 850 + 850 - LR2_y
          UL1_x, UL1_y, LR1_x, LR1_y = LR1_x, LR1_y, UL1_x, UL1_y
          UL2_x, UL2_y, LR2_x, LR2_y = LR2_x, LR2_y, UL2_x, UL2_y
        elif (q == 3):
          UL1_x, UL1_y = UL1_y, 850 + 850 - UL1_x
          LR1_x, LR1_y = LR1_y, 850 + 850 - LR1_x
          UL2_x, UL2_y = UL2_y, 850 + 850 - UL2_x
          LR2_x, LR2_y = LR2_y, 850 + 850 - LR2_x
          UL1_y, LR1_y = LR1_y, UL1_y
          UL2_y, LR2_y = LR2_y, UL2_y
        # This is row-major matrix layout; FAST <=> x, SLOW <=> y.
        aa.extend(flex.int([UL1_y, UL1_x, LR1_y, LR1_x]))
        aa.extend(flex.int([UL2_y, UL2_x, LR2_y, LR2_x]))
    # The beam centre is estimated as the centre of the image.
    return ([npix_quad, npix_quad], aa)
  # Old way of computing beam center, phased out 05/19/15
  #bc = [0, 0]
  # XXX Make up a quadrant mask for the emission detector. Needs to
  # be checked!
  if len(sections) <= 1:
    q_mask = 1
  else:
    q_mask = config.quadMask()
  for q in range(len(sections)):
    # Skip quadrants absent from the quadrant mask.
    if (not((1 << q) & q_mask)):
      continue
    # Old way of computing beam center, phased out 05/19/15
    #corner = sections[q][1].corners(True)[0]
    #bc = [bc[0] + corner[1] / len(sections),
    #      bc[1] + corner[0] / len(sections)]
    # XXX Make up section mask for the emission detector. Needs to be
    # checked!
    try:
      import _pdsdata
      types = _pdsdata.cspad2x2.ConfigV1, _pdsdata.cspad2x2.ConfigV2
    except ImportError:
      import psana
      types = psana.CsPad2x2.ConfigV1, psana.CsPad2x2.ConfigV2
    # 2x2 ("mini") configurations take roiMask() without a quadrant index.
    if len(sections) == 1 and type(config) in types:
      s_mask = config.roiMask()
    else:
      s_mask = config.roiMask(q)
    for s in range(len(sections[q])):
      # Skip sections absent from the section mask.
      if (not((1 << s) & s_mask)):
        continue
      # Each section contributes its two ASIC rectangles.
      c = sections[q][s].corners_asic()
      aa.extend(flex.int(c[0]))
      aa.extend(flex.int(c[1]))
  # The beam center was defined above as the center of the innermost 4 sensors. Recently,
  # that center has drifted too much from the true image center (Spring 2015). So, here we
  # use the true image center instead.
  return [882.5,882.5], aa
def CsPad2x2Image(data, config, sections):
  """The CsPad2x2Image() function assembles a two-dimensional image
  from the Sc1 detector readout in @p data.
  @param data     Detector readout from XTC stream, shape (185, 388, 2)
                  (the assert below only pins the last dimension to 2)
  @param config   XXX (unused in the current implementation)
  @param sections Calibration sections, indexed [quadrant][section]
  @return         Assembled detector image (2D numpy array)
  """
  assert (data.shape[2] == 2)
  # Blank canvas: two sections side by side plus a 3-pixel inter-ASIC gap.
  det = numpy.zeros((2 * 185, 2 * 194 + 3))
  # XXX config.sections is now a function returning a list? Since the
  # masking was disabled in December commenting out this bit does not
  # cause any further breakage XXX Does this still work for runs 4 and
  # 5?
  # s = config.sections
  # mask = map(s, range(2))
  # For this detector, the quadrant index is always zero.
  q_idx = 0
  for s in range(2):
    # XXX DAQ misconfiguration? This mask appears not to work
    # reliably for the Sc1 detector.
    # if (s not in mask[q_idx]):
    # continue
    # Rotate the section to the "standing up" calibration convention
    # and insert a 3-pixel gap between its two ASICs.
    asics = numpy.vsplit(numpy.rot90(data[:, :, s], -1), 2)
    gap = numpy.zeros((3, 185), dtype = data.dtype)
    s_data = numpy.vstack((asics[0], gap, asics[1]))
    # Place the section on the canvas at its calibrated angle/centre.
    angle = sections[q_idx][s].angle
    center = sections[q_idx][s].center
    rplace(det, s_data, angle, center)
  return (det)
def evt_get_quads(address, evt, env):
  """Return the list of CSPAD quadrant objects for the event, or None.

  Works with both pyana (evt.getCsPadQuads) and psana (evt.get with a
  CsPad.DataV2 source) event interfaces.
  @param address Full data source address of the DAQ device
  @param evt     Event data object
  @param env     Environment object
  """
  try:
    # pyana interface
    return evt.getCsPadQuads(address, env)
  except AttributeError:
    # psana interface
    from psana import Source, CsPad
    cspad = evt.get(CsPad.DataV2, Source(address))
    if cspad is None:
      return None
    return [cspad.quads(i) for i in range(cspad.quads_shape()[0])]
def CsPadDetector(address, evt, env, sections, right=True, quads=None):
  """The CsPadDetector() function assembles a two-dimensional image
  from the Ds1 detector readout in @p data3d and the calibration
  information in @p sections.  XXX General question: do
  variable/function names make sense?
  @param address  Full data source address of the DAQ device
  @param evt      Event data object, a configure object
  @param env      Environment object
  @param sections Calibration sections, indexed [quadrant][section]
  @param right    @c True to restrict rotations to right angles
  @param quads    Optional pre-fetched quadrant list; fetched from the
                  event when None
  @return         Assembled detector image, or None on any failure
  """
  device = address_split(address)[2]
  if device is None or device != 'Cspad':
    return None
  # Get a current configure object for the detector
  config = getConfig(address, env)
  if config is None:
    return None
  # For consistency, one could/should verify that len(quads) is equal
  # to len(sections).
  if quads is None:
    quads = evt_get_quads(address, evt, env)
  if quads is None or len(quads) != len(sections):
    return None
  # This is from <NAME>'s
  # HDF5Explorer/src/ConfigCSpad.py, which uses a detector size of
  # 1765-by-1765 pixels.
  extra_space = (1765 - 2 * Section.q_size[0],
                 1765 - 2 * Section.q_size[1])
  # Start out with a blank image of the detector. This assumes that
  # the type of the first section in the first quadrant is identical
  # to the type of all the other sections.
  det = numpy.zeros((2 * Section.q_size[0] + extra_space[0],
                     2 * Section.q_size[1] + extra_space[1]),
                    dtype=quads[0].data()[0].dtype)
  ### need to swap the quadrants for data collected mid October, 2013
  evttime = time.gmtime(evt_time(evt)[0])
  swap = evttime.tm_year == 2013 and evttime.tm_mon == 10 and evttime.tm_mday >= 20 and evttime.tm_mday <= 25
  for quad in quads:
    q_data = quad.data()
    q_idx = quad.quad()
    if swap:
      # Quadrants 2 and 3 were cabled in reverse during this period.
      q_idx = [0,1,3,2][q_idx]
    try:
      # pyana
      # example: if the third sensor (2x1) is disabled, q_mask = [0,1,3,4,5,6,7]
      q_mask = config.sections(q_idx)
    except AttributeError:
      # psana
      # as above, using config.roiMask, a bitstring where the ith bit is true if the ith sensor is active. x << y means bitwise shift
      # x, y times, and & is the bitwise AND operator
      q_mask = [i for i in range(len(sections[q_idx])) if 1 << i & config.roiMask(q_idx)]
    # For consistency, assert that there is data for each unmasked
    # section.
    assert len(q_data) == len(q_mask)
    for (s_data, s_idx) in zip(q_data, q_mask):
      # Rotate the section from the XTC-stream by -90 degrees to
      # conform to the "standing up" convention used by the
      # calibration data, and insert a 3-pixel gap between the ASIC:s.
      # This requires the horizontal dimension of the unrotated
      # section to be even.
      assert s_data.shape[1] % 2 == 0
      asics = numpy.vsplit(numpy.rot90(s_data, -1), 2)
      gap = numpy.zeros((3, s_data.shape[0]), dtype=s_data.dtype)
      s_data = numpy.vstack((asics[0], gap, asics[1]))
      # Place the section in the detector image, either by forcing
      # rotation to right angles or by interpolating.
      angle = sections[q_idx][s_idx].angle
      center = sections[q_idx][s_idx].center
      if right:
        rplace(det, s_data, angle, center)
      else:
        iplace(det, s_data, angle, center)
  return det
def CsPadElement(data3d, qn, config):
  """Construct one image for each quadrant, each with 8 sections from
  a data3d = 3 x 2*194 x 185 data array.  This function was originally
  written by <NAME> for pyana's XtcExplorer module.
  @param data3d Stacked section readouts for one quadrant
  @param qn     Quadrant number (0-3); quadrants 1-3 are rotated at the end
  @param config Detector configuration (provides the section mask)
  @return       npix_quad x npix_quad image for this quadrant
  """
  # If any sections are missing, insert zeros.
  mask = [config.sections(i) for i in range(4)]
  if (len(data3d) < 8):
    zsec = numpy.zeros((185, 388), dtype = data3d.dtype)
    for i in range(8) :
      if (i not in mask[qn]):
        data3d = numpy.insert(data3d, i, zsec, axis = 0)
  pairs = []
  for i in range(8) :
    # Insert gap between ASIC:s in the 2x1.
    asics = numpy.hsplit(data3d[i], 2)
    gap = numpy.zeros((185, 4), dtype = data3d.dtype)
    pair = numpy.hstack((asics[0], gap, asics[1]))
    # Sections 2,3 and 6,7 are as is. The others need some rotation,
    # implemented as a matrix transposition here.
    if (i == 0 or i == 1):
      pair = pair[:, ::-1].T
    if (i == 4 or i == 5):
      pair = pair[::-1, :].T
    pairs.append(pair)
  # Make the array for this quadrant, and insert the 2x1 sections.
  quadrant = numpy.zeros((npix_quad, npix_quad), dtype = data3d.dtype)
  for sec in range(8):
    nrows, ncols = pairs[sec].shape
    # x,y in quadrant coordinate system; taken from the module-level
    # xpos_sec2x1/ypos_sec2x1 measurement tables.
    xpos = xpos_sec2x1[qn][sec]
    ypos = ypos_sec2x1[qn][sec]
    colp = xpos
    rowp = npix_quad - ypos
    quadrant[(rowp - nrows):rowp, colp:(colp + ncols)] = \
      pairs[sec][0:nrows, 0:ncols]
  # Finally, rotate the quadrant as needed.
  if (qn > 0):
    quadrant = numpy.rot90(quadrant, 4 - qn)
  return quadrant
def dpack(active_areas=None,
          address=None,
          beam_center_x=None,
          beam_center_y=None,
          ccd_image_saturation=None,
          data=None,
          distance=None,
          pixel_size=pixel_size,
          saturated_value=None,
          timestamp=None,
          wavelength=None,
          xtal_target=None,
          min_trusted_value=None):
  """Build a "spotfinder format" image dictionary, filling in sensible
  defaults for every omitted item.  Returns None when no image data is
  supplied.  XXX Check completeness.
  """
  # An image dictionary is meaningless without image data.
  if data is None:
    return None
  # Stamp with the current time when no timestamp was supplied.
  if timestamp is None:
    timestamp = evt_timestamp()
  # For unknown historical reasons, the dictionary must contain both
  # CCD_IMAGE_SATURATION and SATURATED_VALUE items; each defaults to
  # the other when only one is given.
  if ccd_image_saturation is None:
    ccd_image_saturation = cspad_saturated_value if saturated_value is None else saturated_value
  if saturated_value is None:
    saturated_value = ccd_image_saturation
  # Use a minimum value if provided for the pixel range
  if min_trusted_value is None:
    min_trusted_value = cspad_min_trusted_value
  # By default, the beam center is the center of the image.  The slow
  # (vertical) and fast (horizontal) axes correspond to x and y,
  # respectively.
  focus = data.focus()
  if beam_center_x is None:
    beam_center_x = pixel_size * focus[1] / 2
  if beam_center_y is None:
    beam_center_y = pixel_size * focus[0] / 2
  # By default, the entire detector image is an active area.  There is
  # no sensible default for distance nor wavelength.  XXX But setting
  # wavelength to zero may be disastrous?
  if active_areas is None:
    # XXX Verify order with non-square detector
    active_areas = flex.int((0, 0, focus[0], focus[1]))
  if distance is None:
    distance = 0
  if wavelength is None:
    wavelength = 0
  # The size must match the image dimensions.  The length along the
  # slow (vertical) axis is SIZE1, the length along the fast
  # (horizontal) axis is SIZE2.
  return {'ACTIVE_AREAS': active_areas,
          'BEAM_CENTER_X': beam_center_x,
          'BEAM_CENTER_Y': beam_center_y,
          'CCD_IMAGE_SATURATION': ccd_image_saturation,
          'DATA': data,
          'DETECTOR_ADDRESS': address,
          'DISTANCE': distance,
          'PIXEL_SIZE': pixel_size,
          'SATURATED_VALUE': saturated_value,
          'MIN_TRUSTED_VALUE': min_trusted_value,
          'SIZE1': focus[0],
          'SIZE2': focus[1],
          'TIMESTAMP': timestamp,
          'SEQUENCE_NUMBER': 0,  # XXX Deprecated
          'WAVELENGTH': wavelength,
          'xtal_target': xtal_target}
def hdf5pack(hdf5_file,
             active_areas=None,
             address=None,
             attenuation=None,
             beam_center_x=None,
             beam_center_y=None,
             ccd_image_saturation=None,
             data=None,
             distance=None,
             pixel_size=None,
             pulse_length=None,
             saturated_value=None,
             timestamp=None,
             wavelength=None,
             xtal_target=None):
  """Similar but far from identical to the HDF5 output from CASS.  XXX
  Poor diagnostics--we don't know if it failed or not.
  Writes one event group (keyed by timestamp) with a nested detector
  group (keyed by address) into @p hdf5_file.
  @note Does not include the deprecated SEQUENCE_NUMBER attribute.
        While some redundant items are written in order to keep the
        HDF5 synchronised to the pickle format, neither SIZE1 nor
        SIZE2 are included.
  """
  # Need this because we cannot write None values to the HDF5 file.
  if address is None:
    address = repr(None)
  if attenuation is None:
    attenuation = 0
  if xtal_target is None:
    xtal_target = repr(None)
  if pixel_size is None:
    pixel_size = globals()['pixel_size'] # XXX CSpad-specific!
  if pulse_length is None:
    pulse_length = 0
  # Delegate default-filling and dictionary assembly to dpack().
  d = dpack(address=address,
            active_areas=active_areas,
            beam_center_x=beam_center_x,
            beam_center_y=beam_center_y,
            ccd_image_saturation=ccd_image_saturation,
            data=data,
            distance=distance,
            pixel_size=pixel_size,
            saturated_value=saturated_value,
            timestamp=timestamp,
            wavelength=wavelength,
            xtal_target=xtal_target)
  if d is None:
    return
  grp_event = hdf5_file.create_group(d['TIMESTAMP'])
  grp_detector = grp_event.create_group(address)
  for (key, value) in six.iteritems(d):
    if key == 'ACTIVE_AREAS':
      grp_detector.create_dataset(key, data=value.as_numpy_array())
    elif key == 'DATA':
      # Compress the image data with gzip at the default level (4).
      # CASS seems to use maximum compression level (9), which gives a
      # moderate decrease in file size at the price of much longer
      # running time.
      grp_detector.create_dataset(
        key, compression='gzip', data=value.as_numpy_array())
    else:
      grp_event.create_dataset(key, data=[value])
  grp_event.create_dataset('ATTENUATION', data=[attenuation])
  grp_event.create_dataset('PULSE_LENGTH', data=[pulse_length])
def write_tiff(d, dirname=None, basename=None):
  """The write an image tiff. Basic implementation no frills, no metadata
  @param d        Image dictionary, as created by e.g. dpack()
  @param dirname  Directory portion of output file (default ".")
  @param basename Filename prefix of output file (default "")
  @return         Path of the written tiff file
  """
  if basename is None:
    basename = ""
  if dirname is None:
    dirname = "."
  if not os.path.isdir(dirname):
    os.makedirs(dirname)
  # The output path should not contain any funny characters which may
  # not work in all environments. This constructs a sequence number à
  # la evt_seqno() from the dictionary's timestamp.
  t = d['TIMESTAMP']
  s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
  path = os.path.join(dirname, basename + s + '.tiff')
  #assure that the 2-byte data are within the unsigned limits
  # NOTE(review): values above 65535 (and below 0) are set to 0, not
  # clamped to the limit — looks intentional as a mask, but confirm.
  selecthi = d["DATA"]>65535
  d["DATA"].set_selected(selecthi,0)
  selectlo = d["DATA"]<0
  d["DATA"].set_selected(selectlo,0)
  # Convert the (flex) image to a 16-bit numpy array for cv2.
  idata = d["DATA"].as_numpy_array()
  idata = idata.astype("uint16")
  import cv2 # psdm install should have this extension
  cv2.imwrite(path,idata)
  return path
def dwritef(d, dirname=None, basename=None):
  """Pickle the dictionary @p d to disk and return the output path.

  The directory @p dirname, as well as any intermediate directories,
  are recursively created if they do not already exist.  The name of
  the written file is the concatenation of @p basename and a sequence
  number derived from the timestamp in the dictionary.

  @param d        Dictionary, as created by e.g. dpack()
  @param dirname  Directory portion of output file
  @param basename Filename prefix of output file
  @return         Path of output file
  """
  basename = "" if basename is None else basename
  dirname = "." if dirname is None else dirname
  if not os.path.isdir(dirname):
    os.makedirs(dirname)
  # Construct a sequence number à la evt_seqno() from the dictionary's
  # timestamp, so the output path contains no funny characters which
  # may not work in all environments.
  t = d['TIMESTAMP']
  seqno = "".join((t[0:4], t[5:7], t[8:10], t[11:13], t[14:16], t[17:19], t[20:23]))
  # XXX Several non-pyana tools rely on the .pickle extension.  Fix
  # those before migrating to .pkl.
  path = os.path.join(dirname, basename + seqno + '.pickle')
  easy_pickle.dump(path, d)
  return path
def dwritef2(obj, path):
  """Write @p obj to the Python pickle file at @p path, creating any
  missing directories of @p path on the way.

  @param obj  Object to write, as created by e.g. dpack()
  @param path Path of output file
  @return     Path of output file
  """
  directory = os.path.dirname(path)
  # Only attempt creation for a non-empty, not-yet-existing directory.
  if directory != "" and not os.path.isdir(directory):
    os.makedirs(directory)
  easy_pickle.dump(path, obj)
  return path
def pathsubst(format_string, evt, env, **kwargs):
  """The pathsubst() function provides variable substitution and value
  formatting as described in PEP 3101.  The function returns a copy of
  the input string, @p format_string, with field names replaced by
  their appropriate values as determined by either @p evt, @p env, or
  the user-supplied keyworded arguments, @p kwargs.

  Available replacement fields:

  chunk:      Chunk number or -1 if unknown.
  epoch:      Time of the event, in number of seconds since midnight,
              1 January 1970 UTC (Unix time), to millisecond
              precision.
  experiment: Experiment name, or empty string if unknown.
  expNum:     Experiment number or -1 if unknown.
  instrument: Instrument name, or empty string if unknown.
  iso8601:    The time of the event as an extended human-readable ISO
              8601 timestamp, to millisecond precision, or the empty
              string if unknown.  Not suitable for file names, because
              it contains characters that do not play well with
              certain file systems (e.g. NTFS).
  jobName:    Job name.
  jobNameSub: Combination of job name and subprocess index as a string
              which is unique for all subprocesses in a job.
  run:        Run number or -1 if unknown.
  seqno:      Sequence number or -1 if unknown.
  stream:     Stream number or -1 if unknown.
  subprocess: Subprocess number.  This is a non-negative integer in
              the range [0, nproc) when multiprocessing, or -1 for a
              single-process job.
  user:       The "login name" of the user.

  In addition to the standard conversion flags, the pathsubst()
  function implements the <code>!u</code> and <code>!l</code> flags
  for conversion to upper- and lower-case strings, respectively.
  Literal braces can be escaped by doubling, i.e. <code>{</code> is
  written <code>{{</code>, and <code>}</code> as <code>}}</code>.

  @note Chunk number, expNum, run number, and stream number are
        determined from the input XTC file name.  If a file does not
        adhere to the standard format, it may not be possible to
        determine these quantities.
  @note String substitution requires PSDM pyana version 0.10.3 or
        greater.

  @param format_string String containing replacement fields
  @param evt           Event data object, a configure object
  @param env           Environment object
  @param kwargs        User-supplied replacements, on the form
                       <code>field_name=value</code>
  @return              Copy of @p format_string, with replacement
                       fields substituted by their appropriate values
  """
  from getpass import getuser
  from string import Formatter
  class CaseFormatter(Formatter):
    def convert_field(self, value, conversion):
      # Extends the stock Formatter class with lower() and upper()
      # conversion types.
      if conversion == 'l':
        return str(value).lower()
      elif conversion == 'u':
        return str(value).upper()
      return super(CaseFormatter, self).convert_field(value, conversion)
    def get_value(self, key, args, kwargs_local):
      # The get_value() function sequentially applies user-supplied
      # and standard substitutions, and implements suitable defaults
      # in case a field name evaluates to None.  XXX Emit a warning
      # when this happens?
      if key in kwargs:
        return kwargs[key]
      value = super(CaseFormatter, self).get_value(key, args, kwargs_local)
      if value is None:
        if key == 'chunk':
          return -1
        elif key == 'expNum':
          return -1
        elif key == 'iso8601':
          return ''
        elif key == 'run':
          return -1
        elif key == 'seqno':
          return -1
        elif key == 'stream':
          return -1
      return value
  # NOTE(review): t[1] / 1000 relies on true division for sub-second
  # precision (Python 3); under Python 2 integer division would
  # truncate the millisecond part -- confirm intended.
  t = evt_time(evt)
  if t is not None:
    epoch = t[0] + t[1] / 1000
  else:
    epoch = None
  fmt = CaseFormatter()
  try:
    # psana
    expNum = env.expNum()
  except AttributeError:
    # pyana
    expNum = evt.expNum()
  try:
    # pyana
    chunk = evt.chunk()
  except AttributeError:
    # not supported in psana
    chunk = None
  try:
    # pyana
    stream = evt.stream()
  except AttributeError:
    # not supported in psana
    stream = None
  # If chunk or stream numbers cannot be determined, which may happen
  # if the XTC file has a non-standard name, evt.chunk() and
  # evt.stream() will return None.
  return fmt.format(format_string,
                    chunk=chunk,
                    epoch=epoch,
                    experiment=env.experiment(),
                    expNum=expNum,
                    instrument=env.instrument(),
                    iso8601=evt_timestamp(t),
                    jobName=env.jobName(),
                    jobNameSub=env.jobNameSub(),
                    run=evt.run(),
                    seqno=int(evt_seqno(evt)),
                    stream=stream,
                    subprocess=env.subprocess(),
                    user=getuser())
def get_ebeam(evt):
  """Return the EBeam data object for @p evt.

  The pyana getter is tried first; on psana the versioned
  BldDataEBeam types are probed from newest to oldest.

  @param evt Event data object
  @return    EBeam data object, or None if none could be retrieved
  """
  try:
    # pyana provides a direct getter.
    return evt.getEBeam()
  except AttributeError:
    # psana: probe the versioned BldDataEBeam types, newest first.
    # The unversioned type comes last; recent versions of psana will
    # return a V7 event or higher when that type is asked for.
    from psana import Source, Bld
    src = Source('BldInfo(EBeam)')
    for datatype in (Bld.BldDataEBeamV6, Bld.BldDataEBeamV5,
                     Bld.BldDataEBeamV4, Bld.BldDataEBeamV3,
                     Bld.BldDataEBeamV2, Bld.BldDataEBeamV1,
                     Bld.BldDataEBeamV0, Bld.BldDataEBeam):
      ebeam = evt.get(datatype, src)
      if ebeam is not None:
        return ebeam
    return None
def env_laser_status(env, laser_id):
  """Return a bool that indicates whether the laser in question was on
  for that particular shot.  Bear in mind that sample hit by the laser
  will only encounter the X-rays some time after, depending on the
  flow rate.
  """
  if env is None:
    return
  store = env.epicsStore()
  pv_in = store.value('CXI:LAS:SHT:%02i:IN' % laser_id)
  pv_out = store.value('CXI:LAS:SHT:%02i:OUT' % laser_id)
  if pv_in is None or pv_out is None:
    return
  _invalid = object()
  def _scalar(pv):
    # pyana wraps the reading in an object with a one-element values
    # list; psana hands back the bare scalar.
    if hasattr(pv, "values"):
      return pv.values[0] if len(pv.values) == 1 else _invalid
    return pv
  laser_off = _scalar(pv_in)
  laser_on = _scalar(pv_out)
  if laser_off is _invalid or laser_on is _invalid:
    return
  if laser_on and laser_off:
    # According to LCLS staff, this means the laser is not plugged in
    return False
  return bool(laser_on)
def env_injector_xyz(env):
  """Returns the coordinates of the sample injector.  XXX units unknown?"""
  if env is None:
    return None
  store = env.epicsStore()
  return tuple(store.value("CXI:USR:MZM:0%i:ENCPOSITIONGET" % axis)
               for axis in (1, 2, 3))
def env_detz(address, env):
  """The env_detz() function returns the position of the detector with
  the given address string on the z-axis in mm.  The zero-point is as
  far away as possible from the sample, and values decrease as the
  detector is moved towards the sample.

  @param address Full data source address of the DAQ device
  @param env     Environment object
  @return        Detector z-position, in mm, or None if unavailable
  """
  if env is None:
    return None
  detector = address_split(address, env)[0]
  if detector is None:
    return None
  store = env.epicsStore()
  if detector == 'CxiDs1':
    pv = store.value('CXI:DS1:MMS:06.RBV')
    if pv is None:
      # Even though potentially unsafe, fall back on the commanded
      # value if the corresponding read-back value cannot be read.
      # According to <NAME>, this particular motor has not
      # caused any problem in the past.
      pv = store.value('CXI:DS1:MMS:06')
    if pv is None:
      # Try the other detector.  These are sometimes inconsistent
      pv = store.value('CXI:DS2:MMS:06.RBV')
  elif detector in ('CxiDsd', 'CxiDs2'):
    # XXX Note inconsistency in naming: Dsd vs Ds2!
    pv = store.value('CXI:DS2:MMS:06.RBV')
    if pv is None:
      # Try the other detector.  These are sometimes inconsistent
      pv = store.value('CXI:DS1:MMS:06.RBV')
  elif detector in ('XppGon', 'XppEndstation', 'MfxEndstation'):
    # No distance is recorded for the XPP CSPAD or the XPP/MFX Rayonix
    # on the robot arm.  Always return zero to allow the distance to
    # be set using the offset.
    return 0
  else:
    return None
  if pv is None:
    return None
  # pyana wraps the reading in an object with a values list; psana
  # returns the bare scalar.
  if hasattr(pv, "values"):
    return pv.values[0] if len(pv.values) == 1 else None
  return pv
def env_distance(address, env, offset):
  """The env_distance() function returns the distance between the
  sample and the detector with the given address string in mm.  The
  distance between the sample and the the detector's zero-point can
  vary by an inch or more between different LCLS runs.  According to
  <NAME> the offset should be stable to within ±0.5 mm
  during a normal experiment.

  @param address Full data source address of the DAQ device
  @param env     Environment object
  @param offset  Detector-sample offset in mm, corresponding to
                 longest detector-sample distance
  @return        Detector-sample distance, in mm, or None
  """
  detz = env_detz(address, env)
  return detz + offset if detz is not None else None
def env_sifoil(env):
  """The env_sifoil() function returns the total thickness of Si-foil,
  in um, that attenuates the beam.  According to an e-mail from
  <NAME>, the centres of the attenuators are in the beam at around
  0 mm, and leave the beam at something like -7 mm.  The "out"
  position is at approximately -15 mm.

  @param env Environment object
  @return    Total thickness of attenuating Si-foil, in um, or None
             if @p env is None
  """
  if env is None:
    return None
  # Map from EPICS PV name to the thickness of Si-foil (um) inserted
  # by the corresponding attenuator.  XXX static?
  # NOTE: six.iteritems() was unnecessary here; dict.items() iterates
  # identically on both Python 2 and 3 for this small mapping.
  dia = { "XRT:DIA:MMS:02.RBV":    20,
          "XRT:DIA:MMS:03.RBV":    40,
          "XRT:DIA:MMS:04.RBV":    80,
          "XRT:DIA:MMS:05.RBV":   160,
          "XRT:DIA:MMS:06.RBV":   320,
          "XRT:DIA:MMS:07.RBV":   640,
          "XRT:DIA:MMS:08.RBV":  1280,
          "XRT:DIA:MMS:09.RBV":  2560,
          "XRT:DIA:MMS:10.RBV":  5120,
          "XRT:DIA:MMS:11.RBV": 10240 }
  si_tot = 0
  store = env.epicsStore()
  for pvname, si_len in dia.items():
    pv = store.value(pvname)
    # XXX Why is this an EpicsPvTime object?  The absorption
    # coefficient of Si is E-18 * n_{0} * lambda^2, (for lambda >= 5
    # um, Schroder, <NAME>., <NAME>, and <NAME>, IEEE
    # Trans. Electron. Dev. ED-25, 2(1978) 254-261).  See also
    # http://henke.lbl.gov/optical_constants/filter2.html
    if pv is None:  # and pv.units == "mm"
      continue
    # A centre position within 7 mm of zero means the attenuator is
    # in the beam and its thickness contributes to the total.
    if hasattr(pv, "values"):
      # pyana
      if len(pv.values) == 1 and abs(pv.values[0]) < 7:
        si_tot += si_len
    else:
      # psana
      if abs(pv) < 7:
        si_tot += si_len
  return si_tot
def env_wavelength_sxr(evt, env):
  """The env_wavelength_sxr() function returns the wavelength in
  Ångström of the environment pointed to by @p env at the time of the
  event @p evt.  The function returns a positive value or @c None if
  no wavelength is available for the event.  See Heimann et al. (2011)
  Rev. Sci. Instrum. 82, 093104.

  @note The wavelength in eV is 12398.4187 divided by the value
        returned from env_wavelength_sxr().

  @param evt Event data object, a configure object
  @param env Environment object
  @return    Wavelength, in Ångström, or None
  """
  from calendar import timegm
  from time import strptime
  if evt is None or env is None:
    return None
  t = evt.getTime()
  if t is None:
    return None
  es = env.epicsStore()
  if es is None:
    return None
  # Note that the monochromator coefficients could change from day to
  # day.  Unless specific values for the requested time are available,
  # attempt to retrieve them from EPICS.
  #
  # The compiler could recognize that strptime() and timegm() are pure
  # and reduce the test expression to an integer comparison.
  f = '%Y-%m-%d, %H:%M %Z'
  s = t.seconds()
  if s is None:
    return None
  # Use an explicit sentinel rather than probing "'abc' in locals()";
  # the locals() test was fragile and defeats static analysis.
  abc = None
  if s < timegm(strptime('2012-11-12, 17:00 UTC', f)):
    return None
  elif s < timegm(strptime('2012-11-17, 17:00 UTC', f)):
    abc = [+3.65920, -0.76851, +0.02105]
  elif s < timegm(strptime('2012-11-20, 17:00 UTC', f)):
    abc = [+4.18190, -0.77650, +0.01020]
  if abc is None:
    # No hard-coded coefficients for this date: fetch the polynomial
    # coefficients from EPICS instead.
    pv = []
    for name in ['SXR:IOC:POLY:POLY:Lambda:O1:G3:A',
                 'SXR:IOC:POLY:POLY:Lambda:O1:G3:B',
                 'SXR:IOC:POLY:POLY:Lambda:O1:G3:C']:
      pv.append(es.value(name))
      if pv[-1] is None or len(pv[-1].values) != 1:
        return None
      pv[-1] = pv[-1].values[0]
      if pv[-1] is None:
        return None
    abc = [pv[i] for i in range(3)]
  # Get the grating motor position from EPICS and evaluate the
  # second-degree polynomial (nm -> Ångström via the factor 10).
  pv = es.value('SXR:MON:MMS:06.RBV')
  if pv is not None and len(pv.values) == 1:
    x = pv.values[0]
    e = 10 * (abc[0] + abc[1] * x + abc[2] * x**2)
    if e > 0:
      return e
  return None
def evt_pulse_energy(evt):
  """The evt_pulse_energy() function returns the energy, or the
  intensity, of the pulse in arbitrary units.  The returned value
  should be proportional to the number of photons in the pulse, and
  may be negative due to noise.

  @note An absolute, but less accurate, estimate of the number of
        photons in the pulse may be obtained from the gas monitor
        detector's fMilliJoulesPerPulse value.

  @param evt Event data object, a configure object
  @return    Pulse intensity, in arbitrary units, or None
  """
  if evt is None:
    return None
  # Import lazily and only after the None guard, so that callers
  # passing no event do not require pypdsdata to be installed.
  from pypdsdata.xtc import TypeId
  gmd = evt.get(key=TypeId.Type.Id_GMD)
  if hasattr(gmd, 'fRelativeEnergyPerPulse') and evt.expNum() == 208:
    # Note that for L632 (experiment number 208)
    # fRelativeEnergyPerPulse actually gives the negated value
    # sought.  Details are given in Moeller, S. (2012) "GMD Look
    # up Sheet for variable names in the DAQ (BLD) versus the C++
    # code".
    return -gmd.fRelativeEnergyPerPulse
  elif hasattr(gmd, 'fCorrectedSumPerPulse'):
    # This relatively pressure-independent quantity in arbitrary
    # units is preferable.  It is also known as
    # SXR:GMD:BLD:CumSumAllPeaks.
    return gmd.fCorrectedSumPerPulse
  return None
def evt_pulse_length(evt):
  """The evt_pulse_length() function returns the pulse length in fs.
  It is calculated as the ratio of the charge (in nC) and the peak
  current (in A).

  @param evt Event data object, a configure object
  @return    Pulse length, in fs, or None if unavailable
  """
  if evt is None:
    return None
  ebeam = get_ebeam(evt)
  if ebeam is None:
    return None
  try:
    # pyana exposes plain attributes.
    if ebeam.fEbeamPkCurrBC2 > 0:
      return 1e6 * ebeam.fEbeamCharge / ebeam.fEbeamPkCurrBC2
  except AttributeError:
    # psana exposes accessor methods instead.
    if ebeam.ebeamPkCurrBC2() > 0:
      return 1e6 * ebeam.ebeamCharge() / ebeam.ebeamPkCurrBC2()
  return None
def evt_repetition_rate(evt, address='*'):
  """The evt_repetition_rate() function returns the repetition rate of
  the instrument in Hz.  See
  https://confluence.slac.stanford.edu/display/PCDS/EVR+Event+Codes

  @param evt     Event data object, a configure object
  @param address Data source address of the DAQ device
  @return        Integer repetition rate, in Hz, or None
  """
  evr = evt.getEvrData(address)
  if evr is None:
    return None
  rates = [120, 60, 30, 10, 5, 1]
  # Search backwards for the last repetition-rate event code.
  for idx in reversed(range(evr.numFifoEvents())):
    code = evr.fifoEvent(idx).EventCode
    if 40 <= code <= 45:
      # These are the NO BEAM event codes.
      return rates[code - 40]
    if 140 <= code <= 145:
      # These are the undocumented BEAM event codes.
      return rates[code - 140]
  return None
def evt_beam_charge(evt):
  """The evt_beam_charge() function returns the charge of the pulse in
  nC.

  @param evt Event data object, a configure object
  @return    Pulse charge, in nC, or None if unavailable
  """
  if evt is None:
    return None
  ebeam = get_ebeam(evt)
  if ebeam is None:
    return None
  try:
    # pyana: re-fetch via getEBeam() and read the plain attribute.
    ebeam = evt.getEBeam()
    return ebeam.fEbeamCharge
  except AttributeError:
    # psana: fall back on the accessor method of the object obtained
    # from get_ebeam() above.
    return ebeam.ebeamCharge()
def evt_seqno(evt=None):
  """The evt_seqno() function returns string representation of a
  sequence number.  If @p evt is not @c None the return value reflects
  the time at which @p evt occurred, otherwise the current time is
  used.  If @p evt does not contain a time, evt_seqno() returns @c
  None.  XXX Should probably return an integer type instead?

  @param evt Event data object, a configure object
  @return    String representation of sequence number
  """
  t = evt_time(evt=evt)
  if t is None:
    return None
  # Seconds formatted as YYYYmmddHHMMSS followed by zero-padded
  # milliseconds.
  return "%s%03d" % (time.strftime("%Y%m%d%H%M%S", time.gmtime(t[0])), t[1])
def evt_time(evt=None):
  """The evt_time() function returns the time of the event @p evt since
  midnight, 1 January 1970 UTC (Unix time) to millisecond precision.
  If @p evt does not contain a time, evt_time() returns @c None.  If
  @p evt is @c None the current time is used.

  @note Millisecond precision is sufficient, because at 120 Hz, shots
        are taken at 8.3 ms intervals.

  @param evt Event data object, a configure object
  @return    Unix time as a tuple of seconds and milliseconds
  """
  if evt is None:
    t = time.time()
    s = int(math.floor(t))
    ms = int(round((t - s) * 1000))
    # BUGFIX: rounding can push the millisecond component to 1000
    # (e.g. for fractional parts >= 0.9995); carry it into the
    # seconds so that the invariant 0 <= ms < 1000 always holds.
    if ms >= 1000:
      s += ms // 1000
      ms %= 1000
    return (s, ms)
  if hasattr(evt, "getTime"):
    # pyana
    t = evt.getTime()
    if t is None:
      return None
    return (t.seconds(), t.nanoseconds() // 1000000)
  else:
    # psana
    from psana import EventId
    id = evt.get(EventId)
    return (id.time()[0], id.time()[1] // 1000000)
def evt_timestamp(t=None):
  """The evt_timestamp() function returns a string representation of
  an extended human-readable ISO 8601 timestamp.  If @p t is @c None
  the current time is used.  The function returns @c None on failure.

  @param t Tuple of the time in seconds and milliseconds
  @return  Human-readable ISO 8601 timestamp in string representation
  """
  if t is None:
    t = evt_time(evt=None)
  if t is None:
    return None
  stamp = time.strftime("%Y-%m-%dT%H:%MZ%S", time.gmtime(t[0]))
  return "%s.%03d" % (stamp, t[1])
def evt_wavelength(evt, delta_k=0):
  """The evt_wavelength() function returns the wavelength in Ångström
  of the event pointed to by @p evt.  From Margaritondo & Rebernik
  Ribic (2011): the dimensionless relativistic γ-factor is derived
  from beam energy in MeV and the electron rest mass, K is a
  dimensionless "undulator parameter", and L is the macroscopic
  undulator period in Ångström.  See also
  https://people.eecs.berkeley.edu/~attwood/srms/2007/Lec10.pdf

  @param evt     Event data object, a configure object
  @param delta_k Optional K-value correction
  @return        Wavelength, in Ångström, or None
  """
  if evt is None:
    return None
  ebeam = get_ebeam(evt)
  # Prefer the directly recorded photon energy when it is present and
  # positive.
  if hasattr(ebeam, 'fEbeamPhotonEnergy') and ebeam.fEbeamPhotonEnergy > 0:
    # pyana
    return 12398.4187 / ebeam.fEbeamPhotonEnergy
  if hasattr(ebeam, 'ebeamPhotonEnergy') and ebeam.ebeamPhotonEnergy() > 0:
    # psana
    return 12398.4187 / ebeam.ebeamPhotonEnergy()
  # Otherwise derive the wavelength from the L3 beam energy via the
  # undulator equation.
  if hasattr(ebeam, 'fEbeamL3Energy') and ebeam.fEbeamL3Energy > 0:
    # pyana
    gamma = ebeam.fEbeamL3Energy / 0.510998910
  elif hasattr(ebeam, 'ebeamL3Energy') and ebeam.ebeamL3Energy() > 0:
    # psana
    gamma = ebeam.ebeamL3Energy() / 0.510998910
  else:
    return None
  K = 3.5 + delta_k
  L = 3.0e8
  return L / (2 * gamma**2) * (1 + K**2 / 2)
def old_address_to_new_address(address):
  """Change between old and new style detector addresses.
  I.E. CxiDs1-0|Cspad-0 becomes CxiDs1.0:Cspad.0

  @param address detector address to convert
  """
  converted = address.replace('-', '.')
  return converted.replace('|', ':')
def getConfig(address, env):
  """Given a detector address, find the config object in an env object
  that goes with it.

  @param address detector address
  @param env     environment object to search
  """
  if not hasattr(env, 'configStore'):
    # Try the pyana method for older data
    from pypdsdata.xtc import TypeId
    return env.getConfig(TypeId.Type.Id_CspadConfig, address)
  # psana: scan the configure store for the first key whose source
  # matches the (new-style) address and that carries a type.
  address = old_address_to_new_address(address)
  for key in env.configStore().keys():
    if address in str(key.src()) and key.type() is not None:
      return env.configStore().get(key.type(), key.src())
  return None
def getOptBool(s):
  """Parse a configuration option into a bool.  A missing value or the
  string "None" is treated as False; an actual bool is passed through;
  anything else is True exactly when it reads "true" (case-insensitive,
  surrounding whitespace ignored).
  """
  if s is None or s == "None":
    return False
  if isinstance(s, bool):
    return s
  return s.strip().lower() == "true"
def getOptEvalOrString(s):
  """Allow python code macros in the pyana configuration file, e.g.
  dark_path = "/location_of_darks/r%%04d/Ds1-avg.pickle"%%(max([{True:dark,False:0}[3 > dark] for dark in [1,2,6,9,12,14,17,19]]))
  """
  value = getOptString(s)
  try:
    # SECURITY NOTE(review): eval() executes arbitrary expressions from
    # the configuration file; only use with trusted configuration input.
    return eval(value, {}, {})
  except (SyntaxError, TypeError):
    # Not an evaluable expression: hand back the plain string.
    return value
def getOptString(s):
  """XXX Return the string, strip of any white space (make sure there
  are no newline characters here).  This function was originally
  written for pyana's XtcExplorer module.
  """
  if s is None:
    return None
  stripped = s.strip()
  # Treat the conventional "empty" spellings as no value at all.
  if stripped in ("", "No", "None"):
    return None
  return stripped
def getOptStrings(s, default=None):
  """XXX Return a list of strings.  This function was originally
  written for pyana's XtcExplorer module.
  """
  if s is None:
    return default
  # Strip surrounding whitespace and fold newlines into spaces before
  # tokenising.
  flattened = " ".join(s.strip().split("\n"))
  tokens = flattened.split()
  # An empty token list, or a lone conventional "empty" spelling,
  # yields the empty list.
  if not tokens:
    return []
  if len(tokens) == 1 and flattened in ("", "No", "None"):
    return []
  return tokens
def getOptInteger(s):
  """XXX Return a single integer.  This function was originally
  written for pyana's XtcExplorer module.  XXX What if conversion
  fails?
  """
  # None and the conventional "empty" spellings mean no value.
  if s is None or s == "" or s == "None":
    return None
  return int(s)
def getOptFloat(s):
  """Return a single float, or None for a missing/empty value."""
  # None and the conventional "empty" spellings mean no value.
  if s is None or s == "" or s == "None":
    return None
  return float(s)
def getOptROI(s):
  """Return a tuple of the region of interest.
  Format: roi = fast_low:fast_high,slow_low:slow_high
  """
  roi_str = getOptString(s)
  if roi_str is None or roi_str == "":
    return None
  # Accept optional surrounding parentheses; the two comma-separated
  # fields hold the fast and slow colon-separated intervals.
  intervals = roi_str.strip("()").split(",")
  fast = intervals[0].split(":")
  slow = intervals[1].split(":")
  bounds = [fast[0], fast[1], slow[0], slow[1]]
  # An empty component means "unbounded" and maps to None.
  return tuple(int(b) if b != "" else None for b in bounds)
def image(address, config, evt, env, sections=None):
  """Assemble the uint16 detector image, and sum it up as int32.  Sum
  the image of squared intensities as uint64.  XXX Documentation!  XXX
  Would be nice to get rid of the constant string names.  XXX Better
  named evt_image()?

  @param address  Full data source address of the DAQ device
  @param config   XXX This should go--get up-to-date object on the fly!
  @param evt      Event data object, a configure object
  @param env      Environment object
  @param sections XXX
  @return         Assembled detector image, or None if the device is
                  unrecognised or no data is present for this event
  """
  # Dispatch on the device component of the address.
  device = address_split(address)[2]
  if device is None:
    return None
  elif device == 'Andor':
    # XXX There is no proper getter for Andor frames yet, and
    # evt.getFrameValue(address) does not appear to work.
    from pypdsdata.xtc import TypeId
    value = evt.get(TypeId.Type.Id_AndorFrame, address)
    if value is not None:
      img = value.data()
      return img
  elif device == 'Cspad':
    if sections is not None:
      return CsPadDetector(address, evt, env, sections)
    else:
      # XXX This is obsolete code, provided for backwards
      # compatibility with the days before detector metrology was
      # used.
      assert False # sections always required now as of Sep 1 2014
      quads = evt.getCsPadQuads(address, env)
      qimages = numpy.empty((4, npix_quad, npix_quad), dtype='uint16')
      for q in quads:
        qimages[q.quad()] = CsPadElement(q.data(), q.quad(), config)
      # Quadrants are tiled 2x2: (0, 1) on top of (3, 2).
      return numpy.vstack((numpy.hstack((qimages[0], qimages[1])),
                           numpy.hstack((qimages[3], qimages[2]))))
  elif device == 'Cspad2x2':
    from pypdsdata.xtc import TypeId
    quads = evt.get(TypeId.Type.Id_Cspad2x2Element, address)
    if quads is not None:
      return CsPad2x2Image(quads.data(), config, sections)
  elif device == 'pnCCD':
    value = evt.getPnCcdValue(address, env)
    if value is not None:
      # Returns the image data as a numpy 1024-by-1024 uint16 array
      # XXX Should be split up into tiles (halves) to allow metrology
      # to be adjusted?  Will require a sections parameter!
      img = value.data()
      # Deal with overflows.  XXX This might be dependent on the
      # particular version of pyana.  CASS ignores the two most
      # significant bits, which is different from what is done below,
      # but they are said to contain data which could be used.
      img[img > 2**14 - 1] = 2**14 - 1
      return img
  # Unknown device, or no data available for this event.
  return None
def image_xpp(address, evt, env, aa, quads = None):
  """Assemble the uint16 detector image, see also
  cspad_tbx.CsPadDetector().  XXX Documentation!  XXX Would be nice to
  get rid of the constant string names.  XXX Better named evt_image()?

  @param address Full data source address of the DAQ device
  @param evt     Event data object, a configure object
  @param env     Environment object
  @param aa      Active areas, in lieu of full metrology object; four
                 integers per ASIC, used below as
                 det[aa[i+0]:aa[i+2], aa[i+1]:aa[i+3]]
  @param quads   Data, if None get it from the event
  @return        Assembled 1765x1765 detector image, or None
  """
  # Only the XPP CSPAD is supported by this function.
  if address != 'XppGon-0|Cspad-0':
    return None
  # Get a current configure object for the detector
  config = getConfig(address, env)
  if config is None:
    return None
  if quads is None:
    # For consistency, one could/should verify that len(quads) is equal
    # to len(sections).
    quads = evt_get_quads(address, evt, env)
  # 8 sensors per quadrant, 2 ASICs per sensor, 4 ints per ASIC.
  if quads is None or len(quads) != len(aa) // (8 * 2 * 4):
    return None
  # Start out with a blank image of the detector.  The reference
  # HDF5Explorer/src/ConfigCSpad.py uses a detector
  # size of 1765-by-1765 pixels.  This assumes that the type of the
  # first section in the first quadrant is identical to the type of
  # all the other sections.
  det = numpy.zeros((1765, 1765), dtype=quads[0].data()[0].dtype)
  for quad in quads:
    q_data = quad.data()
    q_idx = quad.quad()
    try:
      # pyana
      # example: if the third sensor (2x1) is disabled, q_mask = [0,1,3,4,5,6,7]
      q_mask = config.sections(q_idx)
    except AttributeError:
      # psana
      # as above, using config.roiMask, a bitstring where the ith bit is true if the ith sensor is active. x << y means bitwise shift
      # x, y times, and & is the bitwise AND operator
      q_mask = [i for i in range(config.numSect()//config.numQuads()) if 1 << i & config.roiMask(q_idx)]
    # For consistency, one could/should verify that len(q_data) is
    # equal to len(sections[q_idx]).
    assert len(q_data) == len(q_mask)
    for (s_data, s_idx) in zip(q_data, q_mask):
      # Rotate the "lying down" sensor readout from the XTC stream by
      # an integer multiple of 90 degrees to match the orientation on
      # the detector.  This assumes that the horizontal dimension of
      # the unrotated sensor is even.  Note that the XPP CSPAD is
      # rotated by 180 degrees with respect to the optical metrology
      # measurements.
      assert s_data.shape[1] % 2 == 0
      if q_idx == 0 and s_idx in [2, 3, 6, 7] or \
         q_idx == 1 and s_idx in [0, 1] or \
         q_idx == 3 and s_idx in [4, 5]:
        asics = numpy.hsplit(numpy.rot90(s_data, 0 + 2), 2)
        asics.reverse()
      elif q_idx == 0 and s_idx in [0, 1] or \
           q_idx == 2 and s_idx in [4, 5] or \
           q_idx == 3 and s_idx in [2, 3, 6, 7]:
        asics = numpy.vsplit(numpy.rot90(s_data, 1 + 2), 2)
      elif q_idx == 1 and s_idx in [4, 5] or \
           q_idx == 2 and s_idx in [2, 3, 6, 7] or \
           q_idx == 3 and s_idx in [0, 1]:
        asics = numpy.hsplit(numpy.rot90(s_data, 2 + 2), 2)
      elif q_idx == 0 and s_idx in [4, 5] or \
           q_idx == 1 and s_idx in [2, 3, 6, 7] or \
           q_idx == 2 and s_idx in [0, 1]:
        asics = numpy.vsplit(numpy.rot90(s_data, 3 + 2), 2)
        asics.reverse()
      else:
        # NOTREACHED
        return None
      # Use the active areas to place the two ASICS on the
      # destination detector image.
      for a_idx in range(len(asics)):
        # Index into the flat active-area table: 4 ints per ASIC.
        aa_idx = q_idx * (8 * 2 * 4) + s_idx * (2 * 4) + a_idx * 4
        det[aa[aa_idx + 0]:aa[aa_idx + 2],
            aa[aa_idx + 1]:aa[aa_idx + 3]] = asics[a_idx]
  return det
def iplace(dst, src, angle, center):
  """The iplace() function places @p src in @p dst centred on @p
  center after rotating it by @p angle degrees counter-clockwise.
  The source image is mapped onto the destination image by bilinear
  interpolation.  While this may introduce interpolation artifacts
  it is significantly simpler than many other interpolation
  methods--and bog slow.

  @p dst    Destination image (modified in place; covered pixels are
            overwritten, not accumulated)
  @p src    Source image
  @p angle  Rotation angle, in degrees
  @p center Centre of @p src in @p dst, after rotation
  """
  a = math.radians(angle)
  c = math.cos(a)
  s = math.sin(a)
  # Find the origin-centred bounding box of the rotated source
  # image.  Due to the symmetry of a rectangle, the extrema can be
  # determined by the transformed coordinates of two adjacent
  # corners.
  hsize = [0.5 * max(abs(c * src.shape[0] - s * src.shape[1]),
                     abs(c * src.shape[0] + s * src.shape[1])),
           0.5 * max(abs(s * src.shape[0] + c * src.shape[1]),
                     abs(s * src.shape[0] - c * src.shape[1]))]
  xlim = [int(math.floor(-hsize[0])),
          int(math.ceil( +hsize[0])) + 1]
  ylim = [int(math.floor(-hsize[1])),
          int(math.ceil( +hsize[1])) + 1]
  # For each pixel in the bounding box, determine the real-valued
  # components in coordinate system of the untransformed source
  # image, (xp, yp).  Then do bilinear interpolation based on the
  # four pixels with integer coordinates around (xp, yp).
  for x in range(xlim[0], xlim[1]):
    for y in range(ylim[0], ylim[1]):
      # Inverse rotation: map destination pixel back to source space.
      xp = c * x + s * y + 0.5 * src.shape[0]
      yp = -s * x + c * y + 0.5 * src.shape[1]
      if (xp >= 0 and math.ceil(xp) < src.shape[0] and
          yp >= 0 and math.ceil(yp) < src.shape[1]):
        # Floor/ceil neighbours and fractional offsets for the
        # bilinear weights.
        xi =[int(math.floor(xp)), int(math.ceil(xp))]
        yi =[int(math.floor(yp)), int(math.ceil(yp))]
        xf = xp - xi[0]
        yf = yp - yi[0]
        dst[int(round(x + center[0])),
            int(round(y + center[1]))] = \
          src[xi[0], yi[0]] * (1 - xf) * (1 - yf) + \
          src[xi[1], yi[0]] * xf * (1 - yf) + \
          src[xi[0], yi[1]] * (1 - xf) * yf + \
          src[xi[1], yi[1]] * xf * yf
def rplace(dst, src, angle, center):
  """The rplace() function places @p src in @p dst centred on @p
  centre after rotating it by @p angle degrees counter-clockwise.
  The rotation angle is rounded to the nearest integer multiple of
  90 degrees before transformation.

  @p dst    Destination image (modified in place)
  @p src    Source image
  @p angle  Rotation angle, in degrees
  @p center Centre of @p src in @p dst, after rotation
  """
  # Snap the angle to a quarter-turn count and rotate the source.
  quarter_turns = int(round(angle / 90.0)) % 4
  rot = numpy.rot90(src, quarter_turns)
  # Upper-left corner of the rotated image in the destination.
  row0 = int(round(center[0] - 0.5 * rot.shape[0]))
  col0 = int(round(center[1] - 0.5 * rot.shape[1]))
  dst[row0:(row0 + rot.shape[0]),
      col0:(col0 + rot.shape[1])] = rot
# For the moment, the XPP CSPAD detector's metrology is stored here
# as a series of active areas.
# Each group of four consecutive integers in 'active_areas' gives the
# bounds of one ASIC; image_xpp() consumes them as
# det[aa[i+0]:aa[i+2], aa[i+1]:aa[i+3]], i.e. (row_min, col_min,
# row_max, col_max).  'rotations' holds one quarter-turn count per
# ASIC.
_xpp_active_areas = {
  'XPP 7.1': { # metrology recorded 1/24/13 and processed by flatfile.py
    'active_areas': flex.int([
      865, 1121, 1059, 1306, 1062, 1121, 1256, 1306,
      864, 909, 1058, 1094, 1061, 909, 1255, 1094,
      1083, 1534, 1268, 1728, 1083, 1337, 1268, 1531,
      871, 1538, 1056, 1732, 871, 1341, 1056, 1535,
      1495, 1326, 1689, 1511, 1298, 1326, 1492, 1511,
      1496, 1539, 1690, 1724, 1299, 1539, 1493, 1724,
      1482, 1105, 1667, 1299, 1482, 908, 1667, 1102,
      1270, 1107, 1455, 1301, 1270, 910, 1455, 1104,
      1123, 706, 1308, 900, 1123, 509, 1308, 703,
      910, 706, 1095, 900, 910, 509, 1095, 703,
      1535, 498, 1729, 683, 1338, 498, 1532, 683,
      1534, 711, 1728, 896, 1337, 711, 1531, 896,
      1324, 77, 1509, 271, 1324, 274, 1509, 468,
      1537, 75, 1722, 269, 1537, 272, 1722, 466,
      1104, 97, 1298, 282, 907, 97, 1101, 282,
      1105, 310, 1299, 495, 908, 310, 1102, 495,
      706, 457, 900, 642, 509, 457, 703, 642,
      705, 669, 899, 854, 508, 669, 702, 854,
      496, 36, 681, 230, 496, 233, 681, 427,
      709, 38, 894, 232, 709, 235, 894, 429,
      77, 256, 271, 441, 274, 256, 468, 441,
      77, 44, 271, 229, 274, 44, 468, 229,
      98, 467, 283, 661, 98, 664, 283, 858,
      311, 467, 496, 661, 311, 664, 496, 858,
      457, 867, 642, 1061, 457, 1064, 642, 1258,
      670, 865, 855, 1059, 670, 1062, 855, 1256,
      37, 1084, 231, 1269, 234, 1084, 428, 1269,
      37, 871, 231, 1056, 234, 871, 428, 1056,
      256, 1495, 441, 1689, 256, 1298, 441, 1492,
      43, 1497, 228, 1691, 43, 1300, 228, 1494,
      469, 1481, 663, 1666, 666, 1481, 860, 1666,
      467, 1269, 661, 1454, 664, 1269, 858, 1454]),
    'rotations' : flex.int([
      3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
      2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
      1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
      0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
    ])
  },
  'XPP 7.2': { # metrology recorded 1/29/13 and processed by flatfile.py
    'active_areas': flex.int([
      868, 1122, 1062, 1307, 1065, 1122, 1259, 1307,
      868, 910, 1062, 1095, 1065, 910, 1259, 1095,
      1087, 1534, 1272, 1728, 1087, 1337, 1272, 1531,
      874, 1536, 1059, 1730, 874, 1339, 1059, 1533,
      1497, 1328, 1691, 1513, 1300, 1328, 1494, 1513,
      1499, 1541, 1693, 1726, 1302, 1541, 1496, 1726,
      1483, 1105, 1668, 1299, 1483, 908, 1668, 1102,
      1271, 1106, 1456, 1300, 1271, 909, 1456, 1103,
      1122, 705, 1307, 899, 1122, 508, 1307, 702,
      909, 705, 1094, 899, 909, 508, 1094, 702,
      1534, 497, 1728, 682, 1337, 497, 1531, 682,
      1533, 710, 1727, 895, 1336, 710, 1530, 895,
      1323, 76, 1508, 270, 1323, 273, 1508, 467,
      1536, 75, 1721, 269, 1536, 272, 1721, 466,
      1103, 97, 1297, 282, 906, 97, 1100, 282,
      1103, 310, 1297, 495, 906, 310, 1100, 495,
      705, 456, 899, 641, 508, 456, 702, 641,
      704, 669, 898, 854, 507, 669, 701, 854,
      495, 35, 680, 229, 495, 232, 680, 426,
      707, 38, 892, 232, 707, 235, 892, 429,
      75, 256, 269, 441, 272, 256, 466, 441,
      75, 43, 269, 228, 272, 43, 466, 228,
      97, 467, 282, 661, 97, 664, 282, 858,
      310, 466, 495, 660, 310, 663, 495, 857,
      456, 866, 641, 1060, 456, 1063, 641, 1257,
      669, 865, 854, 1059, 669, 1062, 854, 1256,
      36, 1084, 230, 1269, 233, 1084, 427, 1269,
      35, 870, 229, 1055, 232, 870, 426, 1055,
      254, 1494, 439, 1688, 254, 1297, 439, 1491,
      42, 1496, 227, 1690, 42, 1299, 227, 1493,
      468, 1481, 662, 1666, 665, 1481, 859, 1666,
      465, 1268, 659, 1453, 662, 1268, 856, 1453]),
    'rotations' : flex.int([
      3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
      2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
      1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
      0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
    ])
  },
  'XPP 8.1': { # metrology recorded 10/09/13 and processed by flatfile.py
    'active_areas': flex.int([
      863, 1118, 1057, 1303, 1060, 1118, 1254, 1303,
      865, 913, 1059, 1098, 1062, 913, 1256, 1098,
      1070, 1532, 1255, 1726, 1070, 1335, 1255, 1529,
      863, 1532, 1048, 1726, 863, 1335, 1048, 1529,
      1484, 1335, 1678, 1520, 1287, 1335, 1481, 1520,
      1484, 1543, 1678, 1728, 1287, 1543, 1481, 1728,
      1475, 1110, 1660, 1304, 1475, 913, 1660, 1107,
      1268, 1109, 1453, 1303, 1268, 912, 1453, 1106,
      1119, 707, 1304, 901, 1119, 510, 1304, 704,
      912, 707, 1097, 901, 912, 510, 1097, 704,
      1533, 506, 1727, 691, 1336, 506, 1530, 691,
      1533, 715, 1727, 900, 1336, 715, 1530, 900,
      1334, 84, 1519, 278, 1334, 281, 1519, 475,
      1541, 85, 1726, 279, 1541, 282, 1726, 476,
      1108, 103, 1302, 288, 911, 103, 1105, 288,
      1108, 311, 1302, 496, 911, 311, 1105, 496,
      706, 460, 900, 645, 509, 460, 703, 645,
      706, 666, 900, 851, 509, 666, 703, 851,
      507, 38, 692, 232, 507, 235, 692, 429,
      713, 38, 898, 232, 713, 235, 898, 429,
      82, 241, 276, 426, 279, 241, 473, 426,
      82, 37, 276, 222, 279, 37, 473, 222,
      103, 459, 288, 653, 103, 656, 288, 850,
      310, 460, 495, 654, 310, 657, 495, 851,
      460, 862, 645, 1056, 460, 1059, 645, 1253,
      666, 863, 851, 1057, 666, 1060, 851, 1254,
      38, 1070, 232, 1255, 235, 1070, 429, 1255,
      38, 864, 232, 1049, 235, 864, 429, 1049,
      242, 1484, 427, 1678, 242, 1287, 427, 1481,
      37, 1484, 222, 1678, 37, 1287, 222, 1481,
      458, 1475, 652, 1660, 655, 1475, 849, 1660,
      459, 1267, 653, 1452, 656, 1267, 850, 1452]),
    'rotations' : flex.int([
      3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
      2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
      1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
      0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
    ])
  },
  #SOME BIG ISSUES REMAIN WITH Sacla.MPCCD.8tile format
  # Evidently the data from Takanori 22 Sep 2015 already has slight rotation
  # applied to the MPCCD modules, as the data rectangles displayed in cctbx.image_viewer are tilted
  # This is inconsistent with the expectation that npy.py should get the raw data, not preprocessed.
  'Sacla.MPCCD.8tile': { # as given by Takanori 22 Sep 2015
    'active_areas': flex.int([
      112, 189, 622, 1212, 647, 188, 1156, 1212,
      1180, 140, 1691, 1163, 1714, 140, 2226, 1163,
      159, 1231, 671, 2254, 694, 1230, 1206, 2253,
      1229, 1180, 1740, 2203, 1762, 1180, 2274, 2202,
    ]),
    'rotations' : flex.int([
      0,0,0,0,0,0,0,0,
    ])
  },
}
# XPP 9.1 and XPP 11.1 reuse the XPP 8.1 metrology (shared object, not
# a copy).
_xpp_active_areas['XPP 11.1'] = _xpp_active_areas['XPP 9.1'] = _xpp_active_areas['XPP 8.1']
# Public alias for the module-private table above.
xpp_active_areas = _xpp_active_areas
| [
"numpy.hstack",
"math.floor",
"math.cos",
"numpy.rot90",
"getpass.getuser",
"os.path.isdir",
"numpy.empty",
"numpy.vstack",
"six.moves.zip",
"time.strptime",
"re.match",
"math.radians",
"os.path.dirname",
"libtbx.easy_pickle.dump",
"time.time",
"time.gmtime",
"six.iteritems",
"nump... | [((3306, 3405), 're.match', 're.match', (['"""^(?P<det>\\\\S+)\\\\-(?P<det_id>\\\\d+)\\\\|(?P<dev>\\\\S+)\\\\-(?P<dev_id>\\\\d+)$"""', 'address'], {}), "(\n '^(?P<det>\\\\S+)\\\\-(?P<det_id>\\\\d+)\\\\|(?P<dev>\\\\S+)\\\\-(?P<dev_id>\\\\d+)$',\n address)\n", (3314, 3405), False, 'import re\n'), ((3515, 3614), 're.match', 're.match', (['"""^(?P<det>\\\\S+)\\\\.(?P<det_id>\\\\d+)\\\\:(?P<dev>\\\\S+)\\\\.(?P<dev_id>\\\\d+)$"""', 'address'], {}), "(\n '^(?P<det>\\\\S+)\\\\.(?P<det_id>\\\\d+)\\\\:(?P<dev>\\\\S+)\\\\.(?P<dev_id>\\\\d+)$',\n address)\n", (3523, 3614), False, 'import re\n'), ((3739, 3852), 're.match', 're.match', (['"""^DetInfo\\\\((?P<det>\\\\S+)\\\\.(?P<det_id>\\\\d+)\\\\:(?P<dev>\\\\S+)\\\\.(?P<dev_id>\\\\d+)\\\\)$"""', 'address'], {}), "(\n '^DetInfo\\\\((?P<det>\\\\S+)\\\\.(?P<det_id>\\\\d+)\\\\:(?P<dev>\\\\S+)\\\\.(?P<dev_id>\\\\d+)\\\\)$'\n , address)\n", (3747, 3852), False, 'import re\n'), ((5186, 5196), 'scitbx.array_family.flex.int', 'flex.int', ([], {}), '()\n', (5194, 5196), False, 'from scitbx.array_family import flex\n'), ((10115, 10150), 'numpy.zeros', 'numpy.zeros', (['(2 * 185, 2 * 194 + 3)'], {}), '((2 * 185, 2 * 194 + 3))\n', (10126, 10150), False, 'import numpy\n'), ((10503, 10511), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (10508, 10511), False, 'from six.moves import range\n'), ((15352, 15360), 'six.moves.range', 'range', (['(8)'], {}), '(8)\n', (15357, 15360), False, 'from six.moves import range\n'), ((15891, 15946), 'numpy.zeros', 'numpy.zeros', (['(npix_quad, npix_quad)'], {'dtype': 'data3d.dtype'}), '((npix_quad, npix_quad), dtype=data3d.dtype)\n', (15902, 15946), False, 'import numpy\n'), ((15962, 15970), 'six.moves.range', 'range', (['(8)'], {}), '(8)\n', (15967, 15970), False, 'from six.moves import range\n'), ((20807, 20823), 'six.iteritems', 'six.iteritems', (['d'], {}), '(d)\n', (20820, 20823), False, 'import six\n'), ((22029, 22074), 'os.path.join', 'os.path.join', (['dirname', "(basename + s + 
'.tiff')"], {}), "(dirname, basename + s + '.tiff')\n", (22041, 22074), False, 'import os\n'), ((22395, 22419), 'cv2.imwrite', 'cv2.imwrite', (['path', 'idata'], {}), '(path, idata)\n', (22406, 22419), False, 'import cv2\n'), ((23697, 23744), 'os.path.join', 'os.path.join', (['dirname', "(basename + s + '.pickle')"], {}), "(dirname, basename + s + '.pickle')\n", (23709, 23744), False, 'import os\n'), ((23747, 23772), 'libtbx.easy_pickle.dump', 'easy_pickle.dump', (['path', 'd'], {}), '(path, d)\n', (23763, 23772), False, 'from libtbx import easy_pickle\n'), ((24144, 24165), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (24159, 24165), False, 'import os\n'), ((24245, 24272), 'libtbx.easy_pickle.dump', 'easy_pickle.dump', (['path', 'obj'], {}), '(path, obj)\n', (24261, 24272), False, 'from libtbx import easy_pickle\n'), ((35169, 35187), 'six.iteritems', 'six.iteritems', (['dia'], {}), '(dia)\n', (35182, 35187), False, 'import six\n'), ((54636, 54655), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (54648, 54655), False, 'import math\n'), ((54664, 54675), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (54672, 54675), False, 'import math\n'), ((54684, 54695), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (54692, 54695), False, 'import math\n'), ((55620, 55643), 'six.moves.range', 'range', (['xlim[0]', 'xlim[1]'], {}), '(xlim[0], xlim[1])\n', (55625, 55643), False, 'from six.moves import range\n'), ((5677, 5685), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (5682, 5685), False, 'from six.moves import range\n'), ((10735, 10774), 'numpy.zeros', 'numpy.zeros', (['(3, 185)'], {'dtype': 'data.dtype'}), '((3, 185), dtype=data.dtype)\n', (10746, 10774), False, 'import numpy\n'), ((10790, 10829), 'numpy.vstack', 'numpy.vstack', (['(asics[0], gap, asics[1])'], {}), '((asics[0], gap, asics[1]))\n', (10802, 10829), False, 'import numpy\n'), ((13918, 13937), 'six.moves.zip', 'zip', (['q_data', 'q_mask'], {}), '(q_data, q_mask)\n', (13921, 
13937), False, 'from six.moves import zip\n'), ((15170, 15213), 'numpy.zeros', 'numpy.zeros', (['(185, 388)'], {'dtype': 'data3d.dtype'}), '((185, 388), dtype=data3d.dtype)\n', (15181, 15213), False, 'import numpy\n'), ((15229, 15237), 'six.moves.range', 'range', (['(8)'], {}), '(8)\n', (15234, 15237), False, 'from six.moves import range\n'), ((15419, 15445), 'numpy.hsplit', 'numpy.hsplit', (['data3d[i]', '(2)'], {}), '(data3d[i], 2)\n', (15431, 15445), False, 'import numpy\n'), ((15458, 15499), 'numpy.zeros', 'numpy.zeros', (['(185, 4)'], {'dtype': 'data3d.dtype'}), '((185, 4), dtype=data3d.dtype)\n', (15469, 15499), False, 'import numpy\n'), ((15514, 15553), 'numpy.hstack', 'numpy.hstack', (['(asics[0], gap, asics[1])'], {}), '((asics[0], gap, asics[1]))\n', (15526, 15553), False, 'import numpy\n'), ((16329, 16358), 'numpy.rot90', 'numpy.rot90', (['quadrant', '(4 - qn)'], {}), '(quadrant, 4 - qn)\n', (16340, 16358), False, 'import numpy\n'), ((21679, 21701), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (21692, 21701), False, 'import os\n'), ((21707, 21727), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (21718, 21727), False, 'import os\n'), ((23243, 23265), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (23256, 23265), False, 'import os\n'), ((23271, 23291), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (23282, 23291), False, 'import os\n'), ((24221, 24241), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (24232, 24241), False, 'import os\n'), ((42366, 42377), 'time.time', 'time.time', ([], {}), '()\n', (42375, 42377), False, 'import time\n'), ((47761, 47769), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (47766, 47769), False, 'from six.moves import range\n'), ((52375, 52394), 'six.moves.zip', 'zip', (['q_data', 'q_mask'], {}), '(q_data, q_mask)\n', (52378, 52394), False, 'from six.moves import zip\n'), ((55662, 55685), 'six.moves.range', 'range', (['ylim[0]', 
'ylim[1]'], {}), '(ylim[0], ylim[1])\n', (55667, 55685), False, 'from six.moves import range\n'), ((57493, 58967), 'scitbx.array_family.flex.int', 'flex.int', (['[865, 1121, 1059, 1306, 1062, 1121, 1256, 1306, 864, 909, 1058, 1094, 1061,\n 909, 1255, 1094, 1083, 1534, 1268, 1728, 1083, 1337, 1268, 1531, 871, \n 1538, 1056, 1732, 871, 1341, 1056, 1535, 1495, 1326, 1689, 1511, 1298, \n 1326, 1492, 1511, 1496, 1539, 1690, 1724, 1299, 1539, 1493, 1724, 1482,\n 1105, 1667, 1299, 1482, 908, 1667, 1102, 1270, 1107, 1455, 1301, 1270, \n 910, 1455, 1104, 1123, 706, 1308, 900, 1123, 509, 1308, 703, 910, 706, \n 1095, 900, 910, 509, 1095, 703, 1535, 498, 1729, 683, 1338, 498, 1532, \n 683, 1534, 711, 1728, 896, 1337, 711, 1531, 896, 1324, 77, 1509, 271, \n 1324, 274, 1509, 468, 1537, 75, 1722, 269, 1537, 272, 1722, 466, 1104, \n 97, 1298, 282, 907, 97, 1101, 282, 1105, 310, 1299, 495, 908, 310, 1102,\n 495, 706, 457, 900, 642, 509, 457, 703, 642, 705, 669, 899, 854, 508, \n 669, 702, 854, 496, 36, 681, 230, 496, 233, 681, 427, 709, 38, 894, 232,\n 709, 235, 894, 429, 77, 256, 271, 441, 274, 256, 468, 441, 77, 44, 271,\n 229, 274, 44, 468, 229, 98, 467, 283, 661, 98, 664, 283, 858, 311, 467,\n 496, 661, 311, 664, 496, 858, 457, 867, 642, 1061, 457, 1064, 642, 1258,\n 670, 865, 855, 1059, 670, 1062, 855, 1256, 37, 1084, 231, 1269, 234, \n 1084, 428, 1269, 37, 871, 231, 1056, 234, 871, 428, 1056, 256, 1495, \n 441, 1689, 256, 1298, 441, 1492, 43, 1497, 228, 1691, 43, 1300, 228, \n 1494, 469, 1481, 663, 1666, 666, 1481, 860, 1666, 467, 1269, 661, 1454,\n 664, 1269, 858, 1454]'], {}), '([865, 1121, 1059, 1306, 1062, 1121, 1256, 1306, 864, 909, 1058, \n 1094, 1061, 909, 1255, 1094, 1083, 1534, 1268, 1728, 1083, 1337, 1268, \n 1531, 871, 1538, 1056, 1732, 871, 1341, 1056, 1535, 1495, 1326, 1689, \n 1511, 1298, 1326, 1492, 1511, 1496, 1539, 1690, 1724, 1299, 1539, 1493,\n 1724, 1482, 1105, 1667, 1299, 1482, 908, 1667, 1102, 1270, 1107, 1455, \n 1301, 1270, 910, 1455, 1104, 1123, 706, 
1308, 900, 1123, 509, 1308, 703,\n 910, 706, 1095, 900, 910, 509, 1095, 703, 1535, 498, 1729, 683, 1338, \n 498, 1532, 683, 1534, 711, 1728, 896, 1337, 711, 1531, 896, 1324, 77, \n 1509, 271, 1324, 274, 1509, 468, 1537, 75, 1722, 269, 1537, 272, 1722, \n 466, 1104, 97, 1298, 282, 907, 97, 1101, 282, 1105, 310, 1299, 495, 908,\n 310, 1102, 495, 706, 457, 900, 642, 509, 457, 703, 642, 705, 669, 899, \n 854, 508, 669, 702, 854, 496, 36, 681, 230, 496, 233, 681, 427, 709, 38,\n 894, 232, 709, 235, 894, 429, 77, 256, 271, 441, 274, 256, 468, 441, 77,\n 44, 271, 229, 274, 44, 468, 229, 98, 467, 283, 661, 98, 664, 283, 858, \n 311, 467, 496, 661, 311, 664, 496, 858, 457, 867, 642, 1061, 457, 1064,\n 642, 1258, 670, 865, 855, 1059, 670, 1062, 855, 1256, 37, 1084, 231, \n 1269, 234, 1084, 428, 1269, 37, 871, 231, 1056, 234, 871, 428, 1056, \n 256, 1495, 441, 1689, 256, 1298, 441, 1492, 43, 1497, 228, 1691, 43, \n 1300, 228, 1494, 469, 1481, 663, 1666, 666, 1481, 860, 1666, 467, 1269,\n 661, 1454, 664, 1269, 858, 1454])\n', (57501, 58967), False, 'from scitbx.array_family import flex\n'), ((59380, 59590), 'scitbx.array_family.flex.int', 'flex.int', (['[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0,\n 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,\n 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,\n 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0,\n 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3])\n', (59388, 59590), False, 'from scitbx.array_family import flex\n'), ((59717, 61191), 'scitbx.array_family.flex.int', 'flex.int', (['[868, 1122, 1062, 1307, 1065, 1122, 1259, 1307, 868, 910, 1062, 1095, 1065,\n 910, 1259, 1095, 1087, 1534, 1272, 1728, 1087, 1337, 1272, 1531, 874, \n 1536, 1059, 1730, 874, 1339, 1059, 1533, 1497, 1328, 1691, 1513, 1300, \n 1328, 1494, 1513, 1499, 1541, 1693, 1726, 1302, 1541, 1496, 1726, 1483,\n 
1105, 1668, 1299, 1483, 908, 1668, 1102, 1271, 1106, 1456, 1300, 1271, \n 909, 1456, 1103, 1122, 705, 1307, 899, 1122, 508, 1307, 702, 909, 705, \n 1094, 899, 909, 508, 1094, 702, 1534, 497, 1728, 682, 1337, 497, 1531, \n 682, 1533, 710, 1727, 895, 1336, 710, 1530, 895, 1323, 76, 1508, 270, \n 1323, 273, 1508, 467, 1536, 75, 1721, 269, 1536, 272, 1721, 466, 1103, \n 97, 1297, 282, 906, 97, 1100, 282, 1103, 310, 1297, 495, 906, 310, 1100,\n 495, 705, 456, 899, 641, 508, 456, 702, 641, 704, 669, 898, 854, 507, \n 669, 701, 854, 495, 35, 680, 229, 495, 232, 680, 426, 707, 38, 892, 232,\n 707, 235, 892, 429, 75, 256, 269, 441, 272, 256, 466, 441, 75, 43, 269,\n 228, 272, 43, 466, 228, 97, 467, 282, 661, 97, 664, 282, 858, 310, 466,\n 495, 660, 310, 663, 495, 857, 456, 866, 641, 1060, 456, 1063, 641, 1257,\n 669, 865, 854, 1059, 669, 1062, 854, 1256, 36, 1084, 230, 1269, 233, \n 1084, 427, 1269, 35, 870, 229, 1055, 232, 870, 426, 1055, 254, 1494, \n 439, 1688, 254, 1297, 439, 1491, 42, 1496, 227, 1690, 42, 1299, 227, \n 1493, 468, 1481, 662, 1666, 665, 1481, 859, 1666, 465, 1268, 659, 1453,\n 662, 1268, 856, 1453]'], {}), '([868, 1122, 1062, 1307, 1065, 1122, 1259, 1307, 868, 910, 1062, \n 1095, 1065, 910, 1259, 1095, 1087, 1534, 1272, 1728, 1087, 1337, 1272, \n 1531, 874, 1536, 1059, 1730, 874, 1339, 1059, 1533, 1497, 1328, 1691, \n 1513, 1300, 1328, 1494, 1513, 1499, 1541, 1693, 1726, 1302, 1541, 1496,\n 1726, 1483, 1105, 1668, 1299, 1483, 908, 1668, 1102, 1271, 1106, 1456, \n 1300, 1271, 909, 1456, 1103, 1122, 705, 1307, 899, 1122, 508, 1307, 702,\n 909, 705, 1094, 899, 909, 508, 1094, 702, 1534, 497, 1728, 682, 1337, \n 497, 1531, 682, 1533, 710, 1727, 895, 1336, 710, 1530, 895, 1323, 76, \n 1508, 270, 1323, 273, 1508, 467, 1536, 75, 1721, 269, 1536, 272, 1721, \n 466, 1103, 97, 1297, 282, 906, 97, 1100, 282, 1103, 310, 1297, 495, 906,\n 310, 1100, 495, 705, 456, 899, 641, 508, 456, 702, 641, 704, 669, 898, \n 854, 507, 669, 701, 854, 495, 35, 680, 229, 495, 232, 
680, 426, 707, 38,\n 892, 232, 707, 235, 892, 429, 75, 256, 269, 441, 272, 256, 466, 441, 75,\n 43, 269, 228, 272, 43, 466, 228, 97, 467, 282, 661, 97, 664, 282, 858, \n 310, 466, 495, 660, 310, 663, 495, 857, 456, 866, 641, 1060, 456, 1063,\n 641, 1257, 669, 865, 854, 1059, 669, 1062, 854, 1256, 36, 1084, 230, \n 1269, 233, 1084, 427, 1269, 35, 870, 229, 1055, 232, 870, 426, 1055, \n 254, 1494, 439, 1688, 254, 1297, 439, 1491, 42, 1496, 227, 1690, 42, \n 1299, 227, 1493, 468, 1481, 662, 1666, 665, 1481, 859, 1666, 465, 1268,\n 659, 1453, 662, 1268, 856, 1453])\n', (59725, 61191), False, 'from scitbx.array_family import flex\n'), ((61604, 61814), 'scitbx.array_family.flex.int', 'flex.int', (['[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0,\n 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,\n 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,\n 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0,\n 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3])\n', (61612, 61814), False, 'from scitbx.array_family import flex\n'), ((61942, 63423), 'scitbx.array_family.flex.int', 'flex.int', (['[863, 1118, 1057, 1303, 1060, 1118, 1254, 1303, 865, 913, 1059, 1098, 1062,\n 913, 1256, 1098, 1070, 1532, 1255, 1726, 1070, 1335, 1255, 1529, 863, \n 1532, 1048, 1726, 863, 1335, 1048, 1529, 1484, 1335, 1678, 1520, 1287, \n 1335, 1481, 1520, 1484, 1543, 1678, 1728, 1287, 1543, 1481, 1728, 1475,\n 1110, 1660, 1304, 1475, 913, 1660, 1107, 1268, 1109, 1453, 1303, 1268, \n 912, 1453, 1106, 1119, 707, 1304, 901, 1119, 510, 1304, 704, 912, 707, \n 1097, 901, 912, 510, 1097, 704, 1533, 506, 1727, 691, 1336, 506, 1530, \n 691, 1533, 715, 1727, 900, 1336, 715, 1530, 900, 1334, 84, 1519, 278, \n 1334, 281, 1519, 475, 1541, 85, 1726, 279, 1541, 282, 1726, 476, 1108, \n 103, 1302, 288, 911, 103, 1105, 288, 1108, 311, 1302, 496, 911, 311, \n 1105, 496, 706, 
460, 900, 645, 509, 460, 703, 645, 706, 666, 900, 851, \n 509, 666, 703, 851, 507, 38, 692, 232, 507, 235, 692, 429, 713, 38, 898,\n 232, 713, 235, 898, 429, 82, 241, 276, 426, 279, 241, 473, 426, 82, 37,\n 276, 222, 279, 37, 473, 222, 103, 459, 288, 653, 103, 656, 288, 850, \n 310, 460, 495, 654, 310, 657, 495, 851, 460, 862, 645, 1056, 460, 1059,\n 645, 1253, 666, 863, 851, 1057, 666, 1060, 851, 1254, 38, 1070, 232, \n 1255, 235, 1070, 429, 1255, 38, 864, 232, 1049, 235, 864, 429, 1049, \n 242, 1484, 427, 1678, 242, 1287, 427, 1481, 37, 1484, 222, 1678, 37, \n 1287, 222, 1481, 458, 1475, 652, 1660, 655, 1475, 849, 1660, 459, 1267,\n 653, 1452, 656, 1267, 850, 1452]'], {}), '([863, 1118, 1057, 1303, 1060, 1118, 1254, 1303, 865, 913, 1059, \n 1098, 1062, 913, 1256, 1098, 1070, 1532, 1255, 1726, 1070, 1335, 1255, \n 1529, 863, 1532, 1048, 1726, 863, 1335, 1048, 1529, 1484, 1335, 1678, \n 1520, 1287, 1335, 1481, 1520, 1484, 1543, 1678, 1728, 1287, 1543, 1481,\n 1728, 1475, 1110, 1660, 1304, 1475, 913, 1660, 1107, 1268, 1109, 1453, \n 1303, 1268, 912, 1453, 1106, 1119, 707, 1304, 901, 1119, 510, 1304, 704,\n 912, 707, 1097, 901, 912, 510, 1097, 704, 1533, 506, 1727, 691, 1336, \n 506, 1530, 691, 1533, 715, 1727, 900, 1336, 715, 1530, 900, 1334, 84, \n 1519, 278, 1334, 281, 1519, 475, 1541, 85, 1726, 279, 1541, 282, 1726, \n 476, 1108, 103, 1302, 288, 911, 103, 1105, 288, 1108, 311, 1302, 496, \n 911, 311, 1105, 496, 706, 460, 900, 645, 509, 460, 703, 645, 706, 666, \n 900, 851, 509, 666, 703, 851, 507, 38, 692, 232, 507, 235, 692, 429, \n 713, 38, 898, 232, 713, 235, 898, 429, 82, 241, 276, 426, 279, 241, 473,\n 426, 82, 37, 276, 222, 279, 37, 473, 222, 103, 459, 288, 653, 103, 656,\n 288, 850, 310, 460, 495, 654, 310, 657, 495, 851, 460, 862, 645, 1056, \n 460, 1059, 645, 1253, 666, 863, 851, 1057, 666, 1060, 851, 1254, 38, \n 1070, 232, 1255, 235, 1070, 429, 1255, 38, 864, 232, 1049, 235, 864, \n 429, 1049, 242, 1484, 427, 1678, 242, 1287, 427, 1481, 37, 1484, 222, 
\n 1678, 37, 1287, 222, 1481, 458, 1475, 652, 1660, 655, 1475, 849, 1660, \n 459, 1267, 653, 1452, 656, 1267, 850, 1452])\n', (61950, 63423), False, 'from scitbx.array_family import flex\n'), ((63829, 64039), 'scitbx.array_family.flex.int', 'flex.int', (['[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0,\n 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,\n 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,\n 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0,\n 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3])\n', (63837, 64039), False, 'from scitbx.array_family import flex\n'), ((64489, 64690), 'scitbx.array_family.flex.int', 'flex.int', (['[112, 189, 622, 1212, 647, 188, 1156, 1212, 1180, 140, 1691, 1163, 1714, \n 140, 2226, 1163, 159, 1231, 671, 2254, 694, 1230, 1206, 2253, 1229, \n 1180, 1740, 2203, 1762, 1180, 2274, 2202]'], {}), '([112, 189, 622, 1212, 647, 188, 1156, 1212, 1180, 140, 1691, 1163,\n 1714, 140, 2226, 1163, 159, 1231, 671, 2254, 694, 1230, 1206, 2253, \n 1229, 1180, 1740, 2203, 1762, 1180, 2274, 2202])\n', (64497, 64690), False, 'from scitbx.array_family import flex\n'), ((64759, 64793), 'scitbx.array_family.flex.int', 'flex.int', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (64767, 64793), False, 'from scitbx.array_family import flex\n'), ((5724, 5732), 'six.moves.range', 'range', (['(8)'], {}), '(8)\n', (5729, 5732), False, 'from six.moves import range\n'), ((10687, 10717), 'numpy.rot90', 'numpy.rot90', (['data[:, :, s]', '(-1)'], {}), '(data[:, :, s], -1)\n', (10698, 10717), False, 'import numpy\n'), ((11147, 11162), 'psana.Source', 'Source', (['address'], {}), '(address)\n', (11153, 11162), False, 'from psana import Source, Bld\n'), ((14332, 14385), 'numpy.zeros', 'numpy.zeros', (['(3, s_data.shape[0])'], {'dtype': 's_data.dtype'}), '((3, s_data.shape[0]), 
dtype=s_data.dtype)\n', (14343, 14385), False, 'import numpy\n'), ((14401, 14440), 'numpy.vstack', 'numpy.vstack', (['(asics[0], gap, asics[1])'], {}), '((asics[0], gap, asics[1]))\n', (14413, 14440), False, 'import numpy\n'), ((15125, 15133), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (15130, 15133), False, 'from six.moves import range\n'), ((24193, 24215), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (24206, 24215), False, 'import os\n'), ((29336, 29345), 'getpass.getuser', 'getuser', ([], {}), '()\n', (29343, 29345), False, 'from getpass import getuser\n'), ((29488, 29512), 'psana.Source', 'Source', (['"""BldInfo(EBeam)"""'], {}), "('BldInfo(EBeam)')\n", (29494, 29512), False, 'from psana import Source, Bld\n'), ((41768, 41785), 'time.gmtime', 'time.gmtime', (['t[0]'], {}), '(t[0])\n', (41779, 41785), False, 'import time\n'), ((42390, 42403), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (42400, 42403), False, 'import math\n'), ((43200, 43217), 'time.gmtime', 'time.gmtime', (['t[0]'], {}), '(t[0])\n', (43211, 43217), False, 'import time\n'), ((55189, 55210), 'math.floor', 'math.floor', (['(-hsize[0])'], {}), '(-hsize[0])\n', (55199, 55210), False, 'import math\n'), ((55275, 55296), 'math.floor', 'math.floor', (['(-hsize[1])'], {}), '(-hsize[1])\n', (55285, 55296), False, 'import math\n'), ((9437, 9451), 'scitbx.array_family.flex.int', 'flex.int', (['c[0]'], {}), '(c[0])\n', (9445, 9451), False, 'from scitbx.array_family import flex\n'), ((9469, 9483), 'scitbx.array_family.flex.int', 'flex.int', (['c[1]'], {}), '(c[1])\n', (9477, 9483), False, 'from scitbx.array_family import flex\n'), ((14292, 14315), 'numpy.rot90', 'numpy.rot90', (['s_data', '(-1)'], {}), '(s_data, -1)\n', (14303, 14315), False, 'import numpy\n'), ((15287, 15324), 'numpy.insert', 'numpy.insert', (['data3d', 'i', 'zsec'], {'axis': '(0)'}), '(data3d, i, zsec, axis=0)\n', (15299, 15324), False, 'import numpy\n'), ((37099, 37135), 'time.strptime', 'strptime', 
(['"""2012-11-12, 17:00 UTC"""', 'f'], {}), "('2012-11-12, 17:00 UTC', f)\n", (37107, 37135), False, 'from time import strptime\n'), ((37758, 37766), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (37763, 37766), False, 'from six.moves import range\n'), ((55230, 55250), 'math.ceil', 'math.ceil', (['(+hsize[0])'], {}), '(+hsize[0])\n', (55239, 55250), False, 'import math\n'), ((55316, 55336), 'math.ceil', 'math.ceil', (['(+hsize[1])'], {}), '(+hsize[1])\n', (55325, 55336), False, 'import math\n'), ((8136, 8174), 'scitbx.array_family.flex.int', 'flex.int', (['[UL1_y, UL1_x, LR1_y, LR1_x]'], {}), '([UL1_y, UL1_x, LR1_y, LR1_x])\n', (8144, 8174), False, 'from scitbx.array_family import flex\n'), ((8194, 8232), 'scitbx.array_family.flex.int', 'flex.int', (['[UL2_y, UL2_x, LR2_y, LR2_x]'], {}), '([UL2_y, UL2_x, LR2_y, LR2_x])\n', (8202, 8232), False, 'from scitbx.array_family import flex\n'), ((31342, 31350), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (31347, 31350), False, 'from six.moves import range\n'), ((37172, 37208), 'time.strptime', 'strptime', (['"""2012-11-17, 17:00 UTC"""', 'f'], {}), "('2012-11-17, 17:00 UTC', f)\n", (37180, 37208), False, 'from time import strptime\n'), ((49197, 49251), 'numpy.empty', 'numpy.empty', (['(4, npix_quad, npix_quad)'], {'dtype': '"""uint16"""'}), "((4, npix_quad, npix_quad), dtype='uint16')\n", (49208, 49251), False, 'import numpy\n'), ((52978, 53004), 'numpy.rot90', 'numpy.rot90', (['s_data', '(0 + 2)'], {}), '(s_data, 0 + 2)\n', (52989, 53004), False, 'import numpy\n'), ((55821, 55834), 'math.ceil', 'math.ceil', (['xp'], {}), '(xp)\n', (55830, 55834), False, 'import math\n'), ((55882, 55895), 'math.ceil', 'math.ceil', (['yp'], {}), '(yp)\n', (55891, 55895), False, 'import math\n'), ((37270, 37306), 'time.strptime', 'strptime', (['"""2012-11-20, 17:00 UTC"""', 'f'], {}), "('2012-11-20, 17:00 UTC', f)\n", (37278, 37306), False, 'from time import strptime\n'), ((53217, 53243), 'numpy.rot90', 'numpy.rot90', 
(['s_data', '(1 + 2)'], {}), '(s_data, 1 + 2)\n', (53228, 53243), False, 'import numpy\n'), ((55939, 55953), 'math.floor', 'math.floor', (['xp'], {}), '(xp)\n', (55949, 55953), False, 'import math\n'), ((55960, 55973), 'math.ceil', 'math.ceil', (['xp'], {}), '(xp)\n', (55969, 55973), False, 'import math\n'), ((56001, 56015), 'math.floor', 'math.floor', (['yp'], {}), '(yp)\n', (56011, 56015), False, 'import math\n'), ((56022, 56035), 'math.ceil', 'math.ceil', (['yp'], {}), '(yp)\n', (56031, 56035), False, 'import math\n'), ((49370, 49408), 'numpy.hstack', 'numpy.hstack', (['(qimages[0], qimages[1])'], {}), '((qimages[0], qimages[1]))\n', (49382, 49408), False, 'import numpy\n'), ((49437, 49475), 'numpy.hstack', 'numpy.hstack', (['(qimages[3], qimages[2])'], {}), '((qimages[3], qimages[2]))\n', (49449, 49475), False, 'import numpy\n'), ((53426, 53452), 'numpy.rot90', 'numpy.rot90', (['s_data', '(2 + 2)'], {}), '(s_data, 2 + 2)\n', (53437, 53452), False, 'import numpy\n'), ((53635, 53661), 'numpy.rot90', 'numpy.rot90', (['s_data', '(3 + 2)'], {}), '(s_data, 3 + 2)\n', (53646, 53661), False, 'import numpy\n')] |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#author: wu.zheng midday.me
import os
import json
import cv2
import random
import numpy as np
import time
from dataset import image_augmation
from dataset import data_util
import xml.etree.ElementTree as ET
IMAGE_WIDTH=1024   # fixed width of the padded training canvas
IMAGE_HEIGHT=1024  # fixed height of the padded training canvas
NUM_CLASS=3        # 0 = background, 1 = image region, 2 = text region
def load_image_list(image_data_path):
  """Read a listing file and return its lines (newlines stripped)."""
  with open(image_data_path) as f:
    return [line.strip("\n") for line in f]
def get_int_value(item, name):
  """Fetch attribute *name* from *item* (via ``.get``) and coerce it to int."""
  raw = item.get(name)
  return int(raw)
def get_item_box(item):
  """Return the (x, y, width, height) box stored in *item*'s attributes."""
  keys = ('left', 'top', 'width', 'height')
  x, y, w, h = (get_int_value(item, key) for key in keys)
  return x, y, w, h
def load_label_data(label_file_path):
  """Parse a page-annotation XML file into a plain dict.

  The returned dict carries:
    image_path: the ``image_path`` attribute of the root element
    width/height: page dimensions as ints
    images/texts: lists of (x, y, w, h) boxes for <image>/<text> children
  """
  root = ET.parse(label_file_path).getroot()
  label_data = {
      'image_path': root.get('image_path'),
      'width': int(root.get('width')),
      'height': int(root.get('height')),
      'images': [get_item_box(node) for node in root.findall('image')],
      'texts': [get_item_box(node) for node in root.findall('text')],
  }
  return label_data
def get_shape_by_type(shapes, label):
  """Return the subset of *shapes* whose 'label' field equals *label*."""
  return [shape for shape in shapes if shape['label'] == label]
def fill_image(label_image, boxs, label_value, w_factor, h_factor):
  """Rasterize axis-aligned boxes into *label_image* with value *label_value*.

  Each box is (x, y, w, h) in page coordinates; the four corners are scaled
  by (w_factor, h_factor) and filled as a polygon in the label map.

  Args:
    label_image: 2-D array the polygons are drawn into.
    boxs: iterable of (x, y, w, h) boxes.
    label_value: integer class value written inside each box.
    w_factor: horizontal scale from page coords to label-image coords.
    h_factor: vertical scale from page coords to label-image coords.

  Returns:
    The filled label image.
  """
  for x, y, w, h in boxs:
    # Corners in clockwise order; kept as float until the final int cast so
    # truncation happens exactly once (same result as the original int-array
    # assignment, minus the unused `area` and redundant reshape).
    corners = np.array(
        [(x, y), (x + w, y), (x + w, y + h), (x, y + h)], dtype=np.float64)
    corners[:, 0] *= w_factor
    corners[:, 1] *= h_factor
    # cv2.fillPoly expects an array *of polygons*, hence the leading axis.
    label_image = cv2.fillPoly(
        label_image, corners.astype(np.int32)[np.newaxis, :, :], label_value)
  return label_image
def data_generator(list_path, image_dir, batch_size, mode='train'):
  """Yield training batches of (images, labels, masks, xml_paths).

  Each sample is built from one annotation XML: the page image is resized so
  its longer side equals the fixed model size (aspect ratio preserved),
  pasted into a zero-padded IMAGE_HEIGHT x IMAGE_WIDTH canvas, and the
  annotated image/text boxes are rasterized into a label map (1 = image
  region, 2 = text region). The mask marks the valid (non-padded) area.

  Args:
    list_path: file listing annotation XML paths, one per line.
    image_dir: directory the XML paths and image paths are relative to.
    batch_size: number of samples per yielded batch.
    mode: 'train' loops forever (reshuffling each epoch); anything else
      stops after a single pass.

  Yields:
    Tuple (image_batch, label_batch, mask_batch, xml_path_batch) of lists.
  """
  label_file_list = load_image_list(list_path)
  print("example size:", len(label_file_list))
  image_batch = []
  label_batch = []
  mask_batch = []
  xml_path_batch = []
  while True:
    random.shuffle(label_file_list)
    for xml_path in label_file_list:
      xml_path = os.path.join(image_dir, xml_path)
      label_data = load_label_data(xml_path)
      image_path = os.path.join(image_dir, label_data['image_path'])
      image = cv2.imread(image_path)
      # Skip unreadable images BEFORE touching the pixel data; the original
      # called image.copy() first, which crashed on a missing file.
      if image is None:
        continue
      image_labels = np.array(label_data['images'])
      text_labels = np.array(label_data['texts'])
      r_image = image.copy()
      h = label_data['height']
      w = label_data['width']
      image_h, image_w = r_image.shape[:2]
      # Scale the longer side to the fixed model size, keeping aspect ratio.
      if image_h > image_w:
        factor = IMAGE_HEIGHT / image_h
        new_h = IMAGE_HEIGHT
        new_w = int(image_w * factor)
      else:
        factor = IMAGE_WIDTH / image_w
        new_w = IMAGE_WIDTH
        new_h = int(image_h * factor)
      # Boxes are in page coordinates; map them into the resized image.
      w_factor = new_w / w
      h_factor = new_h / h
      r_image = cv2.resize(r_image, (new_w, new_h))
      label_image = np.zeros((new_h, new_w))
      label_image = fill_image(label_image, image_labels, 1, w_factor, h_factor)
      label_image = fill_image(label_image, text_labels, 2, w_factor, h_factor)
      # Paste everything into the fixed canvas; zeros pad the remainder.
      train_image = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
      train_image[0:new_h, 0:new_w] = r_image
      train_label = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH))
      train_label[0:new_h, 0:new_w] = label_image
      train_mask = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH))
      train_mask[0:new_h, 0:new_w] = np.ones((new_h, new_w))
      label_batch.append(train_label)
      image_batch.append(train_image / 255.0)
      mask_batch.append(train_mask)
      xml_path_batch.append(xml_path)
      if len(image_batch) == batch_size:
        yield image_batch, label_batch, mask_batch, xml_path_batch
        image_batch = []
        label_batch = []
        mask_batch = []
        xml_path_batch = []
    if mode != 'train':
      break
def get_batch(list_dir, image_dir, batch_size, mode='train', workers=1, max_queue_size=32):
  """Run data_generator in background workers and yield batches from its queue.

  Wraps data_generator in a data_util.GeneratorEnqueuer so batches are
  produced by *workers* background workers; each outer iteration polls the
  queue (sleeping 10 ms while empty) and yields the next batch, or None if
  the enqueuer stopped without producing one.
  """
  generator = data_generator(list_dir, image_dir, batch_size, mode)
  enqueuer = data_util.GeneratorEnqueuer(generator)
  enqueuer.start(max_queue_size=max_queue_size, workers=workers)
  enqueuer.is_running()
  while True:
    batch = None
    while enqueuer.is_running():
      if enqueuer.queue.empty():
        time.sleep(0.01)
      else:
        batch = enqueuer.queue.get()
        break
    yield batch
import random
def get_random_color():
  """Return a random color as a tuple of three ints, each in [0, 255)."""
  return tuple(int(random.random() * 255) for _ in range(3))
def mask_to_bbox(mask, im, num_class, out_path=None, out_file_name=None):
  """Extract per-class bounding boxes from a segmentation mask.

  Args:
    mask: 2-D class-id map; cast to uint8 before contour extraction.
    im: image the boxes are drawn on when out_path is set.
    num_class: number of classes; ids 1..num_class-1 are processed
      (0 is treated as background and skipped).
    out_path: optional output directory; when set, the annotated image is
      written there and boxes/labels are drawn onto *im*.
    out_file_name: file name for the annotated image (used with out_path).

  Returns:
    A list with one entry per class id, each a list of [x1, y1, x2, y2]
    boxes (top-left / bottom-right corners).
  """
  bbox_list = []
  mask = mask.astype(np.uint8)
  for i in range(1, num_class, 1):
    c_bbox_list = []
    # Binary mask of the current class only.
    c_mask = np.zeros_like(mask)
    c_mask[np.where(mask==i)] = 255
    # NOTE(review): 2-value unpacking matches OpenCV 4.x (and <3.0);
    # OpenCV 3.x returns 3 values here -- confirm the pinned cv2 version.
    contours, hierarchy = cv2.findContours(c_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    color = get_random_color()
    for cnt in contours:
      area = cv2.contourArea(cnt)
      # Drop tiny contours (area below 50 px^2) as noise.
      if area < 50:
        continue
      # Simplify the contour (tolerance = 0.5% of its perimeter) before
      # taking the axis-aligned bounding rectangle.
      epsilon = 0.005 * cv2.arcLength(cnt,True)
      approx = cv2.approxPolyDP(cnt,epsilon,True)
      (x, y, w, h) = cv2.boundingRect(approx)
      c_bbox_list.append([x, y, x+w, y+h])
      if out_path is not None:
        cv2.putText(im, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
        im=cv2.rectangle(im, pt1=(x, y), pt2=(x+w, y+h), color=color, thickness=1)
    bbox_list.append(c_bbox_list)
  if out_path is not None:
    outf = os.path.join(out_path, out_file_name)
    print(outf)
    cv2.imwrite(outf, im)
  return bbox_list
| [
"cv2.rectangle",
"time.sleep",
"numpy.array",
"cv2.approxPolyDP",
"xml.etree.ElementTree.parse",
"numpy.where",
"cv2.arcLength",
"cv2.contourArea",
"random.shuffle",
"numpy.ones",
"cv2.resize",
"cv2.imread",
"cv2.imwrite",
"os.path.join",
"numpy.zeros",
"cv2.findContours",
"random.ra... | [((813, 838), 'xml.etree.ElementTree.parse', 'ET.parse', (['label_file_path'], {}), '(label_file_path)\n', (821, 838), True, 'import xml.etree.ElementTree as ET\n'), ((1884, 1903), 'numpy.array', 'np.array', (['point_box'], {}), '(point_box)\n', (1892, 1903), True, 'import numpy as np\n'), ((2461, 2492), 'random.shuffle', 'random.shuffle', (['label_file_list'], {}), '(label_file_list)\n', (2475, 2492), False, 'import random\n'), ((5737, 5756), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5750, 5756), True, 'import numpy as np\n'), ((5819, 5887), 'cv2.findContours', 'cv2.findContours', (['c_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(c_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (5835, 5887), False, 'import cv2\n'), ((6489, 6526), 'os.path.join', 'os.path.join', (['out_path', 'out_file_name'], {}), '(out_path, out_file_name)\n', (6501, 6526), False, 'import os\n'), ((6547, 6568), 'cv2.imwrite', 'cv2.imwrite', (['outf', 'im'], {}), '(outf, im)\n', (6558, 6568), False, 'import cv2\n'), ((2547, 2580), 'os.path.join', 'os.path.join', (['image_dir', 'xml_path'], {}), '(image_dir, xml_path)\n', (2559, 2580), False, 'import os\n'), ((2643, 2692), 'os.path.join', 'os.path.join', (['image_dir', "label_data['image_path']"], {}), "(image_dir, label_data['image_path'])\n", (2655, 2692), False, 'import os\n'), ((2707, 2729), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2717, 2729), False, 'import cv2\n'), ((2751, 2781), 'numpy.array', 'np.array', (["label_data['images']"], {}), "(label_data['images'])\n", (2759, 2781), True, 'import numpy as np\n'), ((2803, 2832), 'numpy.array', 'np.array', (["label_data['texts']"], {}), "(label_data['texts'])\n", (2811, 2832), True, 'import numpy as np\n'), ((3787, 3822), 'cv2.resize', 'cv2.resize', (['r_image', '(new_w, new_h)'], {}), '(r_image, (new_w, new_h))\n', (3797, 3822), False, 'import cv2\n'), ((3843, 3867), 'numpy.zeros', 'np.zeros', (['(new_h, 
new_w)'], {}), '((new_h, new_w))\n', (3851, 3867), True, 'import numpy as np\n'), ((3881, 3904), 'numpy.ones', 'np.ones', (['(new_h, new_w)'], {}), '((new_h, new_w))\n', (3888, 3904), True, 'import numpy as np\n'), ((4089, 4129), 'numpy.zeros', 'np.zeros', (['(IMAGE_HEIGHT, IMAGE_WIDTH, 3)'], {}), '((IMAGE_HEIGHT, IMAGE_WIDTH, 3))\n', (4097, 4129), True, 'import numpy as np\n'), ((4196, 4233), 'numpy.zeros', 'np.zeros', (['(IMAGE_HEIGHT, IMAGE_WIDTH)'], {}), '((IMAGE_HEIGHT, IMAGE_WIDTH))\n', (4204, 4233), True, 'import numpy as np\n'), ((4298, 4321), 'numpy.ones', 'np.ones', (['(new_h, new_w)'], {}), '((new_h, new_w))\n', (4305, 4321), True, 'import numpy as np\n'), ((4341, 4378), 'numpy.zeros', 'np.zeros', (['(IMAGE_HEIGHT, IMAGE_WIDTH)'], {}), '((IMAGE_HEIGHT, IMAGE_WIDTH))\n', (4349, 4378), True, 'import numpy as np\n'), ((5768, 5787), 'numpy.where', 'np.where', (['(mask == i)'], {}), '(mask == i)\n', (5776, 5787), True, 'import numpy as np\n'), ((5957, 5977), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (5972, 5977), False, 'import cv2\n'), ((6078, 6114), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', 'epsilon', '(True)'], {}), '(cnt, epsilon, True)\n', (6094, 6114), False, 'import cv2\n'), ((6134, 6158), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (6150, 6158), False, 'import cv2\n'), ((5328, 5344), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (5338, 5344), False, 'import time\n'), ((5456, 5471), 'random.random', 'random.random', ([], {}), '()\n', (5469, 5471), False, 'import random\n'), ((5482, 5497), 'random.random', 'random.random', ([], {}), '()\n', (5495, 5497), False, 'import random\n'), ((5508, 5523), 'random.random', 'random.random', ([], {}), '()\n', (5521, 5523), False, 'import random\n'), ((6039, 6063), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (6052, 6063), False, 'import cv2\n'), ((6345, 6420), 'cv2.rectangle', 'cv2.rectangle', (['im'], {'pt1': '(x, 
y)', 'pt2': '(x + w, y + h)', 'color': 'color', 'thickness': '(1)'}), '(im, pt1=(x, y), pt2=(x + w, y + h), color=color, thickness=1)\n', (6358, 6420), False, 'import cv2\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
dataset = np.array([
[1, 2],
[1.5, 2],
[3, 1],
[12, 10],
[9, 12],
[7, 11]
])
class KMeans(object):
def __init__(self, n_cluster=1, tolerance=.001, max_iteration=300):
self.n_cluster = n_cluster
self.tolerance = tolerance
self.max_iteration = max_iteration
self.classification = {}
self.centroids = {}
self._iterated = 0
def fit(self, data):
# Random
for i in range(self.n_cluster):
self.centroids[i] = data[i]
# Classify data
for i in range(self.max_iteration):
self._iterated += 1
self.classification = {}
for ii in range(self.n_cluster):
self.classification[ii] = []
# Iterate over dataset and classify it for current centroids
for featureset in data:
distances = []
for centroid in self.centroids:
distances.append(np.linalg.norm(featureset - self.centroids[centroid]))
# Get the distance from closest centroid
closest_dist = min(distances)
# Get the index (centroid index)
classification = distances.index(closest_dist)
# Add current featureset to an centroid array
self.classification[classification].append(featureset)
prev_centroid = dict(self.centroids)
optimized = True
for classification in self.classification:
'''
So we should get average of X and Y coordinates
Xavg = (X1 + X2 + X3 + ...Xn) / n
Yavg = (Y1 + Y2 + Y3 + ...Yn) / n
^
| x1
|---*
|x2
|- *
|x3
|--*
0--------------------->
|
'''
self.centroids[classification] = np.average(self.classification[classification], axis=0)
for centroid in self.centroids:
orig_cent = prev_centroid[centroid]
curr_cent = self.centroids[centroid]
if np.sum((curr_cent - orig_cent) / orig_cent * 100.0) > self.tolerance:
optimized = False
if optimized:
break
clf = KMeans(n_cluster=2, max_iteration=10000)
clf.fit(dataset)
colors = ['g', 'r']
for centroid in clf.centroids:
plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1])
for classification in clf.classification:
plt.scatter(clf.classification[classification][0], clf.classification[classification][1], color=colors[classification])
plt.show() | [
"numpy.average",
"numpy.array",
"numpy.sum",
"matplotlib.style.use",
"matplotlib.pyplot.scatter",
"numpy.linalg.norm",
"matplotlib.pyplot.show"
] | [((81, 100), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (90, 100), False, 'from matplotlib import style\n'), ((112, 176), 'numpy.array', 'np.array', (['[[1, 2], [1.5, 2], [3, 1], [12, 10], [9, 12], [7, 11]]'], {}), '([[1, 2], [1.5, 2], [3, 1], [12, 10], [9, 12], [7, 11]])\n', (120, 176), True, 'import numpy as np\n'), ((2856, 2866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2864, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2620, 2687), 'matplotlib.pyplot.scatter', 'plt.scatter', (['clf.centroids[centroid][0]', 'clf.centroids[centroid][1]'], {}), '(clf.centroids[centroid][0], clf.centroids[centroid][1])\n', (2631, 2687), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2859), 'matplotlib.pyplot.scatter', 'plt.scatter', (['clf.classification[classification][0]', 'clf.classification[classification][1]'], {'color': 'colors[classification]'}), '(clf.classification[classification][0], clf.classification[\n classification][1], color=colors[classification])\n', (2746, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2110, 2165), 'numpy.average', 'np.average', (['self.classification[classification]'], {'axis': '(0)'}), '(self.classification[classification], axis=0)\n', (2120, 2165), True, 'import numpy as np\n'), ((2336, 2387), 'numpy.sum', 'np.sum', (['((curr_cent - orig_cent) / orig_cent * 100.0)'], {}), '((curr_cent - orig_cent) / orig_cent * 100.0)\n', (2342, 2387), True, 'import numpy as np\n'), ((1098, 1151), 'numpy.linalg.norm', 'np.linalg.norm', (['(featureset - self.centroids[centroid])'], {}), '(featureset - self.centroids[centroid])\n', (1112, 1151), True, 'import numpy as np\n')] |
from pathlib2 import Path
import pathlib2
import os
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
PROJECT_DIR = Path(__file__).resolve().parents[1]
DATA_DIR = PROJECT_DIR / "data"
MODEL_DIR = PROJECT_DIR / "models"
FIGURE_DIR = PROJECT_DIR / "figures"
def ensure_dir(file_path):
""" create a safely nested folder
"""
if type(file_path) == str:
if "." in os.path.basename(os.path.normpath(file_path)):
directory = os.path.dirname(file_path)
else:
directory = os.path.normpath(file_path)
if not os.path.exists(directory):
try:
os.makedirs(directory)
except FileExistsError as e:
# multiprocessing can cause directory creation problems
print(e)
elif type(file_path) == pathlib2.PosixPath:
# if this is a file
if len(file_path.suffix) > 0:
file_path.parent.mkdir(parents=True, exist_ok=True)
else:
file_path.mkdir(parents=True, exist_ok=True)
def most_recent_subdirectory(dataset_loc):
""" return the subdirectory that has been generated most
recently with the "%Y-%m-%d_%H-%M-%S" time scheme used in AVGN
"""
subdir_list = list((dataset_loc).iterdir())
directory_dates = [
datetime.strptime(i.name, "%Y-%m-%d_%H-%M-%S") for i in subdir_list
]
return subdir_list[np.argsort(directory_dates)[-1]]
def save_fig(
loc,
dpi=300,
save_pdf=False,
save_svg=False,
save_png=False,
save_jpg=True,
pad_inches=0.0,
):
if save_pdf:
plt.savefig(
str(loc) + ".pdf", dpi=dpi, bbox_inches="tight", pad_inches=pad_inches
)
if save_svg:
plt.savefig(
str(loc) + ".svg",
dpi=dpi,
bbox_inches="tight",
pad_inches=pad_inches,
transparent=True,
)
if save_png:
plt.savefig(
str(loc) + ".png",
dpi=dpi,
bbox_inches="tight",
pad_inches=pad_inches,
transparent=True,
)
if save_jpg:
plt.savefig(
str(loc) + ".jpg",
dpi=int(dpi / 2),
bbox_inches="tight",
pad_inches=pad_inches,
)
| [
"os.path.exists",
"os.makedirs",
"datetime.datetime.strptime",
"os.path.normpath",
"os.path.dirname",
"numpy.argsort",
"pathlib2.Path"
] | [((1323, 1369), 'datetime.datetime.strptime', 'datetime.strptime', (['i.name', '"""%Y-%m-%d_%H-%M-%S"""'], {}), "(i.name, '%Y-%m-%d_%H-%M-%S')\n", (1340, 1369), False, 'from datetime import datetime\n'), ((483, 509), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (498, 509), False, 'import os\n'), ((548, 575), 'os.path.normpath', 'os.path.normpath', (['file_path'], {}), '(file_path)\n', (564, 575), False, 'import os\n'), ((591, 616), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (605, 616), False, 'import os\n'), ((1420, 1447), 'numpy.argsort', 'np.argsort', (['directory_dates'], {}), '(directory_dates)\n', (1430, 1447), True, 'import numpy as np\n'), ((148, 162), 'pathlib2.Path', 'Path', (['__file__'], {}), '(__file__)\n', (152, 162), False, 'from pathlib2 import Path\n'), ((429, 456), 'os.path.normpath', 'os.path.normpath', (['file_path'], {}), '(file_path)\n', (445, 456), False, 'import os\n'), ((651, 673), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (662, 673), False, 'import os\n')] |
import numpy as np
from sklearn.neighbors import BallTree, KDTree
import os
from baselines.deepq.experiments.atari.lru_knn_gpu_bp import LRU_KNN_GPU_BP
import gc
from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem
import copy
class LRU_KNN_COMBINE_BP(object):
def __init__(self, num_actions, buffer_size, latent_dim, hash_dim, gamma=0.99, bp=True):
self.ec_buffer = []
self.num_actions = num_actions
self.gamma = gamma
self.rmax = 100000
self.bp = bp
for i in range(num_actions):
self.ec_buffer.append(LRU_KNN_GPU_BP(buffer_size, latent_dim, hash_dim, 'game'))
def add(self, action, key, value, reward, done, brothers):
buffer = self.ec_buffer[action]
if buffer.curr_capacity >= buffer.capacity:
# find the LRU entry
index = np.argmin(buffer.lru)
self.ec_buffer[action].prev_id[index] = []
self.ec_buffer[action].internal_value[index] = 0
for bro in self.ec_buffer[action].brothers[index]:
self.ec_buffer[bro[1]].brothers[bro[0]].remove((index, action))
self.ec_buffer[action].brothers[index] = []
else:
index = buffer.curr_capacity
buffer.curr_capacity += 1
buffer.states[index] = key
for a in range(len(brothers)):
if brothers[a] > 0:
buffer.brothers[index].append((brothers[a], a))
self.ec_buffer[a].brothers[brothers[a]].append((index, action))
buffer.external_value[index] = value
buffer.reward[index] = reward
buffer.done[index] = done
buffer.lru[index] = buffer.tm
buffer.tm += 0.01
# print("here")
# print(action, index, buffer.capacity)
# print(buffer.address)
key = np.array(key, copy=True)
# key = copy.deepcopy(key)
# print(key.shape)
knn_cuda_fixmem.add(buffer.address, index, key)
return index
def act_value(self, keys, action, knn, bp):
return self.ec_buffer[action].act_value(keys, knn, bp)
def ancestor(self, state, action):
sao_pair = copy.deepcopy(self.ec_buffer[action].prev_id[state])
for bro in self.ec_buffer[action].brothers[state]:
sao_pair_bro = self.ec_buffer[bro[1]].prev_id[bro[0]]
for pair in sao_pair_bro:
if pair not in sao_pair:
sao_pair.append(pair)
# print("bro", (state, action))
# print(self.ec_buffer[action].prev_id[state])
# print(sa_pair)
sa_pair = [(pair[0], pair[1]) for pair in sao_pair]
return sa_pair
def intrinsic_value(self, state, action):
sa_pair = self.ec_buffer[action].brothers[state]
sa_pair.append((state, action))
actions = np.unique([a for s, a in sa_pair])
# actions =
if len(actions) < self.num_actions:
return 0
intrinsic_values = [self.ec_buffer[a].internal_value[s] for s, a in sa_pair]
return np.max(intrinsic_values)
def reward_update(self, stack):
while len(stack) > 0:
s0, s, a, r_i, r_e, d, r_loop = stack.pop(-1)
old_r_e = copy.deepcopy(self.ec_buffer[a].external_value[s])
# old_r_i = self.ec_buffer[a].internal_value[s]
r = self.ec_buffer[a].reward[s]
if self.bp:
if s == s0 and d > 0:
# print("loop update", s, a, (r + self.gamma * r_loop) / (1 - self.gamma ** d),
# self.ec_buffer[a].external_value[s])
self.ec_buffer[a].external_value[s] = max(self.ec_buffer[a].external_value[s],
(r + self.gamma * r_loop) / (1 - self.gamma ** d))
d = 0
r_loop = 0
self.ec_buffer[a].external_value[s] = max(self.ec_buffer[a].external_value[s], r + self.gamma * r_e)
# self.ec_buffer[a].internal_value[s] = min(self.ec_buffer[a].internal_value[s], r_i)
if self.ec_buffer[a].external_value[s] > (old_r_e + 1e-7):
# r_i = max([buffer.internal_value[s] for buffer in self.ec_buffer])
# r_i = 0 if len(self.ancestor(s, a)) > 1 else r_i
print("extrinsic update", s, a, self.ec_buffer[a].external_value[s], old_r_e)
if d > 0:
r_loop = r_loop * self.gamma + r
for sa_pair in self.ancestor(s, a):
state_tm1, action_tm1 = sa_pair
stack.append((s0, state_tm1, action_tm1, r_i, self.ec_buffer[a].external_value[s], d + 1, r_loop))
def intrinsic_reward_update_iter(self, stack):
while len(stack) > 0:
s, a, v = stack.pop(-1)
old_v_i = self.intrinsic_value(s, a)
self.ec_buffer[a].internal_value[s] = min(v, self.ec_buffer[a].internal_value[s])
v_i = self.intrinsic_value(s, a)
if v_i < old_v_i and v_i < -self.rmax:
for sa_pair in self.ancestor(s, a):
s_prev, a_prev = sa_pair
stack.append((s_prev, a_prev, v_i))
def get_order(self, state, action, state_tp1, action_tp1):
sao_pair = copy.deepcopy(self.ec_buffer[action_tp1].prev_id[state_tp1])
for bro in self.ec_buffer[action_tp1].brothers[state_tp1]:
sao_pair_bro = self.ec_buffer[bro[1]].prev_id[bro[0]]
for pair in sao_pair_bro:
if pair not in sao_pair:
sao_pair.append(pair)
for s, a, order in sao_pair:
if s == state and a == action:
return order
return -1
def intrinsic_reward_update(self, sa_pair, sa_pair_tm1,debug=True):
z_t, action_t, reward_t, z_tp1, done_t = sa_pair
index = self.peek(z_t)
if debug:
print("intrinsic_reward_update_t", index)
if index[action_t] < 0:
ind_t = self.add(action_t, z_t, 0, reward_t, done_t, index)
print("add",ind_t,action_t)
else:
ind_t = index[action_t]
prev_s, prev_a = sa_pair_tm1
if (prev_s, prev_a) not in self.ancestor(ind_t, action_t):
order = len(self.ancestor(ind_t, action_t))
self.ec_buffer[action_t].prev_id[ind_t].append((prev_s, prev_a, order))
index_tp1 = self.peek(z_tp1)
state_tp1, action_tp1 = np.max(index_tp1), np.argmax(index_tp1)
if np.sqrt(np.sum(np.square(z_t - z_tp1))) < 1e-7:
# self loop
if debug:
print("self loop")
diminish_reward = -2 * self.rmax
elif state_tp1 > 0:
order = self.get_order(ind_t, action_t, state_tp1, action_tp1)
ancestors = self.ancestor(state_tp1, action_tp1)
if (state_tp1, action_tp1) in ancestors:
# remove self loop
ancestors.remove((state_tp1, action_tp1))
if debug:
print(ancestors)
print("intrinsic update tp1 s{} ,a {},len(ancestors) {},order {}".format(state_tp1, action_tp1, len(ancestors), order), flush=True)
diminish_reward = -2 * self.rmax if (len(ancestors) > 1 and order != 0) else 0
diminish_reward = -2 * self.rmax if done_t else diminish_reward
else:
diminish_reward = -2 * self.rmax if done_t else 0
if diminish_reward < 0:
stack = [(ind_t, action_t, diminish_reward)]
self.intrinsic_reward_update_iter(stack)
return ind_t, action_t
def peek(self, state):
index = []
for a in range(self.num_actions):
ind = self.ec_buffer[a].peek(state)
index.append(ind)
return index
def state_value(self, state):
# TODO:optimize this using brothers
act_values = []
index = self.peek(state)
for a in range(self.num_actions):
ind = index[a]
if ind > 0:
act_values.append(self.ec_buffer[a].external_value[ind])
else:
act_values.append(-self.rmax)
return np.max(act_values)
def update_sequence(self, sequence, debug):
Rtn = [0]
state_index = []
peek_count = 0
for s, a, r, sp, done in reversed(sequence):
if done or not self.bp:
rtn = self.gamma * Rtn[-1] + r
else:
rtn = max(self.gamma * Rtn[-1] + r, self.gamma * self.state_value(sp) + r)
Rtn.append(rtn)
index = self.peek(s)
ind = index[a]
if ind < 0:
if debug:
print("wierd")
ind = self.add(a, s, rtn, r, done, index)
else:
peek_count += 1
if self.ec_buffer[a].newly_added[ind]:
if debug:
print("sequence update new", ind, a, self.ec_buffer[a].external_value[ind], rtn)
self.ec_buffer[a].external_value[ind] = rtn
self.ec_buffer[a].newly_added[ind] = False
else:
if debug:
print("sequence update", ind, a, max(self.ec_buffer[a].external_value[ind], rtn),
self.ec_buffer[a].external_value[ind])
self.ec_buffer[a].external_value[ind] = max(self.ec_buffer[a].external_value[ind], rtn)
# self.ec_buffer[a].internal_value[ind] = min(self.ec_buffer[a].internal_value[ind], - done * self.rmax)
state_index.append(ind)
# prev_s, prev_a = None, None
# for i, sample in enumerate(sequence):
# s, a, r, sp, done = sample
# ind = state_index[-i - 1]
# if prev_s is not None and (prev_s, prev_a) not in self.ec_buffer[a].prev_id[ind]:
# self.ec_buffer[a].prev_id[ind].append((prev_s, prev_a))
# prev_s, prev_a = ind, a
print("peek count", peek_count / len(sequence))
if self.bp:
stack = []
Rtn.pop()
for i in range(len(sequence)):
rtn = Rtn.pop()
_, a, r, _, done = sequence[i]
s = state_index[-i - 1]
# print("put ",s,a)
stack.append((s, s, a, (1 - done) * self.rmax, rtn, 0, 0))
self.reward_update(stack)
| [
"numpy.unique",
"numpy.argmax",
"numpy.max",
"baselines.deepq.experiments.atari.knn_cuda_fixmem.knn.add",
"numpy.array",
"numpy.square",
"copy.deepcopy",
"numpy.argmin",
"baselines.deepq.experiments.atari.lru_knn_gpu_bp.LRU_KNN_GPU_BP"
] | [((1849, 1873), 'numpy.array', 'np.array', (['key'], {'copy': '(True)'}), '(key, copy=True)\n', (1857, 1873), True, 'import numpy as np\n'), ((1944, 1991), 'baselines.deepq.experiments.atari.knn_cuda_fixmem.knn.add', 'knn_cuda_fixmem.add', (['buffer.address', 'index', 'key'], {}), '(buffer.address, index, key)\n', (1963, 1991), True, 'from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem\n'), ((2185, 2237), 'copy.deepcopy', 'copy.deepcopy', (['self.ec_buffer[action].prev_id[state]'], {}), '(self.ec_buffer[action].prev_id[state])\n', (2198, 2237), False, 'import copy\n'), ((2849, 2883), 'numpy.unique', 'np.unique', (['[a for s, a in sa_pair]'], {}), '([a for s, a in sa_pair])\n', (2858, 2883), True, 'import numpy as np\n'), ((3069, 3093), 'numpy.max', 'np.max', (['intrinsic_values'], {}), '(intrinsic_values)\n', (3075, 3093), True, 'import numpy as np\n'), ((5320, 5380), 'copy.deepcopy', 'copy.deepcopy', (['self.ec_buffer[action_tp1].prev_id[state_tp1]'], {}), '(self.ec_buffer[action_tp1].prev_id[state_tp1])\n', (5333, 5380), False, 'import copy\n'), ((8226, 8244), 'numpy.max', 'np.max', (['act_values'], {}), '(act_values)\n', (8232, 8244), True, 'import numpy as np\n'), ((869, 890), 'numpy.argmin', 'np.argmin', (['buffer.lru'], {}), '(buffer.lru)\n', (878, 890), True, 'import numpy as np\n'), ((3241, 3291), 'copy.deepcopy', 'copy.deepcopy', (['self.ec_buffer[a].external_value[s]'], {}), '(self.ec_buffer[a].external_value[s])\n', (3254, 3291), False, 'import copy\n'), ((6502, 6519), 'numpy.max', 'np.max', (['index_tp1'], {}), '(index_tp1)\n', (6508, 6519), True, 'import numpy as np\n'), ((6521, 6541), 'numpy.argmax', 'np.argmax', (['index_tp1'], {}), '(index_tp1)\n', (6530, 6541), True, 'import numpy as np\n'), ((601, 658), 'baselines.deepq.experiments.atari.lru_knn_gpu_bp.LRU_KNN_GPU_BP', 'LRU_KNN_GPU_BP', (['buffer_size', 'latent_dim', 'hash_dim', '"""game"""'], {}), "(buffer_size, latent_dim, hash_dim, 'game')\n", (615, 658), 
False, 'from baselines.deepq.experiments.atari.lru_knn_gpu_bp import LRU_KNN_GPU_BP\n'), ((6568, 6590), 'numpy.square', 'np.square', (['(z_t - z_tp1)'], {}), '(z_t - z_tp1)\n', (6577, 6590), True, 'import numpy as np\n')] |
import math
import numpy as np
from onireader._onireader import Device as _Device
from onireader._onireader import ANY_DEVICE, PixelFormat
class Intrinsics:
def __init__(self, fx, fy, cx, cy):
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
def __str__(self):
return "Intrinsics: fx={}, fy={}, cx={}, cy={}".format(
self.fx, self.fy, self.cx, self.cy)
def __repr__(self):
return str(self)
class Device(_Device):
def __init__(self):
super(Device, self).__init__()
def open(self, device_uri=None):
if device_uri is None:
device_uri = ""
return super(Device, self).open(device_uri)
def find_best_fit_modes(self, width, height,
depth_format=PixelFormat.DEPTH_1_MM,
rgb_format=PixelFormat.RGB888):
depth_vmodes = self.get_depth_video_modes()
rgb_vmodes = self.get_color_video_modes()
target_res = np.array([width, height])
depth_res_dist = [
(mode_num,
np.linalg.norm(target_res - np.array([vmode.width, vmode.height])))
for mode_num, vmode in enumerate(depth_vmodes)
if vmode.format == depth_format]
rgb_res_dist = [
(mode_num,
np.linalg.norm(target_res - np.array([vmode.width, vmode.height])))
for mode_num, vmode in enumerate(rgb_vmodes)
if vmode.format == rgb_format]
depth_mode = sorted(depth_res_dist, key=lambda x: x[1])[0][0]
rgb_mode = sorted(rgb_res_dist, key=lambda x: x[1])[0][0]
return depth_mode, rgb_mode
def start(self, depth_mode=-1, rgb_mode=-1):
super(Device, self).start(depth_mode, rgb_mode)
def get_intrinsics(self):
hfov = self.get_horizontal_fov()
vfov = self.get_vertical_fov()
vmode = self.get_depth_video_mode()
import ipdb
ipdb.set_trace()
fx = vmode.width * .5 / math.tan(hfov)
fy = vmode.height * .5 / math.tan(vfov)
cx = vmode.width * .5
cy = vmode.height * .5
return Intrinsics(fx, fy, cx, cy)
| [
"ipdb.set_trace",
"numpy.array",
"math.tan"
] | [((1007, 1032), 'numpy.array', 'np.array', (['[width, height]'], {}), '([width, height])\n', (1015, 1032), True, 'import numpy as np\n'), ((1963, 1979), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (1977, 1979), False, 'import ipdb\n'), ((2013, 2027), 'math.tan', 'math.tan', (['hfov'], {}), '(hfov)\n', (2021, 2027), False, 'import math\n'), ((2061, 2075), 'math.tan', 'math.tan', (['vfov'], {}), '(vfov)\n', (2069, 2075), False, 'import math\n'), ((1125, 1162), 'numpy.array', 'np.array', (['[vmode.width, vmode.height]'], {}), '([vmode.width, vmode.height])\n', (1133, 1162), True, 'import numpy as np\n'), ((1359, 1396), 'numpy.array', 'np.array', (['[vmode.width, vmode.height]'], {}), '([vmode.width, vmode.height])\n', (1367, 1396), True, 'import numpy as np\n')] |
import csv
import os
import pathlib
import pickle
import sqlite3
import time
import urllib.request
from typing import Union
import cv2
import numpy as np
import GLOBAL_VARIABLES as GV
class Helpers(object):
"""
This class contains some general purposes helper functions like create database, is connected to internest, ans so on.
Args:
object (class): [description]
"""
#####################################################################
# GENERAL HELPER FUNCTIONS
#####################################################################
@staticmethod
def get_txt_file_lines(path):
"""
This function takes path of a .txt file and
returns the list of lines in that file
"""
with open(path) as f:
sources = [line.rstrip() for line in f]
return sources
@staticmethod
def write_pickle(obj, path):
with open(path, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def read_pickle(path: Union[str, pathlib.Path]):
"""Reads a pickle file
Args:
path (str): Path to the pickle file
Returns:
object: The object stored in the file. If file is empty or doesn't exist, returns None
"""
try:
with open(path, 'rb') as handle:
obj = pickle.load(handle)
except (EOFError, FileNotFoundError):
obj = None
return obj
@staticmethod
def read_multiple_pickle_files(path: str) -> list:
"""Reads multiple pickle files and returns them in a list
Args:
path (str): Path to directory containing the .pickle files
Returns:
list: List of objects loaded from the files
"""
object_list = []
for file in pathlib.Path(path).glob('**/*.pickle'):
obj = Helpers.read_pickle(file)
if obj:
object_list.append(obj)
return object_list
@staticmethod
def draw_polygon(frame, polygons, type='ENTRY'):
canvas = np.zeros_like(frame)
for polygon in polygons:
xx, yy = polygon.exterior.coords.xy
points = []
for x, y in zip(xx, yy):
points.append((int(x), int(y)))
cv2.fillPoly(canvas, np.array([points]), GV.POLYGON_COLORS[type])
cv2.addWeighted(frame, 0.8, canvas, 0.2, 0, frame)
@staticmethod
def write_dict_header_to_csv(dict_keys, save_location):
directory_path = save_location.rsplit('/', 1)[0] + '/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
with open(save_location, newline='', mode='w') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=list(dict_keys))
csv_writer.writeheader()
@staticmethod
def write_dict_to_csv_row(row_data, save_location):
header_data = list(row_data.keys())
if not os.path.exists(save_location):
Helpers.write_dict_header_to_csv(header_data, save_location)
with open(save_location, newline='', mode='a') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=header_data)
csv_writer.writerow(row_data)
@staticmethod
def write_csv_row(header_data: list, row_data: list, save_location: str):
"""Writes a csv row and handles the opening and closing of file.
Args:
header_data (list): Data to be written as header. Written only when file is initialized
row_data (list): Data to be appended to row
save_location (str): Location to be saved
"""
if not os.path.exists(save_location):
directory_path = save_location.rsplit('/', 1)[0] + '/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
with open(save_location, newline='', mode='w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(header_data)
with open(save_location, newline='', mode='a') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(row_data)
#####################################################################
# COORDINATE CONVERSION FUNCTIONS
#####################################################################
@staticmethod
def _xcycwh_to_tlwh(bbox_cxcywh):
bbox_cxcywh[:, 0] = bbox_cxcywh[:, 0] - bbox_cxcywh[:, 2] / 2.
bbox_cxcywh[:, 1] = bbox_cxcywh[:, 1] - bbox_cxcywh[:, 3] / 2.
return bbox_cxcywh
@staticmethod
def _cxcywh_to_txtybxby(bbox_cxcywh, width, height):
xc, yc, w, h = bbox_cxcywh
tx = max(int(xc - w / 2), 0)
bx = min(int(xc + w / 2), width - 1)
ty = max(int(yc - h / 2), 0)
by = min(int(yc + h / 2), height - 1)
return tx, ty, bx, by
@staticmethod
def _tlwh_to_txtybxby(bbox_tlwh, width, height):
x, y, w, h = bbox_tlwh
tx = max(int(x), 0)
bx = min(int(x + w), width - 1)
ty = max(int(y), 0)
by = min(int(y + h), height - 1)
return tx, ty, bx, by
@staticmethod
def _txtybxby_to_cxcywh(x1, y1, x2, y2):
cx = (x1 + x2) / 2
w = x2 - x1
cy = (y1 + y2) / 2
h = y2 - y1
return cx, cy, w, h
#####################################################################
# POLYGONS RELATED FUNCTIONS
#####################################################################
@staticmethod
def get_bottom_center(bbox):
return np.array([(bbox[2] - bbox[0]) // 2 + bbox[0], bbox[3]])
@staticmethod
def toworld(centers, F):
imagepoint = [centers[0], centers[1], 1]
worldpoint = np.array(np.dot(F, imagepoint))
scalar = worldpoint[2]
xworld = int(worldpoint[0] / scalar)
yworld = int(worldpoint[1] / scalar)
return (xworld, yworld)
#####################################################################
# Image Related Utilities
#####################################################################
@staticmethod
def clip_crop(frame, detection):
return frame[int(detection.bbox[1]):int(detection.bbox[3]), int(detection.bbox[0]):int(detection.bbox[2])]
@staticmethod
def save_crops_from_bboxes(detections, frame):
for det in detections.Detections:
crop = Helpers.clip_crop(frame, det)
cv2.imwrite(f'./saved_crops/{GV.CROP_SAVE_NUMBER}.jpg', crop)
GV.CROP_SAVE_NUMBER += 1
# @staticmethod
# def load_vino_model(model_path):
# model_path = osp.abspath(model_path)
# model_description_path = model_path
# model_weights_path = osp.splitext(model_path)[0] + ".bin"
# assert osp.isfile(model_description_path), \
# assert osp.isfile(model_weights_path), \
# "Model weights are not found at '%s'" % (model_weights_path)
# model = IENetwork(model_description_path, model_weights_path)
# return model
#####################################################################
# Database Related Utilities
#####################################################################
@staticmethod
def push_data_to_db(db_path, data, key):
conn = sqlite3.connect(db_path)
curs = conn.cursor()
curs.execute('insert into requests (Data, Key) values(?, ?)', [str(data), key])
conn.commit()
conn.close()
@staticmethod
def clear_db(db_path):
conn = sqlite3.connect(db_path)
curs = conn.cursor()
curs.execute('DELETE from requests')
conn.commit()
conn.close()
@staticmethod
def create_connection(db_file):
""" create a database connection to a SQLite database """
conn = None
try:
conn = sqlite3.connect(db_file)
conn.execute('''CREATE TABLE `requests` (
`Row_ID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`Data` TEXT NOT NULL,
`Key` TEXT
);''')
print(sqlite3.version)
except sqlite3.Error as e:
print(e)
finally:
if conn:
conn.close()
@staticmethod
def create_database(video_id, path='./data_db/'):
""" create a database connection to an SQLite database """
conn = None
path = './data_db/' + video_id + '.db'
if os.path.exists(path):
print('--------- Database Already Exists ---------')
return path
try:
conn = sqlite3.connect(path)
conn.execute('''CREATE TABLE `requests` (
`Row_ID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`Data` TEXT NOT NULL,
`Key` TEXT
);''')
except Exception as e:
print(e)
if conn:
conn.close()
print('--------- Database Created Successfully ---------')
return path
@staticmethod
def pop_data_from_db(db_path):
"""
To get a row from DB to post. Deletes that row from the db as well
"""
data = key = None
conn = sqlite3.connect(db_path)
try:
curs = conn.execute('SELECT Row_ID, Data, Key FROM requests order by Row_ID limit 1')
result = curs.fetchone()
if type(result).__name__ != 'NoneType':
row_id, data, key = result[0], result[1], result[2]
conn.execute("DELETE from requests where Row_ID = ?;", [row_id])
conn.commit()
conn.close()
return data, key, True
except Exception as e:
print(e)
return data, key, False
return data, key, False
@staticmethod
def is_connected_to_internet(host='http://google.com'):
time.sleep(1)
try:
urllib.request.urlopen(host)
return True
except Exception as e:
print("Error during is_connected_to_internet")
print(e)
return False
| [
"csv.DictWriter",
"os.path.exists",
"cv2.imwrite",
"pickle.dump",
"sqlite3.connect",
"os.makedirs",
"pathlib.Path",
"csv.writer",
"pickle.load",
"time.sleep",
"cv2.addWeighted",
"numpy.array",
"numpy.dot",
"numpy.zeros_like"
] | [((2134, 2154), 'numpy.zeros_like', 'np.zeros_like', (['frame'], {}), '(frame)\n', (2147, 2154), True, 'import numpy as np\n'), ((2434, 2484), 'cv2.addWeighted', 'cv2.addWeighted', (['frame', '(0.8)', 'canvas', '(0.2)', '(0)', 'frame'], {}), '(frame, 0.8, canvas, 0.2, 0, frame)\n', (2449, 2484), False, 'import cv2\n'), ((5693, 5748), 'numpy.array', 'np.array', (['[(bbox[2] - bbox[0]) // 2 + bbox[0], bbox[3]]'], {}), '([(bbox[2] - bbox[0]) // 2 + bbox[0], bbox[3]])\n', (5701, 5748), True, 'import numpy as np\n'), ((7434, 7458), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (7449, 7458), False, 'import sqlite3\n'), ((7683, 7707), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (7698, 7707), False, 'import sqlite3\n'), ((8625, 8645), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8639, 8645), False, 'import os\n'), ((9396, 9420), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (9411, 9420), False, 'import sqlite3\n'), ((10080, 10093), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10090, 10093), False, 'import time\n'), ((974, 1032), 'pickle.dump', 'pickle.dump', (['obj', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (985, 1032), False, 'import pickle\n'), ((2643, 2673), 'os.path.exists', 'os.path.exists', (['directory_path'], {}), '(directory_path)\n', (2657, 2673), False, 'import os\n'), ((2687, 2714), 'os.makedirs', 'os.makedirs', (['directory_path'], {}), '(directory_path)\n', (2698, 2714), False, 'import os\n'), ((3033, 3062), 'os.path.exists', 'os.path.exists', (['save_location'], {}), '(save_location)\n', (3047, 3062), False, 'import os\n'), ((3231, 3279), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'header_data'}), '(csv_file, fieldnames=header_data)\n', (3245, 3279), False, 'import csv\n'), ((3745, 3774), 'os.path.exists', 'os.path.exists', (['save_location'], {}), 
'(save_location)\n', (3759, 3774), False, 'import os\n'), ((4203, 4223), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (4213, 4223), False, 'import csv\n'), ((5877, 5898), 'numpy.dot', 'np.dot', (['F', 'imagepoint'], {}), '(F, imagepoint)\n', (5883, 5898), True, 'import numpy as np\n'), ((6577, 6638), 'cv2.imwrite', 'cv2.imwrite', (['f"""./saved_crops/{GV.CROP_SAVE_NUMBER}.jpg"""', 'crop'], {}), "(f'./saved_crops/{GV.CROP_SAVE_NUMBER}.jpg', crop)\n", (6588, 6638), False, 'import cv2\n'), ((8000, 8024), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8015, 8024), False, 'import sqlite3\n'), ((8769, 8790), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (8784, 8790), False, 'import sqlite3\n'), ((1408, 1427), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1419, 1427), False, 'import pickle\n'), ((1873, 1891), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1885, 1891), False, 'import pathlib\n'), ((2380, 2398), 'numpy.array', 'np.array', (['[points]'], {}), '([points])\n', (2388, 2398), True, 'import numpy as np\n'), ((3862, 3892), 'os.path.exists', 'os.path.exists', (['directory_path'], {}), '(directory_path)\n', (3876, 3892), False, 'import os\n'), ((3910, 3937), 'os.makedirs', 'os.makedirs', (['directory_path'], {}), '(directory_path)\n', (3921, 3937), False, 'import os\n'), ((4039, 4059), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (4049, 4059), False, 'import csv\n')] |
import unittest
import numpy as np
from paderbox.array import split_complex_features, merge_complex_features
T, B, F = 400, 6, 513
A = np.random.uniform(size=(T, B, F)) + 1j * np.random.uniform(size=(T, B, F))
class TestSplitMerge(unittest.TestCase):
def test_identity_operation(self):
splitted = split_complex_features(A)
assert splitted.shape == (T, B, 2*F)
merged = merge_complex_features(splitted)
np.testing.assert_almost_equal(A, merged)
def test_split_toy_example(self):
A = np.asarray([[[1 + 2j]]])
splitted = split_complex_features(A)
np.testing.assert_almost_equal(splitted, np.asarray([[[1, 2]]]))
def test_merge_toy_example(self):
A = np.asarray([[[1, 2]]])
merged = merge_complex_features(A)
np.testing.assert_almost_equal(merged, np.asarray([[[1 + 2j]]]))
| [
"numpy.asarray",
"paderbox.array.merge_complex_features",
"numpy.testing.assert_almost_equal",
"numpy.random.uniform",
"paderbox.array.split_complex_features"
] | [((136, 169), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(T, B, F)'}), '(size=(T, B, F))\n', (153, 169), True, 'import numpy as np\n'), ((177, 210), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(T, B, F)'}), '(size=(T, B, F))\n', (194, 210), True, 'import numpy as np\n'), ((312, 337), 'paderbox.array.split_complex_features', 'split_complex_features', (['A'], {}), '(A)\n', (334, 337), False, 'from paderbox.array import split_complex_features, merge_complex_features\n'), ((400, 432), 'paderbox.array.merge_complex_features', 'merge_complex_features', (['splitted'], {}), '(splitted)\n', (422, 432), False, 'from paderbox.array import split_complex_features, merge_complex_features\n'), ((441, 482), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['A', 'merged'], {}), '(A, merged)\n', (471, 482), True, 'import numpy as np\n'), ((534, 560), 'numpy.asarray', 'np.asarray', (['[[[1 + 2.0j]]]'], {}), '([[[1 + 2.0j]]])\n', (544, 560), True, 'import numpy as np\n'), ((578, 603), 'paderbox.array.split_complex_features', 'split_complex_features', (['A'], {}), '(A)\n', (600, 603), False, 'from paderbox.array import split_complex_features, merge_complex_features\n'), ((728, 750), 'numpy.asarray', 'np.asarray', (['[[[1, 2]]]'], {}), '([[[1, 2]]])\n', (738, 750), True, 'import numpy as np\n'), ((768, 793), 'paderbox.array.merge_complex_features', 'merge_complex_features', (['A'], {}), '(A)\n', (790, 793), False, 'from paderbox.array import split_complex_features, merge_complex_features\n'), ((653, 675), 'numpy.asarray', 'np.asarray', (['[[[1, 2]]]'], {}), '([[[1, 2]]])\n', (663, 675), True, 'import numpy as np\n'), ((841, 867), 'numpy.asarray', 'np.asarray', (['[[[1 + 2.0j]]]'], {}), '([[[1 + 2.0j]]])\n', (851, 867), True, 'import numpy as np\n')] |
"""Look into Person Dataset"""
import os
import torch
import numpy as np
from PIL import Image
from awesome_semantic_segmentation_pytorch.data.dataloader.segbase import SegmentationDataset
class LIPSegmentation(SegmentationDataset):
"""Look into person parsing dataset """
BASE_DIR = 'LIP'
NUM_CLASS = 20
def __init__(self, root='../datasets/LIP', split='train', mode=None, transform=None, **kwargs):
super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs)
_trainval_image_dir = os.path.join(root, 'TrainVal_images')
_testing_image_dir = os.path.join(root, 'Testing_images')
_trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations')
if split == 'train':
_image_dir = os.path.join(_trainval_image_dir, 'train_images')
_mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations')
_split_f = os.path.join(_trainval_image_dir, 'train_id.txt')
elif split == 'val':
_image_dir = os.path.join(_trainval_image_dir, 'val_images')
_mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations')
_split_f = os.path.join(_trainval_image_dir, 'val_id.txt')
elif split == 'test':
_image_dir = os.path.join(_testing_image_dir, 'testing_images')
_split_f = os.path.join(_testing_image_dir, 'test_id.txt')
else:
raise RuntimeError('Unknown dataset split.')
self.images = []
self.masks = []
with open(os.path.join(_split_f), 'r') as lines:
for line in lines:
_image = os.path.join(_image_dir, line.rstrip('\n') + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
if split != 'test':
_mask = os.path.join(_mask_dir, line.rstrip('\n') + '.png')
assert os.path.isfile(_mask)
self.masks.append(_mask)
if split != 'test':
assert (len(self.images) == len(self.masks))
print('Found {} {} images in the folder {}'.format(len(self.images), split, root))
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask, os.path.basename(self.images[index])
def __len__(self):
return len(self.images)
def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
return torch.from_numpy(target).long()
@property
def classes(self):
"""Category name."""
return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe',
'rightShoe')
if __name__ == '__main__':
dataset = LIPSegmentation(base_size=280, crop_size=256) | [
"PIL.Image.open",
"os.path.join",
"torch.from_numpy",
"os.path.isfile",
"numpy.array",
"os.path.basename"
] | [((538, 575), 'os.path.join', 'os.path.join', (['root', '"""TrainVal_images"""'], {}), "(root, 'TrainVal_images')\n", (550, 575), False, 'import os\n'), ((605, 641), 'os.path.join', 'os.path.join', (['root', '"""Testing_images"""'], {}), "(root, 'Testing_images')\n", (617, 641), False, 'import os\n'), ((671, 721), 'os.path.join', 'os.path.join', (['root', '"""TrainVal_parsing_annotations"""'], {}), "(root, 'TrainVal_parsing_annotations')\n", (683, 721), False, 'import os\n'), ((2501, 2530), 'PIL.Image.open', 'Image.open', (['self.masks[index]'], {}), '(self.masks[index])\n', (2511, 2530), False, 'from PIL import Image\n'), ((776, 825), 'os.path.join', 'os.path.join', (['_trainval_image_dir', '"""train_images"""'], {}), "(_trainval_image_dir, 'train_images')\n", (788, 825), False, 'import os\n'), ((850, 905), 'os.path.join', 'os.path.join', (['_trainval_mask_dir', '"""train_segmentations"""'], {}), "(_trainval_mask_dir, 'train_segmentations')\n", (862, 905), False, 'import os\n'), ((929, 978), 'os.path.join', 'os.path.join', (['_trainval_image_dir', '"""train_id.txt"""'], {}), "(_trainval_image_dir, 'train_id.txt')\n", (941, 978), False, 'import os\n'), ((3032, 3068), 'os.path.basename', 'os.path.basename', (['self.images[index]'], {}), '(self.images[index])\n', (3048, 3068), False, 'import os\n'), ((1033, 1080), 'os.path.join', 'os.path.join', (['_trainval_image_dir', '"""val_images"""'], {}), "(_trainval_image_dir, 'val_images')\n", (1045, 1080), False, 'import os\n'), ((1105, 1158), 'os.path.join', 'os.path.join', (['_trainval_mask_dir', '"""val_segmentations"""'], {}), "(_trainval_mask_dir, 'val_segmentations')\n", (1117, 1158), False, 'import os\n'), ((1182, 1229), 'os.path.join', 'os.path.join', (['_trainval_image_dir', '"""val_id.txt"""'], {}), "(_trainval_image_dir, 'val_id.txt')\n", (1194, 1229), False, 'import os\n'), ((1546, 1568), 'os.path.join', 'os.path.join', (['_split_f'], {}), '(_split_f)\n', (1558, 1568), False, 'import os\n'), ((1717, 1739), 
'os.path.isfile', 'os.path.isfile', (['_image'], {}), '(_image)\n', (1731, 1739), False, 'import os\n'), ((2219, 2249), 'PIL.Image.open', 'Image.open', (['self.images[index]'], {}), '(self.images[index])\n', (2229, 2249), False, 'from PIL import Image\n'), ((2449, 2485), 'os.path.basename', 'os.path.basename', (['self.images[index]'], {}), '(self.images[index])\n', (2465, 2485), False, 'import os\n'), ((3180, 3194), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (3188, 3194), True, 'import numpy as np\n'), ((3226, 3250), 'torch.from_numpy', 'torch.from_numpy', (['target'], {}), '(target)\n', (3242, 3250), False, 'import torch\n'), ((1285, 1335), 'os.path.join', 'os.path.join', (['_testing_image_dir', '"""testing_images"""'], {}), "(_testing_image_dir, 'testing_images')\n", (1297, 1335), False, 'import os\n'), ((1359, 1406), 'os.path.join', 'os.path.join', (['_testing_image_dir', '"""test_id.txt"""'], {}), "(_testing_image_dir, 'test_id.txt')\n", (1371, 1406), False, 'import os\n'), ((1926, 1947), 'os.path.isfile', 'os.path.isfile', (['_mask'], {}), '(_mask)\n', (1940, 1947), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import mne
import numpy as np
from braininvaders2012 import download as dl
import os
import glob
import zipfile
from scipy.io import loadmat
BI2012a_URL = 'https://zenodo.org/record/2649069/files/'
class BrainInvaders2012():
'''
We describe the experimental procedures for a dataset that we have made publicly available at
https://doi.org/10.5281/zenodo.2649006 in mat and csv formats. This dataset contains
electroencephalographic (EEG) recordings of 25 subjects testing the Brain Invaders
(Congedo, 2011), a visual P300 Brain-Computer Interface inspired by the famous vintage video
game Space Invaders (Taito, Tokyo, Japan). The visual P300 is an event-related potential
elicited by a visual stimulation, peaking 240-600 ms after stimulus onset. EEG data were recorded
by 16 electrodes in an experiment that took place in the GIPSA-lab, Grenoble, France, in 2012
(Van Veen, 2013 and Congedo, 2013). A full description of the experiment is available
https://hal.archives-ouvertes.fr/hal-02126068. Python code for manipulating the data is
available at https://github.com/plcrodrigues/py.BI.EEG.2012-GIPSA.The ID of this dataset is
BI.EEG.2012-GIPSA.
**Full description of the experiment and dataset**
https://hal.archives-ouvertes.fr/hal-02126068
**Link to the data**
https://doi.org/10.5281/zenodo.2649006
**Authors**
Principal Investigator: B.Sc. Gijsbrecht <NAME>
Technical Supervisors: Ph.D. <NAME>, Eng. <NAME>, Eng. <NAME>, Eng. <NAME>
Scientific Supervisor: Ph.D. <NAME>
**ID of the dataset**
BI.EEG.2012-GIPSA
'''
def __init__(self, Training=True, Online=False):
self.training = Training
self.online = Online
self.subject_list = list(range(1, 25 + 1))
def _get_single_subject_data(self, subject):
"""return data for a single subject"""
file_path_list = self.data_path(subject)
sessions = {}
for file_path in file_path_list:
session_name = 'session_1'
condition = file_path.split('/')[-1].split('.')[0].split(os.sep)[-1]
run_name = 'run_' + condition
chnames = ['F7',
'F3',
'Fz',
'F4',
'F8',
'T7',
'C3',
'Cz',
'C4',
'T8',
'P7',
'P3',
'Pz',
'P4',
'P8',
'O1',
'O2',
'STI 014']
chtypes = ['eeg'] * 17 + ['stim']
X = loadmat(file_path)[condition].T
S = X[1:18,:]
stim = (X[18,:] + X[19,:])[None,:]
X = np.concatenate([S, stim])
info = mne.create_info(ch_names=chnames, sfreq=128,
ch_types=chtypes, montage='standard_1020',
verbose=False)
raw = mne.io.RawArray(data=X, info=info, verbose=False)
# get rid of the Fz channel (it is the ground)
raw.info['bads'] = ['Fz']
raw.pick_types(eeg=True, stim=True)
sessions[session_name] = {}
sessions[session_name][run_name] = raw
return sessions
def data_path(self, subject, path=None, force_update=False,
update_path=None, verbose=None):
if subject not in self.subject_list:
raise(ValueError("Invalid subject number"))
# check if has the .zip
url = BI2012a_URL + 'subject_' + str(subject).zfill(2) + '.zip'
path_zip = dl.data_path(url, 'BRAININVADERS2012')
path_folder = path_zip.strip('subject_' + str(subject).zfill(2) + '.zip')
# check if has to unzip
if not(os.path.isdir(path_folder + 'subject_{:d}/'.format(subject))) and not(os.path.isdir(path_folder + 'subject_0{:d}/'.format(subject))):
print('unzip', path_zip)
zip_ref = zipfile.ZipFile(path_zip, "r")
zip_ref.extractall(path_folder)
subject_paths = []
# filter the data regarding the experimental conditions
if self.training:
subject_paths.append(path_folder + 'subject_' + str(subject).zfill(2) + '/training.mat')
if self.online:
subject_paths.append(path_folder + 'subject_' + str(subject).zfill(2) + '/online.mat')
return subject_paths
| [
"mne.create_info",
"zipfile.ZipFile",
"scipy.io.loadmat",
"numpy.concatenate",
"braininvaders2012.download.data_path",
"mne.io.RawArray"
] | [((3836, 3874), 'braininvaders2012.download.data_path', 'dl.data_path', (['url', '"""BRAININVADERS2012"""'], {}), "(url, 'BRAININVADERS2012')\n", (3848, 3874), True, 'from braininvaders2012 import download as dl\n'), ((2944, 2969), 'numpy.concatenate', 'np.concatenate', (['[S, stim]'], {}), '([S, stim])\n', (2958, 2969), True, 'import numpy as np\n'), ((2990, 3097), 'mne.create_info', 'mne.create_info', ([], {'ch_names': 'chnames', 'sfreq': '(128)', 'ch_types': 'chtypes', 'montage': '"""standard_1020"""', 'verbose': '(False)'}), "(ch_names=chnames, sfreq=128, ch_types=chtypes, montage=\n 'standard_1020', verbose=False)\n", (3005, 3097), False, 'import mne\n'), ((3181, 3230), 'mne.io.RawArray', 'mne.io.RawArray', ([], {'data': 'X', 'info': 'info', 'verbose': '(False)'}), '(data=X, info=info, verbose=False)\n', (3196, 3230), False, 'import mne\n'), ((4198, 4228), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_zip', '"""r"""'], {}), "(path_zip, 'r')\n", (4213, 4228), False, 'import zipfile\n'), ((2823, 2841), 'scipy.io.loadmat', 'loadmat', (['file_path'], {}), '(file_path)\n', (2830, 2841), False, 'from scipy.io import loadmat\n')] |
"""(Iterative) Luce Spectral Ranking and related inference algorithms."""
import functools
import numpy as np
from .convergence import NormOfDifferenceTest
from .utils import exp_transform, log_transform, statdist
def _init_lsr(n_items, alpha, initial_params):
"""Initialize the LSR Markov chain and the weights."""
if initial_params is None:
weights = np.ones(n_items)
else:
weights = exp_transform(initial_params)
chain = alpha * np.ones((n_items, n_items), dtype=float)
return weights, chain
def _ilsr(fun, params, max_iter, tol):
"""Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
converged = NormOfDifferenceTest(tol, order=1)
for _ in range(max_iter):
params = fun(initial_params=params)
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter))
def lsr_pairwise(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for pairwise-comparison data (see :ref:`data-pairwise`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, loser in data:
chain[loser, winner] += 1 / (weights[winner] + weights[loser])
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
def ilsr_pairwise(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
"""
n_items = comp_mat.shape[0]
ws, chain = _init_lsr(n_items, alpha, initial_params)
denom = np.tile(ws, (n_items, 1))
chain += comp_mat.T / (denom + denom.T)
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
def ilsr_pairwise_dense(
comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
def rank_centrality(n_items, data, alpha=0.0):
"""Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
_, chain = _init_lsr(n_items, alpha, None)
for winner, loser in data:
chain[loser, winner] += 1.0
# Transform the counts into ratios.
idx = chain > 0 # Indices (i,j) of non-zero entries.
chain[idx] = chain[idx] / (chain + chain.T)[idx]
# Finalize the Markov chain by adding the self-transition rate.
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
def lsr_rankings(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for ranking data (see :ref:`data-rankings`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_rankings` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
val = 1.0 / sum_
for loser in ranking[i+1:]:
chain[loser, winner] += val
sum_ -= weights[winner]
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
def ilsr_rankings(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_rankings, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
def lsr_top1(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for top-1 data (see :ref:`data-top1`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_top1` for an idea on how this works). If it is set to
`None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float
Regularization parameter.
initial_params : array_like
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, losers in data:
val = 1 / (weights.take(losers).sum() + weights[winner])
for loser in losers:
chain[loser, winner] += val
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
def ilsr_top1(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(lsr_top1, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
| [
"numpy.tile",
"functools.partial",
"numpy.ones"
] | [((3622, 3694), 'functools.partial', 'functools.partial', (['lsr_pairwise'], {'n_items': 'n_items', 'data': 'data', 'alpha': 'alpha'}), '(lsr_pairwise, n_items=n_items, data=data, alpha=alpha)\n', (3639, 3694), False, 'import functools\n'), ((5382, 5407), 'numpy.tile', 'np.tile', (['ws', '(n_items, 1)'], {}), '(ws, (n_items, 1))\n', (5389, 5407), True, 'import numpy as np\n'), ((7027, 7096), 'functools.partial', 'functools.partial', (['lsr_pairwise_dense'], {'comp_mat': 'comp_mat', 'alpha': 'alpha'}), '(lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)\n', (7044, 7096), False, 'import functools\n'), ((11154, 11226), 'functools.partial', 'functools.partial', (['lsr_rankings'], {'n_items': 'n_items', 'data': 'data', 'alpha': 'alpha'}), '(lsr_rankings, n_items=n_items, data=data, alpha=alpha)\n', (11171, 11226), False, 'import functools\n'), ((13865, 13933), 'functools.partial', 'functools.partial', (['lsr_top1'], {'n_items': 'n_items', 'data': 'data', 'alpha': 'alpha'}), '(lsr_top1, n_items=n_items, data=data, alpha=alpha)\n', (13882, 13933), False, 'import functools\n'), ((373, 389), 'numpy.ones', 'np.ones', (['n_items'], {}), '(n_items)\n', (380, 389), True, 'import numpy as np\n'), ((468, 508), 'numpy.ones', 'np.ones', (['(n_items, n_items)'], {'dtype': 'float'}), '((n_items, n_items), dtype=float)\n', (475, 508), True, 'import numpy as np\n')] |
#!/usr/local/bin/python3
# Author: <NAME> (https://github.com/linzebing)
from datetime import datetime, date, timedelta
import math
import numpy as np
import time
import sys
import requests
import yfinance as yf
import os
if len(sys.argv) == 1:
# symbols = ['SPXL', 'SSO', 'VOO', 'TMF', 'UBT', 'VGLT']
symbols = ['SPXL', 'SSO', 'VOO']
# symbols = ['TMF', 'UBT', 'VGLT']
# symbols = ['TYD', 'UST', 'IEF']
# symbols = ['VOO', 'VGLT']
# symbols = ['SPY', 'TLT']
# symbols = ['SPY', 'IEF']
# symbols = ['00631L.TW', '0050.TW', '00680L.TW', '00679B.TWO']
# symbols = ['00631L.TW', '0050.TW']
# symbols = ['00680L.TW', '00679B.TWO']
# symbols = ['0050.TW', '00679B.TWO']
# symbols = ['VTI', 'BND']
# symbols = ['VT', 'BNDW']
else:
symbols = sys.argv[1].split(',')
for i in range(len(symbols)):
symbols[i] = symbols[i].strip().upper()
num_trading_days_per_year = 252
window_size = 0
date_format = "%Y-%m-%d"
loss_only = False
consider_dividends = False
if window_size == 0 :
# season
end_timestamp = datetime.strptime('2022-06-16', date_format).timestamp()
start_timestamp = datetime.strptime('2022-03-18', date_format).timestamp()
# ['SPXL', 'SSO', 'VOO', 'TMF', 'UBT', 'VGLT']
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('2011-01-01', date_format).timestamp()
# '00631L.TW
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('2014-10-23', date_format).timestamp()
# '0050.TW'
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('2008-01-02', date_format).timestamp()
# ['00680L.TW', '00679B.TWO']
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('2017-01-11', date_format).timestamp()
# 10 years
# end_timestamp = datetime.strptime('2021-12-31', date_format).timestamp()
# start_timestamp = datetime.strptime('2012-01-01', date_format).timestamp()
# ['SPY', 'TLT']
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('2002-07-30', date_format).timestamp()
# end_timestamp = int(time.time())
# start_timestamp = datetime.strptime('1980-01-01', date_format).timestamp()
else:
end_timestamp = int(time.time())
start_timestamp = int(end_timestamp - (1.4 * (window_size + 1) + 4) * 86400)
def get_volatility_and_performance(symbol):
# download_url = "https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history&crumb=a7pcO//zvcW".format(symbol, start_timestamp, end_timestamp)
# lines = requests.get(download_url, cookies={'B': 'chjes25epq9b6&b=3&s=18'}).text.strip().split('\n')
start_str = datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_str = datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d')
data = yf.download(tickers=symbol, start=start_str, end=end_str, auto_adjust=consider_dividends)
data.to_csv(f'{symbol}.csv')
with open(f'{symbol}.csv') as file:
lines = file.readlines()
os.remove(f'{symbol}.csv')
assert lines[0].split(',')[0] == 'Date'
assert lines[0].split(',')[4] == 'Close'
prices = []
for line in lines[1:]:
prices.append(float(line.split(',')[4]))
prices.reverse()
volatilities_in_window = []
if window_size == 0:
trading_days = len(prices)-1
else:
trading_days = window_size
for i in range(trading_days):
# volatilities_in_window.append(math.log(prices[i] / prices[i+1]))
volatilities_in_window.append((prices[i]-prices[i+1])/prices[i+1])
if loss_only:
volatilities_in_window = np.array(volatilities_in_window)
volatilities_in_window = volatilities_in_window[volatilities_in_window<=0]
# most_recent_date = datetime.strptime(lines[-1].split(',')[0], date_format).date()
# assert (date.today() - most_recent_date).days <= 4, "today is {}, most recent trading day is {}".format(date.today(), most_recent_date)
return np.std(volatilities_in_window, ddof = 1) * np.sqrt(num_trading_days_per_year), prices[0] / prices[trading_days] - 1.0
volatilities = []
performances = []
sum_inverse_volatility = 0.0
for symbol in symbols:
volatility, performance = get_volatility_and_performance(symbol)
sum_inverse_volatility += 1 / volatility
volatilities.append(volatility)
performances.append(performance)
print ("Portfolio: {}, as of {} (window size is {} days) from {}".format(str(symbols), datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d'), window_size, datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')))
for i in range(len(symbols)):
print ('{} allocation ratio: {:.2f}% (anualized volatility: {:.2f}%, performance: {:.2f}%)'.format(symbols[i], float(100 / (volatilities[i] * sum_inverse_volatility)), float(volatilities[i] * 100), float(performances[i] * 100)))
| [
"datetime.datetime.fromtimestamp",
"numpy.sqrt",
"datetime.datetime.strptime",
"yfinance.download",
"numpy.array",
"numpy.std",
"time.time",
"os.remove"
] | [((2894, 2988), 'yfinance.download', 'yf.download', ([], {'tickers': 'symbol', 'start': 'start_str', 'end': 'end_str', 'auto_adjust': 'consider_dividends'}), '(tickers=symbol, start=start_str, end=end_str, auto_adjust=\n consider_dividends)\n', (2905, 2988), True, 'import yfinance as yf\n'), ((3094, 3120), 'os.remove', 'os.remove', (['f"""{symbol}.csv"""'], {}), "(f'{symbol}.csv')\n", (3103, 3120), False, 'import os\n'), ((2293, 2304), 'time.time', 'time.time', ([], {}), '()\n', (2302, 2304), False, 'import time\n'), ((3699, 3731), 'numpy.array', 'np.array', (['volatilities_in_window'], {}), '(volatilities_in_window)\n', (3707, 3731), True, 'import numpy as np\n'), ((1078, 1122), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2022-06-16"""', 'date_format'], {}), "('2022-06-16', date_format)\n", (1095, 1122), False, 'from datetime import datetime, date, timedelta\n'), ((1157, 1201), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2022-03-18"""', 'date_format'], {}), "('2022-03-18', date_format)\n", (1174, 1201), False, 'from datetime import datetime, date, timedelta\n'), ((2749, 2788), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['start_timestamp'], {}), '(start_timestamp)\n', (2771, 2788), False, 'from datetime import datetime, date, timedelta\n'), ((2824, 2861), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['end_timestamp'], {}), '(end_timestamp)\n', (2846, 2861), False, 'from datetime import datetime, date, timedelta\n'), ((4058, 4096), 'numpy.std', 'np.std', (['volatilities_in_window'], {'ddof': '(1)'}), '(volatilities_in_window, ddof=1)\n', (4064, 4096), True, 'import numpy as np\n'), ((4101, 4135), 'numpy.sqrt', 'np.sqrt', (['num_trading_days_per_year'], {}), '(num_trading_days_per_year)\n', (4108, 4135), True, 'import numpy as np\n'), ((4540, 4577), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['end_timestamp'], {}), '(end_timestamp)\n', (4562, 4577), False, 'from datetime import 
datetime, date, timedelta\n'), ((4613, 4652), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['start_timestamp'], {}), '(start_timestamp)\n', (4635, 4652), False, 'from datetime import datetime, date, timedelta\n')] |
from keras.models import load_model
import numpy as np
import time
import csv
import os
input = np.load('set_hp_1s_0.2total.npy')
path = '.'
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if '.h5' in file:
files.append(os.path.join(r, file))
for f in files:
print(f)
model = load_model(f)
predict_start = time.time()
predictions = model.predict(input)
predict_end = time.time()
predict_time = predict_end - predict_start
print(predictions)
print(predict_time)
with open('inference_metrics.csv', mode='w') as employee_file:
employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
employee_writer.writerow(['device', 'device_name', 'inference_time'])
employee_writer.writerow(['GPU', 'JetsonTX2', predict_time])
| [
"keras.models.load_model",
"csv.writer",
"os.path.join",
"numpy.load",
"time.time",
"os.walk"
] | [((97, 130), 'numpy.load', 'np.load', (['"""set_hp_1s_0.2total.npy"""'], {}), "('set_hp_1s_0.2total.npy')\n", (104, 130), True, 'import numpy as np\n'), ((204, 217), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (211, 217), False, 'import os\n'), ((354, 367), 'keras.models.load_model', 'load_model', (['f'], {}), '(f)\n', (364, 367), False, 'from keras.models import load_model\n'), ((388, 399), 'time.time', 'time.time', ([], {}), '()\n', (397, 399), False, 'import time\n'), ((457, 468), 'time.time', 'time.time', ([], {}), '()\n', (466, 468), False, 'import time\n'), ((650, 737), 'csv.writer', 'csv.writer', (['employee_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(employee_file, delimiter=\',\', quotechar=\'"\', quoting=csv.\n QUOTE_MINIMAL)\n', (660, 737), False, 'import csv\n'), ((289, 310), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (301, 310), False, 'import os\n')] |
import os
import argparse
import tensorflow as tf
import glob
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import csv
sns.set()
LongTensor = torch.cuda.LongTensor
FloatTensor = torch.cuda.FloatTensor
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--experiment_paths", type = str, nargs = '+', required = True)
parser.add_argument("--result_dir", type = str, default = '/home/daverics/adversarial_learning_speech/audio_mnist/experiment_results/')
parser.add_argument("--model", type = str, required = True)
args = parser.parse_args()
return args
def main():
args = parse_args()
print(args.experiment_paths)
save_dir = os.path.join(args.result_dir, args.model)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
spec_digit_acc_F_mean = []
spec_gender_acc_F_mean = []
audio_digit_acc_F_mean = []
audio_gender_acc_F_mean = []
fid_audio_F_mean = []
spec_digit_acc_F_std = []
spec_gender_acc_F_std = []
audio_digit_acc_F_std = []
audio_gender_acc_F_std = []
fid_audio_F_std = []
spec_digit_acc_G_mean = []
spec_gender_acc_G_mean = []
audio_digit_acc_G_mean = []
audio_gender_acc_G_mean = []
fid_audio_G_mean = []
spec_digit_acc_G_std = []
spec_gender_acc_G_std = []
audio_digit_acc_G_std = []
audio_gender_acc_G_std = []
fid_audio_G_std = []
for experiment in args.experiment_paths:
spec_digit_acc_F_list = np.genfromtxt(os.path.join(experiment,'spec_digit_acc_F.csv'),delimiter=',')[1:]
spec_gender_acc_F_list = np.genfromtxt(os.path.join(experiment,'spec_orig_gender_acc_F.csv'),delimiter=',')[1:]
audio_digit_acc_F_list = np.genfromtxt(os.path.join(experiment,'audio_digit_acc_F.csv'),delimiter=',')[1:]
audio_gender_acc_F_list = np.genfromtxt(os.path.join(experiment,'audio_orig_gender_acc_F.csv'),delimiter=',')[1:]
spec_digit_acc_G_list = np.genfromtxt(os.path.join(experiment,'spec_digit_acc_G.csv'),delimiter=',')[1:]
spec_gender_acc_G_list = np.genfromtxt(os.path.join(experiment,'spec_orig_gender_acc_G.csv'),delimiter=',')[1:]
audio_digit_acc_G_list = np.genfromtxt(os.path.join(experiment,'audio_digit_acc_G.csv'),delimiter=',')[1:]
audio_gender_acc_G_list = np.genfromtxt(os.path.join(experiment,'audio_orig_gender_acc_G.csv'),delimiter=',')[1:]
fid_audio_F_list = np.genfromtxt(os.path.join(experiment,'fid_audio_F.csv'),delimiter=',')[1:]
fid_audio_G_list = np.genfromtxt(os.path.join(experiment,'fid_audio_G.csv'),delimiter=',')[1:]
spec_digit_acc_F_mean.append(np.mean(spec_digit_acc_F_list))
spec_digit_acc_F_std.append(np.std(spec_digit_acc_F_list))
spec_gender_acc_F_mean.append(np.mean(spec_gender_acc_F_list))
spec_gender_acc_F_std.append(np.std(spec_gender_acc_F_list))
audio_digit_acc_F_mean.append(np.mean(audio_digit_acc_F_list))
audio_digit_acc_F_std.append(np.std(audio_digit_acc_F_list))
audio_gender_acc_F_mean.append(np.mean(audio_gender_acc_F_list))
audio_gender_acc_F_std.append(np.std(audio_gender_acc_F_list))
spec_digit_acc_G_mean.append(np.mean(spec_digit_acc_G_list))
spec_digit_acc_G_std.append(np.std(spec_digit_acc_G_list))
spec_gender_acc_G_mean.append(np.mean(spec_gender_acc_G_list))
spec_gender_acc_G_std.append(np.std(spec_gender_acc_G_list))
audio_digit_acc_G_mean.append(np.mean(audio_digit_acc_G_list))
audio_digit_acc_G_std.append(np.std(audio_digit_acc_G_list))
audio_gender_acc_G_mean.append(np.mean(audio_gender_acc_G_list))
audio_gender_acc_G_std.append(np.std(audio_gender_acc_G_list))
fid_audio_F_mean.append(np.mean(fid_audio_F_list))
fid_audio_F_std.append(np.std(fid_audio_F_list))
fid_audio_G_mean.append(np.mean(fid_audio_G_list))
fid_audio_G_std.append(np.std(fid_audio_G_list))
# Generate FID and acc table scores
with open(os.path.join(save_dir,'fid_audio.csv'), mode='w') as file:
fid_writer = csv.writer(file, delimiter=',')
for i in range(4):
fid_writer.writerow(['$ {:5.2f} \pm {:5.2f} $'.format(fid_audio_F_mean[i], fid_audio_F_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(fid_audio_G_mean[i], fid_audio_G_std[i])])
with open(os.path.join(save_dir,'audio_accs_pcgan.csv'), mode='w') as file:
fid_writer = csv.writer(file, delimiter=',')
for i in range(4):
fid_writer.writerow(['$ {:5.2f} \pm {:5.2f} $'.format(audio_gender_acc_F_mean[i], audio_gender_acc_F_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(audio_gender_acc_G_mean[i], audio_gender_acc_G_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(audio_digit_acc_F_mean[i], audio_digit_acc_F_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(audio_digit_acc_G_mean[i], audio_digit_acc_G_std[i])])
with open(os.path.join(save_dir,'spec_accs_pcgan.csv'), mode='w') as file:
fid_writer = csv.writer(file, delimiter=',')
for i in range(4):
fid_writer.writerow(['$ {:5.2f} \pm {:5.2f} $'.format(spec_gender_acc_F_mean[i], spec_gender_acc_F_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(spec_gender_acc_G_mean[i], spec_gender_acc_G_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(spec_digit_acc_F_mean[i], spec_digit_acc_F_std[i]),
'$ {:5.2f} \pm {:5.2f} $'.format(spec_digit_acc_G_mean[i], spec_digit_acc_G_std[i])])
#label = ['eps=0.01','eps=0.05','eps=0.1','eps=0.2']
#epsilons = [0.01, 0.05, 0.1, 0.2]
#labe =['eps=0.1','eps=0.2']
epsilons = [0.005, 0.01, 0.05, 0.1]
#Plot digit vs gender accuracy
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
ax1.scatter(spec_digit_acc_F_mean, spec_gender_acc_F_mean, c = 'orange', label = 'Filter')
ax1.scatter(spec_digit_acc_G_mean, spec_gender_acc_G_mean, c = 'blue', label = 'PCMelGAN')
ax2.scatter(audio_digit_acc_F_mean, audio_gender_acc_F_mean, c = 'orange', label = 'Filter')
ax2.scatter(audio_digit_acc_G_mean, audio_gender_acc_G_mean, c = 'blue', label = 'PCMelGAN')
for i, eps in enumerate(epsilons):
ax1.annotate(r'$\varepsilon$ =' + str(eps) , xy = (spec_digit_acc_F_mean[i], spec_gender_acc_F_mean[i]), xytext = (spec_digit_acc_F_mean[i] - 4, spec_gender_acc_F_mean[i]))
ax1.annotate(r'$\varepsilon$ =' + str(eps) , xy = (spec_digit_acc_G_mean[i], spec_gender_acc_G_mean[i]), xytext = (spec_digit_acc_G_mean[i] - 4, spec_gender_acc_G_mean[i]))
ax2.annotate(r'$\varepsilon$ =' + str(eps) , xy = (audio_digit_acc_F_mean[i], audio_gender_acc_F_mean[i]), xytext = (audio_digit_acc_F_mean[i] - 2, audio_gender_acc_F_mean[i]))
ax2.annotate(r'$\varepsilon$ =' + str(eps) , xy = (audio_digit_acc_G_mean[i], audio_gender_acc_G_mean[i]), xytext = (audio_digit_acc_G_mean[i] - 2, audio_gender_acc_G_mean[i]))
ax1.set_ylabel('Privacy')
ax1.set_xlabel('Utility')
ax2.set_ylabel('Privacy')
ax2.set_xlabel('Utility')
ax1.legend(loc = 'upper left')
ax2.legend(loc = 'upper left')
fig1.savefig(os.path.join(save_dir,'trade_off_plot_spec_15_june.png'))
ax2.set_ylabel('Privacy')
ax2.set_xlabel('Utility')
fig2.savefig(os.path.join(save_dir,'trade_off_plot_audio_15_june.png'))
plt.close(fig1)
plt.close(fig2)
# #Plot Fid vs epsilon
# #Spectrograms
# fig, ax = plt.subplots()
# ax.scatter(epsilons, fid_spec_mean)
#
# plt.ylabel('Frechet Inception Distance')
# plt.xlabel('Epsilon')
# fig.savefig(os.path.join(save_dir,'fid_spec_plot.png'))
# plt.close(fig)
#
# #Audio
# fig, ax = plt.subplots()
# ax.scatter(epsilons, fid_audio_mean)
#
# plt.ylabel('Frechet Inception Distance')
# plt.xlabel('Epsilon')
# fig.savefig(os.path.join(save_dir,'fid_audio_plot.png'))
# plt.close(fig)
if __name__ == '__main__':
main()
| [
"os.path.exists",
"seaborn.set",
"numpy.mean",
"argparse.ArgumentParser",
"csv.writer",
"os.path.join",
"matplotlib.pyplot.close",
"os.mkdir",
"numpy.std",
"matplotlib.pyplot.subplots"
] | [((159, 168), 'seaborn.set', 'sns.set', ([], {}), '()\n', (166, 168), True, 'import seaborn as sns\n'), ((274, 299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (297, 299), False, 'import argparse\n'), ((728, 769), 'os.path.join', 'os.path.join', (['args.result_dir', 'args.model'], {}), '(args.result_dir, args.model)\n', (740, 769), False, 'import os\n'), ((5771, 5785), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5783, 5785), True, 'import matplotlib.pyplot as plt\n'), ((5802, 5816), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5814, 5816), True, 'import matplotlib.pyplot as plt\n'), ((7381, 7396), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (7390, 7396), True, 'import matplotlib.pyplot as plt\n'), ((7401, 7416), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (7410, 7416), True, 'import matplotlib.pyplot as plt\n'), ((781, 805), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (795, 805), False, 'import os\n'), ((815, 833), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (823, 833), False, 'import os\n'), ((4135, 4166), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (4145, 4166), False, 'import csv\n'), ((4492, 4523), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (4502, 4523), False, 'import csv\n'), ((5075, 5106), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (5085, 5106), False, 'import csv\n'), ((7181, 7238), 'os.path.join', 'os.path.join', (['save_dir', '"""trade_off_plot_spec_15_june.png"""'], {}), "(save_dir, 'trade_off_plot_spec_15_june.png')\n", (7193, 7238), False, 'import os\n'), ((7317, 7375), 'os.path.join', 'os.path.join', (['save_dir', '"""trade_off_plot_audio_15_june.png"""'], {}), "(save_dir, 'trade_off_plot_audio_15_june.png')\n", (7329, 7375), False, 'import 
os\n'), ((2679, 2709), 'numpy.mean', 'np.mean', (['spec_digit_acc_F_list'], {}), '(spec_digit_acc_F_list)\n', (2686, 2709), True, 'import numpy as np\n'), ((2747, 2776), 'numpy.std', 'np.std', (['spec_digit_acc_F_list'], {}), '(spec_digit_acc_F_list)\n', (2753, 2776), True, 'import numpy as np\n'), ((2816, 2847), 'numpy.mean', 'np.mean', (['spec_gender_acc_F_list'], {}), '(spec_gender_acc_F_list)\n', (2823, 2847), True, 'import numpy as np\n'), ((2886, 2916), 'numpy.std', 'np.std', (['spec_gender_acc_F_list'], {}), '(spec_gender_acc_F_list)\n', (2892, 2916), True, 'import numpy as np\n'), ((2957, 2988), 'numpy.mean', 'np.mean', (['audio_digit_acc_F_list'], {}), '(audio_digit_acc_F_list)\n', (2964, 2988), True, 'import numpy as np\n'), ((3027, 3057), 'numpy.std', 'np.std', (['audio_digit_acc_F_list'], {}), '(audio_digit_acc_F_list)\n', (3033, 3057), True, 'import numpy as np\n'), ((3098, 3130), 'numpy.mean', 'np.mean', (['audio_gender_acc_F_list'], {}), '(audio_gender_acc_F_list)\n', (3105, 3130), True, 'import numpy as np\n'), ((3170, 3201), 'numpy.std', 'np.std', (['audio_gender_acc_F_list'], {}), '(audio_gender_acc_F_list)\n', (3176, 3201), True, 'import numpy as np\n'), ((3241, 3271), 'numpy.mean', 'np.mean', (['spec_digit_acc_G_list'], {}), '(spec_digit_acc_G_list)\n', (3248, 3271), True, 'import numpy as np\n'), ((3309, 3338), 'numpy.std', 'np.std', (['spec_digit_acc_G_list'], {}), '(spec_digit_acc_G_list)\n', (3315, 3338), True, 'import numpy as np\n'), ((3378, 3409), 'numpy.mean', 'np.mean', (['spec_gender_acc_G_list'], {}), '(spec_gender_acc_G_list)\n', (3385, 3409), True, 'import numpy as np\n'), ((3448, 3478), 'numpy.std', 'np.std', (['spec_gender_acc_G_list'], {}), '(spec_gender_acc_G_list)\n', (3454, 3478), True, 'import numpy as np\n'), ((3519, 3550), 'numpy.mean', 'np.mean', (['audio_digit_acc_G_list'], {}), '(audio_digit_acc_G_list)\n', (3526, 3550), True, 'import numpy as np\n'), ((3589, 3619), 'numpy.std', 'np.std', (['audio_digit_acc_G_list'], 
{}), '(audio_digit_acc_G_list)\n', (3595, 3619), True, 'import numpy as np\n'), ((3660, 3692), 'numpy.mean', 'np.mean', (['audio_gender_acc_G_list'], {}), '(audio_gender_acc_G_list)\n', (3667, 3692), True, 'import numpy as np\n'), ((3732, 3763), 'numpy.std', 'np.std', (['audio_gender_acc_G_list'], {}), '(audio_gender_acc_G_list)\n', (3738, 3763), True, 'import numpy as np\n'), ((3798, 3823), 'numpy.mean', 'np.mean', (['fid_audio_F_list'], {}), '(fid_audio_F_list)\n', (3805, 3823), True, 'import numpy as np\n'), ((3856, 3880), 'numpy.std', 'np.std', (['fid_audio_F_list'], {}), '(fid_audio_F_list)\n', (3862, 3880), True, 'import numpy as np\n'), ((3915, 3940), 'numpy.mean', 'np.mean', (['fid_audio_G_list'], {}), '(fid_audio_G_list)\n', (3922, 3940), True, 'import numpy as np\n'), ((3973, 3997), 'numpy.std', 'np.std', (['fid_audio_G_list'], {}), '(fid_audio_G_list)\n', (3979, 3997), True, 'import numpy as np\n'), ((4055, 4094), 'os.path.join', 'os.path.join', (['save_dir', '"""fid_audio.csv"""'], {}), "(save_dir, 'fid_audio.csv')\n", (4067, 4094), False, 'import os\n'), ((4405, 4451), 'os.path.join', 'os.path.join', (['save_dir', '"""audio_accs_pcgan.csv"""'], {}), "(save_dir, 'audio_accs_pcgan.csv')\n", (4417, 4451), False, 'import os\n'), ((4989, 5034), 'os.path.join', 'os.path.join', (['save_dir', '"""spec_accs_pcgan.csv"""'], {}), "(save_dir, 'spec_accs_pcgan.csv')\n", (5001, 5034), False, 'import os\n'), ((1537, 1585), 'os.path.join', 'os.path.join', (['experiment', '"""spec_digit_acc_F.csv"""'], {}), "(experiment, 'spec_digit_acc_F.csv')\n", (1549, 1585), False, 'import os\n'), ((1651, 1705), 'os.path.join', 'os.path.join', (['experiment', '"""spec_orig_gender_acc_F.csv"""'], {}), "(experiment, 'spec_orig_gender_acc_F.csv')\n", (1663, 1705), False, 'import os\n'), ((1772, 1821), 'os.path.join', 'os.path.join', (['experiment', '"""audio_digit_acc_F.csv"""'], {}), "(experiment, 'audio_digit_acc_F.csv')\n", (1784, 1821), False, 'import os\n'), ((1888, 1943), 
'os.path.join', 'os.path.join', (['experiment', '"""audio_orig_gender_acc_F.csv"""'], {}), "(experiment, 'audio_orig_gender_acc_F.csv')\n", (1900, 1943), False, 'import os\n'), ((2009, 2057), 'os.path.join', 'os.path.join', (['experiment', '"""spec_digit_acc_G.csv"""'], {}), "(experiment, 'spec_digit_acc_G.csv')\n", (2021, 2057), False, 'import os\n'), ((2123, 2177), 'os.path.join', 'os.path.join', (['experiment', '"""spec_orig_gender_acc_G.csv"""'], {}), "(experiment, 'spec_orig_gender_acc_G.csv')\n", (2135, 2177), False, 'import os\n'), ((2244, 2293), 'os.path.join', 'os.path.join', (['experiment', '"""audio_digit_acc_G.csv"""'], {}), "(experiment, 'audio_digit_acc_G.csv')\n", (2256, 2293), False, 'import os\n'), ((2360, 2415), 'os.path.join', 'os.path.join', (['experiment', '"""audio_orig_gender_acc_G.csv"""'], {}), "(experiment, 'audio_orig_gender_acc_G.csv')\n", (2372, 2415), False, 'import os\n'), ((2476, 2519), 'os.path.join', 'os.path.join', (['experiment', '"""fid_audio_F.csv"""'], {}), "(experiment, 'fid_audio_F.csv')\n", (2488, 2519), False, 'import os\n'), ((2579, 2622), 'os.path.join', 'os.path.join', (['experiment', '"""fid_audio_G.csv"""'], {}), "(experiment, 'fid_audio_G.csv')\n", (2591, 2622), False, 'import os\n')] |
import unittest
import numpy as np
import pandas as pd
from diamond.glms.logistic import LogisticRegression
import os
import logging
from diamond.integration_tests.utils import run_r_script
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class TestLogistic(unittest.TestCase):
def setUp(self, tol=0.02):
# assumes working directory is diamond/
folder = os.path.join('diamond', 'integration_tests', 'logistic')
data_loc = 'simulated_logistic_df.csv'
cov_loc = 'simulated_logistic_covariance.csv'
simulated_data_loc = os.path.join(folder, data_loc)
estimated_covariance_loc = os.path.join(folder, cov_loc)
resources_exist = os.path.exists(simulated_data_loc) and \
os.path.exists(estimated_covariance_loc)
if not resources_exist:
logging.info('Simulating data and estimating covariances in R')
run_r_script(os.path.join(folder, 'logistic_generate_and_fit.R'))
logging.info('Reading in training data and R::lme4-estimated covariance matrix')
df_train = pd.read_csv(simulated_data_loc)
df_estimated_covariance = pd.read_csv(estimated_covariance_loc)
self.model = LogisticRegression(train_df=df_train,
priors_df=df_estimated_covariance,
copy=True,
test_df=None)
logging.info("Fitting model in diamond")
self.formula = "y ~ 1 + x + (1 + x | level)"
results = self.model.fit(self.formula, tol=1e-4, verbose=True)
# the format of the coefficient vector is:
# fixed effects, then [random intercept, random slope] for each level
beta_hat = np.append(results["fixed_effects"].value.values,
pd.melt(results["level"], "level").sort_values(["level", "variable"]).value.values)
beta_true = pd.read_csv("%s/simulated_logistic_true_parameters.csv" % folder)["x"].values
rel_error = np.mean((beta_hat - beta_true) ** 2) / np.mean(abs(beta_true))
if rel_error > tol:
logging.warn("relative error = %f > tolerance = %f" % (rel_error, tol))
else:
logging.info("relative error = %f < tolerance = %f" % (rel_error, tol))
# make sure the coefficients are very close
self.assertTrue(rel_error < tol)
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"os.path.exists",
"numpy.mean",
"logging.warn",
"pandas.read_csv",
"diamond.glms.logistic.LogisticRegression",
"os.path.join",
"pandas.melt",
"unittest.main",
"logging.info"
] | [((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((2439, 2454), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2452, 2454), False, 'import unittest\n'), ((395, 451), 'os.path.join', 'os.path.join', (['"""diamond"""', '"""integration_tests"""', '"""logistic"""'], {}), "('diamond', 'integration_tests', 'logistic')\n", (407, 451), False, 'import os\n'), ((583, 613), 'os.path.join', 'os.path.join', (['folder', 'data_loc'], {}), '(folder, data_loc)\n', (595, 613), False, 'import os\n'), ((649, 678), 'os.path.join', 'os.path.join', (['folder', 'cov_loc'], {}), '(folder, cov_loc)\n', (661, 678), False, 'import os\n'), ((993, 1078), 'logging.info', 'logging.info', (['"""Reading in training data and R::lme4-estimated covariance matrix"""'], {}), "('Reading in training data and R::lme4-estimated covariance matrix'\n )\n", (1005, 1078), False, 'import logging\n'), ((1093, 1124), 'pandas.read_csv', 'pd.read_csv', (['simulated_data_loc'], {}), '(simulated_data_loc)\n', (1104, 1124), True, 'import pandas as pd\n'), ((1159, 1196), 'pandas.read_csv', 'pd.read_csv', (['estimated_covariance_loc'], {}), '(estimated_covariance_loc)\n', (1170, 1196), True, 'import pandas as pd\n'), ((1219, 1320), 'diamond.glms.logistic.LogisticRegression', 'LogisticRegression', ([], {'train_df': 'df_train', 'priors_df': 'df_estimated_covariance', 'copy': '(True)', 'test_df': 'None'}), '(train_df=df_train, priors_df=df_estimated_covariance,\n copy=True, test_df=None)\n', (1237, 1320), False, 'from diamond.glms.logistic import LogisticRegression\n'), ((1445, 1485), 'logging.info', 'logging.info', (['"""Fitting model in diamond"""'], {}), "('Fitting model in diamond')\n", (1457, 1485), False, 'import logging\n'), ((705, 739), 'os.path.exists', 'os.path.exists', (['simulated_data_loc'], {}), '(simulated_data_loc)\n', (719, 739), False, 'import os\n'), ((758, 798), 'os.path.exists', 'os.path.exists', 
(['estimated_covariance_loc'], {}), '(estimated_covariance_loc)\n', (772, 798), False, 'import os\n'), ((843, 906), 'logging.info', 'logging.info', (['"""Simulating data and estimating covariances in R"""'], {}), "('Simulating data and estimating covariances in R')\n", (855, 906), False, 'import logging\n'), ((2040, 2076), 'numpy.mean', 'np.mean', (['((beta_hat - beta_true) ** 2)'], {}), '((beta_hat - beta_true) ** 2)\n', (2047, 2076), True, 'import numpy as np\n'), ((2143, 2214), 'logging.warn', 'logging.warn', (["('relative error = %f > tolerance = %f' % (rel_error, tol))"], {}), "('relative error = %f > tolerance = %f' % (rel_error, tol))\n", (2155, 2214), False, 'import logging\n'), ((2241, 2312), 'logging.info', 'logging.info', (["('relative error = %f < tolerance = %f' % (rel_error, tol))"], {}), "('relative error = %f < tolerance = %f' % (rel_error, tol))\n", (2253, 2312), False, 'import logging\n'), ((932, 983), 'os.path.join', 'os.path.join', (['folder', '"""logistic_generate_and_fit.R"""'], {}), "(folder, 'logistic_generate_and_fit.R')\n", (944, 983), False, 'import os\n'), ((1942, 2007), 'pandas.read_csv', 'pd.read_csv', (["('%s/simulated_logistic_true_parameters.csv' % folder)"], {}), "('%s/simulated_logistic_true_parameters.csv' % folder)\n", (1953, 2007), True, 'import pandas as pd\n'), ((1837, 1871), 'pandas.melt', 'pd.melt', (["results['level']", '"""level"""'], {}), "(results['level'], 'level')\n", (1844, 1871), True, 'import pandas as pd\n')] |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import deformation_utils
_ARRAY_COMPARE_TOLERANCE = 1e-5
class ControlGridTest(tf.test.TestCase):
def test_create_control_grid_for_cubic_interp_2d(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1]),
control_grid_spacings_pix=[9, 9])
self.assertAllEqual([6, 8, 2], grid.eval().shape)
def test_create_control_grid_for_cubic_interp_3d(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[10, 20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
control_grid_spacings_pix=[9, 9, 9])
self.assertAllEqual([4, 6, 8, 3], grid.eval().shape)
def test_create_control_grid_for_cubic_interp_3d_single_slice(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[1, 20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
control_grid_spacings_pix=[1, 9, 9])
self.assertAllEqual([3, 6, 8, 3], grid.eval().shape)
class Create2DDeformationFieldTest(tf.test.TestCase):
def test_applies_cropping_offset(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([2.0, 3.0]))
expected_output = np.array([[[2, 3], [2, 4], [2, 5]],
[[3, 3], [3, 4], [3, 5]],
[[4, 3], [4, 4], [4, 5]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_rotation(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(np.pi / 4.),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-0.4142135624, 1.],
[0.2928932188, 1.7071067812],
[1., 2.4142135624]],
[[0.2928932188, 0.2928932188],
[1., 1.],
[1.7071067812, 1.7071067812]],
[[1., -0.4142135624],
[1.7071067812, 0.2928932188],
[2.4142135624, 1]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_shear(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.1]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-0.1, 0], [0, 1], [0.1, 2]],
[[0.9, 0], [1, 1], [1.1, 2]],
[[1.9, 0], [2, 1], [2.1, 2]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_mirror(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([-1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[2., 0.], [2., 1.], [2., 2.]],
[[1., 0.], [1., 1.], [1., 2.]],
[[0., 0.], [0., 1.], [0., 2.]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_scale(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([2.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-1., 0.], [-1., 1.], [-1., 2.]],
[[1., 0.], [1., 1.], [1., 2.]],
[[3., 0.], [3., 1.], [3., 2.]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_multiple_transforms_together(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(np.pi / 2.),
scale_factors=tf.constant([1.0, 2.0]),
mirror_factors=tf.constant([1, -1]),
shearing_coefs=tf.constant([0.1, 0.0]),
cropping_offset_pix=tf.constant([3.0, 5.0]))
expected_output = np.array([[[3., 3.9], [4., 4.], [5., 4.1]],
[[3., 5.9], [4., 6.], [5., 6.1]],
[[3., 7.9], [4., 8.], [5., 8.1]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_oddEvenErrorHandling(self):
with tf.Session():
deform = deformation_utils.create_2d_deformation_field(
np.array([101, 101]) / 2,
raw_image_element_size_um=tf.constant([1., 1.]),
net_input_spatial_shape=[50, 101],
net_input_element_size_um=tf.constant([2., 1.]),
control_grid_spacings_pix=[10, 10],
deformations_magnitudes_um=tf.constant((0., 0.)),
rotation_angle=tf.constant(0.),
scale_factors=tf.constant((1., 1.)),
mirror_factors=tf.constant((1., 1.)),
shearing_coefs=tf.constant((0., 0., 0., 0.)),
cropping_offset_pix=tf.constant((0., 0.)))
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
"factor must be odd as input and output size is even"):
deform.eval()
if __name__ == "__main__":
tf.test.main()
| [
"numpy.array",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.constant"
] | [((9498, 9512), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (9510, 9512), True, 'import tensorflow.compat.v1 as tf\n'), ((2778, 2871), 'numpy.array', 'np.array', (['[[[2, 3], [2, 4], [2, 5]], [[3, 3], [3, 4], [3, 5]], [[4, 3], [4, 4], [4, 5]]]'], {}), '([[[2, 3], [2, 4], [2, 5]], [[3, 3], [3, 4], [3, 5]], [[4, 3], [4, \n 4], [4, 5]]])\n', (2786, 2871), True, 'import numpy as np\n'), ((3798, 4045), 'numpy.array', 'np.array', (['[[[-0.4142135624, 1.0], [0.2928932188, 1.7071067812], [1.0, 2.4142135624]],\n [[0.2928932188, 0.2928932188], [1.0, 1.0], [1.7071067812, 1.7071067812]\n ], [[1.0, -0.4142135624], [1.7071067812, 0.2928932188], [2.4142135624, 1]]]'], {}), '([[[-0.4142135624, 1.0], [0.2928932188, 1.7071067812], [1.0, \n 2.4142135624]], [[0.2928932188, 0.2928932188], [1.0, 1.0], [\n 1.7071067812, 1.7071067812]], [[1.0, -0.4142135624], [1.7071067812, \n 0.2928932188], [2.4142135624, 1]]])\n', (3806, 4045), True, 'import numpy as np\n'), ((5145, 5251), 'numpy.array', 'np.array', (['[[[-0.1, 0], [0, 1], [0.1, 2]], [[0.9, 0], [1, 1], [1.1, 2]], [[1.9, 0], [2,\n 1], [2.1, 2]]]'], {}), '([[[-0.1, 0], [0, 1], [0.1, 2]], [[0.9, 0], [1, 1], [1.1, 2]], [[\n 1.9, 0], [2, 1], [2.1, 2]]])\n', (5153, 5251), True, 'import numpy as np\n'), ((6170, 6299), 'numpy.array', 'np.array', (['[[[2.0, 0.0], [2.0, 1.0], [2.0, 2.0]], [[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]],\n [[0.0, 0.0], [0.0, 1.0], [0.0, 2.0]]]'], {}), '([[[2.0, 0.0], [2.0, 1.0], [2.0, 2.0]], [[1.0, 0.0], [1.0, 1.0], [\n 1.0, 2.0]], [[0.0, 0.0], [0.0, 1.0], [0.0, 2.0]]])\n', (6178, 6299), True, 'import numpy as np\n'), ((7198, 7329), 'numpy.array', 'np.array', (['[[[-1.0, 0.0], [-1.0, 1.0], [-1.0, 2.0]], [[1.0, 0.0], [1.0, 1.0], [1.0, \n 2.0]], [[3.0, 0.0], [3.0, 1.0], [3.0, 2.0]]]'], {}), '([[[-1.0, 0.0], [-1.0, 1.0], [-1.0, 2.0]], [[1.0, 0.0], [1.0, 1.0],\n [1.0, 2.0]], [[3.0, 0.0], [3.0, 1.0], [3.0, 2.0]]])\n', (7206, 7329), True, 'import numpy as np\n'), ((8260, 8389), 'numpy.array', 
'np.array', (['[[[3.0, 3.9], [4.0, 4.0], [5.0, 4.1]], [[3.0, 5.9], [4.0, 6.0], [5.0, 6.1]],\n [[3.0, 7.9], [4.0, 8.0], [5.0, 8.1]]]'], {}), '([[[3.0, 3.9], [4.0, 4.0], [5.0, 4.1]], [[3.0, 5.9], [4.0, 6.0], [\n 5.0, 6.1]], [[3.0, 7.9], [4.0, 8.0], [5.0, 8.1]]])\n', (8268, 8389), True, 'import numpy as np\n'), ((8661, 8673), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (8671, 8673), True, 'import tensorflow.compat.v1 as tf\n'), ((2233, 2256), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2244, 2256), True, 'import tensorflow.compat.v1 as tf\n'), ((2292, 2315), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2303, 2315), True, 'import tensorflow.compat.v1 as tf\n'), ((2391, 2414), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2402, 2414), True, 'import tensorflow.compat.v1 as tf\n'), ((2497, 2520), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2508, 2520), True, 'import tensorflow.compat.v1 as tf\n'), ((2545, 2561), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (2556, 2561), True, 'import tensorflow.compat.v1 as tf\n'), ((2585, 2608), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2596, 2608), True, 'import tensorflow.compat.v1 as tf\n'), ((2633, 2652), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 1]'], {}), '([1, 1])\n', (2644, 2652), True, 'import tensorflow.compat.v1 as tf\n'), ((2677, 2700), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2688, 2700), True, 'import tensorflow.compat.v1 as tf\n'), ((2730, 2753), 'tensorflow.compat.v1.constant', 'tf.constant', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (2741, 2753), True, 'import tensorflow.compat.v1 as tf\n'), ((3246, 3269), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', 
(3257, 3269), True, 'import tensorflow.compat.v1 as tf\n'), ((3305, 3328), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (3316, 3328), True, 'import tensorflow.compat.v1 as tf\n'), ((3404, 3427), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (3415, 3427), True, 'import tensorflow.compat.v1 as tf\n'), ((3510, 3533), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3521, 3533), True, 'import tensorflow.compat.v1 as tf\n'), ((3558, 3582), 'tensorflow.compat.v1.constant', 'tf.constant', (['(np.pi / 4.0)'], {}), '(np.pi / 4.0)\n', (3569, 3582), True, 'import tensorflow.compat.v1 as tf\n'), ((3605, 3628), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (3616, 3628), True, 'import tensorflow.compat.v1 as tf\n'), ((3653, 3672), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 1]'], {}), '([1, 1])\n', (3664, 3672), True, 'import tensorflow.compat.v1 as tf\n'), ((3697, 3720), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3708, 3720), True, 'import tensorflow.compat.v1 as tf\n'), ((3750, 3773), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3761, 3773), True, 'import tensorflow.compat.v1 as tf\n'), ((4600, 4623), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4611, 4623), True, 'import tensorflow.compat.v1 as tf\n'), ((4659, 4682), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4670, 4682), True, 'import tensorflow.compat.v1 as tf\n'), ((4758, 4781), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4769, 4781), True, 'import tensorflow.compat.v1 as tf\n'), ((4864, 4887), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (4875, 4887), True, 'import 
tensorflow.compat.v1 as tf\n'), ((4912, 4928), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (4923, 4928), True, 'import tensorflow.compat.v1 as tf\n'), ((4952, 4975), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4963, 4975), True, 'import tensorflow.compat.v1 as tf\n'), ((5000, 5019), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 1]'], {}), '([1, 1])\n', (5011, 5019), True, 'import tensorflow.compat.v1 as tf\n'), ((5044, 5067), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (5055, 5067), True, 'import tensorflow.compat.v1 as tf\n'), ((5097, 5120), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5108, 5120), True, 'import tensorflow.compat.v1 as tf\n'), ((5624, 5647), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5635, 5647), True, 'import tensorflow.compat.v1 as tf\n'), ((5683, 5706), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5694, 5706), True, 'import tensorflow.compat.v1 as tf\n'), ((5782, 5805), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5793, 5805), True, 'import tensorflow.compat.v1 as tf\n'), ((5888, 5911), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5899, 5911), True, 'import tensorflow.compat.v1 as tf\n'), ((5936, 5952), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (5947, 5952), True, 'import tensorflow.compat.v1 as tf\n'), ((5976, 5999), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5987, 5999), True, 'import tensorflow.compat.v1 as tf\n'), ((6024, 6044), 'tensorflow.compat.v1.constant', 'tf.constant', (['[-1, 1]'], {}), '([-1, 1])\n', (6035, 6044), True, 'import tensorflow.compat.v1 as tf\n'), ((6069, 6092), 'tensorflow.compat.v1.constant', 
'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (6080, 6092), True, 'import tensorflow.compat.v1 as tf\n'), ((6122, 6145), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (6133, 6145), True, 'import tensorflow.compat.v1 as tf\n'), ((6653, 6676), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (6664, 6676), True, 'import tensorflow.compat.v1 as tf\n'), ((6712, 6735), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (6723, 6735), True, 'import tensorflow.compat.v1 as tf\n'), ((6811, 6834), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (6822, 6834), True, 'import tensorflow.compat.v1 as tf\n'), ((6917, 6940), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (6928, 6940), True, 'import tensorflow.compat.v1 as tf\n'), ((6965, 6981), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (6976, 6981), True, 'import tensorflow.compat.v1 as tf\n'), ((7005, 7028), 'tensorflow.compat.v1.constant', 'tf.constant', (['[2.0, 1.0]'], {}), '([2.0, 1.0])\n', (7016, 7028), True, 'import tensorflow.compat.v1 as tf\n'), ((7053, 7072), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 1]'], {}), '([1, 1])\n', (7064, 7072), True, 'import tensorflow.compat.v1 as tf\n'), ((7097, 7120), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7108, 7120), True, 'import tensorflow.compat.v1 as tf\n'), ((7150, 7173), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7161, 7173), True, 'import tensorflow.compat.v1 as tf\n'), ((7707, 7730), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (7718, 7730), True, 'import tensorflow.compat.v1 as tf\n'), ((7766, 7789), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (7777, 
7789), True, 'import tensorflow.compat.v1 as tf\n'), ((7865, 7888), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (7876, 7888), True, 'import tensorflow.compat.v1 as tf\n'), ((7971, 7994), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7982, 7994), True, 'import tensorflow.compat.v1 as tf\n'), ((8019, 8043), 'tensorflow.compat.v1.constant', 'tf.constant', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (8030, 8043), True, 'import tensorflow.compat.v1 as tf\n'), ((8066, 8089), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (8077, 8089), True, 'import tensorflow.compat.v1 as tf\n'), ((8114, 8134), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, -1]'], {}), '([1, -1])\n', (8125, 8134), True, 'import tensorflow.compat.v1 as tf\n'), ((8159, 8182), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.1, 0.0]'], {}), '([0.1, 0.0])\n', (8170, 8182), True, 'import tensorflow.compat.v1 as tf\n'), ((8212, 8235), 'tensorflow.compat.v1.constant', 'tf.constant', (['[3.0, 5.0]'], {}), '([3.0, 5.0])\n', (8223, 8235), True, 'import tensorflow.compat.v1 as tf\n'), ((1139, 1162), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (1150, 1162), True, 'import tensorflow.compat.v1 as tf\n'), ((1505, 1533), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (1516, 1533), True, 'import tensorflow.compat.v1 as tf\n'), ((1894, 1922), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (1905, 1922), True, 'import tensorflow.compat.v1 as tf\n'), ((8747, 8767), 'numpy.array', 'np.array', (['[101, 101]'], {}), '([101, 101])\n', (8755, 8767), True, 'import numpy as np\n'), ((8809, 8832), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (8820, 8832), True, 'import tensorflow.compat.v1 as tf\n'), ((8913, 
8936), 'tensorflow.compat.v1.constant', 'tf.constant', (['[2.0, 1.0]'], {}), '([2.0, 1.0])\n', (8924, 8936), True, 'import tensorflow.compat.v1 as tf\n'), ((9019, 9042), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (9030, 9042), True, 'import tensorflow.compat.v1 as tf\n'), ((9067, 9083), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (9078, 9083), True, 'import tensorflow.compat.v1 as tf\n'), ((9108, 9131), 'tensorflow.compat.v1.constant', 'tf.constant', (['(1.0, 1.0)'], {}), '((1.0, 1.0))\n', (9119, 9131), True, 'import tensorflow.compat.v1 as tf\n'), ((9156, 9179), 'tensorflow.compat.v1.constant', 'tf.constant', (['(1.0, 1.0)'], {}), '((1.0, 1.0))\n', (9167, 9179), True, 'import tensorflow.compat.v1 as tf\n'), ((9204, 9237), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0))\n', (9215, 9237), True, 'import tensorflow.compat.v1 as tf\n'), ((9265, 9288), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (9276, 9288), True, 'import tensorflow.compat.v1 as tf\n')] |
from __future__ import division, print_function
import numpy as np
def rect_to_points(r):
    """Convert a rect (x, y, w, h) into its top-left and bottom-right corners."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return (x, y), (x + w, y + h)
def points_to_rect(p1, p2):
    """Convert top-left / bottom-right corner points into a rect (x, y, w, h)."""
    left, top = p1[0], p1[1]
    return (left, top, p2[0] - left, p2[1] - top)
def union_rect(r1, r2):
    """Return the smallest rect (x, y, w, h) covering both r1 and r2."""
    left = min(r1[0], r2[0])
    top = min(r1[1], r2[1])
    right = max(r1[0] + r1[2], r2[0] + r2[2])
    bottom = max(r1[1] + r1[3], r2[1] + r2[3])
    return (left, top, right - left, bottom - top)
def intersection_rect(r1, r2, return_none=False):
    """Intersection of two rects (x, y, w, h).

    Returns the overlapping rect, or — when the rects are disjoint —
    either None (if return_none is True) or the degenerate rect (0,0,0,0).
    """
    left1, top1 = r1[0], r1[1]
    right1, bottom1 = r1[0] + r1[2], r1[1] + r1[3]
    left2, top2 = r2[0], r2[1]
    right2, bottom2 = r2[0] + r2[2], r2[1] + r2[3]
    # Disjoint when one rect lies entirely to one side of the other.
    if left1 > right2 or right1 < left2 or top1 > bottom2 or bottom1 < top2:
        return None if return_none else (0, 0, 0, 0)
    x0 = max(left1, left2)
    y0 = max(top1, top2)
    x1 = min(right1, right2)
    y1 = min(bottom1, bottom2)
    return (x0, y0, x1 - x0, y1 - y0)
def area_rect(r):
    """Area (w * h) of a rect (x, y, w, h)."""
    width, height = r[2], r[3]
    return width * height
def iou_rect(r1, r2):
    """Intersection-over-union of two rects (x, y, w, h).

    Returns 0.0 when the union area is zero (both rects degenerate),
    instead of raising ZeroDivisionError as the previous version did.
    """
    ai = area_rect(intersection_rect(r1, r2))
    union_area = area_rect(r1) + area_rect(r2) - ai
    # Guard the degenerate case: two zero-area rects yield union_area == 0.
    return ai / union_area if union_area else 0.0
''' line segment intersection
ref: https://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
'''
def points_to_ppr(p1, p2):
    """Convert two points into an (origin, direction) pair.

    Args:
        p1: start point of the line (array-like).
        p2: end point of the line (array-like).

    Returns:
        (p, r): p is np.array(p1); r is the vector from p1 to p2.
    """
    origin = np.array(p1)
    return origin, np.array(p2) - origin
def intersect_line_segments(p1, p2, p3, p4):
    """Check whether segment p1->p2 intersects segment p3->p4.

    Uses the parametric formulation p + t*r vs q + u*s; see
    https://stackoverflow.com/questions/563198. Collinear overlapping
    segments count as intersecting.

    Fix over the previous version: leftover debug ``print`` calls in the
    collinear branches have been removed (they polluted stdout on every
    collinear query); the decision logic itself is unchanged.

    Args:
        p1: start point of segment 1
        p2: end point of segment 1
        p3: start point of segment 2
        p4: end point of segment 2

    Returns:
        bool: True when the two segments intersect.
    """
    p = np.array(p1)
    r = np.array(p2) - p
    q = np.array(p3)
    s = np.array(p4) - q
    crs = np.cross(r, s)
    cqmpr = np.cross(q - p, r)
    if crs == 0.:
        # Parallel or collinear: t/u are undefined; use sentinel values
        # outside [0, 1] so the "proper intersection" branch never fires.
        t, u = 10.0, 10.0
    else:
        t = np.cross(q - p, s) / crs
        u = np.cross(q - p, r) / crs
    if crs == 0. and cqmpr == 0.:
        # Collinear: project both endpoints of segment 2 onto segment 1
        # and check whether the parameter intervals [0, 1] and [t0, t1]
        # overlap.
        t0 = np.dot(q - p, r) / np.dot(r, r)
        t1 = np.dot(q + s - p, r) / np.dot(r, r)
        if max(t0, t1) < 0.0 or 1.0 < min(t0, t1):
            # collinear but disjoint
            return False
        # collinear and overlapping
        return True
    elif crs == 0. and cqmpr != 0.:
        # parallel and not intersecting
        return False
    elif 0. <= t <= 1. and 0. <= u <= 1.:
        # not parallel and intersecting within both segments
        return True
    # lines cross, but outside at least one of the segments
    return False
def intersection_point(p1, p2, p3, p4):
    """Intersection point of the two infinite lines (p1, p2) and (p3, p4).

    Args:
        p1, p2: two points on line 1.
        p3, p4: two points on line 2.

    Returns:
        np.array([x, y]) of the intersection, or None when the lines are
        parallel (zero denominator).
    See https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
    """
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    x3, y3 = p3[0], p3[1]
    x4, y4 = p4[0], p4[1]
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None
    cross12 = x1 * y2 - x2 * y1
    cross34 = x3 * y4 - x4 * y3
    px = (cross12 * (x3 - x4) - cross34 * (x1 - x2)) / denom
    py = (cross12 * (y3 - y4) - cross34 * (y1 - y2)) / denom
    return np.array([px, py])
def intersection_tu(p1, p2, p3, p4):
    """Line-intersection parameters of lines (p1, p2) and (p3, p4).

    Args:
        p1, p2: two points on line 1.
        p3, p4: two points on line 2.

    Returns:
        np.array([t, u]) when the lines intersect, or the tuple
        (None, None) when they are parallel.
        NOTE(review): the two return shapes differ (array vs tuple);
        callers must handle both.
    See https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
    """
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    x3, y3 = p3[0], p3[1]
    x4, y4 = p4[0], p4[1]
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None, None
    t_num = (x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)
    u_num = -(x1 - x2) * (y1 - y3) + (y1 - y2) * (x1 - x3)
    return np.array([t_num, u_num]) / denom
if __name__ == '__main__':
    # Manual test harness. The first branch is a visual check of the
    # rectangle helpers (requires OpenCV + u_opencv, disabled by default);
    # the second exercises the segment-intersection predicates, printing an
    # ASCII sketch of each configuration ('//' parallel, 'x ' crossing, ...)
    # next to the computed result, then asserts intersection_point cases.
    if False:
        import numpy as np
        import cv2
        import u_opencv as ucv
        # pairs of rectangles, each pair tested in both argument orders
        rects = [
            ( (10,10,200,200), (90,90,200,200) ),
            ( (90,90,200,200), (10,10,200,200) ),
            ( (90,10,200,200), (10,90,200,200) ),
            ( (10,90,200,200), (90,10,200,200) ),
            ( (100,100,200,50), (200,50,50,200) ),
            ( (200,50,50,200), (100,100,200,50) ),
            ( (100,100,200,200), (150,150,50,50) ),
            ( (150,150,50,50), (100,100,200,200) ),
            ( (100,100,50,50), (300,300,50,50) ),
            ( (300,300,50,50), (100,100,50,50) ),
        ]
        for i, (r1,r2) in enumerate(rects):
            print('rectangles',i)
            print('  area 1:', area_rect(r1))
            print('  area 2:', area_rect(r2))
            print('  area int:', area_rect(intersection_rect(r1,r2)))
            print('  area iou:', iou_rect(r1,r2))
            # draw both rects, their intersection and their union
            img = np.zeros((512,512,3),dtype=np.uint8)
            ucv.rectangle( img, r1, ucv.color('darkblue'),thickness=3)
            ucv.rectangle( img, r2, ucv.color('darkgreen'),thickness=3)
            ucv.rectangle( img, intersection_rect(r1,r2), ucv.color('magenta'))
            ucv.rectangle( img, union_rect(r1,r2), ucv.color('cyan'))
            cv2.imshow( 'test', img)
            cv2.waitKey(0)
    if True:
        # four horizontal unit segments, one per row, used as endpoints for
        # the configurations below: ps[i, 0] is the start, ps[i, 1] the end
        ps = np.array([
            [ [0,0], [1,0] ],
            [ [1,1], [2,1] ],
            [ [2,2], [3,2] ],
            [ [3,3], [4,3] ] ])
        print('//', intersect_line_segments( ps[0,0], ps[2,0], ps[0,1], ps[2,1]) )
        print()
        print('x ', intersect_line_segments( ps[0,0], ps[2,1], ps[0,1], ps[2,0]) )
        print()
        print('./', intersect_line_segments( ps[0,0], ps[1,0], ps[0,1], ps[2,1]) )
        print()
        print('/.', intersect_line_segments( ps[0,0], ps[2,0], ps[0,1], ps[1,1]) )
        print()
        print('/<', intersect_line_segments( ps[0,0], ps[2,0], ps[0,1], ps[1,0]) )
        print()
        print('V ', intersect_line_segments( ps[0,1], ps[2,0], ps[0,1], ps[2,1]) )
        print()
        print('^ ', intersect_line_segments( ps[0,0], ps[2,0], ps[0,1], ps[2,0]) )
        print()
        print('/-', intersect_line_segments( ps[0,0], ps[2,0], ps[1,0], ps[1,1]) )
        print()
        print('-/', intersect_line_segments( ps[1,0], ps[1,1], ps[2,1], ps[0,1]) )
        print()
        print('= ', intersect_line_segments( ps[0,0], ps[0,1], ps[2,0], ps[2,1]) )
        print()
        print(': ', intersect_line_segments( ps[0,0], ps[1,0], ps[1,0], ps[3,0]) )
        print()
        print(': ', intersect_line_segments( ps[0,0], ps[2,0], ps[2,0], ps[3,0]) )
        print()
        print(': ', intersect_line_segments( ps[0,0], ps[2,0], ps[1,0], ps[2,0]) )
        print()
        print('/_', intersect_line_segments( ps[0,0], ps[2,0], ps[0,0], ps[0,1]) )
        print()
        # degenerate segment (zero-length) is known to raise — kept disabled
        # print('error generated')
        # print('/_', intersect_line_segments( ps[0,0], ps[0,0], ps[0,0], ps[0,1]) )
        # print()
        ''' intersection_point '''
        # axis-aligned cross: lines meet at the origin
        p1 = [-1, 0]
        p2 = [ 1, 0]
        p3 = [ 0,-1]
        p4 = [ 0, 1]
        pc = [ 0, 0]
        assert np.allclose(pc, intersection_point(p1,p2,p3,p4))
        p1 = [-1, 1]
        p2 = [ 1, 1]
        p3 = [ 0,-2]
        p4 = [ 0, 2]
        pc = [ 0, 1]
        assert np.allclose(pc, intersection_point(p1,p2,p3,p4))
        # diagonals of a box centred on pc must intersect at pc
        dx=2.5; dy=1.2;
        pc = [ 1, 1]
        p1 = [pc[0]-dx,pc[1]-dy]
        p2 = [pc[0]+dx,pc[1]+dy]
        p3 = [pc[0]-dx,pc[1]+dy]
        p4 = [pc[0]+dx,pc[1]-dy]
        try:
            assert np.allclose(pc, intersection_point(p1,p2,p3,p4))
        except Exception as e:
            print('pc', pc)
            print('intersection point', intersection_point(p1,p2,p3,p4))
            raise(e)
        # identical (parallel) lines must yield None
        dx=2.5; dy=1.2;
        pc = [ 1, 1]
        p1 = [pc[0]-dx,pc[1]-dy]
        p2 = [pc[0]+dx,pc[1]+dy]
        p3 = [pc[0]-dx,pc[1]-dy]
        p4 = [pc[0]+dx,pc[1]+dy]
        try:
            assert intersection_point(p1,p2,p3,p4) is None
        except Exception as e:
            print('pc', pc)
            print('intersection point', intersection_point(p1,p2,p3,p4))
            raise(e)
| [
"u_opencv.color",
"numpy.cross",
"cv2.imshow",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"cv2.waitKey"
] | [((2264, 2278), 'numpy.cross', 'np.cross', (['r', 's'], {}), '(r, s)\n', (2272, 2278), True, 'import numpy as np\n'), ((2290, 2308), 'numpy.cross', 'np.cross', (['(q - p)', 'r'], {}), '(q - p, r)\n', (2298, 2308), True, 'import numpy as np\n'), ((1786, 1798), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (1794, 1798), True, 'import numpy as np\n'), ((6132, 6218), 'numpy.array', 'np.array', (['[[[0, 0], [1, 0]], [[1, 1], [2, 1]], [[2, 2], [3, 2]], [[3, 3], [4, 3]]]'], {}), '([[[0, 0], [1, 0]], [[1, 1], [2, 1]], [[2, 2], [3, 2]], [[3, 3], [4,\n 3]]])\n', (6140, 6218), True, 'import numpy as np\n'), ((1800, 1812), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (1808, 1812), True, 'import numpy as np\n'), ((1813, 1825), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (1821, 1825), True, 'import numpy as np\n'), ((2503, 2521), 'numpy.cross', 'np.cross', (['(q - p)', 's'], {}), '(q - p, s)\n', (2511, 2521), True, 'import numpy as np\n'), ((2535, 2553), 'numpy.cross', 'np.cross', (['(q - p)', 'r'], {}), '(q - p, r)\n', (2543, 2553), True, 'import numpy as np\n'), ((2645, 2661), 'numpy.dot', 'np.dot', (['(q - p)', 'r'], {}), '(q - p, r)\n', (2651, 2661), True, 'import numpy as np\n'), ((2659, 2671), 'numpy.dot', 'np.dot', (['r', 'r'], {}), '(r, r)\n', (2665, 2671), True, 'import numpy as np\n'), ((2684, 2704), 'numpy.dot', 'np.dot', (['(q + s - p)', 'r'], {}), '(q + s - p, r)\n', (2690, 2704), True, 'import numpy as np\n'), ((2700, 2712), 'numpy.dot', 'np.dot', (['r', 'r'], {}), '(r, r)\n', (2706, 2712), True, 'import numpy as np\n'), ((3952, 4061), 'numpy.array', 'np.array', (['[c12 * (p3[0] - p4[0]) - c34 * (p1[0] - p2[0]), c12 * (p3[1] - p4[1]) - c34 *\n (p1[1] - p2[1])]'], {}), '([c12 * (p3[0] - p4[0]) - c34 * (p1[0] - p2[0]), c12 * (p3[1] - p4[\n 1]) - c34 * (p1[1] - p2[1])])\n', (3960, 4061), True, 'import numpy as np\n'), ((4581, 4743), 'numpy.array', 'np.array', (['[(p1[0] - p3[0]) * (p3[1] - p4[1]) - (p1[1] - p3[1]) * (p3[0] - p4[0]), -(\n p1[0] - 
p2[0]) * (p1[1] - p3[1]) + (p1[1] - p2[1]) * (p1[0] - p3[0])]'], {}), '([(p1[0] - p3[0]) * (p3[1] - p4[1]) - (p1[1] - p3[1]) * (p3[0] - p4\n [0]), -(p1[0] - p2[0]) * (p1[1] - p3[1]) + (p1[1] - p2[1]) * (p1[0] -\n p3[0])])\n', (4589, 4743), True, 'import numpy as np\n'), ((5706, 5745), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)'], {'dtype': 'np.uint8'}), '((512, 512, 3), dtype=np.uint8)\n', (5714, 5745), True, 'import numpy as np\n'), ((6050, 6073), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'img'], {}), "('test', img)\n", (6060, 6073), False, 'import cv2\n'), ((6088, 6102), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6099, 6102), False, 'import cv2\n'), ((5780, 5801), 'u_opencv.color', 'ucv.color', (['"""darkblue"""'], {}), "('darkblue')\n", (5789, 5801), True, 'import u_opencv as ucv\n'), ((5851, 5873), 'u_opencv.color', 'ucv.color', (['"""darkgreen"""'], {}), "('darkgreen')\n", (5860, 5873), True, 'import u_opencv as ucv\n'), ((5945, 5965), 'u_opencv.color', 'ucv.color', (['"""magenta"""'], {}), "('magenta')\n", (5954, 5965), True, 'import u_opencv as ucv\n'), ((6018, 6035), 'u_opencv.color', 'ucv.color', (['"""cyan"""'], {}), "('cyan')\n", (6027, 6035), True, 'import u_opencv as ucv\n')] |
# Important Imports
import numpy as np
from PIL import Image
from scipy.signal import find_peaks
# image = PIL.Image, n = Number of Segments
# ignoreBottomTop = Segmentation of top and bottom of Image
# axis = 0 (for vertical-lines) or 1 (for horizontal-lines)
# Returns a gray image, PIL Image.
def recursiveXYCut(image, n, ignoreBottomTop = True, axis = 1):
    """Draw dark segment lines on an image at projection-profile peaks.

    Args:
        image: PIL.Image input (converted to grayscale internally).
        n: target number of segments; controls the minimum peak distance.
        ignoreBottomTop: when True, skip the first and last detected peak
            so the image border is not segmented.
        axis: 1 for horizontal lines, 0 for vertical lines.

    Returns:
        A grayscale PIL.Image with 4-pixel-wide zeroed lines at each peak.

    Fix over the previous version: the 4-pixel band around a peak is now
    clipped to the image bounds. The old ``range(peak-2, peak+2)`` raised
    IndexError for peaks within 2 pixels of the far border and silently
    wrapped around (negative indices) near index 0.
    """
    image = image.convert('L')
    image_arr = np.asarray(image)
    # Minimum distance between two accepted peaks so at most ~n appear.
    size = image_arr.shape[0 if axis == 1 else 1]
    distance = size / n
    # Projection profile: sum of pixel values along the given axis.
    sum_vals = image_arr.sum(axis=axis)
    peaks, _ = find_peaks(sum_vals, distance=distance)
    # Mask of ones; bands around each peak are zeroed to draw the lines.
    temp = np.ones(image_arr.shape)
    for peak in (peaks[1:-1] if ignoreBottomTop else peaks):
        lo, hi = max(0, peak - 2), min(size, peak + 2)
        if axis == 1:
            temp[lo:hi] = 0
        else:
            temp[:, lo:hi] = 0
    return Image.fromarray(np.uint8(image_arr * temp))
| [
"numpy.uint8",
"numpy.asarray",
"numpy.ones",
"scipy.signal.find_peaks"
] | [((408, 425), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (418, 425), True, 'import numpy as np\n'), ((637, 676), 'scipy.signal.find_peaks', 'find_peaks', (['sum_vals'], {'distance': 'distance'}), '(sum_vals, distance=distance)\n', (647, 676), False, 'from scipy.signal import find_peaks\n'), ((764, 788), 'numpy.ones', 'np.ones', (['image_arr.shape'], {}), '(image_arr.shape)\n', (771, 788), True, 'import numpy as np\n'), ((1126, 1152), 'numpy.uint8', 'np.uint8', (['(image_arr * temp)'], {}), '(image_arr * temp)\n', (1134, 1152), True, 'import numpy as np\n')] |
import numpy as np
def mean(values, ignore_zeros=False):
    """Arithmetic mean of values, returning 0 for an empty selection.

    Args:
        values: iterable of numbers.
        ignore_zeros: when True, zero entries are excluded from the mean.
    """
    if ignore_zeros:
        kept = [v for v in values if v != 0]
    else:
        kept = list(values)
    return np.mean(kept) if kept else 0
| [
"numpy.mean"
] | [((157, 177), 'numpy.mean', 'np.mean', (['used_values'], {}), '(used_values)\n', (164, 177), True, 'import numpy as np\n')] |
import sys
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from PIL import Image
import model.components.attention_mechanism
from model.base import BaseModel
from model.decoder import Decoder
from model.encoder import Encoder
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.general import Config, Progbar, minibatches
from model.utils.image import pad_batch_images
from model.utils.text import pad_batch_formulas
class Img2SeqModel(BaseModel):
"""Specialized class for Img2Seq Model"""
    def __init__(self, config, dir_output, vocab):
        """
        Args:
            config: Config instance defining hyperparams
            dir_output: (string) output directory passed to BaseModel
            vocab: Vocab instance defining useful vocab objects like tok_to_id
        """
        super(Img2SeqModel, self).__init__(config, dir_output)
        # Reset the module-level attention-context store so state from a
        # previously built model does not leak into this instance.
        model.components.attention_mechanism.ctx_vector = []
        self._vocab = vocab
    def build_train(self, config):
        """Builds the full training graph.

        Construction order matters: placeholders must exist before the
        prediction ops, the loss before the train op, and the session is
        initialised last.

        Args:
            config: Config with lr_method and clip (gradient clipping).
        """
        self.logger.info("Building model...")
        self.encoder = Encoder(self._config)
        self.decoder = Decoder(self._config, self._vocab.n_tok, self._vocab.id_end)
        self._add_placeholders_op()
        self._add_pred_op()
        self._add_loss_op()
        self._add_train_op(config.lr_method, self.lr, self.loss, config.clip)
        self.init_session()
        self.logger.info("- done.")
    def build_pred(self):
        """Builds the inference graph.

        Same construction order as the training build, minus the train op:
        placeholders -> prediction ops -> loss -> session init.
        """
        self.logger.info("Building model...")
        self.encoder = Encoder(self._config)
        self.decoder = Decoder(self._config, self._vocab.n_tok, self._vocab.id_end)
        self._add_placeholders_op()
        self._add_pred_op()
        self._add_loss_op()
        self.init_session()
        self.logger.info("- done.")
    def _add_placeholders_op(self):
        """
        Add placeholder attributes

        Creates placeholders for the hyper-parameters (learning rate,
        dropout, training flag) and for the inputs (image batch, formula
        token ids, formula lengths), and registers TensorBoard summaries
        for the scalars and the input images.
        """
        # hyper params (all fed per-step, hence scalar placeholders)
        self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
        self.dropout = tf.placeholder(tf.float32, shape=(), name='dropout')
        self.training = tf.placeholder(tf.bool, shape=(), name="training")
        # input of the graph
        # img: batch of single-channel images, variable height/width
        self.img = tf.placeholder(tf.uint8, shape=(None, None, None, 1), name='img')
        # formula: batch of token-id sequences, padded to a common length
        self.formula = tf.placeholder(tf.int32, shape=(None, None), name='formula')
        self.formula_length = tf.placeholder(tf.int32, shape=(None, ), name='formula_length')
        # tensorboard
        tf.summary.scalar("learning_rate", self.lr)
        tf.summary.scalar("dropout", self.dropout)
        tf.summary.image("img", self.img)
def _get_feed_dict(self, img, training, formula=None, lr=None, dropout=1):
"""Returns a dict"""
img = pad_batch_images(img)
fd = {
self.img: img,
self.dropout: dropout,
self.training: training,
}
if formula is not None:
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
# print img.shape, formula.shape
fd[self.formula] = formula
fd[self.formula_length] = formula_length
if lr is not None:
fd[self.lr] = lr
return fd
def _add_pred_op(self):
"""Defines self.pred"""
encoded_img = self.encoder(self.training, self.img, self.dropout)
train, test = self.decoder(self.training, encoded_img, self.formula, self.dropout)
self.pred_train = train
self.pred_test = test
    def _add_loss_op(self):
        """Defines self.loss (mean masked cross-entropy over the batch).

        Also defines self.ce_words / self.n_words, from which the
        evaluation code computes perplexity, and registers TensorBoard
        summaries for all three.
        """
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.pred_train, labels=self.formula)
        # mask out the padded positions beyond each formula's true length
        mask = tf.sequence_mask(self.formula_length)
        losses = tf.boolean_mask(losses, mask)
        # loss for training
        self.loss = tf.reduce_mean(losses)
        # to compute perplexity for the test set
        self.ce_words = tf.reduce_sum(losses) # sum of CE for each word
        self.n_words = tf.reduce_sum(self.formula_length) # number of words
        # for tensorboard
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("sum_of_CE_for_each_word", self.ce_words)
        tf.summary.scalar("number_of_words", self.n_words)
    def _run_epoch(self, config, train_set, val_set, epoch, lr_schedule):
        """Performs an epoch of training
        Args:
            config: Config instance
            train_set: Dataset instance
            val_set: Dataset instance
            epoch: (int) id of the epoch, starting at 0
            lr_schedule: LRSchedule instance that takes care of learning proc
        Returns:
            score: (float) model will select weights that achieve the highest
                score
        """
        # logging
        batch_size = config.batch_size
        nbatches = (len(train_set) + batch_size - 1) // batch_size
        prog = Progbar(nbatches)
        # iterate over dataset
        for i, (img, formula) in enumerate(minibatches(train_set, batch_size)):
            # get feed dict
            fd = self._get_feed_dict(img, training=True, formula=formula, lr=lr_schedule.lr, dropout=config.dropout)
            # update step
            _, loss_eval = self.sess.run([self.train_op, self.loss], feed_dict=fd)
            prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval)), ("lr", lr_schedule.lr)])
            # update learning rate
            lr_schedule.update(batch_no=epoch*nbatches + i)
            # generate the TensorBoard summary for this batch
            summary_str = self.sess.run(self.merged, feed_dict=fd)
            self.file_writer.add_summary(summary_str, epoch)  # write the summary to the event file
            # if (i+1) % 100 == 0:
            #     # too slow: checkpoint after every 100 batches; the saved
            #     # weights were used for debugging attention
            #     self.save_debug_session(epoch, i)
        # logging
        self.logger.info("- Training: {}".format(prog.info))
        # evaluation on the validation set drives model selection
        config_eval = Config({"dir_answers": self._dir_output + "formulas_val/", "batch_size": config.batch_size})
        scores = self.evaluate(config_eval, val_set)
        score = scores[config.metric_val]
        lr_schedule.update(score=score)
        return score
    def write_prediction(self, config, test_set):
        """Performs an epoch of evaluation
        Args:
            config: (Config) with batch_size and dir_answers
            test_set:(Dataset) instance
        Returns:
            files: (list) of path to files
            perp: (float) perplexity on test set
                NOTE(review): perp is the *negated* exp of the mean CE —
                presumably so that higher == better for model selection;
                confirm against the callers before relying on the sign.
        """
        # initialize containers of references and predictions
        # hyps has one inner list per hypothesis rank (1 for greedy,
        # beam_size for beam search)
        if self._config.decoding == "greedy":
            refs, hyps = [], [[]]
        elif self._config.decoding == "beam_search":
            refs, hyps = [], [[] for i in range(self._config.beam_size)]
        # iterate over the dataset
        n_words, ce_words = 0, 0 # sum of ce for all words + nb of words
        for img, formula in minibatches(test_set, config.batch_size):
            fd = self._get_feed_dict(img, training=False, formula=formula, dropout=1)
            ce_words_eval, n_words_eval, ids_eval = self.sess.run([self.ce_words, self.n_words, self.pred_test.ids], feed_dict=fd)
            # TODO(guillaume): move this logic into tf graph
            # normalise ids_eval to shape (batch, n_hyps, time)
            if self._config.decoding == "greedy":
                ids_eval = np.expand_dims(ids_eval, axis=1)
            elif self._config.decoding == "beam_search":
                ids_eval = np.transpose(ids_eval, [0, 2, 1])
            # print("---------------------------------------------------------------after decoding :")
            # print(ids_eval)
            n_words += n_words_eval
            ce_words += ce_words_eval
            # print("---------------------------------------------------------------formula and prediction :")
            for form, preds in zip(formula, ids_eval):
                refs.append(form)
                # print(form, " ---------- ", preds[0])
                for i, pred in enumerate(preds):
                    hyps[i].append(pred)
        files = write_answers(refs, hyps, self._vocab.id_to_tok, config.dir_answers, self._vocab.id_end)
        perp = - np.exp(ce_words / float(n_words))
        return files, perp
def _run_evaluate(self, config, test_set):
"""Performs an epoch of evaluation
Args:
test_set: Dataset instance
params: (dict) with extra params in it
- "dir_name": (string)
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
files, perp = self.write_prediction(config, test_set)
scores = score_files(files[0], files[1])
scores["perplexity"] = perp
return scores
def predict_batch(self, images):
if self._config.decoding == "greedy":
hyps = [[]]
elif self._config.decoding == "beam_search":
hyps = [[] for i in range(self._config.beam_size)]
fd = self._get_feed_dict(images, training=False, dropout=1)
ids_eval, = self.sess.run([self.pred_test.ids], feed_dict=fd)
if self._config.decoding == "greedy":
ids_eval = np.expand_dims(ids_eval, axis=1)
elif self._config.decoding == "beam_search":
ids_eval = np.transpose(ids_eval, [0, 2, 1])
for preds in ids_eval:
for i, pred in enumerate(preds):
p = truncate_end(pred, self._vocab.id_end)
p = " ".join([self._vocab.id_to_tok[idx] for idx in p])
hyps[i].append(p)
return hyps
def predict(self, img):
preds = self.predict_batch([img])
preds_ = []
# extract only one element (no batch)
for hyp in preds:
preds_.append(hyp[0])
return preds_
def predict_vis(self, mode_name='test', batch_size=1, visualize=True):
    """Predict with attention visualization.

    NOTE(review): the entire implementation below is commented out, so this
    method currently does nothing and implicitly returns None. The dead code
    is kept for reference.

    Args:
        mode_name: one of ('train', 'test', 'validate')
        batch_size: (int) batch size; must be 1 when visualize is True
        visualize: (bool) whether to visualize the prediction process

    Returns:
        inp_seqs: (list) of latex string
    """
    # if visualize:
    #     assert (batch_size == 1), "Batch size should be 1 for visualize mode"
    # import random
    # # f = np.load('train_list_buckets.npy').tolist()
    # f = np.load(mode_name+'_buckets.npy').tolist()
    # random_key = random.choice(f.keys())
    # #random_key = (160,40)
    # f = f[random_key]
    # imgs = []
    # print("Image shape: ", random_key)
    # while len(imgs) != batch_size:
    #     start = np.random.randint(0, len(f), 1)[0]
    #     if os.path.exists('./images_processed/'+f[start][0]):
    #         imgs.append(np.asarray(Image.open('./images_processed/'+f[start][0]).convert('YCbCr'))[:, :, 0][:, :, None])
    # imgs = np.asarray(imgs, dtype=np.float32).transpose(0, 3, 1, 2)
    # inp_seqs = np.zeros((batch_size, 160)).astype('int32')
    # print(imgs.shape)
    # inp_seqs[:, 0] = np.load('properties.npy').tolist()['char_to_idx']['#START']
    # ctx_vector = []
    # l_size = random_key[0]*2
    # r_size = random_key[1]*2
    # inp_image = Image.fromarray(imgs[0][0]).resize((l_size, r_size))
    # l = int(np.ceil(random_key[1]/8.))
    # r = int(np.ceil(random_key[0]/8.))
    # properties = np.load('properties.npy').tolist()
    # def idx_to_chars(Y): return ' '.join(map(lambda x: properties['idx_to_char'][x], Y))
    # for i in range(1, 160):
    #     inp_seqs[:, i] = self.sess.run(predictions, feed_dict={X: imgs, input_seqs: inp_seqs[:, :i]})
    #     # print i,inp_seqs[:,i]
    #     if visualize == True:
    #         att = sorted(list(enumerate(ctx_vector[-1].flatten())), key=lambda tup: tup[1], reverse=True)
    #         idxs, att = zip(*att)
    #         j = 1
    #         while sum(att[:j]) < 0.9:
    #             j += 1
    #         positions = idxs[:j]
    #         print("Attention weights: ", att[:j])
    #         positions = [(pos/r, pos % r) for pos in positions]
    #         outarray = np.ones((l, r))*255.
    #         for loc in positions:
    #             outarray[loc] = 0.
    #         out_image = Image.fromarray(outarray).resize((l_size, r_size), Image.NEAREST)
    #         print("Latex sequence: ", idx_to_chars(inp_seqs[0, :i]))
    #         outp = Image.blend(inp_image.convert('RGBA'), out_image.convert('RGBA'), 0.5)
    #         outp.show(title=properties['idx_to_char'][inp_seqs[0, i]])
    #         # raw_input()
    #         time.sleep(3)
    #         os.system('pkill display')
    # np.save('pred_imgs', imgs)
    # np.save('pred_latex', inp_seqs)
    # print("Saved npy files! Use Predict.ipynb to view results")
    # return inp_seqs
| [
"model.evaluation.text.write_answers",
"tensorflow.boolean_mask",
"model.decoder.Decoder",
"tensorflow.reduce_sum",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.summary.image",
"model.utils.text.pad_batch_formulas",
"tensorflow.placeholder",
"nump... | [((1145, 1166), 'model.encoder.Encoder', 'Encoder', (['self._config'], {}), '(self._config)\n', (1152, 1166), False, 'from model.encoder import Encoder\n'), ((1191, 1251), 'model.decoder.Decoder', 'Decoder', (['self._config', 'self._vocab.n_tok', 'self._vocab.id_end'], {}), '(self._config, self._vocab.n_tok, self._vocab.id_end)\n', (1198, 1251), False, 'from model.decoder import Decoder\n'), ((1600, 1621), 'model.encoder.Encoder', 'Encoder', (['self._config'], {}), '(self._config)\n', (1607, 1621), False, 'from model.encoder import Encoder\n'), ((1646, 1706), 'model.decoder.Decoder', 'Decoder', (['self._config', 'self._vocab.n_tok', 'self._vocab.id_end'], {}), '(self._config, self._vocab.n_tok, self._vocab.id_end)\n', (1653, 1706), False, 'from model.decoder import Decoder\n'), ((2018, 2065), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()', 'name': '"""lr"""'}), "(tf.float32, shape=(), name='lr')\n", (2032, 2065), True, 'import tensorflow as tf\n'), ((2090, 2142), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()', 'name': '"""dropout"""'}), "(tf.float32, shape=(), name='dropout')\n", (2104, 2142), True, 'import tensorflow as tf\n'), ((2170, 2220), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()', 'name': '"""training"""'}), "(tf.bool, shape=(), name='training')\n", (2184, 2220), True, 'import tensorflow as tf\n'), ((2274, 2339), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8'], {'shape': '(None, None, None, 1)', 'name': '"""img"""'}), "(tf.uint8, shape=(None, None, None, 1), name='img')\n", (2288, 2339), True, 'import tensorflow as tf\n'), ((2365, 2425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, None)', 'name': '"""formula"""'}), "(tf.int32, shape=(None, None), name='formula')\n", (2379, 2425), True, 'import tensorflow as tf\n'), ((2458, 2520), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': 
'"""formula_length"""'}), "(tf.int32, shape=(None,), name='formula_length')\n", (2472, 2520), True, 'import tensorflow as tf\n'), ((2558, 2601), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.lr'], {}), "('learning_rate', self.lr)\n", (2575, 2601), True, 'import tensorflow as tf\n'), ((2611, 2653), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""dropout"""', 'self.dropout'], {}), "('dropout', self.dropout)\n", (2628, 2653), True, 'import tensorflow as tf\n'), ((2663, 2696), 'tensorflow.summary.image', 'tf.summary.image', (['"""img"""', 'self.img'], {}), "('img', self.img)\n", (2679, 2696), True, 'import tensorflow as tf\n'), ((2824, 2845), 'model.utils.image.pad_batch_images', 'pad_batch_images', (['img'], {}), '(img)\n', (2840, 2845), False, 'from model.utils.image import pad_batch_images\n'), ((3717, 3812), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.pred_train', 'labels': 'self.formula'}), '(logits=self.pred_train,\n labels=self.formula)\n', (3763, 3812), True, 'import tensorflow as tf\n'), ((3827, 3864), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['self.formula_length'], {}), '(self.formula_length)\n', (3843, 3864), True, 'import tensorflow as tf\n'), ((3883, 3912), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['losses', 'mask'], {}), '(losses, mask)\n', (3898, 3912), True, 'import tensorflow as tf\n'), ((3965, 3987), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (3979, 3987), True, 'import tensorflow as tf\n'), ((4059, 4080), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['losses'], {}), '(losses)\n', (4072, 4080), True, 'import tensorflow as tf\n'), ((4132, 4166), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.formula_length'], {}), '(self.formula_length)\n', (4145, 4166), True, 'import tensorflow as tf\n'), ((4224, 4260), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 
'self.loss'], {}), "('loss', self.loss)\n", (4241, 4260), True, 'import tensorflow as tf\n'), ((4270, 4329), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""sum_of_CE_for_each_word"""', 'self.ce_words'], {}), "('sum_of_CE_for_each_word', self.ce_words)\n", (4287, 4329), True, 'import tensorflow as tf\n'), ((4339, 4389), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""number_of_words"""', 'self.n_words'], {}), "('number_of_words', self.n_words)\n", (4356, 4389), True, 'import tensorflow as tf\n'), ((5059, 5076), 'model.utils.general.Progbar', 'Progbar', (['nbatches'], {}), '(nbatches)\n', (5066, 5076), False, 'from model.utils.general import Config, Progbar, minibatches\n'), ((6121, 6217), 'model.utils.general.Config', 'Config', (["{'dir_answers': self._dir_output + 'formulas_val/', 'batch_size': config.\n batch_size}"], {}), "({'dir_answers': self._dir_output + 'formulas_val/', 'batch_size':\n config.batch_size})\n", (6127, 6217), False, 'from model.utils.general import Config, Progbar, minibatches\n'), ((7137, 7177), 'model.utils.general.minibatches', 'minibatches', (['test_set', 'config.batch_size'], {}), '(test_set, config.batch_size)\n', (7148, 7177), False, 'from model.utils.general import Config, Progbar, minibatches\n'), ((8282, 8375), 'model.evaluation.text.write_answers', 'write_answers', (['refs', 'hyps', 'self._vocab.id_to_tok', 'config.dir_answers', 'self._vocab.id_end'], {}), '(refs, hyps, self._vocab.id_to_tok, config.dir_answers, self.\n _vocab.id_end)\n', (8295, 8375), False, 'from model.evaluation.text import score_files, truncate_end, write_answers\n'), ((8876, 8907), 'model.evaluation.text.score_files', 'score_files', (['files[0]', 'files[1]'], {}), '(files[0], files[1])\n', (8887, 8907), False, 'from model.evaluation.text import score_files, truncate_end, write_answers\n'), ((3051, 3118), 'model.utils.text.pad_batch_formulas', 'pad_batch_formulas', (['formula', 'self._vocab.id_pad', 'self._vocab.id_end'], {}), '(formula, 
self._vocab.id_pad, self._vocab.id_end)\n', (3069, 3118), False, 'from model.utils.text import pad_batch_formulas\n'), ((5155, 5189), 'model.utils.general.minibatches', 'minibatches', (['train_set', 'batch_size'], {}), '(train_set, batch_size)\n', (5166, 5189), False, 'from model.utils.general import Config, Progbar, minibatches\n'), ((9415, 9447), 'numpy.expand_dims', 'np.expand_dims', (['ids_eval'], {'axis': '(1)'}), '(ids_eval, axis=1)\n', (9429, 9447), True, 'import numpy as np\n'), ((7539, 7571), 'numpy.expand_dims', 'np.expand_dims', (['ids_eval'], {'axis': '(1)'}), '(ids_eval, axis=1)\n', (7553, 7571), True, 'import numpy as np\n'), ((9528, 9561), 'numpy.transpose', 'np.transpose', (['ids_eval', '[0, 2, 1]'], {}), '(ids_eval, [0, 2, 1])\n', (9540, 9561), True, 'import numpy as np\n'), ((9663, 9701), 'model.evaluation.text.truncate_end', 'truncate_end', (['pred', 'self._vocab.id_end'], {}), '(pred, self._vocab.id_end)\n', (9675, 9701), False, 'from model.evaluation.text import score_files, truncate_end, write_answers\n'), ((7660, 7693), 'numpy.transpose', 'np.transpose', (['ids_eval', '[0, 2, 1]'], {}), '(ids_eval, [0, 2, 1])\n', (7672, 7693), True, 'import numpy as np\n'), ((5521, 5538), 'numpy.exp', 'np.exp', (['loss_eval'], {}), '(loss_eval)\n', (5527, 5538), True, 'import numpy as np\n')] |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from collections import deque
class BaseRobot(object):
    """Base class for all robot classes."""

    def __init__(
        self,
        n_jnt,
        n_obj,
        pos_bounds=None,
        vel_bounds=None,
        calibration_path=None,
        is_hardware=False,
        device_name=None,
        overlay=False,
        calibration_mode=False,
        observation_cache_maxsize=5,
    ):
        """Create a new robot.

        Args:
            n_jnt: The number of dofs in the robot.
            n_obj: The number of dofs in the object.
            pos_bounds: (n_dofs, 2)-shape matrix denoting the min and max
                joint position for each dof (robot joints plus object dofs;
                the code asserts shape against n_jnt + n_obj).
            vel_bounds: (n_dofs, 2)-shape matrix denoting the min and max
                joint velocity for each dof.
            calibration_path: File path to the calibration configuration file
                to use.
            is_hardware: Whether to run on hardware or not.
            device_name: The device path for the robot hardware. Only required
                in legacy mode.
            overlay: Whether to show a simulation overlay of the hardware.
            calibration_mode: Start with motors disengaged.
            observation_cache_maxsize: Max number of observations kept in the
                observation cache.
        """
        assert n_jnt > 0
        assert n_obj >= 0
        self._n_jnt = n_jnt
        self._n_obj = n_obj
        self._n_dofs = n_jnt + n_obj
        # Both bound matrices share identical validation; previously this
        # logic was duplicated inline for pos and vel.
        self._pos_bounds = self._validated_bounds(pos_bounds)
        self._vel_bounds = self._validated_bounds(vel_bounds)
        self._is_hardware = is_hardware
        self._device_name = device_name
        self._calibration_path = calibration_path
        self._overlay = overlay
        self._calibration_mode = calibration_mode
        self._observation_cache_maxsize = observation_cache_maxsize
        # Gets updated as observations arrive.
        self._observation_cache = deque([], maxlen=self._observation_cache_maxsize)

    def _validated_bounds(self, bounds):
        """Validate and convert one (n_dofs, 2) bounds matrix.

        Args:
            bounds: Sequence of (low, high) rows, one per dof, or None.

        Returns:
            A float32 ndarray of shape (n_dofs, 2), or None when no bounds
            were supplied.
        """
        if bounds is None:
            return None
        bounds = np.array(bounds, dtype=np.float32)
        assert bounds.shape == (self._n_dofs, 2)
        for low, high in bounds:
            assert low < high
        return bounds

    @property
    def n_jnt(self):
        return self._n_jnt

    @property
    def n_obj(self):
        return self._n_obj

    @property
    def n_dofs(self):
        return self._n_dofs

    @property
    def pos_bounds(self):
        return self._pos_bounds

    @property
    def vel_bounds(self):
        return self._vel_bounds

    @property
    def is_hardware(self):
        return self._is_hardware

    @property
    def device_name(self):
        return self._device_name

    @property
    def calibration_path(self):
        return self._calibration_path

    @property
    def overlay(self):
        return self._overlay

    @property
    def has_obj(self):
        return self._n_obj > 0

    @property
    def calibration_mode(self):
        return self._calibration_mode

    @property
    def observation_cache_maxsize(self):
        return self._observation_cache_maxsize

    @property
    def observation_cache(self):
        return self._observation_cache

    def clip_positions(self, positions):
        """Clips the given joint positions to the position bounds.

        Args:
            positions: The joint positions (length n_jnt or n_dofs).

        Returns:
            The bounded joint positions; the input is returned unchanged
            when no position bounds are configured.
        """
        if self.pos_bounds is None:
            return positions
        assert len(positions) == self.n_jnt or len(positions) == self.n_dofs
        pos_bounds = self.pos_bounds[: len(positions)]
        return np.clip(positions, pos_bounds[:, 0], pos_bounds[:, 1])
| [
"numpy.clip",
"numpy.array",
"collections.deque"
] | [((2918, 2967), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self._observation_cache_maxsize'}), '([], maxlen=self._observation_cache_maxsize)\n', (2923, 2967), False, 'from collections import deque\n'), ((4402, 4456), 'numpy.clip', 'np.clip', (['positions', 'pos_bounds[:, 0]', 'pos_bounds[:, 1]'], {}), '(positions, pos_bounds[:, 0], pos_bounds[:, 1])\n', (4409, 4456), True, 'import numpy as np\n'), ((2061, 2099), 'numpy.array', 'np.array', (['pos_bounds'], {'dtype': 'np.float32'}), '(pos_bounds, dtype=np.float32)\n', (2069, 2099), True, 'import numpy as np\n'), ((2366, 2404), 'numpy.array', 'np.array', (['vel_bounds'], {'dtype': 'np.float32'}), '(vel_bounds, dtype=np.float32)\n', (2374, 2404), True, 'import numpy as np\n')] |
import os
import qtmodern.styles
import qtmodern.windows
import numpy as np
import pandas as pd
import PySide6
import pyqtgraph as pg
from pyqtgraph import PlotWidget
from pyqtgraph.Qt import QtCore, QtGui
# from PySide6.QtGui import QIcon, QFont, QPixmap
# from PySide6.QtCore import QThread, Signal, QTimer, QDate, QTime, QDateTime, Qt, QTranslator
# from PySide6.QtUiTools import QUiLoader
# from PySide6.QtWidgets import QApplication, QFileDialog, QMessageBox, QSplashScreen,QProgressBar, QGraphicsView
# from PySide6.QtWidgets import QFileDialog, QMessageBox
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # so CJK (Chinese) labels render correctly
plt.rcParams['axes.unicode_minus'] = False # so minus signs render correctly with a CJK font
# Build the window template/base classes from the Qt Designer .ui file.
WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType("fr.ui")
class Stats(TemplateBaseClass):
    """Main window for plotting Faraday-rotation measurements from Excel files.

    Loads settings from config.ini, lets the user pick an Excel file with
    columns '磁场(G)' (field, G) and '电流(A)' (current, A), and offers four
    plot modes: raw rotation angle, background-subtracted angle (linear fit
    over a user-selected row range), and the same two normalized by sample
    thickness (deg/cm).
    """

    def __init__(self):
        TemplateBaseClass.__init__(self)
        self.ui = WindowTemplate()
        self.ui.setupUi(self)
        # (path, filter) pair as returned by the Qt file dialog.
        self.file =['','']
        from configparser import ConfigParser
        self.config = ConfigParser()
        # Load persisted settings, or create config.ini with defaults.
        if os.path.exists('config.ini'):
            self.config.read('config.ini', encoding='UTF-8')
        else:
            self.config['Default'] = {'End_line': 30, 'offset_degree': 2.0, 'thickness': 100.0, 'init_dir':f'{os.getcwd()}'}
            with open('config.ini', 'w', encoding='utf-8') as file:
                self.config.write(file)
        self.dir=self.config['Default']['init_dir']
        # Push the persisted values into the UI widgets.
        self.ui.spinBox_end.setValue(int(self.config['Default']['End_line']))
        self.ui.deg.setValue(float(self.config['Default']['offset_degree']))
        self.ui.thickness.setValue(float(self.config['Default']['thickness']))
        self.ui.qtplot.showGrid(x=True, y=True)
        self.ui.qtplot.setLabel("bottom", "磁场(G)")
        self.curve = self.ui.qtplot.getPlotItem().plot(pen=pg.mkPen(width=3))
        # Plot buttons stay disabled until a file is successfully loaded.
        self.ui.plot1.setEnabled(False)
        self.ui.plot2.setEnabled(False)
        self.ui.plot3.setEnabled(False)
        self.ui.plot4.setEnabled(False)
        self.ui.filebrowser.clicked.connect(self.browser)
        self.ui.plot1.clicked.connect(self.plot1)
        self.ui.plot2.clicked.connect(self.plot2)
        self.ui.plot3.clicked.connect(self.plot3)
        self.ui.plot4.clicked.connect(self.plot4)

    def browser(self):
        """Let the user pick an Excel file; validate it and enable plotting."""
        self.file = QtGui.QFileDialog.getOpenFileName(caption=f"选择法拉第测试文件",directory=self.dir,filter="Excel表格文件 (*.xls *.xlsx)")
        if self.file[0] != '':
            # Remember the chosen directory for next time (persisted to config.ini).
            self.dir = os.path.dirname(self.file[0])
            self.config['Default']['init_dir']=self.dir
            with open('config.ini', 'w', encoding='utf-8') as file:
                self.config.write(file)
            self.ui.lineEdit.setText(os.path.basename(self.file[0]))
            QtGui.QApplication.processEvents()
            try:
                df = pd.read_excel(self.file[0],usecols=['磁场(G)','电流(A)'])
            except:
                # Reading failed (wrong format / missing columns): report and
                # keep the plot buttons disabled.
                msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误','文件读取失败!')
                msgBox.exec_()
                self.ui.statusbar.showMessage('文件读取失败!')
                self.ui.plot1.setEnabled(False)
                self.ui.plot2.setEnabled(False)
                self.ui.plot3.setEnabled(False)
                self.ui.plot4.setEnabled(False)
            else:
                self.ui.plot1.setEnabled(True)
                self.ui.plot2.setEnabled(True)
                self.ui.plot3.setEnabled(True)
                self.ui.plot4.setEnabled(True)
                # Report the sweep direction and the row of the field extremum
                # (used to guide the fit-range choice).
                if df.loc[0,'磁场(G)'] >= 0:
                    self.ui.statusbar.showMessage(f"法拉第测试文件读取成功(正⇨负⇨正,负磁场最大值在第{np.argmin(df['磁场(G)'])}行)")
                    self.ui.label_2.setText(f"求斜率:采用线性拟合的斜率(负磁场最大值在第{np.argmin(df['磁场(G)'])}行)")
                else:
                    self.ui.statusbar.showMessage(f"法拉第测试文件读取成功(负⇨正⇨负,正磁场最大值在第{np.argmax(df['磁场(G)'])}行)")
                    self.ui.label_2.setText(f"求斜率:采用线性拟合的斜率(正磁场最大值在第{np.argmax(df['磁场(G)'])}行)")
        else:
            # Dialog cancelled: clear the filename and disable plotting.
            self.ui.lineEdit.clear()
            self.ui.statusbar.showMessage('没有读入文件!')
            self.ui.plot1.setEnabled(False)
            self.ui.plot2.setEnabled(False)
            self.ui.plot3.setEnabled(False)
            self.ui.plot4.setEnabled(False)

    def plot1(self):
        """Plot the raw Faraday rotation angle (deg) vs magnetic field."""
        self.ui.qtplot.setLabel("left", "法拉第旋角 (deg)")
        df = pd.read_excel(self.file[0],usecols=['磁场(G)','电流(A)'])
        # Convert measured current into a rotation angle relative to the
        # user-set offset angle, normalizing by the median current.
        df['电流(A)']=self.ui.deg.value()-np.rad2deg(np.arcsin(np.sqrt(df['电流(A)']/df['电流(A)'].median())*np.sin(np.deg2rad(self.ui.deg.value()))))#self.ui.deg.value()*(1-np.sqrt(df['电流(A)']/df['电流(A)'].median()))
        df.dropna(axis=0,inplace=True)
        self.curve.setData(df['磁场(G)'],df['电流(A)'])

    def plot2(self):
        """Plot the rotation angle with a linear background removed.

        The background slope is obtained by fitting the rows between the
        start/end spin boxes; that linear trend is subtracted from the data.
        """
        self.ui.qtplot.setLabel("left", "法拉第旋角 (deg)")
        df = pd.read_excel(self.file[0],usecols=['磁场(G)','电流(A)'])
        df['电流(A)']=self.ui.deg.value()-np.rad2deg(np.arcsin(np.sqrt(df['电流(A)']/df['电流(A)'].median())*np.sin(np.deg2rad(self.ui.deg.value()))))
        df.dropna(axis=0,inplace=True)
        co = self.ui.spinBox_start.value()
        ci = self.ui.spinBox_end.value()
        # Validate the fit range before attempting the linear fit.
        if co == ci:
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误','起始行数和结束行数必须不同!')
            msgBox.exec_()
            self.ui.statusbar.showMessage('起始行数和结束行数必须不同!')
        elif co>len(df.index) or ci>len(df.index):
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误',f'超出索引(文件最大行数为{len(df.index)-1})!')
            msgBox.exec_()
            self.ui.statusbar.showMessage(f'超出索引(文件最大行数为{len(df.index)-1})!')
        else:
            try:
                k, b = np.polyfit(df.loc[min(co,ci):max(co,ci),'磁场(G)'], df.loc[min(co,ci):max(co,ci),'电流(A)'], 1)
            except:
                msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误','所选区间无法线性拟合,请选择合适区间!')
                msgBox.exec_()
                self.ui.statusbar.showMessage('所选区间无法线性拟合,请选择合适区间!')
            else:
                # Subtract the fitted linear background (intercept b kept).
                df['电流(A)'] = df['电流(A)']-k*df['磁场(G)']
                self.curve.setData(df['磁场(G)'],df['电流(A)'])

    def plot3(self):
        """Plot the rotation angle per unit thickness (deg/cm).

        Same as plot1, divided by the sample thickness; the 1e7 factor
        presumably converts the thickness units to cm — TODO confirm.
        """
        self.ui.qtplot.setLabel("left", "法拉第旋角 (deg/cm)")
        df = pd.read_excel(self.file[0],usecols=['磁场(G)','电流(A)'])
        df['电流(A)']=(self.ui.deg.value()-np.rad2deg(np.arcsin(np.sqrt(df['电流(A)']/df['电流(A)'].median())*np.sin(np.deg2rad(self.ui.deg.value())))))/self.ui.thickness.value()*1e7
        df.dropna(axis=0,inplace=True)
        self.curve.setData(df['磁场(G)'],df['电流(A)'])

    def plot4(self):
        """Plot the background-subtracted angle per unit thickness (deg/cm).

        Same as plot2, with the result divided by the sample thickness
        (x 1e7, presumably a unit conversion — TODO confirm).
        """
        self.ui.qtplot.setLabel("left", "法拉第旋角 (deg/cm)")
        df = pd.read_excel(self.file[0],usecols=['磁场(G)','电流(A)'])
        df['电流(A)']=self.ui.deg.value()-np.rad2deg(np.arcsin(np.sqrt(df['电流(A)']/df['电流(A)'].median())*np.sin(np.deg2rad(self.ui.deg.value()))))
        df.dropna(axis=0,inplace=True)
        co = self.ui.spinBox_start.value()
        ci = self.ui.spinBox_end.value()
        if co == ci:
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误','起始行数和结束行数必须不同!')
            msgBox.exec_()
            self.ui.statusbar.showMessage('起始行数和结束行数必须不同!')
        elif co>len(df.index) or ci>len(df.index):
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误',f'超出索引(文件最大行数为{len(df.index)-1})!')
            msgBox.exec_()
            self.ui.statusbar.showMessage(f'超出索引(文件最大行数为{len(df.index)-1})!')
        else:
            try:
                k, b = np.polyfit(df.loc[min(co,ci):max(co,ci),'磁场(G)'], df.loc[min(co,ci):max(co,ci),'电流(A)'], 1)
            except:
                msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Critical,'错误','所选区间无法线性拟合,请选择合适区间!')
                msgBox.exec_()
                self.ui.statusbar.showMessage('所选区间无法线性拟合,请选择合适区间!')
            else:
                df['电流(A)'] = (df['电流(A)']-k*df['磁场(G)'])/self.ui.thickness.value()*1e7
                self.curve.setData(df['磁场(G)'],df['电流(A)'])
# --- Application bootstrap ---
# Enable crisp rendering on high-DPI displays (must precede QApplication creation).
QtGui.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
QtGui.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
pg.mkQApp()
# WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType("fr.ui")
app = QtGui.QApplication([])
# Install the Chinese Qt translation for built-in dialog strings.
translator = QtCore.QTranslator()
translator.load("qt_zh_CN.qm")
app.installTranslator(translator)
app.setFont(QtGui.QFont('微软雅黑'))
# Show a splash screen while the main window is constructed.
splash_pix=QtGui.QPixmap('SplashScreen.png').scaled(600, 600, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
splash = QtGui.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# splash.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
splash.show()
stats = Stats()
# mw=stats
# Wrap the widget in a qtmodern dark-themed window frame.
qtmodern.styles.dark(QtGui.QApplication.instance())
mw = qtmodern.windows.ModernWindow(stats)
mw.setWindowIcon(QtGui.QIcon('logo.png'))
# Center horizontally; place at one third of the screen height.
screen=app.primaryScreen().geometry()
size=mw.geometry()
mw.move((screen.width() - size.width()) // 2,(screen.height() - size.height()) // 3)
mw.show()
splash.finish(mw)
app.exec_()
| [
"pyqtgraph.Qt.QtGui.QApplication.processEvents",
"configparser.ConfigParser",
"pyqtgraph.Qt.loadUiType",
"pyqtgraph.Qt.QtGui.QApplication",
"pyqtgraph.mkPen",
"pandas.read_excel",
"pyqtgraph.Qt.QtGui.QApplication.setAttribute",
"os.path.exists",
"pyqtgraph.Qt.QtGui.QIcon",
"pyqtgraph.Qt.QtGui.QFil... | [((782, 807), 'pyqtgraph.Qt.loadUiType', 'pg.Qt.loadUiType', (['"""fr.ui"""'], {}), "('fr.ui')\n", (798, 807), True, 'import pyqtgraph as pg\n'), ((8114, 8180), 'pyqtgraph.Qt.QtGui.QApplication.setAttribute', 'QtGui.QApplication.setAttribute', (['QtCore.Qt.AA_EnableHighDpiScaling'], {}), '(QtCore.Qt.AA_EnableHighDpiScaling)\n', (8145, 8180), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8182, 8245), 'pyqtgraph.Qt.QtGui.QApplication.setAttribute', 'QtGui.QApplication.setAttribute', (['QtCore.Qt.AA_UseHighDpiPixmaps'], {}), '(QtCore.Qt.AA_UseHighDpiPixmaps)\n', (8213, 8245), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8247, 8258), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (8256, 8258), True, 'import pyqtgraph as pg\n'), ((8331, 8353), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (8349, 8353), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8368, 8388), 'pyqtgraph.Qt.QtCore.QTranslator', 'QtCore.QTranslator', ([], {}), '()\n', (8386, 8388), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8624, 8687), 'pyqtgraph.Qt.QtGui.QSplashScreen', 'QtGui.QSplashScreen', (['splash_pix', 'QtCore.Qt.WindowStaysOnTopHint'], {}), '(splash_pix, QtCore.Qt.WindowStaysOnTopHint)\n', (8643, 8687), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8469, 8488), 'pyqtgraph.Qt.QtGui.QFont', 'QtGui.QFont', (['"""微软雅黑"""'], {}), "('微软雅黑')\n", (8480, 8488), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8831, 8860), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (8858, 8860), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((8923, 8946), 'pyqtgraph.Qt.QtGui.QIcon', 'QtGui.QIcon', (['"""logo.png"""'], {}), "('logo.png')\n", (8934, 8946), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1079, 1093), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1091, 1093), False, 'from configparser import ConfigParser\n'), 
((1106, 1134), 'os.path.exists', 'os.path.exists', (['"""config.ini"""'], {}), "('config.ini')\n", (1120, 1134), False, 'import os\n'), ((2421, 2535), 'pyqtgraph.Qt.QtGui.QFileDialog.getOpenFileName', 'QtGui.QFileDialog.getOpenFileName', ([], {'caption': 'f"""选择法拉第测试文件"""', 'directory': 'self.dir', 'filter': '"""Excel表格文件 (*.xls *.xlsx)"""'}), "(caption=f'选择法拉第测试文件', directory=self.dir,\n filter='Excel表格文件 (*.xls *.xlsx)')\n", (2454, 2535), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((4482, 4537), 'pandas.read_excel', 'pd.read_excel', (['self.file[0]'], {'usecols': "['磁场(G)', '电流(A)']"}), "(self.file[0], usecols=['磁场(G)', '电流(A)'])\n", (4495, 4537), True, 'import pandas as pd\n'), ((4940, 4995), 'pandas.read_excel', 'pd.read_excel', (['self.file[0]'], {'usecols': "['磁场(G)', '电流(A)']"}), "(self.file[0], usecols=['磁场(G)', '电流(A)'])\n", (4953, 4995), True, 'import pandas as pd\n'), ((6339, 6394), 'pandas.read_excel', 'pd.read_excel', (['self.file[0]'], {'usecols': "['磁场(G)', '电流(A)']"}), "(self.file[0], usecols=['磁场(G)', '电流(A)'])\n", (6352, 6394), True, 'import pandas as pd\n'), ((6769, 6824), 'pandas.read_excel', 'pd.read_excel', (['self.file[0]'], {'usecols': "['磁场(G)', '电流(A)']"}), "(self.file[0], usecols=['磁场(G)', '电流(A)'])\n", (6782, 6824), True, 'import pandas as pd\n'), ((8504, 8537), 'pyqtgraph.Qt.QtGui.QPixmap', 'QtGui.QPixmap', (['"""SplashScreen.png"""'], {}), "('SplashScreen.png')\n", (8517, 8537), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((2586, 2615), 'os.path.dirname', 'os.path.dirname', (['self.file[0]'], {}), '(self.file[0])\n', (2601, 2615), False, 'import os\n'), ((2866, 2900), 'pyqtgraph.Qt.QtGui.QApplication.processEvents', 'QtGui.QApplication.processEvents', ([], {}), '()\n', (2898, 2900), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((5310, 5379), 'pyqtgraph.Qt.QtGui.QMessageBox', 'QtGui.QMessageBox', (['QtGui.QMessageBox.Critical', '"""错误"""', '"""起始行数和结束行数必须不同!"""'], {}), "(QtGui.QMessageBox.Critical, '错误', 
'起始行数和结束行数必须不同!')\n", (5327, 5379), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((7139, 7208), 'pyqtgraph.Qt.QtGui.QMessageBox', 'QtGui.QMessageBox', (['QtGui.QMessageBox.Critical', '"""错误"""', '"""起始行数和结束行数必须不同!"""'], {}), "(QtGui.QMessageBox.Critical, '错误', '起始行数和结束行数必须不同!')\n", (7156, 7208), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1916, 1933), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(3)'}), '(width=3)\n', (1924, 1933), True, 'import pyqtgraph as pg\n'), ((2821, 2851), 'os.path.basename', 'os.path.basename', (['self.file[0]'], {}), '(self.file[0])\n', (2837, 2851), False, 'import os\n'), ((2941, 2996), 'pandas.read_excel', 'pd.read_excel', (['self.file[0]'], {'usecols': "['磁场(G)', '电流(A)']"}), "(self.file[0], usecols=['磁场(G)', '电流(A)'])\n", (2954, 2996), True, 'import pandas as pd\n'), ((3042, 3104), 'pyqtgraph.Qt.QtGui.QMessageBox', 'QtGui.QMessageBox', (['QtGui.QMessageBox.Critical', '"""错误"""', '"""文件读取失败!"""'], {}), "(QtGui.QMessageBox.Critical, '错误', '文件读取失败!')\n", (3059, 3104), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1324, 1335), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1333, 1335), False, 'import os\n'), ((5930, 6004), 'pyqtgraph.Qt.QtGui.QMessageBox', 'QtGui.QMessageBox', (['QtGui.QMessageBox.Critical', '"""错误"""', '"""所选区间无法线性拟合,请选择合适区间!"""'], {}), "(QtGui.QMessageBox.Critical, '错误', '所选区间无法线性拟合,请选择合适区间!')\n", (5947, 6004), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((7759, 7833), 'pyqtgraph.Qt.QtGui.QMessageBox', 'QtGui.QMessageBox', (['QtGui.QMessageBox.Critical', '"""错误"""', '"""所选区间无法线性拟合,请选择合适区间!"""'], {}), "(QtGui.QMessageBox.Critical, '错误', '所选区间无法线性拟合,请选择合适区间!')\n", (7776, 7833), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((3756, 3778), 'numpy.argmin', 'np.argmin', (["df['磁场(G)']"], {}), "(df['磁场(G)'])\n", (3765, 3778), True, 'import numpy as np\n'), ((3854, 3876), 'numpy.argmin', 'np.argmin', (["df['磁场(G)']"], {}), "(df['磁场(G)'])\n", (3863, 3876), True, 'import numpy as np\n'), 
((3985, 4007), 'numpy.argmax', 'np.argmax', (["df['磁场(G)']"], {}), "(df['磁场(G)'])\n", (3994, 4007), True, 'import numpy as np\n'), ((4083, 4105), 'numpy.argmax', 'np.argmax', (["df['磁场(G)']"], {}), "(df['磁场(G)'])\n", (4092, 4105), True, 'import numpy as np\n')] |
"""
Functions for performing EDA on the LendingClub and economy data. It is useful to observe
statistics about the data like max, min, and mean to understand distribution, as well as
look for outliers and missing values.
"""
import numpy as np
import pandas as pd
def outlier(arr):
    """Return the values of ``arr`` lying outside the Tukey 1.5*IQR fences.

    A value x is flagged as an outlier when x < Q1 - 1.5*IQR or
    x > Q3 + 1.5*IQR, where IQR = Q3 - Q1 (the standard box-plot rule).
    The original function was an empty stub; this implements it.

    :param arr: array-like of numeric data
    :return: 1-D float ndarray of the outlying values (empty if none,
        or if ``arr`` is empty)
    """
    data = np.asarray(arr, dtype=float).ravel()
    if data.size == 0:
        return data
    q1, q3 = np.percentile(data, [25, 75])
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    return data[(data < lower) | (data > upper)]
def print_statistics(arr, population=False):
    """
    Computes and prints statistics/parameters from the inputted data.
    :param arr: array of data
    :param population: population or sample data
    """
    # ddof=0 gives the population standard deviation, ddof=1 the sample one.
    ddof = 0 if population else 1
    print("Max: ", max(arr))
    print("Min: ", min(arr))
    print("Mean: ", np.mean(arr))
    print("Standard Deviation: ", np.std(arr, ddof=ddof))
| [
"numpy.mean",
"numpy.std"
] | [((632, 644), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (639, 644), True, 'import numpy as np\n'), ((703, 714), 'numpy.std', 'np.std', (['arr'], {}), '(arr)\n', (709, 714), True, 'import numpy as np\n'), ((764, 783), 'numpy.std', 'np.std', (['arr'], {'ddof': '(1)'}), '(arr, ddof=1)\n', (770, 783), True, 'import numpy as np\n')] |
import os
from typing import Optional
from simplejson import OrderedDict
import torch
from torch import nn
import numpy as np
from einops import rearrange
from typing import List
from transformers import AutoModelForSequenceClassification
from transformers.models.wav2vec2.feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from . import ModalityModel
from transformers.models.hubert.modeling_hubert import HubertForSequenceClassification, HubertEncoderLayerStableLayerNorm
class Conformer(ModalityModel):
    """Conformer encoder with a linear classification head.

    Encodes filter-bank audio features with an openspeech ConformerEncoder,
    mean-pools the encoder outputs over time, and classifies with a single
    linear layer.
    """

    def __init__(self, num_classes, input_dim=80, encoder_dim=512,
                 num_layers: int = 17,
                 num_attention_heads: int = 8,
                 feed_forward_expansion_factor: int = 4,
                 conv_expansion_factor: int = 2,
                 input_dropout_p: float = 0.1,
                 feed_forward_dropout_p: float = 0.1,
                 attention_dropout_p: float = 0.1,
                 conv_dropout_p: float = 0.1,
                 conv_kernel_size: int = 31,
                 half_step_residual: bool = True,
                 pretrain_ckpt: Optional[str] = None
                 ):
        """
        Args:
            num_classes: number of output classes (also passed to the encoder,
                but unused there since joint_ctc_attention=False).
            input_dim: filter-bank feature dimension.
            encoder_dim: Conformer hidden size (also the classifier input size).
            pretrain_ckpt: ckpt of ConformerLSTM from openspeech; only its
                encoder weights are loaded into the backbone.
        """
        # Imported lazily so openspeech is only required when this class is used.
        from openspeech.models.conformer.model import ConformerEncoder
        super().__init__()
        self.backbone = ConformerEncoder(
            # num_classes not used unless joint_ctc_attention=True
            num_classes, input_dim=input_dim, encoder_dim=encoder_dim,
            num_layers= num_layers,
            num_attention_heads=num_attention_heads,
            feed_forward_expansion_factor=feed_forward_expansion_factor,
            conv_expansion_factor=conv_expansion_factor,
            input_dropout_p=input_dropout_p,
            feed_forward_dropout_p=feed_forward_dropout_p,
            attention_dropout_p=attention_dropout_p,
            conv_dropout_p=conv_dropout_p,
            conv_kernel_size=conv_kernel_size,
            half_step_residual=half_step_residual,
            joint_ctc_attention=False,
        )
        if pretrain_ckpt is not None:
            assert os.path.exists(pretrain_ckpt)
            ckpt = torch.load(pretrain_ckpt)
            old_state_dict = ckpt["state_dict"]
            state_dict = OrderedDict()
            # Keep only the encoder weights and strip the "encoder." prefix
            # (len("encoder.") == 8) so keys match this bare ConformerEncoder.
            for layer, value in old_state_dict.items():
                if layer.startswith("encoder."):
                    state_dict[layer[8:]] = value
            self.backbone.load_state_dict(state_dict)
        self.out = nn.Linear(encoder_dim, num_classes)

    def forward(self, audios, audio_lengths):
        """
        Args:
            audios: must be filter bank features.
            audio_lengths: per-example sequence lengths.

        Returns:
            Class logits of shape (batch, num_classes).
        """
        # encoder_outputs: (b, max_seq, d); logits unused (no joint CTC head).
        encoder_outputs, encoder_logits, output_lengths = self.backbone(audios, audio_lengths)
        # Mean-pool over the time dimension before classification.
        encoder_outputs = encoder_outputs.mean(1)
        return self.out(encoder_outputs)
class HuBERT(ModalityModel):
    def __init__(self, pretrain_path, num_classes, sample_rate=16_000):
        """
        Args:
            pretrain_path: HF hub name or local path of the pretrained HuBERT
                sequence-classification checkpoint.
            num_classes: number of classification labels.
            sample_rate: expected raw-audio sample rate in Hz (default 16 kHz).
        """
        super().__init__()
        # NOTE(review): the feature extractor is always loaded from the
        # "superb/hubert-large-superb-er" config regardless of pretrain_path —
        # confirm this pairing is intended.
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-large-superb-er")
        self.model = HubertForSequenceClassification.from_pretrained(pretrain_path, num_labels=num_classes)
        self.sample_rate = sample_rate
    def forward(self, audios, **kwargs):
        """Classify raw waveforms, applying LayerDrop during training.

        audios: must be raw wave
        Returns the classification logits.
        """
        # Tokenize/pad the raw audio and move it onto the model's device.
        inputs = self.feature_extractor(
            audios, sampling_rate=self.sample_rate, return_tensors="pt",
            padding=True
        ).to(self.model.device)
        if not self.training:
            # Evaluation: run the stock HF forward pass, no layer skipping.
            logits = self.model(**inputs).logits
            return logits
        # Training: run the encoder layer-by-layer so layers can be dropped.
        block_input, extendend_attention_mask = self.before_layers(inputs)
        for i, block in enumerate(self.blocks):
            # Stochastic LayerDrop: skip a layer with prob `layerdrop`.
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if dropout_probability < self.model.config.layerdrop else False
            # Layer 1 and the last layer are always executed.
            if i == len(self.blocks) - 1 or i == 1:
                skip_the_layer = False
            # skip_the_layer = False
            if not skip_the_layer:
                layer_outputs = self.block(block,
                    block_input, extendend_attention_mask)
                # (bsz, seq_len, d)
                block_input = layer_outputs[0]
        logits = self.after_layers(block_input, inputs["attention_mask"])
        return logits
    def before_layers(self, inputs):
        """Run everything up to (but excluding) the encoder layers.

        inputs: feature-extractor output with "input_values" and
            "attention_mask" entries.
        Returns (hidden_states, extended_attention_mask) ready for the
        per-layer loop in ``forward``.
        """
        input_values, attention_mask = inputs["input_values"], inputs["attention_mask"]
        # Convolutional feature encoder: waveform -> (bsz, d, frames) -> (bsz, frames, d).
        extract_features = self.model.hubert.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        # Downsample the sample-level mask to the frame level.
        attention_mask = self.model.hubert._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
        hidden_states = self.model.hubert.feature_projection(extract_features)
        # SpecAugment-style time masking (indices chosen internally).
        hidden_states = self.model.hubert._mask_hidden_states(hidden_states, mask_time_indices=None)
        # Zero out padded frames before attention.
        hidden_states[~attention_mask] = 0
        # extend attention_mask
        # Additive mask: 0 for real frames, -10000 for padding, broadcast to
        # (bsz, 1, seq, seq) as expected by the attention layers.
        attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
        attention_mask = attention_mask.expand(
            attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
        )
        # Convolutional relative positional embeddings + input dropout.
        position_embeddings = self.model.hubert.encoder.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.model.hubert.encoder.dropout(hidden_states)
        return hidden_states, attention_mask
def block(self, block: HubertEncoderLayerStableLayerNorm, hidden_states,
attention_mask, skip_input=None):
"""
@skip_input: from other modality
"""
if skip_input is None:
attn_residual = hidden_states
else:
attn_residual = skip_input
hidden_states = block.layer_norm(hidden_states)
hidden_states, attn_weights, _ = block.attention(
hidden_states, attention_mask=attention_mask, output_attentions=False
)
hidden_states = block.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + block.feed_forward(
block.final_layer_norm(hidden_states))
outputs = (hidden_states,)
return outputs
    def after_layers(self, encoder_outputs, attention_mask):
        """Final norm, masked mean-pooling, and classification head.

        encoder_outputs: hidden states from the last executed encoder layer.
        attention_mask: sample-level mask from the feature extractor.
        Returns the classification logits.
        """
        encoder_outputs = self.model.hubert.encoder.layer_norm(encoder_outputs)
        # (bsz, seq_len, d1 -> d2)
        hidden_states = self.model.projector(encoder_outputs)
        padding_mask = self.model._get_feature_vector_attention_mask(
            hidden_states.shape[1], attention_mask)
        # Zero padded frames so they do not contribute to the mean.
        hidden_states[~padding_mask] = 0.0
        # (bsz, d2)
        # Mean over real (unpadded) frames only.
        pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.model.classifier(pooled_output)
        return logits
    @property
    def hidden_size(self) -> int:
        """Dimensionality of the backbone hidden states.

        NOTE(review): hard-coded to 1024 (matches hubert-large); if a
        differently sized checkpoint is loaded this will be wrong — consider
        reading ``self.model.config.hidden_size`` instead.
        """
        return 1024
@property
def blocks(self) -> List[HubertEncoderLayerStableLayerNorm]:
"""
normalization blocks
"""
return self.model.hubert.encoder.layers | [
"os.path.exists",
"transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.from_pretrained",
"torch.load",
"simplejson.OrderedDict",
"numpy.random.uniform",
"openspeech.models.conformer.model.ConformerEncoder",
"torch.nn.Linear",
"transformers.models.hubert.modeling_hubert.Hu... | [((1282, 1810), 'openspeech.models.conformer.model.ConformerEncoder', 'ConformerEncoder', (['num_classes'], {'input_dim': 'input_dim', 'encoder_dim': 'encoder_dim', 'num_layers': 'num_layers', 'num_attention_heads': 'num_attention_heads', 'feed_forward_expansion_factor': 'feed_forward_expansion_factor', 'conv_expansion_factor': 'conv_expansion_factor', 'input_dropout_p': 'input_dropout_p', 'feed_forward_dropout_p': 'feed_forward_dropout_p', 'attention_dropout_p': 'attention_dropout_p', 'conv_dropout_p': 'conv_dropout_p', 'conv_kernel_size': 'conv_kernel_size', 'half_step_residual': 'half_step_residual', 'joint_ctc_attention': '(False)'}), '(num_classes, input_dim=input_dim, encoder_dim=encoder_dim,\n num_layers=num_layers, num_attention_heads=num_attention_heads,\n feed_forward_expansion_factor=feed_forward_expansion_factor,\n conv_expansion_factor=conv_expansion_factor, input_dropout_p=\n input_dropout_p, feed_forward_dropout_p=feed_forward_dropout_p,\n attention_dropout_p=attention_dropout_p, conv_dropout_p=conv_dropout_p,\n conv_kernel_size=conv_kernel_size, half_step_residual=\n half_step_residual, joint_ctc_attention=False)\n', (1298, 1810), False, 'from openspeech.models.conformer.model import ConformerEncoder\n'), ((2452, 2487), 'torch.nn.Linear', 'nn.Linear', (['encoder_dim', 'num_classes'], {}), '(encoder_dim, num_classes)\n', (2461, 2487), False, 'from torch import nn\n'), ((2974, 3047), 'transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.from_pretrained', 'Wav2Vec2FeatureExtractor.from_pretrained', (['"""superb/hubert-large-superb-er"""'], {}), "('superb/hubert-large-superb-er')\n", (3014, 3047), False, 'from transformers.models.wav2vec2.feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor\n'), ((3069, 3160), 'transformers.models.hubert.modeling_hubert.HubertForSequenceClassification.from_pretrained', 'HubertForSequenceClassification.from_pretrained', 
(['pretrain_path'], {'num_labels': 'num_classes'}), '(pretrain_path, num_labels=\n num_classes)\n', (3116, 3160), False, 'from transformers.models.hubert.modeling_hubert import HubertForSequenceClassification, HubertEncoderLayerStableLayerNorm\n'), ((2062, 2091), 'os.path.exists', 'os.path.exists', (['pretrain_ckpt'], {}), '(pretrain_ckpt)\n', (2076, 2091), False, 'import os\n'), ((2111, 2136), 'torch.load', 'torch.load', (['pretrain_ckpt'], {}), '(pretrain_ckpt)\n', (2121, 2136), False, 'import torch\n'), ((2210, 2223), 'simplejson.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2221, 2223), False, 'from simplejson import OrderedDict\n'), ((3731, 3754), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3748, 3754), True, 'import numpy as np\n')] |
from deerlab.dipolarkernel import dipolarkernel
from deerlab.utils.utils import ovl
from deerlab.whitegaussnoise import whitegaussnoise
import numpy as np
import matplotlib.pyplot as plt
from deerlab.model import Model,fit
from deerlab.dipolarmodel import ExperimentInfo,dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer
from deerlab import dd_gauss,dd_gauss2,bg_hom3d,bg_exp
import deerlab as dl
# ======================================================================
def test_type():
    "Check that the function returns a valid model type"
    assert isinstance(dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1), Model)
# ======================================================================
# ======================================================================
def test_Nparam_1():
    "Check that the function has the correct number of nonlinear parameters"
    m = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    assert (m.Nnonlin, m.Nlin, m.Nparam) == (5, 1, 6)
# ======================================================================
# ======================================================================
def test_Nparam_2():
    "Check that the function has the correct number of nonlinear parameters"
    m = dipolarmodel(t, r, dd_gauss2, bg_hom3d, npathways=2)
    assert (m.Nnonlin, m.Nlin, m.Nparam) == (9, 2, 11)
# ======================================================================
# ======================================================================
def test_preservation():
    "Check that the inputs models are not modified by the output model"
    m = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=2)
    m.mean.par0 = 15
    # The combined model owns a copy; the input model must stay untouched.
    assert m.mean.par0 == 15
    assert dd_gauss.mean.par0 != 15
# ======================================================================
# ======================================================================
def test_names():
    "Check that the model has correct parameter names"
    m = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    expected = ('mean', 'width', 'conc', 'mod', 'reftime', 'scale')
    assert all(hasattr(m, name) for name in expected)
# ======================================================================
# ---- Shared fixtures: simulation grids and reference dipolar signals ----
t = np.linspace(-0.5,5,100)  # time axis (presumably microseconds -- TODO confirm)
r = np.linspace(2,5,50)  # distance axis (presumably nm -- TODO confirm)
Bfcn = lambda t,lam: bg_hom3d(t,50,lam)  # homogeneous 3D background, conc=50
Bfcn_pheno = lambda t,_: bg_exp(t,0.1)  # phenomenological exponential background
Pr = dd_gauss(r,3,0.2)  # Gaussian distance distribution (mean 3, width 0.2)
# Reference signals (scale 1e5) for one to three modulated pathways.
V1path = 1e5*dipolarkernel(t,r,mod=0.3,bg=Bfcn)@Pr
V1path_noB = 1e5*dipolarkernel(t,r,mod=0.3)@Pr
V1path_phenoB = 1e5*dipolarkernel(t,r,mod=0.3,bg=Bfcn_pheno)@Pr
V2path = 1e5*dipolarkernel(t,r,pathways=[[0.6],[0.3,0],[0.1,2]],bg=Bfcn)@Pr
V3path = 1e5*dipolarkernel(t,r,pathways=[[0.5],[0.3,0],[0.1,2],[0.1,5]],bg=Bfcn)@Pr
# ======================================================================
def test_call_positional():
    "Check that the model called via positional arguments responds correctly"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    # Positional order: mod, reftime, conc, mean, width, scale.
    assert np.allclose(model(0.3, 0.0, 50, 3, 0.2, 1e5), V1path)
# ======================================================================
# ======================================================================
def test_call_keywords():
    "Check that the model called via keyword arguments responds correctly"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    sim = model(mod=0.3, reftime=0.0, conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V1path)
# ======================================================================
# ======================================================================
def test_phenomenological_Bmodel():
    "Check model generation of a dipolar signal with a phenomelogical background"
    model = dipolarmodel(t, r, dd_gauss, Bmodel=bg_exp, npathways=1)
    sim = model(mod=0.3, reftime=0.0, mean=3, width=0.2, decay=0.1, scale=1e5)
    assert np.allclose(sim, V1path_phenoB)
# ======================================================================
# ======================================================================
def test_no_Bmodel():
    "Check model generation of a dipolar signal without background"
    model = dipolarmodel(t, r, dd_gauss, None, npathways=1)
    sim = model(mod=0.3, reftime=0.0, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V1path_noB)
# ======================================================================
# ======================================================================
def test_model_1pathways():
    "Check that the model with one dipolar pathway is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    assert np.allclose(
        model(mod=0.3, reftime=0.0, conc=50, mean=3, width=0.2, scale=1e5),
        V1path)
# ======================================================================
# ======================================================================
def test_model_2pathways():
    "Check that the model with two dipolar pathways is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=2)
    sim = model(lam1=0.3, reftime1=0.0, lam2=0.1, reftime2=2,
                conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V2path)
# ======================================================================
# ======================================================================
def test_model_3pathways():
    "Check that the model with three dipolar pathways is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=3)
    sim = model(lam1=0.3, reftime1=0.0, lam2=0.1, reftime2=2,
                lam3=0.1, reftime3=5,
                conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V3path)
# ======================================================================
# ======================================================================
def test_fit_1pathways():
    "Check that the model can be correctly fitted with one dipolar pathway"
    fitted = fit(dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1),
                 V1path, nonlin_tol=1e-3)
    assert np.allclose(fitted.model, V1path)
# ======================================================================
# ======================================================================
def test_fit_2pathways():
    "Check that the model can be correctly fitted with two dipolar pathways"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=2)
    # Pin the refocusing times to their ground-truth values.
    for param, value in ((model.reftime1, 0), (model.reftime2, 2)):
        param.freeze(value)
    fitted = fit(model, V2path, nonlin_tol=1e-3)
    assert np.allclose(fitted.model, V2path)
# ======================================================================
# ======================================================================
def test_fit_3pathways():
    "Check that the model can be correctly fitted with three dipolar pathways"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=3)
    # Pin the refocusing times to their ground-truth values.
    for param, value in ((model.reftime1, 0), (model.reftime2, 2), (model.reftime3, 5)):
        param.freeze(value)
    fitted = fit(model, V3path, nonlin_tol=1e-3)
    assert np.allclose(fitted.model, V3path)
# ======================================================================
# Reference signals with pathway harmonics (third entry of each pathway).
V1harm = 1e5*dipolarkernel(t,r,pathways=[[0.7],[0.3,0,1]],bg=Bfcn)@Pr
V2harm = 1e5*dipolarkernel(t,r,pathways=[[0.6],[0.3,0,1],[0.1,2,2]],bg=Bfcn)@Pr
V3harm = 1e5*dipolarkernel(t,r,pathways=[[0.5],[0.3,0,1],[0.1,2,2],[0.1,5,3]],bg=Bfcn)@Pr
# ======================================================================
def test_model_1harmonics():
    "Check that the model with one harmonic is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1, harmonics=1)
    sim = model(mod=0.3, reftime=0.0, conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V1harm)
# ======================================================================
# ======================================================================
def test_model_2harmonics():
    "Check that the model with two different harmonics is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=2, harmonics=[1, 2])
    sim = model(lam1=0.3, reftime1=0.0, lam2=0.1, reftime2=2,
                conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V2harm)
# ======================================================================
# ======================================================================
def test_model_3harmonics():
    "Check that the model with three different harmonics is correct"
    model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=3, harmonics=[1, 2, 3])
    sim = model(lam1=0.3, reftime1=0.0, lam2=0.1, reftime2=2,
                lam3=0.1, reftime3=5,
                conc=50, mean=3, width=0.2, scale=1e5)
    assert np.allclose(sim, V3harm)
# ======================================================================
# ======================================================================
def test_call_Pnonparametric():
    "Check that the model with one dipolar pathway is correct"
    model = dipolarmodel(t, r, Bmodel=bg_hom3d, npathways=1)
    # P is supplied directly as a (scaled) nonparametric distribution.
    sim = model(mod=0.3, reftime=0.0, conc=50, P=1e5 * dd_gauss(r, 3, 0.2))
    assert np.allclose(sim, V1path)
# ======================================================================
# ======================================================================
def test_fit_Pnonparametric():
    "Check that the model with one dipolar pathway is correct"
    result = fit(dipolarmodel(t, r, Bmodel=bg_hom3d, npathways=1),
                 V1path, nonlin_tol=1e-3)
    assert np.allclose(result.model, V1path, atol=1e-2)
    assert ovl(result.P / 1e5, Pr) > 0.975
# ======================================================================
# Inter-pulse delays and reference signals for the 3/4/5-pulse DEER tests.
tau1,tau2,tau3 = 1,2,3
V3pulse = 1e5*dipolarkernel(t,r,pathways=[[0.6],[0.3,0],[0.1,tau1]],bg=Bfcn)@Pr
V4pulse = 1e5*dipolarkernel(t,r,pathways=[[0.6],[0.3,tau1],[0.1,tau1+tau2]],bg=Bfcn)@Pr
V5pulse = 1e5*dipolarkernel(t,r,pathways=[[0.6],[0.3,tau3],[0.1,tau2]],bg=Bfcn)@Pr
# ======================================================================
def test_ex_3pdeer_type():
    "Check the 3-pulse DEER experimental model."
    assert isinstance(ex_3pdeer(tau1), ExperimentInfo)
# ======================================================================
# ======================================================================
def test_ex_3pdeer_fit():
    "Check the 3-pulse DEER experimental model."
    model = dipolarmodel(t, r, Bmodel=bg_hom3d, npathways=2,
                         experiment=ex_3pdeer(tau1))
    result = fit(model, V3pulse, nonlin_tol=1e-3)
    assert np.allclose(V3pulse, result.model, atol=1e-2)
    assert ovl(result.P / 1e5, Pr) > 0.975
# ======================================================================
# ======================================================================
def test_ex_4pdeer_type():
    "Check the 4-pulse DEER experimental model."
    assert isinstance(ex_4pdeer(tau1, tau2), ExperimentInfo)
# ======================================================================
# ======================================================================
def test_ex_4pdeer_fit():
    "Check the 4-pulse DEER experimental model."
    model = dipolarmodel(t, r, Bmodel=bg_hom3d, npathways=2,
                         experiment=ex_4pdeer(tau1, tau2))
    result = fit(model, V4pulse, nonlin_tol=1e-3)
    assert np.allclose(V4pulse, result.model, atol=1e-2)
    assert ovl(result.P / 1e5, Pr) > 0.975
# ======================================================================
# ======================================================================
def test_ex_5pdeer_type():
    "Check the 5-pulse DEER experimental model."
    assert isinstance(ex_5pdeer(tau1, tau2, tau3), ExperimentInfo)
# ======================================================================
# ======================================================================
def test_ex_5pdeer_fit():
    "Check the 5-pulse DEER experimental model in fitting."
    model = dipolarmodel(t, r, Bmodel=bg_hom3d, npathways=2,
                         experiment=ex_5pdeer(tau1, tau2, tau3))
    result = fit(model, V5pulse, nonlin_tol=1e-3)
    assert np.allclose(V5pulse, result.model, atol=1e-2)
    assert ovl(result.P / 1e5, Pr) > 0.975
# ======================================================================
# ======================================================================
def test_orisel():
    "Check that dipolar models with orientation selection work"
    # A uniform orientation weighting must reproduce the unweighted signal.
    plain = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
    weighted = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1,
                            orisel=lambda theta: np.ones_like(theta))
    kwargs = dict(mean=3, width=0.2, mod=0.3, reftime=0, conc=200, scale=1e2)
    assert np.allclose(plain(**kwargs), weighted(**kwargs), rtol=1e-4)
# ======================================================================
| [
"deerlab.model.fit",
"numpy.ones_like",
"numpy.allclose",
"deerlab.bg_exp",
"deerlab.dipolarmodel.ex_3pdeer",
"deerlab.dd_gauss",
"deerlab.dipolarmodel.dipolarmodel",
"deerlab.bg_hom3d",
"deerlab.dipolarmodel.ex_5pdeer",
"numpy.linspace",
"deerlab.dipolarmodel.ex_4pdeer",
"deerlab.utils.utils.... | [((2292, 2317), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(5)', '(100)'], {}), '(-0.5, 5, 100)\n', (2303, 2317), True, 'import numpy as np\n'), ((2320, 2341), 'numpy.linspace', 'np.linspace', (['(2)', '(5)', '(50)'], {}), '(2, 5, 50)\n', (2331, 2341), True, 'import numpy as np\n'), ((2424, 2443), 'deerlab.dd_gauss', 'dd_gauss', (['r', '(3)', '(0.2)'], {}), '(r, 3, 0.2)\n', (2432, 2443), False, 'from deerlab import dd_gauss, dd_gauss2, bg_hom3d, bg_exp\n'), ((572, 623), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (584, 623), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((915, 966), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (927, 966), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((1289, 1341), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss2', 'bg_hom3d'], {'npathways': '(2)'}), '(t, r, dd_gauss2, bg_hom3d, npathways=2)\n', (1301, 1341), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((1664, 1715), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(2)'}), '(t, r, dd_gauss, bg_hom3d, npathways=2)\n', (1676, 1715), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((2030, 2081), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (2042, 2081), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, 
dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((2361, 2381), 'deerlab.bg_hom3d', 'bg_hom3d', (['t', '(50)', 'lam'], {}), '(t, 50, lam)\n', (2369, 2381), False, 'from deerlab import dd_gauss, dd_gauss2, bg_hom3d, bg_exp\n'), ((2405, 2419), 'deerlab.bg_exp', 'bg_exp', (['t', '(0.1)'], {}), '(t, 0.1)\n', (2411, 2419), False, 'from deerlab import dd_gauss, dd_gauss2, bg_hom3d, bg_exp\n'), ((2960, 3011), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (2972, 3011), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((3065, 3090), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path'], {}), '(Vsim, V1path)\n', (3076, 3090), True, 'import numpy as np\n'), ((3353, 3404), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (3365, 3404), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((3492, 3517), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path'], {}), '(Vsim, V1path)\n', (3503, 3517), True, 'import numpy as np\n'), ((3797, 3853), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss'], {'Bmodel': 'bg_exp', 'npathways': '(1)'}), '(t, r, dd_gauss, Bmodel=bg_exp, npathways=1)\n', (3809, 3853), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((3943, 3975), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path_phenoB'], {}), '(Vsim, V1path_phenoB)\n', (3954, 3975), True, 'import numpy as np\n'), ((4227, 4274), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'None'], {'npathways': '(1)'}), '(t, r, dd_gauss, None, npathways=1)\n', (4239, 4274), False, 'from deerlab.dipolarmodel import 
ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((4354, 4383), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path_noB'], {}), '(Vsim, V1path_noB)\n', (4365, 4383), True, 'import numpy as np\n'), ((4636, 4687), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (4648, 4687), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((4775, 4800), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path'], {}), '(Vsim, V1path)\n', (4786, 4800), True, 'import numpy as np\n'), ((5054, 5105), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(2)'}), '(t, r, dd_gauss, bg_hom3d, npathways=2)\n', (5066, 5105), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((5236, 5261), 'numpy.allclose', 'np.allclose', (['Vsim', 'V2path'], {}), '(Vsim, V2path)\n', (5247, 5261), True, 'import numpy as np\n'), ((5517, 5568), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(3)'}), '(t, r, dd_gauss, bg_hom3d, npathways=3)\n', (5529, 5568), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((5734, 5759), 'numpy.allclose', 'np.allclose', (['Vsim', 'V3path'], {}), '(Vsim, V3path)\n', (5745, 5759), True, 'import numpy as np\n'), ((6024, 6075), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (6036, 6075), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((6090, 6127), 'deerlab.model.fit', 'fit', (['Vmodel', 'V1path'], {'nonlin_tol': '(0.001)'}), '(Vmodel, 
V1path, nonlin_tol=0.001)\n', (6093, 6127), False, 'from deerlab.model import Model, fit\n'), ((6137, 6170), 'numpy.allclose', 'np.allclose', (['result.model', 'V1path'], {}), '(result.model, V1path)\n', (6148, 6170), True, 'import numpy as np\n'), ((6435, 6486), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(2)'}), '(t, r, dd_gauss, bg_hom3d, npathways=2)\n', (6447, 6486), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((6561, 6598), 'deerlab.model.fit', 'fit', (['Vmodel', 'V2path'], {'nonlin_tol': '(0.001)'}), '(Vmodel, V2path, nonlin_tol=0.001)\n', (6564, 6598), False, 'from deerlab.model import Model, fit\n'), ((6608, 6641), 'numpy.allclose', 'np.allclose', (['result.model', 'V2path'], {}), '(result.model, V2path)\n', (6619, 6641), True, 'import numpy as np\n'), ((6908, 6959), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(3)'}), '(t, r, dd_gauss, bg_hom3d, npathways=3)\n', (6920, 6959), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((7064, 7101), 'deerlab.model.fit', 'fit', (['Vmodel', 'V3path'], {'nonlin_tol': '(0.001)'}), '(Vmodel, V3path, nonlin_tol=0.001)\n', (7067, 7101), False, 'from deerlab.model import Model, fit\n'), ((7111, 7144), 'numpy.allclose', 'np.allclose', (['result.model', 'V3path'], {}), '(result.model, V3path)\n', (7122, 7144), True, 'import numpy as np\n'), ((7634, 7698), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)', 'harmonics': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1, harmonics=1)\n', (7646, 7698), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((7785, 7810), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1harm'], {}), 
'(Vsim, V1harm)\n', (7796, 7810), True, 'import numpy as np\n'), ((8068, 8137), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(2)', 'harmonics': '[1, 2]'}), '(t, r, dd_gauss, bg_hom3d, npathways=2, harmonics=[1, 2])\n', (8080, 8137), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((8266, 8291), 'numpy.allclose', 'np.allclose', (['Vsim', 'V2harm'], {}), '(Vsim, V2harm)\n', (8277, 8291), True, 'import numpy as np\n'), ((8551, 8623), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(3)', 'harmonics': '[1, 2, 3]'}), '(t, r, dd_gauss, bg_hom3d, npathways=3, harmonics=[1, 2, 3])\n', (8563, 8623), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((8786, 8811), 'numpy.allclose', 'np.allclose', (['Vsim', 'V3harm'], {}), '(Vsim, V3harm)\n', (8797, 8811), True, 'import numpy as np\n'), ((9068, 9116), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r'], {'Bmodel': 'bg_hom3d', 'npathways': '(1)'}), '(t, r, Bmodel=bg_hom3d, npathways=1)\n', (9080, 9116), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((9197, 9222), 'numpy.allclose', 'np.allclose', (['Vsim', 'V1path'], {}), '(Vsim, V1path)\n', (9208, 9222), True, 'import numpy as np\n'), ((9478, 9526), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r'], {'Bmodel': 'bg_hom3d', 'npathways': '(1)'}), '(t, r, Bmodel=bg_hom3d, npathways=1)\n', (9490, 9526), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((9542, 9579), 'deerlab.model.fit', 'fit', (['Vmodel', 'V1path'], {'nonlin_tol': '(0.001)'}), '(Vmodel, V1path, nonlin_tol=0.001)\n', (9545, 9579), False, 'from deerlab.model import Model, 
fit\n'), ((10180, 10195), 'deerlab.dipolarmodel.ex_3pdeer', 'ex_3pdeer', (['tau1'], {}), '(tau1)\n', (10189, 10195), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((10488, 10503), 'deerlab.dipolarmodel.ex_3pdeer', 'ex_3pdeer', (['tau1'], {}), '(tau1)\n', (10497, 10503), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((10517, 10588), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r'], {'Bmodel': 'bg_hom3d', 'npathways': '(2)', 'experiment': 'experiment'}), '(t, r, Bmodel=bg_hom3d, npathways=2, experiment=experiment)\n', (10529, 10588), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((10598, 10636), 'deerlab.model.fit', 'fit', (['Vmodel', 'V3pulse'], {'nonlin_tol': '(0.001)'}), '(Vmodel, V3pulse, nonlin_tol=0.001)\n', (10601, 10636), False, 'from deerlab.model import Model, fit\n'), ((10963, 10984), 'deerlab.dipolarmodel.ex_4pdeer', 'ex_4pdeer', (['tau1', 'tau2'], {}), '(tau1, tau2)\n', (10972, 10984), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((11277, 11298), 'deerlab.dipolarmodel.ex_4pdeer', 'ex_4pdeer', (['tau1', 'tau2'], {}), '(tau1, tau2)\n', (11286, 11298), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((11311, 11382), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r'], {'Bmodel': 'bg_hom3d', 'npathways': '(2)', 'experiment': 'experiment'}), '(t, r, Bmodel=bg_hom3d, npathways=2, experiment=experiment)\n', (11323, 11382), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((11392, 11430), 'deerlab.model.fit', 'fit', (['Vmodel', 'V4pulse'], {'nonlin_tol': '(0.001)'}), '(Vmodel, 
V4pulse, nonlin_tol=0.001)\n', (11395, 11430), False, 'from deerlab.model import Model, fit\n'), ((11757, 11784), 'deerlab.dipolarmodel.ex_5pdeer', 'ex_5pdeer', (['tau1', 'tau2', 'tau3'], {}), '(tau1, tau2, tau3)\n', (11766, 11784), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((12086, 12113), 'deerlab.dipolarmodel.ex_5pdeer', 'ex_5pdeer', (['tau1', 'tau2', 'tau3'], {}), '(tau1, tau2, tau3)\n', (12095, 12113), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((12125, 12196), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r'], {'Bmodel': 'bg_hom3d', 'npathways': '(2)', 'experiment': 'experiment'}), '(t, r, Bmodel=bg_hom3d, npathways=2, experiment=experiment)\n', (12137, 12196), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((12206, 12244), 'deerlab.model.fit', 'fit', (['Vmodel', 'V5pulse'], {'nonlin_tol': '(0.001)'}), '(Vmodel, V5pulse, nonlin_tol=0.001)\n', (12209, 12244), False, 'from deerlab.model import Model, fit\n'), ((12573, 12624), 'deerlab.dipolarmodel.dipolarmodel', 'dipolarmodel', (['t', 'r', 'dd_gauss', 'bg_hom3d'], {'npathways': '(1)'}), '(t, r, dd_gauss, bg_hom3d, npathways=1)\n', (12585, 12624), False, 'from deerlab.dipolarmodel import ExperimentInfo, dipolarpenalty, dipolarmodel, ex_4pdeer, ex_3pdeer, ex_5pdeer\n'), ((12897, 12936), 'numpy.allclose', 'np.allclose', (['Vref', 'Vorisel'], {'rtol': '(0.0001)'}), '(Vref, Vorisel, rtol=0.0001)\n', (12908, 12936), True, 'import numpy as np\n'), ((2455, 2492), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': '(0.3)', 'bg': 'Bfcn'}), '(t, r, mod=0.3, bg=Bfcn)\n', (2468, 2492), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((2510, 2538), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': '(0.3)'}), 
'(t, r, mod=0.3)\n', (2523, 2538), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((2560, 2603), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': '(0.3)', 'bg': 'Bfcn_pheno'}), '(t, r, mod=0.3, bg=Bfcn_pheno)\n', (2573, 2603), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((2617, 2683), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.6], [0.3, 0], [0.1, 2]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.6], [0.3, 0], [0.1, 2]], bg=Bfcn)\n', (2630, 2683), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((2693, 2769), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.5], [0.3, 0], [0.1, 2], [0.1, 5]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.5], [0.3, 0], [0.1, 2], [0.1, 5]], bg=Bfcn)\n', (2706, 2769), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((7232, 7291), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.7], [0.3, 0, 1]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.7], [0.3, 0, 1]], bg=Bfcn)\n', (7245, 7291), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((7302, 7374), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.6], [0.3, 0, 1], [0.1, 2, 2]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.6], [0.3, 0, 1], [0.1, 2, 2]], bg=Bfcn)\n', (7315, 7374), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((7382, 7471), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.5], [0.3, 0, 1], [0.1, 2, 2], [0.1, 5, 3]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.5], [0.3, 0, 1], [0.1, 2, 2], [0.1, 5, 3]],\n bg=Bfcn)\n', (7395, 7471), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((9589, 9633), 'numpy.allclose', 'np.allclose', (['result.model', 'V1path'], {'atol': '(0.01)'}), '(result.model, V1path, atol=0.01)\n', (9600, 9633), True, 'import numpy as np\n'), ((9774, 9843), 
'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.6], [0.3, 0], [0.1, tau1]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.6], [0.3, 0], [0.1, tau1]], bg=Bfcn)\n', (9787, 9843), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((9854, 9933), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.6], [0.3, tau1], [0.1, tau1 + tau2]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.6], [0.3, tau1], [0.1, tau1 + tau2]], bg=Bfcn)\n', (9867, 9933), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((9942, 10014), 'deerlab.dipolarkernel.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'pathways': '[[0.6], [0.3, tau3], [0.1, tau2]]', 'bg': 'Bfcn'}), '(t, r, pathways=[[0.6], [0.3, tau3], [0.1, tau2]], bg=Bfcn)\n', (9955, 10014), False, 'from deerlab.dipolarkernel import dipolarkernel\n'), ((10646, 10691), 'numpy.allclose', 'np.allclose', (['V3pulse', 'result.model'], {'atol': '(0.01)'}), '(V3pulse, result.model, atol=0.01)\n', (10657, 10691), True, 'import numpy as np\n'), ((11440, 11485), 'numpy.allclose', 'np.allclose', (['V4pulse', 'result.model'], {'atol': '(0.01)'}), '(V4pulse, result.model, atol=0.01)\n', (11451, 11485), True, 'import numpy as np\n'), ((12254, 12299), 'numpy.allclose', 'np.allclose', (['V5pulse', 'result.model'], {'atol': '(0.01)'}), '(V5pulse, result.model, atol=0.01)\n', (12265, 12299), True, 'import numpy as np\n'), ((9636, 9664), 'deerlab.utils.utils.ovl', 'ovl', (['(result.P / 100000.0)', 'Pr'], {}), '(result.P / 100000.0, Pr)\n', (9639, 9664), False, 'from deerlab.utils.utils import ovl\n'), ((10694, 10722), 'deerlab.utils.utils.ovl', 'ovl', (['(result.P / 100000.0)', 'Pr'], {}), '(result.P / 100000.0, Pr)\n', (10697, 10722), False, 'from deerlab.utils.utils import ovl\n'), ((11488, 11516), 'deerlab.utils.utils.ovl', 'ovl', (['(result.P / 100000.0)', 'Pr'], {}), '(result.P / 100000.0, Pr)\n', (11491, 11516), False, 'from deerlab.utils.utils import ovl\n'), ((12302, 12330), 
'deerlab.utils.utils.ovl', 'ovl', (['(result.P / 100000.0)', 'Pr'], {}), '(result.P / 100000.0, Pr)\n', (12305, 12330), False, 'from deerlab.utils.utils import ovl\n'), ((9166, 9185), 'deerlab.dd_gauss', 'dd_gauss', (['r', '(3)', '(0.2)'], {}), '(r, 3, 0.2)\n', (9174, 9185), False, 'from deerlab import dd_gauss, dd_gauss2, bg_hom3d, bg_exp\n'), ((12708, 12727), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (12720, 12727), True, 'import numpy as np\n')] |
import pandas as pd
import ipywidgets as widgets
import src
from IPython.display import clear_output
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.stats as sts
def show_tasks_dropdown(dropdown, df=None):
    """Render *dropdown* and attach a handler that runs the selected exercise.

    Parameters
    ----------
    dropdown : ipywidgets.Dropdown
        Widget whose ``value`` names the exercise to execute.
    df : pandas.DataFrame, optional
        Unused by the body; kept only for backward compatibility.  The
        previous default, ``df=src.get_raw_data()``, was evaluated once at
        function *definition* time (a side-effecting default argument), so it
        is now ``None`` and each branch loads its own data lazily.
    """
    display(dropdown)

    def dropdown_eventhandler(change):
        # Redraw the notebook output area from scratch on every selection.
        clear_output()
        display(dropdown)
        if change.new == "1. Checking the number of mice":
            df = src.get_raw_data()
            numMice = len(set(df["Mouse ID"]))
            display("Number of mice => " + str(numMice))
        elif change.new == "2. Generate a summary table":
            display(generate_a_summary_table())
        elif change.new == "3a. Generate a plot using Panda's dataframe .plot() that shows the total mice for each treatment regimen":
            drug_grp = src.get_last_instance_data().groupby(by="Drug Regimen")
            drug_grp['Mouse ID'].count().plot(kind="bar", title="Total mice for each treatment regimen")
            plt.ylabel("Mice Count")
            plt.xticks(rotation=45)
            plt.show()
            display()
        elif change.new == "3b. Generate a plot using Matplotlib's pyplot that shows the total mice for each treatment regimen":
            drug_grp = src.get_last_instance_data().groupby(by="Drug Regimen")
            plt.bar(drug_grp.groups.keys(),drug_grp['Mouse ID'].count())
            plt.xticks(rotation=45)
            plt.ylabel("Mice Count")
            plt.title("Total mice for each treatment regimen")
            display()
        elif change.new == "4a. Generate a pie plot using Panda's dataframe .plot() that shows the distribution of female or male mice in the study":
            drug_grp = src.get_last_instance_data().groupby(by="Sex")
            display(
                drug_grp['Mouse ID'].count().plot(kind='pie', title="Distribution of female or male mice in the study")
            )
        elif change.new == "4b. Generate a pie plot using Matplotlib's pyplot that shows the distribution of female or male mice in the study":
            drug_grp = src.get_last_instance_data().groupby(by="Sex")
            plt.pie(drug_grp['Mouse ID'].count(),labels=drug_grp.groups.keys(),explode = [0,0.2],shadow=True, startangle=-30)
            plt.title("Distribution of female or male mice in the study")
            display()
        elif change.new == "5a. Calculate the final tumor volume of each mouse across four of the most promising treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin":
            df = src.get_four_promising_treatments()
            display(df)
        elif change.new == "6a. Calculate the quartiles and IQR and quantitatively determine if there are any potential outliers across all four treatment regimens.":
            df = src.get_four_promising_treatments()
            grps = df.groupby("Drug Regimen")
            summary_df = pd.DataFrame({},columns=["Name","LowerB","Min","Q1","IQR","Q3","Max","UpperB"])
            outliers = []
            for name, grp in grps:
                arr = grp['Final Tumor Volume (mm3)']
                # Ideal fourths estimate Q1/Q3; Tukey fences at 1.5*IQR
                # flag potential outliers within each regimen.
                [Q1, Q3] = sts.mstats.idealfourths(arr)
                IQR = sts.iqr(arr)
                lower_bound = Q1 - 1.5 * IQR
                upper_bound = Q3 + 1.5 * IQR
                df_row = pd.DataFrame({
                    "Name" : [name],
                    "LowerB": [f"{lower_bound:.1f}"],
                    "Min" : [f"{arr.min():.1f}"],
                    "Q1" : [f"{Q1:.1f}"],
                    "IQR" : [f"{IQR:.1f}"],
                    "Q3" : [f"{Q3:.1f}"],
                    "Max" : [f"{arr.max():.1f}"],
                    "UpperB" : [f"{upper_bound:.1f}"]
                })
                summary_df = summary_df.append(df_row)
                filt = ((arr < lower_bound) | (arr > upper_bound))
                if len(arr[filt]) > 0:
                    outliers.append(arr[filt])
            print(outliers)
            display(summary_df.set_index("Name"))
        elif change.new == "6b. Using Matplotlib, generate a box and whisker plot of the final tumor volume for all four treatment regimens and highlight any potential outliers in the plot by changing their color and style.":
            exercise6b()
            df = src.get_last_instance_data()
            filt = df["Drug Regimen"]=="Infubinol"
            df = df[filt].rename(columns={"Tumor Volume (mm3)":"Final Tumor Volume (mm3)"})
            # Row 669 is the flagged Infubinol outlier record shown below.
            display(df.loc[669])
        elif change.new == "7. Select a mouse that was treated with Capomulin and generate a line plot of time point versus tumor volume for that mouse.":
            df = src.get_raw_data()
            filt = df["Drug Regimen"] == "Capomulin"
            # Take the first Capomulin-treated mouse that appears in the data.
            mouse = df[filt].iloc[0,:]["Mouse ID"]
            filt = df["Mouse ID"] == mouse
            mouse_data = df[filt][["Timepoint","Tumor Volume (mm3)"]].set_index("Timepoint")
            plt.plot(mouse_data)
            plt.title(f"Capomulin treated mouse '{mouse}' data: Tumor volume over time")
            plt.xlabel("Timepoint")
            plt.ylim(0,50)
            plt.ylabel("Tumor size (mm3)")
            plt.show()
            display()
        elif change.new == "8. Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin treatment regimen.":
            df = src.get_mouse_metadata()
            filt = df["Drug Regimen"] == "Capomulin"
            df_weight = df[filt][["Mouse ID", "Weight (g)"]]
            df = src.get_raw_data()
            filt = df["Drug Regimen"] == "Capomulin"
            df = df[filt][['Mouse ID',"Tumor Volume (mm3)"]]
            # Average each mouse's tumor volume over all timepoints, then
            # join the per-mouse weight onto that average.
            df_avg_tumor_volume = df.groupby(by="Mouse ID").mean()
            df = df_weight.merge(df_avg_tumor_volume,on="Mouse ID",how='left')
            plt.scatter(df['Tumor Volume (mm3)'],df["Weight (g)"])
            plt.ylabel("Mouse Weight (g)")
            plt.xlabel("Average tumor volume (mm3)")
            plt.xlim(34,47)
            plt.ylim(10,28)
            plt.show()
            display()
        elif change.new == "9. Calculate the correlation coefficient and linear regression model between mouse weight and average tumor volume for the Capomulin treatment. Plot the linear regression model on top of the previous scatter plot.":
            exercise9()
            display()
        elif change.new == "10a. Observation 1":
            print("Observation 1: Rat tumor size correlates with mouse weight, and it's statistically signficant")
            exercise9()
            display()
        elif change.new == "10b. Observation 2":
            print("Observation 2: Capomulin and Ramicane were the only treatments that on average decreased tumor size.")
            display(generate_a_summary_table())
        elif change.new == "10c. Observation 3":
            print("Mouse c326 was an outlier in the Infubinol group. It looks like it's last timepoint was 5.. Did the mouse die?")
            exercise6b()
            df = src.get_last_instance_data()
            filt = df["Drug Regimen"]=="Infubinol"
            df = df[filt].rename(columns={"Tumor Volume (mm3)":"Final Tumor Volume (mm3)"})
            display(df.loc[669])
    dropdown.observe(dropdown_eventhandler, names='value')
def exercise6b():
    """Draw a box-and-whisker plot of the final tumor volume for the four
    promising treatment regimens; outliers are shown as blue circles ('ob')."""
    treatments = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
    data = src.get_four_promising_treatments()
    by_drug = data.groupby(by="Drug Regimen")
    # One named Series of final volumes per treatment, in a fixed order.
    series_list = []
    for treatment in treatments:
        volumes = by_drug.get_group(treatment).reset_index()['Final Tumor Volume (mm3)']
        series_list.append(volumes.rename(treatment))
    figure, axes = plt.subplots()
    plt.boxplot(series_list, labels=treatments, sym='ob')
    axes.set_title("Box and whisker plot for 4 promising treatments")
    plt.show()
def exercise9():
    """Scatter-plot Capomulin mouse weight against average tumor volume, with
    a fitted regression line and an annotation showing R, slope and p-value.

    Improvements over the original: the statistics (``pearsonr`` and
    ``linregress``) are computed once instead of once per f-string field, and
    the redundant in-function imports of numpy and scipy were removed (both
    are already imported at module level as ``np`` and ``sts``).
    """
    df = src.get_mouse_metadata()
    filt = df["Drug Regimen"] == "Capomulin"
    df_weight = df[filt][["Mouse ID", "Weight (g)"]]
    df = src.get_raw_data()
    filt = df["Drug Regimen"] == "Capomulin"
    df = df[filt][['Mouse ID', "Tumor Volume (mm3)"]]
    # Average each mouse's tumor volume across timepoints, then attach weight.
    df_avg_tumor_volume = df.groupby(by="Mouse ID").mean()
    df = df_weight.merge(df_avg_tumor_volume, on="Mouse ID", how='left')
    x = df['Tumor Volume (mm3)']
    y = df["Weight (g)"]
    plt.scatter(x, y)
    plt.ylabel("Mouse Weight (g)")
    plt.xlabel("Average tumor volume (mm3)")
    plt.xlim(34, 47)
    plt.ylim(10, 28)
    # Compute each statistic exactly once.
    r_value, p_value = sts.pearsonr(x, y)
    slope = sts.linregress(x, y)[0]
    # Degree-1 polynomial fit evaluated at the distinct x positions.
    xs = np.unique(x)
    plt.plot(xs, np.poly1d(np.polyfit(x, y, 1))(xs))
    plt.annotate(f"R = {r_value:.2f}\nslope = {slope:.2f}\np-value = {p_value:.7f}", (36, 24))
    plt.show()
def generate_a_summary_table():
    """Build a styled summary of tumor-volume statistics per drug regimen.

    For each regimen the table reports the mean starting (first-timepoint)
    volume, the final mean/median/variance/standard deviation/SEM, and the
    percent change between starting and final mean volumes, colour-coded
    with a 'coolwarm' gradient on the '% Change' column.

    Returns
    -------
    pandas.io.formats.style.Styler
        A formatted styler ready for notebook display.
    """
    df = src.get_raw_data()
    drug_regimens = df['Drug Regimen'].unique().tolist()
    # First observation per mouse gives its starting tumor volume; the last
    # observation gives the final volume it reached.
    df_first = df.drop_duplicates('Mouse ID',keep='first')
    df_last = df.drop_duplicates('Mouse ID',keep='last')
    drug_grp = df_last.groupby(by="Drug Regimen")
    # NOTE: the original called np.set_printoptions() with no arguments here;
    # that call changes nothing and has been removed.
    summary_df = pd.DataFrame({
        "Drug Regimen" : drug_regimens,
        "Starting Volume" : df_first.groupby(by="Drug Regimen").mean()["Tumor Volume (mm3)"],
        "Mean" : drug_grp['Tumor Volume (mm3)'].mean(),
        "Median" : drug_grp['Tumor Volume (mm3)'].median(),
        "Variance" : drug_grp['Tumor Volume (mm3)'].var(),
        "Standard Deviation" : drug_grp['Tumor Volume (mm3)'].std(),
        "SEM" : drug_grp['Tumor Volume (mm3)'].sem()
    })
    # Percent change relative to the final mean volume.
    summary_df.insert(3,"% Change",(summary_df["Mean"]-summary_df["Starting Volume"])/summary_df["Mean"]*100)
    styler = summary_df.style.background_gradient(cmap=cm.get_cmap('coolwarm', 12),subset=['% Change'])
    styler.format({'Starting Volume': "{:.1f}", 'Mean': "{:.1f}", '% Change': "{:.1f}%", 'Median': "{:.1f}", 'Variance': "{:.1f}", 'Standard Deviation': "{:.2f}", 'SEM': "{:.2f}"})
    return styler
def old_exercise6a():
    """Pooled-quartile analysis of the final tumor volume across the four
    promising treatments (superseded by the per-regimen version in the
    dropdown handler).

    Plots a histogram of all final tumor volumes with Q1/Q3, the IQR span,
    and the 1.5*IQR outlier fences marked; prints any outlying volumes; and
    displays a one-row formatted summary table.
    """
    df = src.get_four_promising_treatments()
    # Ideal fourths give a smoothed estimate of the first/third quartiles.
    [Q1, Q3] = sts.mstats.idealfourths(df['Final Tumor Volume (mm3)'])
    IQR = Q3 - Q1
    # Standard Tukey fences: values beyond 1.5*IQR from the quartiles are
    # considered potential outliers.
    lower_bound = Q1 - 1.5 * IQR
    upper_bound = Q3 + 1.5 * IQR
    filt = ((df['Final Tumor Volume (mm3)']<lower_bound)|(df['Final Tumor Volume (mm3)']>upper_bound))
    print(df['Final Tumor Volume (mm3)'][filt])
    plt.hist(df['Final Tumor Volume (mm3)'],color="grey",label="histogram of 4 treatment group's final tumor volume",alpha=0.5)
    plt.axvline(x=Q1,ymax=20,label=f"Q1: {Q1:.1f}",color='black')
    plt.axvline(x=Q3,ymax=20,label=f"Q3: {Q3:.1f}",color='black')
    plt.hlines(y=2.5,xmin=Q1,xmax=Q3,label=f"IQR: {IQR:.1f}",color='orange')
    plt.axvline(x=lower_bound,ymax=20,label=f"lower_bound: {lower_bound:.1f}",color='blue')
    plt.axvline(x=upper_bound,ymax=20,label=f"upper_bound: {upper_bound:.1f}",color='red')
    plt.legend()
    plt.show()
    # One-row summary table, rounded to one decimal place for display.
    display(
        pd.DataFrame({
            "LowerB": [lower_bound],
            "Min" : [df['Final Tumor Volume (mm3)'].min()],
            "Q1" : [Q1],
            "Median" : [df['Final Tumor Volume (mm3)'].median()],
            "Q3" : [Q3],
            "Max" : [df['Final Tumor Volume (mm3)'].max()],
            "UpperB" : [upper_bound]
        }).style.format({"LowerB":"{:.1f}",
                         "Min":"{:.1f}",
                         "Q1":"{:.1f}",
                         "Median":"{:.1f}",
                         "Q3":"{:.1f}",
                         "Max":"{:.1f}",
                         "UpperB": "{:.1f}"})
) | [
"matplotlib.pyplot.boxplot",
"scipy.stats.linregress",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"src.get_four_promising_treatments",
"src.get_raw_data",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.scatter",
... | [((247, 265), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (263, 265), False, 'import src\n'), ((7532, 7567), 'src.get_four_promising_treatments', 'src.get_four_promising_treatments', ([], {}), '()\n', (7565, 7567), False, 'import src\n'), ((7799, 7813), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7811, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7818, 7863), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['grps'], {'labels': 'grp_names', 'sym': '"""ob"""'}), "(grps, labels=grp_names, sym='ob')\n", (7829, 7863), True, 'import matplotlib.pyplot as plt\n'), ((7934, 7944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7942, 7944), True, 'import matplotlib.pyplot as plt\n'), ((7989, 8013), 'src.get_mouse_metadata', 'src.get_mouse_metadata', ([], {}), '()\n', (8011, 8013), False, 'import src\n'), ((8158, 8176), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (8174, 8176), False, 'import src\n'), ((8553, 8570), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (8564, 8570), True, 'import matplotlib.pyplot as plt\n'), ((8586, 8616), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mouse Weight (g)"""'], {}), "('Mouse Weight (g)')\n", (8596, 8616), True, 'import matplotlib.pyplot as plt\n'), ((8633, 8673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average tumor volume (mm3)"""'], {}), "('Average tumor volume (mm3)')\n", (8643, 8673), True, 'import matplotlib.pyplot as plt\n'), ((8690, 8706), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(34)', '(47)'], {}), '(34, 47)\n', (8698, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8722, 8738), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10)', '(28)'], {}), '(10, 28)\n', (8730, 8738), True, 'import matplotlib.pyplot as plt\n'), ((9117, 9127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9125, 9127), True, 'import matplotlib.pyplot as plt\n'), ((9304, 9322), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (9320, 
9322), False, 'import src\n'), ((9552, 9573), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '()\n', (9571, 9573), True, 'import numpy as np\n'), ((10469, 10504), 'src.get_four_promising_treatments', 'src.get_four_promising_treatments', ([], {}), '()\n', (10502, 10504), False, 'import src\n'), ((10520, 10575), 'scipy.stats.mstats.idealfourths', 'sts.mstats.idealfourths', (["df['Final Tumor Volume (mm3)']"], {}), "(df['Final Tumor Volume (mm3)'])\n", (10543, 10575), True, 'import scipy.stats as sts\n'), ((10817, 10948), 'matplotlib.pyplot.hist', 'plt.hist', (["df['Final Tumor Volume (mm3)']"], {'color': '"""grey"""', 'label': '"""histogram of 4 treatment group\'s final tumor volume"""', 'alpha': '(0.5)'}), '(df[\'Final Tumor Volume (mm3)\'], color=\'grey\', label=\n "histogram of 4 treatment group\'s final tumor volume", alpha=0.5)\n', (10825, 10948), True, 'import matplotlib.pyplot as plt\n'), ((10945, 11009), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Q1', 'ymax': '(20)', 'label': 'f"""Q1: {Q1:.1f}"""', 'color': '"""black"""'}), "(x=Q1, ymax=20, label=f'Q1: {Q1:.1f}', color='black')\n", (10956, 11009), True, 'import matplotlib.pyplot as plt\n'), ((11011, 11075), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Q3', 'ymax': '(20)', 'label': 'f"""Q3: {Q3:.1f}"""', 'color': '"""black"""'}), "(x=Q3, ymax=20, label=f'Q3: {Q3:.1f}', color='black')\n", (11022, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11077, 11153), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': '(2.5)', 'xmin': 'Q1', 'xmax': 'Q3', 'label': 'f"""IQR: {IQR:.1f}"""', 'color': '"""orange"""'}), "(y=2.5, xmin=Q1, xmax=Q3, label=f'IQR: {IQR:.1f}', color='orange')\n", (11087, 11153), True, 'import matplotlib.pyplot as plt\n'), ((11154, 11248), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'lower_bound', 'ymax': '(20)', 'label': 'f"""lower_bound: {lower_bound:.1f}"""', 'color': '"""blue"""'}), "(x=lower_bound, ymax=20, label=f'lower_bound: 
{lower_bound:.1f}',\n color='blue')\n", (11165, 11248), True, 'import matplotlib.pyplot as plt\n'), ((11246, 11339), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'upper_bound', 'ymax': '(20)', 'label': 'f"""upper_bound: {upper_bound:.1f}"""', 'color': '"""red"""'}), "(x=upper_bound, ymax=20, label=f'upper_bound: {upper_bound:.1f}',\n color='red')\n", (11257, 11339), True, 'import matplotlib.pyplot as plt\n'), ((11337, 11349), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11347, 11349), True, 'import matplotlib.pyplot as plt\n'), ((11354, 11364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11362, 11364), True, 'import matplotlib.pyplot as plt\n'), ((343, 357), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (355, 357), False, 'from IPython.display import clear_output\n'), ((8905, 8917), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (8914, 8917), True, 'import numpy as np\n'), ((461, 479), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (477, 479), False, 'import src\n'), ((8950, 8962), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (8959, 8962), True, 'import numpy as np\n'), ((10184, 10211), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""coolwarm"""', '(12)'], {}), "('coolwarm', 12)\n", (10195, 10211), False, 'from matplotlib import cm\n'), ((8929, 8948), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (8939, 8948), True, 'import numpy as np\n'), ((1022, 1046), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mice Count"""'], {}), "('Mice Count')\n", (1032, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1082), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1069, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1105), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1103, 1105), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9015), 'scipy.stats.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), 
'(x, y)\n', (9009, 9015), False, 'from scipy.stats.stats import pearsonr\n'), ((9033, 9049), 'scipy.stats.linregress', 'linregress', (['x', 'y'], {}), '(x, y)\n', (9043, 9049), False, 'from scipy.stats import linregress\n'), ((9069, 9083), 'scipy.stats.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (9077, 9083), False, 'from scipy.stats.stats import pearsonr\n'), ((1421, 1444), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1431, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mice Count"""'], {}), "('Mice Count')\n", (1467, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1544), 'matplotlib.pyplot.title', 'plt.title', (['"""Total mice for each treatment regimen"""'], {}), "('Total mice for each treatment regimen')\n", (1503, 1544), True, 'import matplotlib.pyplot as plt\n'), ((849, 877), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (875, 877), False, 'import src\n'), ((1280, 1308), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (1306, 1308), False, 'import src\n'), ((2298, 2359), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of female or male mice in the study"""'], {}), "('Distribution of female or male mice in the study')\n", (2307, 2359), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1772), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (1770, 1772), False, 'import src\n'), ((2579, 2614), 'src.get_four_promising_treatments', 'src.get_four_promising_treatments', ([], {}), '()\n', (2612, 2614), False, 'import src\n'), ((2113, 2141), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (2139, 2141), False, 'import src\n'), ((2823, 2858), 'src.get_four_promising_treatments', 'src.get_four_promising_treatments', ([], {}), '()\n', (2856, 2858), False, 'import src\n'), ((2930, 3021), 'pandas.DataFrame', 
'pd.DataFrame', (['{}'], {'columns': "['Name', 'LowerB', 'Min', 'Q1', 'IQR', 'Q3', 'Max', 'UpperB']"}), "({}, columns=['Name', 'LowerB', 'Min', 'Q1', 'IQR', 'Q3', 'Max',\n 'UpperB'])\n", (2942, 3021), True, 'import pandas as pd\n'), ((3186, 3214), 'scipy.stats.mstats.idealfourths', 'sts.mstats.idealfourths', (['arr'], {}), '(arr)\n', (3209, 3214), True, 'import scipy.stats as sts\n'), ((3237, 3249), 'scipy.stats.iqr', 'sts.iqr', (['arr'], {}), '(arr)\n', (3244, 3249), True, 'import scipy.stats as sts\n'), ((4523, 4551), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (4549, 4551), False, 'import src\n'), ((4909, 4927), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (4925, 4927), False, 'import src\n'), ((5181, 5201), 'matplotlib.pyplot.plot', 'plt.plot', (['mouse_data'], {}), '(mouse_data)\n', (5189, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5214, 5290), 'matplotlib.pyplot.title', 'plt.title', (['f"""Capomulin treated mouse \'{mouse}\' data: Tumor volume over time"""'], {}), '(f"Capomulin treated mouse \'{mouse}\' data: Tumor volume over time")\n', (5223, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timepoint"""'], {}), "('Timepoint')\n", (5313, 5326), True, 'import matplotlib.pyplot as plt\n'), ((5339, 5354), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(50)'], {}), '(0, 50)\n', (5347, 5354), True, 'import matplotlib.pyplot as plt\n'), ((5366, 5396), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tumor size (mm3)"""'], {}), "('Tumor size (mm3)')\n", (5376, 5396), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5417, 5419), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5621), 'src.get_mouse_metadata', 'src.get_mouse_metadata', ([], {}), '()\n', (5619, 5621), False, 'import src\n'), ((5754, 5772), 'src.get_raw_data', 'src.get_raw_data', ([], {}), '()\n', (5770, 5772), 
False, 'import src\n'), ((6046, 6101), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df['Tumor Volume (mm3)']", "df['Weight (g)']"], {}), "(df['Tumor Volume (mm3)'], df['Weight (g)'])\n", (6057, 6101), True, 'import matplotlib.pyplot as plt\n'), ((6113, 6143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mouse Weight (g)"""'], {}), "('Mouse Weight (g)')\n", (6123, 6143), True, 'import matplotlib.pyplot as plt\n'), ((6156, 6196), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average tumor volume (mm3)"""'], {}), "('Average tumor volume (mm3)')\n", (6166, 6196), True, 'import matplotlib.pyplot as plt\n'), ((6209, 6225), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(34)', '(47)'], {}), '(34, 47)\n', (6217, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6253), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10)', '(28)'], {}), '(10, 28)\n', (6245, 6253), True, 'import matplotlib.pyplot as plt\n'), ((6265, 6275), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6273, 6275), True, 'import matplotlib.pyplot as plt\n'), ((7240, 7268), 'src.get_last_instance_data', 'src.get_last_instance_data', ([], {}), '()\n', (7266, 7268), False, 'import src\n')] |
import numpy as np
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
class TorchDistributionWrapper(ActionDistribution):
    """Thin adapter exposing a ``torch.distributions`` object (``self.dist``,
    set by subclasses) through the RLlib ``ActionDistribution`` interface."""

    @override(ActionDistribution)
    def __init__(self, inputs, model):
        # Accept raw arrays/lists as well as tensors.
        inputs = inputs if isinstance(inputs, torch.Tensor) else torch.Tensor(inputs)
        super().__init__(inputs, model)
        # Cache of the most recent sample drawn from this distribution.
        self.last_sample = None

    @override(ActionDistribution)
    def logp(self, actions):
        """Log-probability of ``actions`` under the wrapped distribution."""
        return self.dist.log_prob(actions)

    @override(ActionDistribution)
    def entropy(self):
        """Entropy of the wrapped distribution."""
        return self.dist.entropy()

    @override(ActionDistribution)
    def kl(self, other):
        """KL divergence from this distribution to ``other``'s."""
        return torch.distributions.kl.kl_divergence(self.dist, other.dist)

    @override(ActionDistribution)
    def sample(self):
        """Draw a stochastic sample and remember it."""
        drawn = self.dist.sample()
        self.last_sample = drawn
        return drawn

    @override(ActionDistribution)
    def sampled_action_logp(self):
        """Log-probability of the most recently drawn sample."""
        assert self.last_sample is not None
        return self.logp(self.last_sample)
class TorchCategorical(TorchDistributionWrapper):
    """Wrapper class for PyTorch Categorical distribution.

    Supports an optional softmax ``temperature``: the logits are divided by
    the temperature before the distribution is built (higher temperature ->
    flatter, more exploratory sampling).
    """

    @override(ActionDistribution)
    def __init__(self, inputs, model=None, temperature=1.0):
        if temperature != 1.0:
            assert temperature > 0.0, \
                "Categorical `temperature` must be > 0.0!"
            # Out-of-place division: the original `inputs /= temperature`
            # mutated the caller's tensor in place, silently rescaling the
            # logits that the caller still holds.
            inputs = inputs / temperature
        super().__init__(inputs, model)
        self.dist = torch.distributions.categorical.Categorical(
            logits=self.inputs)

    @override(ActionDistribution)
    def deterministic_sample(self):
        # Greedy action: argmax over the per-class probabilities.
        self.last_sample = self.dist.probs.argmax(dim=1)
        return self.last_sample

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One logit per discrete action.
        return action_space.n
class TorchMultiCategorical(TorchDistributionWrapper):
    """MultiCategorical distribution for MultiDiscrete action spaces.

    The flat logits vector is split into one chunk per sub-action and each
    chunk backs an independent Categorical (held in ``self.cats``).
    """

    @override(TorchDistributionWrapper)
    def __init__(self, inputs, model, input_lens):
        super().__init__(inputs, model)
        # input_lens may be an ndarray or list; torch's split() wants a tuple.
        chunks = self.inputs.split(tuple(input_lens), dim=1)
        self.cats = []
        for chunk in chunks:
            self.cats.append(
                torch.distributions.categorical.Categorical(logits=chunk))

    @override(TorchDistributionWrapper)
    def sample(self):
        # One sample per sub-distribution, stacked to (batch, num_subactions).
        samples = [cat.sample() for cat in self.cats]
        self.last_sample = torch.stack(samples, dim=1)
        return self.last_sample

    @override(ActionDistribution)
    def deterministic_sample(self):
        # Greedy per-component argmax, stacked like sample().
        greedy = [torch.argmax(cat.probs, -1) for cat in self.cats]
        self.last_sample = torch.stack(greedy, dim=1)
        return self.last_sample

    @override(TorchDistributionWrapper)
    def logp(self, actions):
        # A (batch, num_subactions) tensor is unbound into per-component
        # columns so every Categorical scores its own piece.
        if isinstance(actions, torch.Tensor):
            actions = torch.unbind(actions, dim=1)
        per_component = [
            cat.log_prob(act) for cat, act in zip(self.cats, actions)
        ]
        # Joint log-prob is the sum of the independent components.
        return torch.sum(torch.stack(per_component), dim=0)

    @override(ActionDistribution)
    def multi_entropy(self):
        """Per-component entropies, shape (batch, num_subactions)."""
        entropies = [cat.entropy() for cat in self.cats]
        return torch.stack(entropies, dim=1)

    @override(TorchDistributionWrapper)
    def entropy(self):
        """Total entropy: sum of the independent components."""
        return torch.sum(self.multi_entropy(), dim=1)

    @override(ActionDistribution)
    def multi_kl(self, other):
        """Per-component KL divergences against ``other``."""
        kls = [
            torch.distributions.kl.kl_divergence(cat, oth_cat)
            for cat, oth_cat in zip(self.cats, other.cats)
        ]
        return torch.stack(kls, dim=1)

    @override(TorchDistributionWrapper)
    def kl(self, other):
        """Total KL: sum of the independent components."""
        return torch.sum(self.multi_kl(other), dim=1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One logit per choice of every sub-action.
        return np.sum(action_space.nvec)
class TorchDiagGaussian(TorchDistributionWrapper):
    """Wrapper class for PyTorch Normal distribution with diagonal covariance:
    each action dimension is an independent 1-D Gaussian."""

    @override(ActionDistribution)
    def __init__(self, inputs, model):
        super().__init__(inputs, model)
        # The model emits [mean, log_std] concatenated along the feature axis.
        mean, log_std = torch.chunk(inputs, 2, dim=1)
        std = torch.exp(log_std)
        self.dist = torch.distributions.normal.Normal(mean, std)

    @override(ActionDistribution)
    def deterministic_sample(self):
        """Greedy action: the distribution's mean."""
        self.last_sample = self.dist.mean
        return self.last_sample

    @override(TorchDistributionWrapper)
    def logp(self, actions):
        # Dimensions are independent, so the joint log-prob is the sum over
        # the last axis.
        return super().logp(actions).sum(-1)

    @override(TorchDistributionWrapper)
    def entropy(self):
        return super().entropy().sum(-1)

    @override(TorchDistributionWrapper)
    def kl(self, other):
        return super().kl(other).sum(-1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # Two outputs (mean and log_std) per action dimension.
        return np.prod(action_space.shape) * 2
class TorchDeterministic(TorchDistributionWrapper):
    """Action distribution that returns the input values directly.

    This is similar to DiagGaussian with standard deviation zero (thus only
    requiring the "mean" values as NN output).
    """

    @override(ActionDistribution)
    def deterministic_sample(self):
        # The network output *is* the action.
        return self.inputs

    @override(TorchDistributionWrapper)
    def sample(self):
        # Sampling a deterministic distribution yields its only value.
        return self.deterministic_sample()

    @override(TorchDistributionWrapper)
    def sampled_action_logp(self):
        # The single outcome has probability 1, hence log-prob 0.
        return 0.0

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One output per action dimension.
        return np.prod(action_space.shape)
| [
"numpy.prod",
"ray.rllib.utils.try_import_torch",
"numpy.sum",
"ray.rllib.utils.annotations.override"
] | [((187, 205), 'ray.rllib.utils.try_import_torch', 'try_import_torch', ([], {}), '()\n', (203, 205), False, 'from ray.rllib.utils import try_import_torch\n'), ((315, 343), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (323, 343), False, 'from ray.rllib.utils.annotations import override\n'), ((590, 618), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (598, 618), False, 'from ray.rllib.utils.annotations import override\n'), ((697, 725), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (705, 725), False, 'from ray.rllib.utils.annotations import override\n'), ((790, 818), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (798, 818), False, 'from ray.rllib.utils.annotations import override\n'), ((925, 953), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (933, 953), False, 'from ray.rllib.utils.annotations import override\n'), ((1060, 1088), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1068, 1088), False, 'from ray.rllib.utils.annotations import override\n'), ((1331, 1359), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1339, 1359), False, 'from ray.rllib.utils.annotations import override\n'), ((1728, 1756), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1736, 1756), False, 'from ray.rllib.utils.annotations import override\n'), ((1906, 1934), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1914, 1934), False, 'from ray.rllib.utils.annotations import override\n'), ((2166, 2200), 'ray.rllib.utils.annotations.override', 'override', 
(['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (2174, 2200), False, 'from ray.rllib.utils.annotations import override\n'), ((2577, 2611), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (2585, 2611), False, 'from ray.rllib.utils.annotations import override\n'), ((2773, 2801), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (2781, 2801), False, 'from ray.rllib.utils.annotations import override\n'), ((2992, 3026), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (3000, 3026), False, 'from ray.rllib.utils.annotations import override\n'), ((3357, 3385), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (3365, 3385), False, 'from ray.rllib.utils.annotations import override\n'), ((3493, 3527), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (3501, 3527), False, 'from ray.rllib.utils.annotations import override\n'), ((3611, 3639), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (3619, 3639), False, 'from ray.rllib.utils.annotations import override\n'), ((3893, 3927), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (3901, 3927), False, 'from ray.rllib.utils.annotations import override\n'), ((4031, 4059), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4039, 4059), False, 'from ray.rllib.utils.annotations import override\n'), ((4282, 4310), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4290, 4310), False, 'from ray.rllib.utils.annotations import override\n'), ((4530, 4558), 
'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4538, 4558), False, 'from ray.rllib.utils.annotations import override\n'), ((4675, 4709), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (4683, 4709), False, 'from ray.rllib.utils.annotations import override\n'), ((4790, 4824), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (4798, 4824), False, 'from ray.rllib.utils.annotations import override\n'), ((4895, 4929), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (4903, 4929), False, 'from ray.rllib.utils.annotations import override\n'), ((5020, 5048), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5028, 5048), False, 'from ray.rllib.utils.annotations import override\n'), ((5420, 5448), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5428, 5448), False, 'from ray.rllib.utils.annotations import override\n'), ((5518, 5552), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (5526, 5552), False, 'from ray.rllib.utils.annotations import override\n'), ((5613, 5647), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (5621, 5647), False, 'from ray.rllib.utils.annotations import override\n'), ((5737, 5765), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5745, 5765), False, 'from ray.rllib.utils.annotations import override\n'), ((4140, 4165), 'numpy.sum', 'np.sum', (['action_space.nvec'], {}), '(action_space.nvec)\n', (4146, 4165), True, 'import numpy as np\n'), ((5846, 5873), 
'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (5853, 5873), True, 'import numpy as np\n'), ((5129, 5156), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (5136, 5156), True, 'import numpy as np\n')] |
import random
import copy
import numpy as np
from tk.TKPlayers import HeuristicPlayer
from tk.TKGame import TKGame as Game
from tk.TKGame import Board, WIN_SCORE
from tk.keras.NNet import NNetWrapper as nn
from tk.test.testTKLogick import generate_encoded_state, parse_encoded_state
from tk.keras.NNet import NNetWrapper as NNet
from keras.utils import Progbar
from utils import *
# Number of outer training iterations; value comes from the shared utils helper.
NUM_ITERS = number_of_train_iterations()
# Environment steps rolled out per iteration when building a training batch.
NUM_STEPS = 1000
# Sentinel reward value marking actions that are invalid in a state
# (presumably matches what the environment reports -- confirm).
INVALID_ACTION_REWARD = -1
def random_argmax(array):
    """Pick a random index among near-maximal entries of ``array``.

    Indices whose value is within 2 of the maximum (and not equal to the
    invalid-action sentinel) are candidates; the true argmax is seeded three
    times into the pool, so it is weighted more heavily than the runners-up.
    """
    max_diff = 2
    best_idx = np.argmax(array)
    best_value = array[best_idx]
    candidates = [best_idx] * 3
    candidates.extend(
        idx for idx, value in enumerate(array)
        if value != INVALID_ACTION_REWARD and best_value - value <= max_diff
    )
    return random.choice(candidates)
def generate_train_batch(num_steps):
    """Generate supervised training data by rolling out the heuristic player.

    For up to ``num_steps`` board states, the heuristic player's chosen action
    becomes the policy target (one-hot) and the current player's normalized
    score becomes the value target.

    Args:
        num_steps (int): number of environment steps to simulate.

    Returns:
        tuple: ``(input_boards, target_pis, target_vs)`` -- parallel lists.
        Terminal states contribute no sample, so lengths may be < num_steps.
    """
    input_boards = []
    target_pis = []
    target_vs = []
    board = Board()
    game = Game()
    heuristicPlayer = HeuristicPlayer()
    player = 1
    print("generate_train_batch")
    progbar = Progbar(num_steps)
    for x in range(num_steps):
        progbar.add(1)
        encoded_state = board.get_encoded_state()
        canonical_form = game.getCanonicalForm(encoded_state, player)
        best_action = heuristicPlayer.play(canonical_form)
        game_ended = game.getGameEnded(encoded_state, player)
        if game_ended == 0:
            # Snapshot the pre-move state as the training input.
            input_board = game.getCanonicalForm(copy.deepcopy(encoded_state), player)
            encoded_state = board.execute_move(best_action, player)
            # NOTE(review): player is 1 / -1 here -- confirm the scores
            # container supports signed indexing as intended.
            score = board.get_players_scores()[player]
            action_onehot = number_to_onehot(best_action, Board.action_size)
            win_probability = float(score) / float(WIN_SCORE)
            player *= -1
            input_boards.append(input_board)
            target_pis.append(action_onehot)
            target_vs.append(win_probability)
            # print("\n")
            # print(parse_encoded_state(input_board))
            # print("best_action " + str(best_action))
        else:
            # BUG FIX: was `player == 1` (a no-op comparison). The intent is
            # to reset the side to move when the board is reset below.
            player = 1
            board = Board()  # no valid actions or game ended, reset board
            encoded_state = board.get_encoded_state()
    return input_boards, target_pis, target_vs
#test
# batch = generate_train_batch(NUM_STEPS)
# exit()
# training
# Build the game, wrap it in the Keras network, and resume from the latest
# checkpoint before continuing training.
g = Game()
n1 = NNet(g)
n1.load_checkpoint('temp',"best.pth.tar")
# NOTE(review): presumably pre-builds the Keras predict function so the model
# can be used from other threads (older tf.keras API) -- confirm.
n1.nnet.model._make_predict_function()
for i in range(NUM_ITERS):
    print("iteration " + str(i) + " / " + str(NUM_ITERS))
    # Fresh supervised batch generated by the heuristic player each iteration.
    input_boards, target_pis, target_vs = generate_train_batch(NUM_STEPS)
    input_boards = np.asarray(input_boards)
    target_pis = np.asarray(target_pis)
    target_vs = np.asarray(target_vs)
    # Train policy and value heads jointly on the freshly generated batch.
    n1.nnet.model.fit(x = input_boards, y = [target_pis, target_vs], batch_size = int(NUM_STEPS * .6), epochs = 5)
    # Checkpoint every 5 iterations.
    if i % 5 == 0:
        n1.save_checkpoint('temp',"best.pth.tar")
    # NOTE(review): evaluates on the same batch just trained on, so this is a
    # training-loss readout, not a validation metric.
    loss = n1.nnet.model.test_on_batch(x = input_boards, y = [target_pis, target_vs])
print(loss) | [
"tk.TKPlayers.HeuristicPlayer",
"random.choice",
"keras.utils.Progbar",
"numpy.asarray",
"numpy.argmax",
"copy.deepcopy",
"tk.TKGame.Board",
"tk.keras.NNet.NNetWrapper",
"tk.TKGame.TKGame"
] | [((2109, 2115), 'tk.TKGame.TKGame', 'Game', ([], {}), '()\n', (2113, 2115), True, 'from tk.TKGame import TKGame as Game\n'), ((2121, 2128), 'tk.keras.NNet.NNetWrapper', 'NNet', (['g'], {}), '(g)\n', (2125, 2128), True, 'from tk.keras.NNet import NNetWrapper as NNet\n'), ((522, 538), 'numpy.argmax', 'np.argmax', (['array'], {}), '(array)\n', (531, 538), True, 'import numpy as np\n'), ((758, 786), 'random.choice', 'random.choice', (['max_value_ids'], {}), '(max_value_ids)\n', (771, 786), False, 'import random\n'), ((888, 895), 'tk.TKGame.Board', 'Board', ([], {}), '()\n', (893, 895), False, 'from tk.TKGame import Board, WIN_SCORE\n'), ((904, 910), 'tk.TKGame.TKGame', 'Game', ([], {}), '()\n', (908, 910), True, 'from tk.TKGame import TKGame as Game\n'), ((930, 947), 'tk.TKPlayers.HeuristicPlayer', 'HeuristicPlayer', ([], {}), '()\n', (945, 947), False, 'from tk.TKPlayers import HeuristicPlayer\n'), ((1004, 1022), 'keras.utils.Progbar', 'Progbar', (['num_steps'], {}), '(num_steps)\n', (1011, 1022), False, 'from keras.utils import Progbar\n'), ((2380, 2404), 'numpy.asarray', 'np.asarray', (['input_boards'], {}), '(input_boards)\n', (2390, 2404), True, 'import numpy as np\n'), ((2419, 2441), 'numpy.asarray', 'np.asarray', (['target_pis'], {}), '(target_pis)\n', (2429, 2441), True, 'import numpy as np\n'), ((2455, 2476), 'numpy.asarray', 'np.asarray', (['target_vs'], {}), '(target_vs)\n', (2465, 2476), True, 'import numpy as np\n'), ((1888, 1895), 'tk.TKGame.Board', 'Board', ([], {}), '()\n', (1893, 1895), False, 'from tk.TKGame import Board, WIN_SCORE\n'), ((1353, 1381), 'copy.deepcopy', 'copy.deepcopy', (['encoded_state'], {}), '(encoded_state)\n', (1366, 1381), False, 'import copy\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import absolute_import, division, print_function
import cv2
import os
import numpy as np
from torch.utils import data
class PFLDDatasets(data.Dataset):
    """Dataset managing loading, augmentation and generation of PFLD samples."""

    def __init__(self, file_list, transforms=None, data_root="", img_size=112):
        """
        Parameters
        ----------
        file_list : list
            a list of file path and annotations
        transforms : function
            function for data augmentation
        data_root : str
            the root path of dataset
        img_size : int
            the size of image height or width
        """
        self.img_size = img_size
        self.data_root = data_root
        self.transforms = transforms
        # Per-sample state, filled lazily by __getitem__.
        self.line = None
        self.path = None
        self.land = None
        self.angle = None
        with open(file_list, "r") as f:
            self.lines = f.readlines()

    def __getitem__(self, index):
        """Return ``(image, landmarks, angles)`` for the sample at ``index``."""
        tokens = self.lines[index].strip().split()
        self.line = tokens
        # Resolve the image path, optionally relative to the data root.
        img_path = tokens[0]
        if self.data_root:
            img_path = os.path.join(self.data_root, img_path)
        image = cv2.imread(img_path)
        image = cv2.resize(image, (self.img_size, self.img_size))
        # 106 landmarks, two coordinates each, followed by the angle values.
        num_land_values = 106 * 2
        self.land = np.asarray(tokens[1:num_land_values + 1], dtype=np.float32)
        self.angle = np.asarray(tokens[num_land_values + 1:], dtype=np.float32)
        if self.transforms:
            image = self.transforms(image)
        self.img = image
        return self.img, self.land, self.angle

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.lines)
| [
"os.path.join",
"cv2.resize",
"numpy.asarray",
"cv2.imread"
] | [((1404, 1456), 'cv2.resize', 'cv2.resize', (['self.img', '(self.img_size, self.img_size)'], {}), '(self.img, (self.img_size, self.img_size))\n', (1414, 1456), False, 'import cv2\n'), ((1504, 1558), 'numpy.asarray', 'np.asarray', (['self.line[1:106 * 2 + 1]'], {'dtype': 'np.float32'}), '(self.line[1:106 * 2 + 1], dtype=np.float32)\n', (1514, 1558), True, 'import numpy as np\n'), ((1583, 1636), 'numpy.asarray', 'np.asarray', (['self.line[106 * 2 + 1:]'], {'dtype': 'np.float32'}), '(self.line[106 * 2 + 1:], dtype=np.float32)\n', (1593, 1636), True, 'import numpy as np\n'), ((1343, 1367), 'cv2.imread', 'cv2.imread', (['self.line[0]'], {}), '(self.line[0])\n', (1353, 1367), False, 'import cv2\n'), ((1262, 1304), 'os.path.join', 'os.path.join', (['self.data_root', 'self.line[0]'], {}), '(self.data_root, self.line[0])\n', (1274, 1304), False, 'import os\n')] |
from collections import deque
from abc import ABCMeta, abstractmethod
import numpy as np
import gym
import config as conf
from graph_repr import GraphRepr
class ControllerEnv(gym.Env, metaclass=ABCMeta):
    """Abstract gym environment for training a controller.

    Subclasses supply the plant dynamics via the abstract hooks below; this
    base class handles target/disturbance scheduling, actuator delay, reward
    shaping and episode bookkeeping. The min/max bounds and timing attributes
    referenced here (min_target, max_faktor, time_without_*_change, ...) are
    presumably set by ``init_values`` in concrete subclasses -- confirm.
    """

    @abstractmethod
    def init_values(self):
        pass

    @abstractmethod
    def init_physical_values(self):
        pass

    @abstractmethod
    def calc_physical_values(self):
        pass

    @abstractmethod
    def should_reset(self):
        pass

    @abstractmethod
    def give_error(self):
        pass

    @abstractmethod
    def give_measurement(self):
        pass

    def __init__(self):
        # Subclass hooks populate bounds and the initial physical state.
        self.init_values()
        self.init_physical_values()
        self.delay = conf.delay
        self.window_width = conf.window_width
        self.window_height = conf.window_height
        self.total_time = conf.total_time  # in s
        # time_available counts DOWN from total_time to 0 over an episode.
        self.time_available = self.total_time
        self.delay = conf.delay
        self.delta_time = conf.delta_time
        # Remaining-time stamps of the most recent scheduled changes.
        self.last_small_target_change = self.time_available
        self.last_big_target_change = self.time_available
        self.last_env_force_change = self.time_available
        # Randomize setpoint, actuator gain and external disturbance.
        self.target = np.random.uniform(self.min_target, self.max_target)
        self.faktor = np.random.uniform(self.min_faktor, self.max_faktor)
        self.env_force = np.random.uniform(self.min_env_force, self.max_env_force)
        self.init_delay_list()
        # Lazily created graph; 0 means "no render window yet".
        self.graph = 0
        self.was_reset = False
        self.action_space = gym.spaces.Box(-conf.action_space_high,
                                           conf.action_space_high, shape=(3,))
        self.observation_space = gym.spaces.Box(float('-inf'), float('inf'),
                                                shape=(conf.amount_prev_observations * 3 + 6,))

    def init_delay_list(self):
        """Build a FIFO modelling an actuator delay of ``self.delay`` seconds."""
        # 0.5 for rounding up (to be sure)
        length_needed_for_delay = round(self.delay / self.delta_time + 0.5)
        self.delay_list = deque(maxlen=length_needed_for_delay)
        for _ in range(length_needed_for_delay):
            self.delay_list.append(0)

    def step(self, new_output):
        """Advance the simulation by one ``delta_time`` tick."""
        self.time_available -= self.delta_time
        # NOTE(review): deque.pop() removes the element that was just
        # appended, so the configured delay never takes effect here; likely
        # popleft() / reading the oldest entry was intended -- confirm.
        self.delay_list.append(new_output)
        self.output = self.delay_list.pop()
        self.calc_physical_values()
        # Periodically jitter the target (small and big moves) and the
        # external disturbance, each on its own schedule.
        self.target, self.last_small_target_change = self.change_val(
            self.target, self.last_small_target_change,
            self.time_without_small_target_change, self.max_small_target_change,
            self.max_target, self.min_target)
        self.target, self.last_big_target_change = self.change_val(
            self.target, self.last_big_target_change,
            self.time_without_big_target_change, self.max_big_target_change,
            self.max_target, self.min_target)
        self.env_force, self.last_env_force_change = self.change_val(
            self.env_force, self.last_env_force_change,
            self.time_without_env_force_change, self.max_env_force_change,
            self.max_env_force, self.min_env_force)
        self.was_reset = False
        self.measurement = self.give_measurement()
        self.error = self.give_error()
        observation = [self.error, self.measurement]
        # NOTE(review): observation has 2 entries while observation_space
        # declares amount_prev_observations * 3 + 6 -- presumably stacked by
        # a wrapper elsewhere; confirm.
        return observation, self.get_reward(), self.is_done(), {}

    def change_val(self, to_change_value, last_change, time_without_change, max_change, max_val, min_val):
        """Randomly perturb a value once ``time_without_change`` has elapsed."""
        # time_available counts down, so (last_change - now) is elapsed time.
        if last_change - self.time_available >= time_without_change:
            to_change_value = self.random_change(to_change_value, max_change, max_val, min_val)
            last_change = self.time_available
        return to_change_value, last_change

    def random_change(self, to_change, range, upper_bound, lower_bound):
        """Add uniform noise in [-range, range], clamped to the bounds."""
        # NOTE: parameter `range` shadows the builtin; kept as-is.
        to_change += np.random.uniform(-range, range)
        to_change = lower_bound if to_change < lower_bound else to_change
        to_change = upper_bound if to_change > upper_bound else to_change
        return to_change

    def is_done(self):
        """Episode ends when time runs out or the plant had to be reset."""
        if self.time_available <= 0:
            return True
        elif self.was_reset:
            return True
        else:
            return False

    def get_reward(self):
        """Compute the shaped reward; may reset the plant as a side effect."""
        # Reset when the subclass reports an out-of-bounds state; the flag
        # also terminates the episode via is_done().
        if self.should_reset():
            self.was_reset = True
            self.reset()
        # Penalize control effort: produced acceleration through the gain.
        produced_acc = abs(self.output * self.faktor)
        # NOTE(review): reward grows with |error| here -- presumably
        # range_positive_reward / max_positive_reward are chosen so the net
        # effect is a penalty, or the sign is handled elsewhere; confirm.
        reward = abs(self.error) + self.range_positive_reward
        if reward > 0:
            reward *= self.max_positive_reward / self.range_positive_reward
        reward -= ((produced_acc / self.bad_produced_acc) ** 2) * self.max_positive_reward
        if self.was_reset:
            # Large penalty scaled by how early in the episode the reset hit.
            reward -= (self.bad_error + self.bad_produced_acc) * self.time_available * 100
        return reward

    def render(self, mode='human'):
        """Plot the measurement trace; the window is created lazily."""
        if self.graph == 0:
            self.graph = GraphRepr(self.window_width, self.window_height)
        self.graph.add_point(self.total_time - self.time_available, self.measurement)
        self.graph.target = self.target
        self.graph.re_draw()

    def reset(self):
        """Re-randomize targets/disturbance and return the first observation."""
        self.time_available = self.total_time  # in s
        self.last_small_target_change = self.time_available
        self.last_big_target_change = self.time_available
        self.last_env_force_change = self.time_available
        self.target = np.random.uniform(self.min_target, self.max_target)
        self.faktor = np.random.uniform(self.min_faktor, self.max_faktor)
        self.env_force = np.random.uniform(self.min_env_force, self.max_env_force)
        self.init_physical_values()
        self.init_delay_list()
        # Drop the render window so the next render() starts fresh.
        if self.graph != 0:
            self.graph = 0
        return [self.give_error(), self.give_measurement()]
| [
"graph_repr.GraphRepr",
"collections.deque",
"gym.spaces.Box",
"numpy.random.uniform"
] | [((1189, 1240), 'numpy.random.uniform', 'np.random.uniform', (['self.min_target', 'self.max_target'], {}), '(self.min_target, self.max_target)\n', (1206, 1240), True, 'import numpy as np\n'), ((1263, 1314), 'numpy.random.uniform', 'np.random.uniform', (['self.min_faktor', 'self.max_faktor'], {}), '(self.min_faktor, self.max_faktor)\n', (1280, 1314), True, 'import numpy as np\n'), ((1340, 1397), 'numpy.random.uniform', 'np.random.uniform', (['self.min_env_force', 'self.max_env_force'], {}), '(self.min_env_force, self.max_env_force)\n', (1357, 1397), True, 'import numpy as np\n'), ((1522, 1597), 'gym.spaces.Box', 'gym.spaces.Box', (['(-conf.action_space_high)', 'conf.action_space_high'], {'shape': '(3,)'}), '(-conf.action_space_high, conf.action_space_high, shape=(3,))\n', (1536, 1597), False, 'import gym\n'), ((1983, 2020), 'collections.deque', 'deque', ([], {'maxlen': 'length_needed_for_delay'}), '(maxlen=length_needed_for_delay)\n', (1988, 2020), False, 'from collections import deque\n'), ((3863, 3895), 'numpy.random.uniform', 'np.random.uniform', (['(-range)', 'range'], {}), '(-range, range)\n', (3880, 3895), True, 'import numpy as np\n'), ((5388, 5439), 'numpy.random.uniform', 'np.random.uniform', (['self.min_target', 'self.max_target'], {}), '(self.min_target, self.max_target)\n', (5405, 5439), True, 'import numpy as np\n'), ((5462, 5513), 'numpy.random.uniform', 'np.random.uniform', (['self.min_faktor', 'self.max_faktor'], {}), '(self.min_faktor, self.max_faktor)\n', (5479, 5513), True, 'import numpy as np\n'), ((5539, 5596), 'numpy.random.uniform', 'np.random.uniform', (['self.min_env_force', 'self.max_env_force'], {}), '(self.min_env_force, self.max_env_force)\n', (5556, 5596), True, 'import numpy as np\n'), ((4907, 4955), 'graph_repr.GraphRepr', 'GraphRepr', (['self.window_width', 'self.window_height'], {}), '(self.window_width, self.window_height)\n', (4916, 4955), False, 'from graph_repr import GraphRepr\n')] |
import logging
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from pathlib import Path
import numpy as np
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
from tqdm import tqdm
from PIL import Image
from tao.utils import vis
_GREEN = (18, 127, 15)
_GRAY = (218, 227, 218)
_BLACK = (0, 0, 0)
# Active-track box/mask color: the (0.000, 0.447, 0.741) blue scaled to 0-255.
COLOR_BOX = COLOR_MASK = [255*x for x in (0.000, 0.447, 0.741)]
COLOR_TEXT = _GRAY
COLOR_TEXT_INACTIVE = _BLACK
COLOR_MASK_INACTIVE = COLOR_BOX_INACTIVE = _GRAY
# Line widths (pixels) for active vs. inactive boxes and mask borders.
WIDTH_BOX = 10
WIDTH_BOX_INACTIVE = 1
WIDTH_MASK = 2
BORDER_ALPHA_MASK = 0.9
WIDTH_MASK_INACTIVE = 1
class Tracker(ABC):
    """Abstract single-object tracker.

    Subclasses implement ``init`` (first-frame initialization) and ``update``
    (per-frame prediction). This base class provides frame iteration with
    timing (``track_yield``), batch tracking with optional video output
    (``track``), and visualization helpers.
    """

    @property
    def stateless(self):
        # Subclasses that carry no cross-frame state may override to True.
        return False

    @abstractmethod
    def init(self, image, box):
        """Initialize the tracker on the first frame.

        Args:
            image (np.array): Shape (height, width, num_channels). RGB image.
            box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
                top-left.
        """
        pass

    @abstractmethod
    def update(self, image):
        """Track the object in a new frame.

        Args:
            image (np.array): Shape (height, width, num_channels). RGB image.

        Returns:
            box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
                top-left.
            score (float)
        """
        pass

    def track_yield(self,
                    img_files,
                    box,
                    yield_image=False,
                    **unused_extra_args):
        """
        Args:
            img_files (list of str/Path): Ordered list of image paths
            box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
                top-left.
            yield_image (bool): Whether to yield the original image. Useful
                if the caller wants to operate on images without having to
                re-read them from disk.

        Yields:
            box (np.array): Shape (5, ), containing (x0, y0, x1, y1, score).
                0-indexed coordinates from top-left.
            tracker_time (float): Time elapsed in tracker.
            extra_output (dict): Optional extra outputs from the tracker.
            image (optional, np.array): Image loaded from img_files; see
                yield_image.
        """
        for f, img_file in enumerate(img_files):
            image = Image.open(img_file)
            if not image.mode == 'RGB':
                image = image.convert('RGB')
            image = np.array(image)
            start_time = time.time()
            if f == 0:
                # First frame: initialize on the given box. Score is +inf to
                # mark it as the supplied ground truth, not a prediction.
                self.init(image, box)
                elapsed_time = time.time() - start_time
                box = np.array([box[0], box[1], box[2], box[3], float('inf')])
                extra_output = {}
            else:
                output = self.update(image)
                assert len(output) in (2, 3)
                box, score = output[:2]
                extra_output = output[2] if len(output) == 3 else {}
                elapsed_time = time.time() - start_time
                box = np.array([box[0], box[1], box[2], box[3], score])
            if yield_image:
                yield box, elapsed_time, extra_output, image
            else:
                yield box, elapsed_time, extra_output

    @contextmanager
    def videowriter(self,
                    output_video,
                    width,
                    height,
                    fps=30,
                    ffmpeg_params=None):
        """Context manager yielding an FFMPEG writer for ``output_video``.

        The default ffmpeg params force even dimensions and the yuv420p pixel
        format so the output plays in common players.
        """
        if isinstance(output_video, Path):
            output_video = str(output_video)
        if ffmpeg_params is None:
            ffmpeg_params = [
                '-vf', "scale=trunc(iw/2)*2:trunc(ih/2)*2", '-pix_fmt',
                'yuv420p'
            ]
        with FFMPEG_VideoWriter(
                output_video,
                size=(width, height),
                fps=fps,
                ffmpeg_params=ffmpeg_params) as writer:
            yield writer

    def vis_single_prediction(self,
                              image,
                              box,
                              mask=None,
                              label=None,
                              mask_border_width=WIDTH_MASK,
                              mask_border_alpha=BORDER_ALPHA_MASK,
                              box_color=COLOR_BOX,
                              text_color=COLOR_TEXT,
                              mask_color=COLOR_MASK):
        """Draw a single prediction on ``image``.

        When no mask is given, a box plus text label is drawn; otherwise the
        mask is overlaid instead.

        Args:
            image (np.array)
            box (list-like): x0, y0, x1, y1, score
            mask (np.array): Shape (height, width)
        """
        if mask is None:
            image = vis.vis_bbox(
                image, (box[0], box[1], box[2] - box[0], box[3] - box[1]),
                fill_color=box_color)
            if label is None:
                text = f'Object: {box[4]:.02f}'
            else:
                # text = f'{label}: {box[4]:.02f}'
                text = f'{label}'
            image = vis.vis_class(image, (box[0], box[1] - 2),
                                  text,
                                  font_scale=0.75,
                                  text_color=text_color)
        # if box[4] < 0.8:  # Draw gray masks when below threshold.
        #     mask_color = [100, 100, 100]
        if mask is not None:
            image = vis.vis_mask(
                image,
                mask,
                mask_color,
                border_thick=mask_border_width,
                border_alpha=mask_border_alpha)
        return image

    def vis_image(self,
                  image,
                  box,
                  mask=None,
                  label=None,
                  other_boxes=None,
                  other_masks=None,
                  other_labels=None,
                  vis_threshold=0.1):
        """
        Args:
            image (np.array)
            box (list-like): x0, y0, x1, y1, score
            mask (np.array): Shape (height, width)
            other_boxes (list[list-like]): Contains alternative boxes that
                were not selected.
            other_masks (list[list-like]): Contains masks for alternative
                boxes that were not selected.
        """
        # FIX: mutable default arguments ([] literals) replaced with None
        # sentinels; normalize here so the documented contract holds.
        if other_boxes is None:
            other_boxes = []
        if other_masks is None:
            other_masks = []
        if other_labels is None:
            other_labels = []
        # Base implementation visualizes only the selected prediction;
        # subclasses may additionally draw the alternatives.
        return self.vis_single_prediction(image, box, mask, label=label)

    def track(self,
              img_files,
              box,
              show_progress=False,
              output_video=None,
              output_video_fps=30,
              visualize_subsample=1,
              visualize_threshold=0.1,
              return_masks=False,
              **tracker_args):
        """
        Like self.track_yield, but collect all tracking results in arrays.

        Args:
            img_files (list of str/Path): Ordered list of image paths
            box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
                top-left.
            output_video (str/Path/None): If set, write a visualization video.
            return_masks (bool): If false, don't return masks. This is helpful
                for OxUvA, where collecting all the masks may use too much
                memory.

        Returns:
            boxes (np.array): Shape (num_frames, 5), contains
                (x0, y0, x1, y1, score) for each frame. 0-indexed coordinates
                from top-left.
            masks (list or None): Per-frame masks if return_masks, else None.
            times (np.array): Shape (num_frames,), contains timings for each
                frame.
        """
        frame_num = len(img_files)
        boxes = np.zeros((frame_num, 5))
        if return_masks:
            masks = [None] * frame_num
        times = np.zeros(frame_num)
        pbar = partial(tqdm, total=len(img_files), disable=not show_progress)
        if output_video is None:
            for f, (box, elapsed_time, extra) in enumerate(
                    pbar(self.track_yield(img_files, box, **tracker_args))):
                boxes[f] = box
                times[f] = elapsed_time
                if return_masks:
                    masks[f] = extra.get('mask', None)
        else:
            output_video = Path(output_video)
            output_video.parent.mkdir(exist_ok=True, parents=True)
            # Some videos don't play in Firefox and QuickTime if '-pix_fmt
            # yuv420p' is not specified, and '-pix_fmt yuv420p' requires that
            # the dimensions be even, so we need the '-vf scale=...' filter.
            width, height = Image.open(img_files[0]).size
            with self.videowriter(
                    output_video, width=width, height=height,
                    fps=output_video_fps) as writer:
                track_outputs = self.track_yield(
                    img_files, box, yield_image=True, **tracker_args)
                for f, (box, elapsed_time, extra, image) in enumerate(
                        pbar(track_outputs)):
                    mask = extra.get('mask', None)
                    if mask is not None and mask.shape != image.shape[:2]:
                        # FIX: logging.warn is deprecated; use logging.warning.
                        logging.warning(
                            f'Resizing mask (shape {mask.shape}) to match '
                            f'image (shape {image.shape[:2]})')
                        new_h, new_w = image.shape[:2]
                        mask = np.asarray(
                            Image.fromarray(mask).resize(
                                (new_w, new_h), resample=Image.NEAREST))
                    other_boxes = extra.get('other_boxes', [])
                    other_masks = extra.get('other_masks', [])
                    label = extra.get('label', None)
                    other_labels = extra.get('other_labels', [])
                    if (f % visualize_subsample) == 0:
                        writer.write_frame(
                            self.vis_image(image,
                                           box,
                                           mask,
                                           label=label,
                                           other_boxes=other_boxes,
                                           other_masks=other_masks,
                                           other_labels=other_labels,
                                           vis_threshold=visualize_threshold))
                    boxes[f] = box
                    times[f] = elapsed_time
                    if return_masks:
                        masks[f] = mask
        if return_masks:
            return boxes, masks, times
        else:
            return boxes, None, times
| [
"moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter",
"PIL.Image.open",
"tao.utils.vis.vis_class",
"logging.warn",
"PIL.Image.fromarray",
"pathlib.Path",
"tao.utils.vis.vis_bbox",
"numpy.array",
"numpy.zeros",
"tao.utils.vis.vis_mask",
"time.time"
] | [((7319, 7343), 'numpy.zeros', 'np.zeros', (['(frame_num, 5)'], {}), '((frame_num, 5))\n', (7327, 7343), True, 'import numpy as np\n'), ((7424, 7443), 'numpy.zeros', 'np.zeros', (['frame_num'], {}), '(frame_num)\n', (7432, 7443), True, 'import numpy as np\n'), ((2273, 2293), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (2283, 2293), False, 'from PIL import Image\n'), ((2399, 2414), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2407, 2414), True, 'import numpy as np\n'), ((2441, 2452), 'time.time', 'time.time', ([], {}), '()\n', (2450, 2452), False, 'import time\n'), ((3670, 3766), 'moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter', 'FFMPEG_VideoWriter', (['output_video'], {'size': '(width, height)', 'fps': 'fps', 'ffmpeg_params': 'ffmpeg_params'}), '(output_video, size=(width, height), fps=fps,\n ffmpeg_params=ffmpeg_params)\n', (3688, 3766), False, 'from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter\n'), ((4555, 4652), 'tao.utils.vis.vis_bbox', 'vis.vis_bbox', (['image', '(box[0], box[1], box[2] - box[0], box[3] - box[1])'], {'fill_color': 'box_color'}), '(image, (box[0], box[1], box[2] - box[0], box[3] - box[1]),\n fill_color=box_color)\n', (4567, 4652), False, 'from tao.utils import vis\n'), ((4883, 4975), 'tao.utils.vis.vis_class', 'vis.vis_class', (['image', '(box[0], box[1] - 2)', 'text'], {'font_scale': '(0.75)', 'text_color': 'text_color'}), '(image, (box[0], box[1] - 2), text, font_scale=0.75,\n text_color=text_color)\n', (4896, 4975), False, 'from tao.utils import vis\n'), ((5234, 5339), 'tao.utils.vis.vis_mask', 'vis.vis_mask', (['image', 'mask', 'mask_color'], {'border_thick': 'mask_border_width', 'border_alpha': 'mask_border_alpha'}), '(image, mask, mask_color, border_thick=mask_border_width,\n border_alpha=mask_border_alpha)\n', (5246, 5339), False, 'from tao.utils import vis\n'), ((7893, 7911), 'pathlib.Path', 'Path', (['output_video'], {}), '(output_video)\n', (7897, 7911), False, 'from pathlib 
import Path\n'), ((2977, 3026), 'numpy.array', 'np.array', (['[box[0], box[1], box[2], box[3], score]'], {}), '([box[0], box[1], box[2], box[3], score])\n', (2985, 3026), True, 'import numpy as np\n'), ((8237, 8261), 'PIL.Image.open', 'Image.open', (['img_files[0]'], {}), '(img_files[0])\n', (8247, 8261), False, 'from PIL import Image\n'), ((2545, 2556), 'time.time', 'time.time', ([], {}), '()\n', (2554, 2556), False, 'import time\n'), ((2930, 2941), 'time.time', 'time.time', ([], {}), '()\n', (2939, 2941), False, 'import time\n'), ((8804, 8906), 'logging.warn', 'logging.warn', (['f"""Resizing mask (shape {mask.shape}) to match image (shape {image.shape[:2]})"""'], {}), "(\n f'Resizing mask (shape {mask.shape}) to match image (shape {image.shape[:2]})'\n )\n", (8816, 8906), False, 'import logging\n'), ((9084, 9105), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (9099, 9105), False, 'from PIL import Image\n')] |
import sys
sys.path.append('../../')
import constants as cnst
import pandas
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np

# Load the raw survey results and keep only image URL + chosen Likert label.
df = pandas.read_csv(cnst.five_pt_likert_scale_result_csv_path)
results = df[['Input.image_url', 'Answer.category.label']].to_numpy()
print(df)
# id_hist_dict maps style id -> {likert label -> vote count}.
id_hist_dict = {}
categories = ['Strongly disagree', 'Disagree', 'Neither agree nor disagree', 'Agree', 'Strongly agree']
categories_dict = {}
for i, cat in enumerate(categories):
    categories_dict[cat] = i
fontsize = 15
for i in range(0, len(results)):
    # Style id is the numeric prefix of the image file name in the URL.
    id = int(results[i, 0].split('/')[-1].split('_')[0])
    # import ipdb; ipdb.set_trace()
    if id not in id_hist_dict:
        id_hist_dict[id] = {}
    if results[i, -1] in id_hist_dict[id]:
        id_hist_dict[id][results[i, -1]] += 1
    else:
        id_hist_dict[id][results[i, -1]] = 1
ids = []
scores = []
for key in id_hist_dict.keys():
    # import ipdb; ipdb.set_trace()
    current_dict = id_hist_dict[key]
    user_ratings = []
    total_imaged_per_id = 0
    # Expand the per-category counts into a flat list of 1-5 ratings.
    for i, key_cat in enumerate(categories):
        if key_cat in current_dict:
            user_ratings.extend(current_dict[key_cat] * [(i + 1), ])
            total_imaged_per_id += current_dict[key_cat]
    # Per-style score = modal (most frequent) rating.
    user_ratings = np.bincount(user_ratings).argmax()
    # print(total_imaged_per_id)
    print(f'{key}: {user_ratings} from {total_imaged_per_id} samples')
    ids.append(key)
    scores.append(user_ratings)
print(f'Total_mean = {np.mean(scores)}')
# Bar chart of the modal score per style id.
plt.bar(ids, scores, align='center', alpha=0.5)
plt.xticks(ids, ids)
plt.ylim(bottom=1, top=5)
plt.ylabel('User Scores', fontsize=fontsize)
plt.xlabel('Style ID', fontsize=fontsize)
# plt.title('Programming language usage')
plt.savefig('bar_graph_style_disentanglement.pdf', format='pdf', bbox_inches='tight')
plt.figure()
# Draw histogram of all
ratings = []
for str_rating in results[:, -1]:
    # import ipdb; ipdb.set_trace()
    ratings.append(categories_dict[str_rating] + 1)
ratings = np.array(ratings)
# import ipdb; ipdb.set_trace()
# Bins are centered on the integer ratings.
plt.hist(ratings, bins=np.arange(ratings.min(), ratings.max()+2) - 0.5)
plt.savefig('rating_hist.png')
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"numpy.bincount",
"matplotlib.pyplot.ylim",
"sys.path.append",
"matplotlib.pyp... | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((109, 125), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (123, 125), True, 'import matplotlib.pyplot as plt\n'), ((152, 210), 'pandas.read_csv', 'pandas.read_csv', (['cnst.five_pt_likert_scale_result_csv_path'], {}), '(cnst.five_pt_likert_scale_result_csv_path)\n', (167, 210), False, 'import pandas\n'), ((1489, 1536), 'matplotlib.pyplot.bar', 'plt.bar', (['ids', 'scores'], {'align': '"""center"""', 'alpha': '(0.5)'}), "(ids, scores, align='center', alpha=0.5)\n", (1496, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1557), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ids', 'ids'], {}), '(ids, ids)\n', (1547, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1583), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(1)', 'top': '(5)'}), '(bottom=1, top=5)\n', (1566, 1583), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1628), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""User Scores"""'], {'fontsize': 'fontsize'}), "('User Scores', fontsize=fontsize)\n", (1594, 1628), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1670), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Style ID"""'], {'fontsize': 'fontsize'}), "('Style ID', fontsize=fontsize)\n", (1639, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1803), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar_graph_style_disentanglement.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('bar_graph_style_disentanglement.pdf', format='pdf',\n bbox_inches='tight')\n", (1725, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1811, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1984, 2001), 'numpy.array', 'np.array', (['ratings'], {}), '(ratings)\n', (1992, 2001), True, 'import numpy as np\n'), ((2106, 2136), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rating_hist.png"""'], {}), "('rating_hist.png')\n", (2117, 2136), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1280), 'numpy.bincount', 'np.bincount', (['user_ratings'], {}), '(user_ratings)\n', (1266, 1280), True, 'import numpy as np\n'), ((1469, 1484), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1476, 1484), True, 'import numpy as np\n')] |
"""Binomial probability distribution."""
from functools import wraps
import numpy
from scipy import special
from ..baseclass import Dist
class Binomial(Dist):
    """
    Binomial probability distribution.

    Point density:
        comb(N, x) p^x (1-p)^{N-x}      x in {0, 1, ..., N}

    Examples:
        >>> distribution = chaospy.Binomial(5, 0.5)
        >>> print(distribution)
        Binomial(prob=0.5, size=5)
        >>> q = numpy.linspace(0, 1, 8)
        >>> print(numpy.around(distribution.inv(q), 4))
        [0 1 2 2 3 3 4 5]
        >>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
        [0.0312 0.1875 0.5 0.5 0.8125 0.8125 0.9688 1. ]
        >>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
        [0.0312 0.1562 0.3125 0.3125 0.3125 0.3125 0.1562 0.0312]
        >>> print(numpy.around(distribution.sample(10), 4))
        [3 1 4 2 4 2 1 2 2 4]
        >>> print(numpy.around(distribution.mom([1, 2, 3]), 4))
        [ 2.5  7.5 25. ]
        >>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
        [[2.5  2.5  2.5 ]
         [1.25 2.   2.25]]
    """
    # Samples from this distribution are whole numbers of successes.
    interpret_as_integer = True

    def __init__(self, size, prob):
        """Initialize with ``size`` trials and success probability ``prob``."""
        Dist.__init__(self, size=size, prob=prob)

    def _cdf(self, x_data, size, prob):
        # Cumulative distribution via the regularized incomplete beta form.
        trials = numpy.floor(size)
        return special.bdtr(numpy.floor(x_data), trials, prob)

    def _ppf(self, q_data, size, prob):
        # Invert the CDF; bdtrik returns a real value, so round up to the
        # smallest integer count whose CDF reaches the quantile.
        raw = special.bdtrik(q_data, numpy.floor(size), prob)
        return numpy.ceil(raw)

    def _pdf(self, x_data, size, prob):
        # comb(N, x) p^x (1-p)^{N-x}
        weight = special.comb(size, x_data)
        return weight*prob**x_data*(1-prob)**(size-x_data)

    def _bnd(self, x_data, size, prob):
        # Lower/upper bounds of the support.
        return 0, numpy.floor(size)+1

    def _mom(self, k_data, size, prob):
        # Raw moments: sum x^k * P(X = x) over the full support.
        support = numpy.arange(int(size)+1, dtype=int)
        densities = self._pdf(support, size=numpy.floor(size), prob=prob)
        return numpy.sum(support**k_data*densities)

    def _ttr(self, k_data, size, prob):
        """Krawtchouk rule"""
        from chaospy.quadrature import discretized_stieltjes
        # Three-terms recurrence coefficients from a discretized Stieltjes
        # procedure over the integer support weighted by the PMF.
        nodes = numpy.arange(0, numpy.floor(size)+1)
        pmf = self._pdf(nodes, size, prob)
        (alpha, beta), _, _ = discretized_stieltjes(k_data, [nodes], pmf)
        return alpha[0, -1], beta[0, -1]
| [
"chaospy.quadrature.discretized_stieltjes",
"scipy.special.comb",
"numpy.floor"
] | [((2150, 2201), 'chaospy.quadrature.discretized_stieltjes', 'discretized_stieltjes', (['k_data', '[abscissas]', 'weights'], {}), '(k_data, [abscissas], weights)\n', (2171, 2201), False, 'from chaospy.quadrature import discretized_stieltjes\n'), ((1317, 1336), 'numpy.floor', 'numpy.floor', (['x_data'], {}), '(x_data)\n', (1328, 1336), False, 'import numpy\n'), ((1338, 1355), 'numpy.floor', 'numpy.floor', (['size'], {}), '(size)\n', (1349, 1355), False, 'import numpy\n'), ((1453, 1470), 'numpy.floor', 'numpy.floor', (['size'], {}), '(size)\n', (1464, 1470), False, 'import numpy\n'), ((1535, 1561), 'scipy.special.comb', 'special.comb', (['size', 'x_data'], {}), '(size, x_data)\n', (1547, 1561), False, 'from scipy import special\n'), ((1658, 1675), 'numpy.floor', 'numpy.floor', (['size'], {}), '(size)\n', (1669, 1675), False, 'import numpy\n'), ((2048, 2065), 'numpy.floor', 'numpy.floor', (['size'], {}), '(size)\n', (2059, 2065), False, 'import numpy\n'), ((1849, 1866), 'numpy.floor', 'numpy.floor', (['size'], {}), '(size)\n', (1860, 1866), False, 'import numpy\n')] |
# from tff import NUM_EPOCHS
from matplotlib import pyplot as plt
import json, ast
import math
import numpy as np
import pickle
import argparse
import pandas as pd
import seaborn as sns
import collections
import tensorflow_federated as tff
from tensorflow.python.framework.constant_op import constant
# Client-count settings used by the plots below (indices are referenced as
# NUM_CLIENTS[0], NUM_CLIENTS[1], NUM_CLIENTS[2:], NUM_CLIENTS[-2], ...).
NUM_CLIENTS = [5,34,338,1692]
# Number of federated training rounds plotted on the x-axis.
NUM_ROUNDS = 150
# Local epochs per round; used only to build the metrics file names.
NUM_EPOCHS = 5
# Valid values of the CLI `mode` argument (see parse_args / __main__ dispatch).
MODES = ['reduction_functions', 'femnist_distribution', 'uniform_vs_num_clients_weighting', 'accuracy_10percent_vs_50percent_clients_comparison', 'accuracy_5_34_338_comparison', 'reduction_functions_comparison','updates_comparison']
# Client-reduction schedules, passed to reduction_functions_comparison().
modes = ["constant","exponential","linear","sigmoid","reciprocal"]
# Round numbers 1..NUM_ROUNDS used as x-values for the accuracy curves.
num_rounds = np.arange(1,NUM_ROUNDS+1)
# NOTE(review): appears unused in this chunk — confirm before removing.
num_clients = str(NUM_CLIENTS[0])
# def movingaverage(interval, window_size):
# # window = np.ones(int(window_size))/float(window_size)
# # return np.convolve(interval, window, 'same')
# cumsum_vec = np.cumsum(np.insert(interval, 0, 0))
# ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size
# return ma_vec
##############################################################################################################################################################
################################# Plot the graph of functions for different modes of reducing sampled clients ################################################
##############################################################################################################################################################
def reduction_functions():
    """Plot the five client-count reduction schedules over 150 rounds.

    Draws, on a single axis, the number of sampled clients per round for the
    constant, exponential, linear, sigmoid and reciprocal schedules.
    """
    rounds = np.arange(0, 150, 0.1)
    # All decaying schedules bottom out at 34 clients from round 60 onward.
    tail = [34] * (len(rounds) - 600)

    # Constant: 338 clients in every round.
    plt.plot(rounds, [338] * len(rounds), label="constant")
    # Exponential decay from 338 down toward 34 at round 60.
    y_exp = [338 - np.exp((r - 1) / 10.3) for r in rounds if r < 60] + tail
    plt.plot(rounds, y_exp, label="exponential reduction")
    # Linear decay with slope -5.065 until round 60.
    y_lin = [338 - 5.065 * r for r in rounds if r < 60] + tail
    plt.plot(rounds, y_lin, label="linear reduction")
    # Sigmoid decay centered at round 20.
    y_sig = 338 - 304 / (1 + np.exp(-0.26 * (rounds - 20)))
    plt.plot(rounds, y_sig, label="sigmoid reduction")
    # Reciprocal decay approaching 34 (value at round 0 is infinite, as in
    # the original expression 50/x + 34).
    y_rec = 50 / rounds + 34
    plt.plot(rounds, y_rec, label="reciprocal reduction")

    plt.xlim(0, 150)
    plt.ylim(0, 400)
    plt.xlabel("Round")
    plt.ylabel("Number of clients / Round")
    plt.legend()
    plt.show()
    return
##############################################################################################################################################################
############################################## Plot the graph of functions for FEMNIST distribution ##########################################################
##############################################################################################################################################################
def femnist_distribution():
    """Plot how many FEMNIST clients hold each amount of training data.

    Loads the federated EMNIST dataset, counts the number of examples per
    client, and shows a bar chart of example-count vs. number of clients.
    """
    emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
    # Number of examples held by every single client.
    examples_per_client = [
        len(emnist_train.create_tf_dataset_for_client(cid))
        for cid in emnist_train.client_ids
    ]
    # Map each distinct example count to how many clients have exactly it.
    count_frequency = collections.Counter(examples_per_client)
    # Sort by example count and split keys/values for the bar chart.
    data_count_per_client, num_clients = zip(*sorted(count_frequency.items()))
    # -----------------Plot the bar plot of the distribution of clients data---------------------##
    plt.rcParams.update({'figure.figsize':(10,6), 'figure.dpi':100})
    fig, ax = plt.subplots()
    ax.bar(data_count_per_client, num_clients)
    ax.set_xlabel('Data amount per client(digits)')
    ax.set_ylabel('Frequency(Number of clients)')
    plt.show()
    return
##############################################################################################################################################################
#################################################### Plot for different weightings strategies ################################################################
##############################################################################################################################################################
def uniform_vs_num_clients_weighting():
    """Compare global test accuracy under NUM_EXAMPLES vs UNIFORM weighting.

    Plots both pickled accuracy curves (34-client runs) on the same axis,
    converting fractional accuracy to percent.
    """
    runs = [
        (f"metrics/num_examples_vs_uniform/{NUM_CLIENTS[1]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt",
         f"{NUM_CLIENTS[1]} clients, weighted by NUM_EXAMPLES"),
        (f"metrics/num_examples_vs_uniform/{NUM_CLIENTS[1]}_clients_uniform_weights_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt",
         f"{NUM_CLIENTS[1]} clients, weighted by UNIFORM"),
    ]
    for path, label in runs:
        with open(path, "rb") as fp:  # unpickling
            accuracies = pickle.load(fp)
        # Accuracy is stored as a fraction; plot as percent.
        plt.plot(num_rounds, [a * 100 for a in accuracies[:NUM_ROUNDS]], label=label)
    plt.xlabel('Round', size=12)
    plt.ylabel('Test accuracy (%)', size=12)
    plt.legend()
    plt.show()
##############################################################################################################################################################
########################################### Plot for the training accuracy of randomly selected 338&1692 clients #############################################
##############################################################################################################################################################
def accuracy_10percent_vs_50percent_clients_comparison():
    """Plot global test accuracy for the 338- and 1692-client runs.

    Reads each run's pickled accuracy list and plots it (as percent) against
    the round number.
    """
    # Iterate the client counts directly; the original used enumerate() but
    # never used the index.
    for n in NUM_CLIENTS[2:]:
        with open(f"metrics/{n}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
            global_accuracy = pickle.load(fp)
        # Accuracy is stored as a fraction; plot as percent.
        plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{n} random clients")
    plt.xlabel('Round',size=12)
    plt.ylabel('Test accuracy (%)',size=12)
    plt.legend()
    plt.show()
##############################################################################################################################################################
########################################### Plot for the training accuracy of randomly selected 5&34&338 clients #############################################
##############################################################################################################################################################
def accuracy_5_34_338_comparison():
    """Plot global test accuracy for the 5-, 34- and 338-client runs.

    Reads each run's pickled accuracy list and plots it (as percent) against
    the round number.
    """
    # Iterate the client counts directly instead of range(len(...)).
    for clients in NUM_CLIENTS[:-1]:
        with open(f"metrics/{clients}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
            global_accuracy = pickle.load(fp)
        # Accuracy is stored as a fraction; plot as percent.
        plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{clients} random clients")
    plt.xlabel('Round',size=12)
    plt.ylabel('Test accuracy (%)',size=12)
    plt.legend()
    plt.show()
#####################################################################################################################
################ Plot accuracy for various modes of varying num of randomly selected/sampled clients#################
#####################################################################################################################
def reduction_functions_comparison(mode):
    """Plot each reduction schedule's accuracy against the constant baseline.

    Args:
        mode: iterable of reduction-mode names; "constant" entries are skipped
            because the constant run is drawn once as the baseline.
    """
    # Draw the constant baseline exactly once. The original re-read and
    # re-plotted it inside the loop, producing one duplicate line (and legend
    # entry) per non-constant mode.
    with open(f"metrics/vary_num_clients_and_rounds/{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[-2]} clients_constant_accuracy_global.txt","rb") as fp: #unpickling
        baseline = pickle.load(fp)
    plt.plot(np.arange(len(baseline)), [x*100 for x in baseline[:NUM_ROUNDS]],
             label=f"{NUM_CLIENTS[-2]} clients, constant")
    # Use a distinct loop name: the original rebound the parameter `mode`
    # as the loop variable (shadowing) and never used the enumerate index.
    for reduction in mode:
        if reduction == "constant":
            continue
        with open(f"metrics/vary_num_clients_and_rounds/{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[1]} clients_{reduction}_accuracy_global.txt","rb") as fp: #unpickling
            global_accuracy = pickle.load(fp)
        plt.plot(np.arange(len(global_accuracy)), [x*100 for x in global_accuracy[:NUM_ROUNDS]],
                 label=f"{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[1]} clients, {reduction} reduction")
    plt.xlabel('Round',size=12)
    plt.ylabel('Test accuracy (%)',size=12)
    plt.legend()
    plt.show()
###############################################################################################################################
#### Plot the bar graph of model update percentage & training time of different modes & rounds of reducing sampled clients ####
###############################################################################################################################
def updates_comparison():
    """Draw two comparison charts across the reduction modes.

    1. A seaborn bar chart of total pushed model updates per mode, with the
       bar heights annotated.
    2. A matplotlib bar chart of total training time per mode, with the bar
       heights annotated.

    Both read their data from JSON files under
    ``metrics/vary_num_clients_and_rounds/``.
    """
    # ---------------- Total pushed model updates per reduction mode ----------------
    with open("metrics/vary_num_clients_and_rounds/pushed_model_updates.json", "r") as f:
        pushed_model_updates = json.load(f)
    # NOTE(review): the original also loaded modes_stopped_round.json here but
    # never used the result; that dead file read has been removed. Large
    # commented-out variants (average updates, update percentages, varied-client
    # training time) were deleted as dead code — recover them from VCS if needed.
    mode_names = [m for m, _ in pushed_model_updates.items()]
    updates = [u for _, u in pushed_model_updates.items()]
    df = pd.DataFrame({"modes": mode_names, "updates": updates},
                      columns=['modes', 'updates'])
    plots = sns.barplot(x="modes", y="updates", data=df)
    # Annotate each bar with its height (two decimal places), centered just
    # above the top of the bar.
    for bar in plots.patches:
        plots.annotate(format(bar.get_height(), '.2f'),
                       (bar.get_x() + bar.get_width() / 2, bar.get_height()),
                       ha='center', va='center',
                       size=12, xytext=(0, 5),
                       textcoords='offset points')
    plt.ylabel("Total Updates (updates/round)",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
    plt.xlabel("Reduction mode",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
    plt.show()
    # ---------------- Training time per reduction mode ----------------
    # Use a context manager instead of the original open()/close() pair so the
    # file is closed even if json.load raises.
    with open("metrics/vary_num_clients_and_rounds/modes_training_time.json", "r") as f:
        modes_training_time = json.load(f)
    # Renamed from `modes`/`label` to avoid shadowing the module-level `modes`
    # and the loop variable below.
    mode_names = [m for m, _ in modes_training_time.items()]
    training_times = [t for _, t in modes_training_time.items()]
    fig, ax = plt.subplots()
    ax.bar(mode_names, training_times, color=['green', 'red', 'purple', 'blue', 'navy'])
    ax.set_xlabel('Reduction mode',fontweight="bold")
    ax.set_ylabel('Training time(s)',fontweight="bold")
    bar_labels = ["{:.2f}".format(t) for t in training_times]
    for rect, bar_label in zip(ax.patches, bar_labels):
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2, height + 5, bar_label,
                ha='center', va='bottom')
    plt.show()
#******************************************************parsing the command line arguments********************************************************************#
def parse_args():
    """Parse the single positional CLI argument selecting the plot mode.

    Returns:
        A ``(namespace, mode)`` tuple: the full argparse namespace and the
        chosen mode string.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('mode', nargs=1, type=str,
                     help='Running mode. Must be one of the following modes: {}'.format(MODES))
    parsed = cli.parse_args()
    # nargs=1 yields a one-element list; unwrap it for the caller.
    return parsed, parsed.mode[0]
if __name__ == '__main__':
    args, mode = parse_args()  # get argument from the command line
    print(f'plot mode: {mode}')
    # Dispatch table instead of the original long if/elif chain: each CLI mode
    # maps to the zero-argument call that renders the corresponding plot.
    dispatch = {
        'reduction_functions': reduction_functions,
        'femnist_distribution': femnist_distribution,
        'uniform_vs_num_clients_weighting': uniform_vs_num_clients_weighting,
        'accuracy_10percent_vs_50percent_clients_comparison': accuracy_10percent_vs_50percent_clients_comparison,
        'accuracy_5_34_338_comparison': accuracy_5_34_338_comparison,
        # This one needs the module-level list of reduction schedules.
        'reduction_functions_comparison': lambda: reduction_functions_comparison(modes),
        'updates_comparison': updates_comparison,
    }
    if mode not in dispatch:
        raise Exception('Unrecognised mode: {}. Possible modes are: {}'.format(mode, MODES))
    dispatch[mode]()
#############################################################################################################################################################
############################################ Other useful ones but not included in the comand line arguments ################################################
#############################################################################################################################################################
# #-----------------Plot the line graphs of the global/server evaluation set accuracy evaluated on global model vs num_rounds for all num_clients --------------------#
# for n in range(len(NUM_CLIENTS[:-1])):
# with open(f"metrics/{NUM_CLIENTS[n]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs.json", 'r') as f:
# sample_clients = json.load(f)
# sample_clientnum_examples_vs_uniforms = sample_clients.replace(" ",",")
# sample_clients = ast.literal_eval(sample_clients)
# with open(f"metrics/{NUM_CLIENTS[n]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_local.txt","rb") as fp: #unpickling
# local_clients_accuracy = pickle.load(fp)
###############################################################################################################################################################
##################Plot the line graphs of the global/server evaluation set accuracy evaluated on global model vs num_rounds for varied num_clients ############
###############################################################################################################################################################
#----------------- --------------------#
# with open(f"metrics/{NUM_CLIENTS[1]} -> {NUM_CLIENTS[0]} clients_{-2}_steps_{NUM_ROUNDS}_rounds_{NUM_EPOCHS[0]}_epochs_accuracy_global.txt","rb") as fp: #unpickling
# global_accuracy = pickle.load(fp)
# # plot global accuracy & loss for all training rounds for varied clients
# plt.plot(num_rounds, global_accuracy[:NUM_ROUNDS], label=f"{NUM_CLIENTS[1]} -> {NUM_CLIENTS[0]} clients, steps={-2}")
# with open(f"metrics/{NUM_CLIENTS[1]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS[0]}_epochs_accuracy_global.txt","rb") as fp: #unpickling
# global_accuracy = pickle.load(fp)
# # plot global accuracy & loss for all training rounds for fixed clients
# plt.plot(num_rounds, global_accuracy[:NUM_ROUNDS], label=f"{NUM_CLIENTS[1]} clients, steps={0}")
# plt.xlabel('Rounds',size=15)
# plt.ylabel('Global validation accuracy',size=15)
# plt.legend()
# plt.title(f'Global validation accuracy - {NUM_CLIENTS[1]} -> {NUM_CLIENTS[0]} & {NUM_CLIENTS[1]} clients, {NUM_ROUNDS} rounds, {num_epochs} epochs',size=15)
# plt.show()
#-----------------Plot the line graphs of the local/clients' evaluation set accuracy evaluated on global model vs num_rounds for all clients --------------------#
# for n in range(len(NUM_CLIENTS[:-1])):
# plt.figure(figsize=(13, 8), dpi=100)
# for c,client in enumerate(sample_clients):
# plt.plot(num_rounds, local_clients_accuracy[c][:NUM_ROUNDS], label=f"client_{client}")
# plt.legend(prop={'size':10})
# plt.title(f'Local validation accuracy - {NUM_ROUNDS} rounds, {NUM_CLIENTS[n]} clients', size=25)
# plt.xlabel('rounds',size=20)
# plt.ylabel('accuracy',size=20)
# plt.show()
#-----------------Plot the histogram of the num_clients vs local/clients' evaluation set accuracy evaluated on global model for that round --------------------#
# plt.figure(figsize=(13, 8), dpi=100)
# plt.rcParams.update({'figure.figsize':(13,8), 'figure.dpi':100})
# Plot Histogram on num_client vs accuracy
# print(np.shape(np.array(local_clients_accuracy)[:,99]))
# plt.hist(np.array(local_clients_accuracy)[:,NUM_ROUNDS-1], bins=np.arange(0,1,0.01))
# # plt.gca().set(title=f'Frequency Histogram-Evaluation Accuracy @ {NUM_ROUNDS} rounds_{NUM_CLIENTS[1]} clients', ylabel='Frequency(Number of clients)', xlabel='Accuracy')
# plt.title(f'Clients distribution over Local validation accuracy @ {NUM_ROUNDS} rounds, {NUM_EPOCHS} epochs, {NUM_CLIENTS[0]} clients', size=10)
# plt.xlabel('Local validation accuracy', size=20)
# plt.ylabel('Frequency(Number of clients)', size=20)
# plt.show()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"collections.Counter",
"matplotlib.pyplot.rcParams.update",
"json.load",
"tensorflow_federated.simulation.datasets.emnist.load_data",
"numpy.exp... | [((681, 709), 'numpy.arange', 'np.arange', (['(1)', '(NUM_ROUNDS + 1)'], {}), '(1, NUM_ROUNDS + 1)\n', (690, 709), True, 'import numpy as np\n'), ((1637, 1659), 'numpy.arange', 'np.arange', (['(0)', '(150)', '(0.1)'], {}), '(0, 150, 0.1)\n', (1646, 1659), True, 'import numpy as np\n'), ((1688, 1729), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_constant'], {'label': '"""constant"""'}), "(x, y_constant, label='constant')\n", (1696, 1729), True, 'from matplotlib import pyplot as plt\n'), ((1906, 1963), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_exponential'], {'label': '"""exponential reduction"""'}), "(x, y_exponential, label='exponential reduction')\n", (1914, 1963), True, 'from matplotlib import pyplot as plt\n'), ((2119, 2166), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_linear'], {'label': '"""linear reduction"""'}), "(x, y_linear, label='linear reduction')\n", (2127, 2166), True, 'from matplotlib import pyplot as plt\n'), ((2287, 2336), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_sigmoid'], {'label': '"""sigmoid reduction"""'}), "(x, y_sigmoid, label='sigmoid reduction')\n", (2295, 2336), True, 'from matplotlib import pyplot as plt\n'), ((2437, 2492), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_reciprocal'], {'label': '"""reciprocal reduction"""'}), "(x, y_reciprocal, label='reciprocal reduction')\n", (2445, 2492), True, 'from matplotlib import pyplot as plt\n'), ((2495, 2511), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(150)'], {}), '(0, 150)\n', (2503, 2511), True, 'from matplotlib import pyplot as plt\n'), ((2513, 2529), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(400)'], {}), '(0, 400)\n', (2521, 2529), True, 'from matplotlib import pyplot as plt\n'), ((2531, 2550), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Round"""'], {}), "('Round')\n", (2541, 2550), True, 'from matplotlib import pyplot as plt\n'), ((2553, 2592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of clients / Round"""'], {}), 
"('Number of clients / Round')\n", (2563, 2592), True, 'from matplotlib import pyplot as plt\n'), ((2595, 2607), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2605, 2607), True, 'from matplotlib import pyplot as plt\n'), ((2647, 2657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2655, 2657), True, 'from matplotlib import pyplot as plt\n'), ((3208, 3250), 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {}), '()\n', (3248, 3250), True, 'import tensorflow_federated as tff\n'), ((3697, 3742), 'collections.Counter', 'collections.Counter', (['client_data_counted_list'], {}), '(client_data_counted_list)\n', (3716, 3742), False, 'import collections\n'), ((4406, 4473), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (10, 6), 'figure.dpi': 100}"], {}), "({'figure.figsize': (10, 6), 'figure.dpi': 100})\n", (4425, 4473), True, 'from matplotlib import pyplot as plt\n'), ((4483, 4497), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4495, 4497), True, 'from matplotlib import pyplot as plt\n'), ((4688, 4698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4696, 4698), True, 'from matplotlib import pyplot as plt\n'), ((5483, 5619), 'matplotlib.pyplot.plot', 'plt.plot', (['num_rounds', '[(x * 100) for x in global_accuracy[:NUM_ROUNDS]]'], {'label': 'f"""{NUM_CLIENTS[1]} clients, weighted by NUM_EXAMPLES"""'}), "(num_rounds, [(x * 100) for x in global_accuracy[:NUM_ROUNDS]],\n label=f'{NUM_CLIENTS[1]} clients, weighted by NUM_EXAMPLES')\n", (5491, 5619), True, 'from matplotlib import pyplot as plt\n'), ((5926, 6057), 'matplotlib.pyplot.plot', 'plt.plot', (['num_rounds', '[(x * 100) for x in global_accuracy[:NUM_ROUNDS]]'], {'label': 'f"""{NUM_CLIENTS[1]} clients, weighted by UNIFORM"""'}), "(num_rounds, [(x * 100) for x in global_accuracy[:NUM_ROUNDS]],\n label=f'{NUM_CLIENTS[1]} clients, weighted by UNIFORM')\n", (5934, 6057), True, 
'from matplotlib import pyplot as plt\n'), ((6053, 6081), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Round"""'], {'size': '(12)'}), "('Round', size=12)\n", (6063, 6081), True, 'from matplotlib import pyplot as plt\n'), ((6083, 6123), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test accuracy (%)"""'], {'size': '(12)'}), "('Test accuracy (%)', size=12)\n", (6093, 6123), True, 'from matplotlib import pyplot as plt\n'), ((6125, 6137), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6135, 6137), True, 'from matplotlib import pyplot as plt\n'), ((6140, 6150), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6148, 6150), True, 'from matplotlib import pyplot as plt\n'), ((6997, 7025), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Round"""'], {'size': '(12)'}), "('Round', size=12)\n", (7007, 7025), True, 'from matplotlib import pyplot as plt\n'), ((7027, 7067), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test accuracy (%)"""'], {'size': '(12)'}), "('Test accuracy (%)', size=12)\n", (7037, 7067), True, 'from matplotlib import pyplot as plt\n'), ((7069, 7081), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7079, 7081), True, 'from matplotlib import pyplot as plt\n'), ((7084, 7094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7092, 7094), True, 'from matplotlib import pyplot as plt\n'), ((7941, 7969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Round"""'], {'size': '(12)'}), "('Round', size=12)\n", (7951, 7969), True, 'from matplotlib import pyplot as plt\n'), ((7971, 8011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test accuracy (%)"""'], {'size': '(12)'}), "('Test accuracy (%)', size=12)\n", (7981, 8011), True, 'from matplotlib import pyplot as plt\n'), ((8013, 8025), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8023, 8025), True, 'from matplotlib import pyplot as plt\n'), ((8028, 8038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8036, 8038), True, 'from matplotlib import 
pyplot as plt\n'), ((10645, 10693), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['modes', 'updates']"}), "(data, columns=['modes', 'updates'])\n", (10657, 10693), True, 'import pandas as pd\n'), ((10743, 10787), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""modes"""', 'y': '"""updates"""', 'data': 'df'}), "(x='modes', y='updates', data=df)\n", (10754, 10787), True, 'import seaborn as sns\n'), ((11445, 11541), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Updates (updates/round)"""'], {'fontdict': "{'fontsize': 11, 'fontweight': 'bold'}"}), "('Total Updates (updates/round)', fontdict={'fontsize': 11,\n 'fontweight': 'bold'})\n", (11455, 11541), True, 'from matplotlib import pyplot as plt\n'), ((11540, 11617), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Reduction mode"""'], {'fontdict': "{'fontsize': 11, 'fontweight': 'bold'}"}), "('Reduction mode', fontdict={'fontsize': 11, 'fontweight': 'bold'})\n", (11550, 11617), True, 'from matplotlib import pyplot as plt\n'), ((11649, 11659), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11657, 11659), True, 'from matplotlib import pyplot as plt\n'), ((17313, 17325), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17322, 17325), False, 'import json, ast\n'), ((17568, 17582), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17580, 17582), True, 'from matplotlib import pyplot as plt\n'), ((18073, 18083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18081, 18083), True, 'from matplotlib import pyplot as plt\n'), ((18282, 18307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18305, 18307), False, 'import argparse\n'), ((5409, 5424), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (5420, 5424), False, 'import pickle\n'), ((5908, 5923), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (5919, 5923), False, 'import pickle\n'), ((6897, 7001), 'matplotlib.pyplot.plot', 'plt.plot', (['num_rounds', '[(x * 100) for x in 
global_accuracy[:NUM_ROUNDS]]'], {'label': 'f"""{n} random clients"""'}), "(num_rounds, [(x * 100) for x in global_accuracy[:NUM_ROUNDS]],\n label=f'{n} random clients')\n", (6905, 7001), True, 'from matplotlib import pyplot as plt\n'), ((7829, 7946), 'matplotlib.pyplot.plot', 'plt.plot', (['num_rounds', '[(x * 100) for x in global_accuracy[:NUM_ROUNDS]]'], {'label': 'f"""{NUM_CLIENTS[n]} random clients"""'}), "(num_rounds, [(x * 100) for x in global_accuracy[:NUM_ROUNDS]],\n label=f'{NUM_CLIENTS[n]} random clients')\n", (7837, 7946), True, 'from matplotlib import pyplot as plt\n'), ((10236, 10248), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10245, 10248), False, 'import json, ast\n'), ((10490, 10502), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10499, 10502), False, 'import json, ast\n'), ((6877, 6892), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (6888, 6892), False, 'import pickle\n'), ((7809, 7824), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (7820, 7824), False, 'import pickle\n'), ((9243, 9271), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Round"""'], {'size': '(12)'}), "('Round', size=12)\n", (9253, 9271), True, 'from matplotlib import pyplot as plt\n'), ((9277, 9317), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test accuracy (%)"""'], {'size': '(12)'}), "('Test accuracy (%)', size=12)\n", (9287, 9317), True, 'from matplotlib import pyplot as plt\n'), ((9323, 9335), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9333, 9335), True, 'from matplotlib import pyplot as plt\n'), ((9342, 9352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9350, 9352), True, 'from matplotlib import pyplot as plt\n'), ((2259, 2283), 'numpy.exp', 'np.exp', (['(-0.26 * (x - 20))'], {}), '(-0.26 * (x - 20))\n', (2265, 2283), True, 'import numpy as np\n'), ((8713, 8728), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (8724, 8728), False, 'import pickle\n'), ((9049, 9064), 'pickle.load', 'pickle.load', (['fp'], {}), 
'(fp)\n', (9060, 9064), False, 'import pickle\n'), ((1824, 1851), 'numpy.exp', 'np.exp', (['((x_head - 1) / 10.3)'], {}), '((x_head - 1) / 10.3)\n', (1830, 1851), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Standard modules
import logging # https://docs.python.org/3/library/logging.html
import time # https://docs.python.org/3/library/time.html
import statistics # https://docs.python.org/3/library/statistics.html
# Functions or Classes from standard modules
from pprint import pprint # https://docs.python.org/3/library/pprint.html
from timeit import default_timer as timer # https://docs.python.org/3/library/timeit.html
# Imports from modules installed by pip
import numpy as np # https://docs.scipy.org/doc/numpy/reference/routines.html
import pandas as pd # Python Data Analysis Library http://pandas.pydata.org/
from openpyxl import Workbook # read/write xlsx/xlsm files https://openpyxl.readthedocs.io/
import labtoolkit as labtk
def stdevlowpass(*, tolerance=0.05, delay=0.1, readings=10, instrument=False, abortafter=42):
    """Standard deviation low pass filter.

    Polls ``instrument.measurement`` until the standard deviation of a
    sliding window of the last ``readings`` samples drops below
    ``tolerance`` (i.e. the reading has settled).  If the reading never
    settles, the filter is bypassed after ``abortafter - 1`` samples and
    whatever was collected is returned.

    :param tolerance: stdev upper limit required to pass
    :param delay: delay in seconds between readings
    :param readings: size of the sliding sample window
    :param instrument: instrument exposing a ``measurement`` attribute
    :param abortafter: poll count at which the filter gives up
    :returns: list of the most recent readings

    NOTE(review): the previous implementation raised inside a ``try``
    whose ``finally`` returned, which silently swallowed *every*
    exception — including genuine instrument errors.  Real errors now
    propagate; the abort limit is an ordinary loop bound.
    """
    meas = []
    # The original aborted via an exception once run >= abortafter, so at
    # most abortafter - 1 samples were ever taken; preserve that bound.
    for _ in range(abortafter - 1):
        meas.append(instrument.measurement)
        if len(meas) > readings:
            meas.pop(0)  # sliding window: discard the oldest sample
            if statistics.stdev(meas) < tolerance:
                break  # measurement has settled
        time.sleep(delay)
    return meas
class ETC(object):
    """Estimated Time to Completion.

    Collects one timing per completed work item and extrapolates the
    remaining wall-clock time from their mean.
    """

    def __init__(self, numberofpoints):
        """Track timings for *numberofpoints* work items still to do."""
        self.listoftimes = []
        self.points = numberofpoints + 1

    def append(self, timeinseconds, inferprogress=True):
        """Record one timing; count it as progress when *inferprogress* is True."""
        self.listoftimes.append(timeinseconds)
        # Deliberately an identity check: only the literal True decrements.
        if inferprogress is True:
            self.points = self.points - 1

    def ETC(self):
        """Return the estimated seconds remaining, formatted to 2 decimals."""
        remaining = statistics.mean(self.listoftimes) * self.points
        return f"{remaining:.2f}"
# Frequency-response sweep: step a signal generator from 1 GHz to 18 GHz
# in 100 MHz steps and log the settled spectrum-analyser readings to Excel.
logging.basicConfig(level=logging.INFO)
with labtk.ResourceManager('') as rm:
    # Enumerate VISA resources, dropping serial ports and a known-bad GPIB address.
    resources = rm.list_resources()
    # resources = ['GPIfc00:e968:6179::de52:7100::INSTR', 'GPIfd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b::INSTR']
    pprint(resources)
    for ignore in ['ASRL', 'GPIB0::6']:
        resources = [x for x in resources if not x.startswith(ignore)]
    pprint(resources)
    # Probe each remaining VISA address for a live instrument.
    pool = labtk.visaenumerate(rm, resources)
    pprint(pool)
    for a in pool:
        print(f"Discovered {a} ||| {pool[a]}")
        # print(each)
    print(f"Discovered {len(pool)} instruments")
    print("Attaching drivers to recognised instruments")
    pprint(pool)
    # Attach a labtoolkit driver class to every recognised instrument.
    instrument = labtk.Instruments()
    pprint(instrument)
    for driver in labtk.driverclasses:
        pod = labtk.driverdispatcher(pool, getattr(labtk, driver).REGISTER)
        if len(pod) != 0:
            setattr(instrument, driver, pod)
    pprint(instrument)
    # Ensure the analyser is in spectrum-analyser mode before configuring it.
    if instrument.SpectrumAnalyser[0].query(':INSTrument:SELect?') != 'SA':
        instrument.SpectrumAnalyser[0].write(':INSTrument:SELect SA')
        time.sleep(3)  # Loading delay?
    instrument.SpectrumAnalyser[0].write(':SENSe:ROSCillator:OUTPUT:STATe ON')
    instrument.SpectrumAnalyser[0].write(':CALibration:AUTO OFF')
    instrument.SpectrumAnalyser[0].frequencyspan = 1e3
    instrument.SpectrumAnalyser[0].resolutionbandwidth = 1e3
    instrument.SignalGenerator[0].amplimit = 10
    rfpath = input('RF Path (Ref, UUT) : ')
    # One worksheet per RF path, named after the operator's answer.
    wb = Workbook()
    ws = wb.active
    # ws.title = input("Sheetname --> ")
    ws.title = rfpath
    ws.append(["Frequency", "Mean dBm", "list dBm"])
    instrument.SignalGenerator[0].amplimit = 0
    instrument.SignalGenerator[0].amplitude = 0
    instrument.SignalGenerator[0].output = True
    EstimatedTime = ETC((18e9 - 1e9) / 100e6)  # CALCulate number of steps in test
    try:
        for frequency in np.arange(1e9, 18e9 + 1, 100e6):  # arange is upto but not including max value, thus + 1
            print(frequency)
            instrument.SpectrumAnalyser[0].frequency = frequency
            instrument.SignalGenerator[0].frequency = frequency
            start = timer()
            time.sleep(1)
            try:
                # SpectrumAnalyser[0].measurement
                # Wait until the analyser reading settles (or the filter bails out).
                measurements = stdevlowpass(
                    instrument=instrument.SpectrumAnalyser[0],
                    tolerance=0.05,
                    delay=0.1,
                    readings=10,
                    abortafter=24)
            finally:
                print(measurements)
            # Row: frequency, mean power, then every individual reading.
            ws.append([frequency, statistics.mean(measurements)] + measurements)
            EstimatedTime.append(timer() - start)  # end - start
            print(f"Estimated time to finish: {EstimatedTime.ETC()} s")
            print()
    finally:
        # Always switch the generator RF off and save whatever was captured.
        instrument.SignalGenerator[0].output = False
        wb.save(f'E:/Path-{rfpath}.xlsx')
| [
"logging.basicConfig",
"statistics.mean",
"statistics.stdev",
"labtoolkit.ResourceManager",
"timeit.default_timer",
"labtoolkit.Instruments",
"time.sleep",
"labtoolkit.visaenumerate",
"openpyxl.Workbook",
"pprint.pprint",
"numpy.arange"
] | [((2463, 2502), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2482, 2502), False, 'import logging\n'), ((2509, 2534), 'labtoolkit.ResourceManager', 'labtk.ResourceManager', (['""""""'], {}), "('')\n", (2530, 2534), True, 'import labtoolkit as labtk\n'), ((2693, 2710), 'pprint.pprint', 'pprint', (['resources'], {}), '(resources)\n', (2699, 2710), False, 'from pprint import pprint\n'), ((2826, 2843), 'pprint.pprint', 'pprint', (['resources'], {}), '(resources)\n', (2832, 2843), False, 'from pprint import pprint\n'), ((2856, 2890), 'labtoolkit.visaenumerate', 'labtk.visaenumerate', (['rm', 'resources'], {}), '(rm, resources)\n', (2875, 2890), True, 'import labtoolkit as labtk\n'), ((2895, 2907), 'pprint.pprint', 'pprint', (['pool'], {}), '(pool)\n', (2901, 2907), False, 'from pprint import pprint\n'), ((3106, 3118), 'pprint.pprint', 'pprint', (['pool'], {}), '(pool)\n', (3112, 3118), False, 'from pprint import pprint\n'), ((3136, 3155), 'labtoolkit.Instruments', 'labtk.Instruments', ([], {}), '()\n', (3153, 3155), True, 'import labtoolkit as labtk\n'), ((3160, 3178), 'pprint.pprint', 'pprint', (['instrument'], {}), '(instrument)\n', (3166, 3178), False, 'from pprint import pprint\n'), ((3371, 3389), 'pprint.pprint', 'pprint', (['instrument'], {}), '(instrument)\n', (3377, 3389), False, 'from pprint import pprint\n'), ((3941, 3951), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (3949, 3951), False, 'from openpyxl import Workbook\n'), ((3545, 3558), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3555, 3558), False, 'import time\n'), ((4348, 4403), 'numpy.arange', 'np.arange', (['(1000000000.0)', '(18000000000.0 + 1)', '(100000000.0)'], {}), '(1000000000.0, 18000000000.0 + 1, 100000000.0)\n', (4357, 4403), True, 'import numpy as np\n'), ((1847, 1864), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1857, 1864), False, 'import time\n'), ((4616, 4623), 'timeit.default_timer', 'timer', 
([], {}), '()\n', (4621, 4623), True, 'from timeit import default_timer as timer\n'), ((4636, 4649), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4646, 4649), False, 'import time\n'), ((1697, 1719), 'statistics.stdev', 'statistics.stdev', (['meas'], {}), '(meas)\n', (1713, 1719), False, 'import statistics\n'), ((2406, 2439), 'statistics.mean', 'statistics.mean', (['self.listoftimes'], {}), '(self.listoftimes)\n', (2421, 2439), False, 'import statistics\n'), ((5139, 5146), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5144, 5146), True, 'from timeit import default_timer as timer\n'), ((5055, 5084), 'statistics.mean', 'statistics.mean', (['measurements'], {}), '(measurements)\n', (5070, 5084), False, 'import statistics\n')] |
import json
import argparse
import pathlib
import time
import datetime
import subprocess
import numpy
import tqdm
import pybullet as bullet
import igl
def get_time_stamp():
    """Return the current local time formatted as 'YYYY-Mon-DD-HH-MM-SS'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%b-%d-%H-%M-%S")
def save_mesh(meshes, out_path, index):
    """Write the posed scene as a single OBJ file.

    Transforms each body's rest-pose mesh by its current Bullet pose and
    concatenates everything into one mesh written to
    ``out_path / f"m_{index:04d}.obj"``.

    :param meshes: sequence of (V, F) rest-pose arrays, indexed by Bullet body id
    :param out_path: pathlib.Path of the output directory
    :param index: frame number used to build the file name
    :raises IndexError: if there are fewer meshes than Bullet bodies
    """
    num_bodies = bullet.getNumBodies()
    if len(meshes) < num_bodies:
        # Was a bare `except: breakpoint()` — fail loudly instead of
        # dropping into the debugger in production.
        raise IndexError(
            f"have {len(meshes)} meshes for {num_bodies} bodies")
    Vs = []
    Fs = []
    offset = 0
    for i in range(num_bodies):
        V = meshes[i][0].copy()
        F = meshes[i][1].copy()
        t, q = bullet.getBasePositionAndOrientation(i)
        # Rotation matrix from the body's quaternion (row-major 3x3).
        R = numpy.array(bullet.getMatrixFromQuaternion(q)).reshape(3, 3)
        V = V @ R.T + t  # pose the rest-pose vertices
        F += offset      # re-index faces into the combined mesh
        offset += V.shape[0]
        Vs.append(V)
        Fs.append(F)
    V = numpy.concatenate(Vs)
    F = numpy.concatenate(Fs)
    igl.write_triangle_mesh(str(out_path / f"m_{index:04d}.obj"), V, F)
def object_from_obj(filename, mass=1, mesh_scale=[1, 1, 1]):
    """Create a Bullet body from an OBJ file, re-centred on its vertex mean.

    The body is built twice: once to measure the average vertex position
    (used as a cheap centre-of-mass proxy for dynamic bodies), then again
    with the collision/visual frames shifted so the body origin sits at
    that centre.

    :param filename: path to the OBJ mesh file
    :param mass: body mass; 0 marks a static body (concave mesh allowed)
    :param mesh_scale: per-axis mesh scale.  NOTE(review): mutable default
        argument — harmless here since it is never mutated, but worth tidying.
    :returns: (body id, average vertex position of the unshifted mesh)
    """
    col_flags = bullet.URDF_INITIALIZE_SAT_FEATURES
    if mass == 0:  # static objects
        col_flags = bullet.GEOM_FORCE_CONCAVE_TRIMESH
    # First pass: build the body with no frame shift just to sample its mesh.
    collision_shape_id = bullet.createCollisionShape(
        shapeType=bullet.GEOM_MESH,
        fileName=str(filename),
        collisionFramePosition=[0, 0, 0],
        meshScale=mesh_scale,
        flags=col_flags)
    visual_shape_id = bullet.createVisualShape(
        shapeType=bullet.GEOM_MESH,
        fileName=str(filename),
        visualFramePosition=[0, 0, 0],
        meshScale=mesh_scale)
    body = bullet.createMultiBody(
        baseMass=mass,
        baseInertialFramePosition=[0, 0, 0],
        baseCollisionShapeIndex=collision_shape_id,
        baseVisualShapeIndex=visual_shape_id,
        basePosition=[0, 0, 0])
    average_vertex = numpy.zeros(3)
    if mass > 0:
        # compute inertial frame, assuming mass at vertices
        average_vertex = numpy.average(
            numpy.array(bullet.getMeshData(body)[1]), axis=0)
    # bullet.resetBasePositionAndOrientation(body, [100000,0,0],[0,0,0,1])
    # Throw the probe body away and rebuild with the measured shift applied.
    bullet.removeBody(body)
    shift = -average_vertex
    collision_shape_id2 = bullet.createCollisionShape(
        shapeType=bullet.GEOM_MESH,
        fileName=str(filename),
        collisionFramePosition=shift,
        meshScale=mesh_scale,
        flags=col_flags)
    visual_shape_id2 = bullet.createVisualShape(
        shapeType=bullet.GEOM_MESH,
        fileName=str(filename),
        visualFramePosition=shift,
        meshScale=mesh_scale)
    body = bullet.createMultiBody(
        baseMass=mass,
        baseInertialFramePosition=[0, 0, 0],
        baseCollisionShapeIndex=collision_shape_id2,
        baseVisualShapeIndex=visual_shape_id2,
        basePosition=[0, 0, 0])
    if mass > 0:
        # make dynamics objects random color
        color = (numpy.random.random(3)).tolist() + [1]
        bullet.changeVisualShape(body, -1, rgbaColor=color)
    bullet.changeVisualShape(
        body, -1, flags=bullet.VISUAL_SHAPE_DOUBLE_SIDED)
    return body, average_vertex
def convert_to_convex_mesh(in_path, out_path):
    """Approximate the mesh at *in_path* by convex pieces using V-HACD.

    Writes the decomposition to *out_path*; the V-HACD log goes to
    ``vhacd_log.txt`` in the working directory.
    """
    vhacd_options = {
        "concavity": 0,
        "maxNumVerticesPerCH": 1024,
        "depth": 32,
        "resolution": 1000000,
        "convexhullApproximation": 0,
    }
    bullet.vhacd(str(in_path), str(out_path), "vhacd_log.txt", **vhacd_options)
def print_simulation_parameters():
    """Print every physics-engine parameter, one per line, then a blank line."""
    params = bullet.getPhysicsEngineParameters()
    for name in params:
        print(f"{name}: {params[name]}")
    print()
def run_simulation(fixture, meshes_path, out_path, args):
    """Load a Rigid IPC fixture into Bullet, step it, and dump OBJ frames.

    :param fixture: parsed fixture JSON (expects a "rigid_body_problem" key)
    :param meshes_path: directory the fixture's mesh paths are relative to
    :param out_path: directory receiving the per-frame ``m_####.obj`` files
    :param args: parsed command-line options (see ``parse_args``)
    """
    rigid_body_problem = fixture["rigid_body_problem"]
    gravity = rigid_body_problem.get("gravity", [0, 0, 0])
    # Command-line timestep overrides the fixture's value.
    timestep = args.timestep
    if timestep is None:
        timestep = fixture.get("timestep", 1e-2)
    if args.use_gui:
        bullet.connect(bullet.GUI)
        bullet.configureDebugVisualizer(flag=bullet.COV_ENABLE_Y_AXIS_UP)
        # Rendering is re-enabled after scene setup to speed up loading.
        bullet.configureDebugVisualizer(bullet.COV_ENABLE_RENDERING, 0)
    else:
        bullet.connect(bullet.DIRECT)
    print("Default parameters:")
    print_simulation_parameters()
    bullet.setPhysicsEngineParameter(
        numSolverIterations=2500,
        solverResidualThreshold=1e-12,
        enableSAT=args.enable_sat,
        enableConeFriction=args.enable_cone_friction)
    if args.use_ccd:
        bullet.setPhysicsEngineParameter(allowedCcdPenetration=0.0)
    bullet.setGravity(*gravity)
    bullet.setTimeStep(timestep)
    # plane = bullet.loadURDF("plane_implicit.urdf")#, pos = object_from_obj("meshes/plane.obj", mass=0)
    # bullet.changeDynamics(plane,-1,lateralFriction=1, frictionAnchor = 1, contactStiffness=30000, contactDamping=10000)
    # orn = bullet.getQuaternionFromEuler([ -numpy.pi/2,0, 0])
    # bullet.resetBasePositionAndOrientation(plane, [0,0,0], orn)
    meshes = []
    convex_meshes_path = pathlib.Path(__file__).resolve().parent / "meshes"
    # combined_friction = friction_a * friction_b so take the sqrt
    mu = args.mu
    if mu is None:
        mu = rigid_body_problem.get("coefficient_friction", 0.0)
    mu = numpy.sqrt(mu)
    # Build one Bullet body per enabled rigid body in the fixture.
    for body in rigid_body_problem["rigid_bodies"]:
        if not body.get("enabled", True):
            continue
        mesh_path = meshes_path / body["mesh"]
        mass = body.get("density", 1000)  # Assumes the volume is 1 m³
        is_dof_fixed = body.get("is_dof_fixed", False)
        # Static if flagged as such or if every degree of freedom is fixed.
        if (body.get("type", "dynamic") == "static"
                or (isinstance(is_dof_fixed, list) and all(is_dof_fixed))
                or is_dof_fixed):
            mass = 0  # static object
        if args.make_convex and mass > 0:
            # Cache a V-HACD convex decomposition next to this script.
            try:
                convex_mesh_path = (
                    convex_meshes_path / mesh_path.relative_to(meshes_path))
                convex_mesh_path.parent.mkdir(parents=True, exist_ok=True)
            except:
                convex_mesh_path = convex_meshes_path / mesh_path.name
            if not convex_mesh_path.exists():
                convert_to_convex_mesh(mesh_path, convex_mesh_path)
            mesh_path = convex_mesh_path
        V, F = igl.read_triangle_mesh(str(mesh_path))
        mesh_scale = body.get("scale", [1, 1, 1])
        if isinstance(mesh_scale, float):
            mesh_scale = [mesh_scale] * 3
        if "dimensions" in body:
            # Scale so the bounding box matches the requested dimensions.
            org_dim = V.max(axis=0) - V.min(axis=0)
            org_dim[org_dim <= 0] = 1
            mesh_scale = body["dimensions"] / org_dim
        body_id, com_shift = object_from_obj(
            mesh_path, mass=mass, mesh_scale=mesh_scale)
        bullet.changeDynamics(
            body_id, -1, lateralFriction=mu, frictionAnchor=False)
        if args.use_ccd:
            bullet.changeDynamics(
                body_id, -1, ccdSweptSphereRadius=args.ccd_radius)
        pos = body.get("position", [0, 0, 0])
        eul = numpy.deg2rad(body.get("rotation", [0, 0, 0]))
        orn = bullet.getQuaternionFromEuler(eul)
        lin_vel = body.get("linear_velocity", [0, 0, 0])
        ang_vel = numpy.deg2rad(body.get("angular_velocity", [0, 0, 0]))
        # Keep the saved rest-pose mesh consistent with the shifted body frame.
        com_pos = pos + args.shift_mag * com_shift
        meshes.append((V * mesh_scale - args.shift_mag * com_shift, F))
        bullet.resetBasePositionAndOrientation(body_id, com_pos, orn)
        bullet.resetBaseVelocity(body_id, lin_vel, ang_vel)
    max_steps = int(numpy.ceil(fixture.get("max_time", 5) / timestep))
    if args.use_gui:
        bullet.configureDebugVisualizer(bullet.COV_ENABLE_RENDERING, 1)
        cameraPitch = 0.
        cameraYaw = -180.
        bullet.resetDebugVisualizerCamera(
            cameraDistance=args.camera_distance,
            cameraYaw=cameraYaw,
            cameraPitch=cameraPitch,
            cameraTargetPosition=args.camera_target)
    index = 0
    save_mesh(meshes, out_path, 0)
    # Save at most 100 frames per simulated second regardless of timestep.
    skip_frames = 0 if timestep >= 1e-2 else 1e-2 / timestep
    prev_save = 0
    if args.use_gui:
        # GUI buttons: "Step" advances one step, "Run" toggles free-running.
        prev_step = 0
        step_id = bullet.addUserDebugParameter("Step", 1, -1, prev_step)
        prev_run = 0
        run_id = bullet.addUserDebugParameter("Run", 1, -1, prev_run)
        run_sim = False
    print("Using parameters:")
    print_simulation_parameters()
    pbar = tqdm.tqdm(total=(max_steps + 1))
    i = 0
    while (args.use_gui and bullet.isConnected()) or i <= max_steps:
        take_step = not args.use_gui or run_sim
        if args.use_gui:
            # Button presses change the debug-parameter value; detect edges.
            step = bullet.readUserDebugParameter(step_id)
            if step != prev_step:
                prev_step = step
                take_step = True
            run = bullet.readUserDebugParameter(run_id)
            if run != prev_run:
                prev_run = run
                run_sim = not run_sim
                take_step = run_sim
        if take_step:
            bullet.stepSimulation()
            pbar.update(1)
            if i - prev_save >= skip_frames:
                index += 1
                save_mesh(meshes, out_path, index)
                prev_save = i
            i += 1
        # if args.use_gui:
        #     time.sleep(1e-3)
    pbar.close()
def parse_args():
    """Parse command-line options for the Bullet comparison runner.

    Any directories given via ``--input`` are expanded into the ``.json``
    fixture files they contain; every path is resolved to an absolute one.

    :returns: argparse.Namespace whose ``input`` attribute is a list of
        resolved ``pathlib.Path`` objects (empty when no inputs given).
    """
    parser = argparse.ArgumentParser(
        description="Test Rigid IPC examples in Bullet")
    parser.add_argument(
        "-i", "--input", "--json-file", metavar="path/to/input",
        type=pathlib.Path, dest="input", help="path to input json(s)",
        nargs="+", default=[])  # default=[] so the expansion below is safe with no -i
    parser.add_argument(
        "--shift-mag", type=int, default=1,
        help=("Shift the collision/visual (due to OBJ file not centered) "
              "values -1=negative shift, 0=no shift, 1=positive shift"))
    parser.add_argument(
        "--camera-distance", type=float, default=7,
        help="Camera Distance (to target)")
    parser.add_argument(
        "--camera-target", type=float, default=[-1, 1, 1], nargs=3,
        help="Camera target (lookat) position")
    parser.add_argument(
        "--enable-sat", action="store_true", default=False,
        dest="enable_sat", help="Enable Separating Axis Test (SAT) collision")
    parser.add_argument(
        "--disable-cone-friction", action="store_false", default=True,
        dest="enable_cone_friction",
        help="Disable Cone friction (instead use the pyramid friction model)")
    parser.add_argument(
        "--make-convex", action="store_true", default=False,
        help="Convert dynamic bodies to convex meshes (using V-HACD)")
    parser.add_argument(
        "--dt", "--timestep", type=float, default=None, dest="timestep",
        help="timestep")
    parser.add_argument(
        "--mu", type=float, default=None, dest="mu", help="coeff. friction")
    parser.add_argument(
        "--use-ccd", action="store_true", default=False, dest="use_ccd",
        help="enable CCD using swept spheres")
    parser.add_argument(
        "--ccd-radius", type=float, default=0.002, dest="ccd_radius",
        help="CCD swept sphere radius")
    parser.add_argument(
        "--erp", type=float, default=None,
        help="error reduction parameter")
    parser.add_argument("--no-video", action="store_true", default=False,
                        help="skip rendering")
    parser.add_argument("--use-gui", action="store_true", default=False,
                        help="use Bullet GUI")
    args = parser.parse_args()
    inputs = []
    # Renamed loop variable from `input` to avoid shadowing the builtin.
    for path in args.input:
        if path.is_file() and path.suffix == ".json":
            inputs.append(path.resolve())
        elif path.is_dir():
            for glob_input in path.glob('**/*.json'):
                inputs.append(glob_input.resolve())
    args.input = inputs
    return args
def main():
    """Run every requested fixture through Bullet and optionally render it.

    For each input fixture, the output directory is derived from the
    fixture's location (relative to ``fixtures/3D`` when possible) plus a
    folder name that encodes the timestep / SAT / friction settings.
    """
    args = parse_args()
    root_path = pathlib.Path(__file__).resolve().parents[2]
    meshes_path = root_path / "meshes"
    renderer = root_path / "build" / "release" / "tools" / "render_simulation"
    if not renderer.exists():
        renderer = None  # rendering tool not built; skip video output
    # Renamed from `input` to avoid shadowing the builtin.
    for fixture_path in args.input:
        with open(fixture_path) as f:
            fixture = json.load(f)
        if args.timestep is None:
            args.timestep = fixture.get("timestep", 1e-2)
        try:
            out_path = fixture_path.relative_to(
                root_path / "fixtures" / "3D").with_suffix("")
        except ValueError:
            # Fixture lives outside fixtures/3D.  BUG FIX: this must be a
            # Path — the old code assigned the bare `str` stem, which made
            # `"output" / out_path` below raise TypeError.
            out_path = pathlib.Path(fixture_path.stem)
        out_path = ("output" / out_path).resolve()
        folder_name = "_".join(
            ([] if args.timestep is None else [f"timestep={args.timestep:g}"])
            + (["sat"] if args.enable_sat else [])
            + ([] if args.mu is None else [f"mu={args.mu:g}"]))
        out_path /= folder_name
        print("out_path:", out_path)
        out_path.mkdir(exist_ok=True, parents=True)
        try:
            run_simulation(fixture, meshes_path, out_path, args)
        except Exception as e:
            print(e)  # keep going: one bad fixture must not stop the batch
        try:
            bullet.resetSimulation()
        except Exception as e:
            print(e)
        # Render simulation
        if renderer is not None and not args.no_video:
            print("Rendering simulation")
            video_name = f"{fixture_path.stem}-{get_time_stamp()}-chrono.mp4"
            subprocess.run([str(renderer),
                            "-i", out_path,
                            "-o", out_path / video_name,
                            "--fps", "100"])
        print()
# Run the batch only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"pybullet.getMeshData",
"pybullet.getMatrixFromQuaternion",
"numpy.sqrt",
"pybullet.setGravity",
"pybullet.setTimeStep",
"pybullet.setPhysicsEngineParameter",
"pybullet.resetBaseVelocity",
"pybullet.getPhysicsEngineParameters",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.random.random",
... | [((744, 765), 'numpy.concatenate', 'numpy.concatenate', (['Vs'], {}), '(Vs)\n', (761, 765), False, 'import numpy\n'), ((774, 795), 'numpy.concatenate', 'numpy.concatenate', (['Fs'], {}), '(Fs)\n', (791, 795), False, 'import numpy\n'), ((1492, 1681), 'pybullet.createMultiBody', 'bullet.createMultiBody', ([], {'baseMass': 'mass', 'baseInertialFramePosition': '[0, 0, 0]', 'baseCollisionShapeIndex': 'collision_shape_id', 'baseVisualShapeIndex': 'visual_shape_id', 'basePosition': '[0, 0, 0]'}), '(baseMass=mass, baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=collision_shape_id, baseVisualShapeIndex=\n visual_shape_id, basePosition=[0, 0, 0])\n', (1514, 1681), True, 'import pybullet as bullet\n'), ((1736, 1750), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (1747, 1750), False, 'import numpy\n'), ((2961, 3035), 'pybullet.changeVisualShape', 'bullet.changeVisualShape', (['body', '(-1)'], {'flags': 'bullet.VISUAL_SHAPE_DOUBLE_SIDED'}), '(body, -1, flags=bullet.VISUAL_SHAPE_DOUBLE_SIDED)\n', (2985, 3035), True, 'import pybullet as bullet\n'), ((4063, 4233), 'pybullet.setPhysicsEngineParameter', 'bullet.setPhysicsEngineParameter', ([], {'numSolverIterations': '(2500)', 'solverResidualThreshold': '(1e-12)', 'enableSAT': 'args.enable_sat', 'enableConeFriction': 'args.enable_cone_friction'}), '(numSolverIterations=2500,\n solverResidualThreshold=1e-12, enableSAT=args.enable_sat,\n enableConeFriction=args.enable_cone_friction)\n', (4095, 4233), True, 'import pybullet as bullet\n'), ((4352, 4379), 'pybullet.setGravity', 'bullet.setGravity', (['*gravity'], {}), '(*gravity)\n', (4369, 4379), True, 'import pybullet as bullet\n'), ((4384, 4412), 'pybullet.setTimeStep', 'bullet.setTimeStep', (['timestep'], {}), '(timestep)\n', (4402, 4412), True, 'import pybullet as bullet\n'), ((5042, 5056), 'numpy.sqrt', 'numpy.sqrt', (['mu'], {}), '(mu)\n', (5052, 5056), False, 'import numpy\n'), ((8151, 8181), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': '(max_steps + 
1)'}), '(total=max_steps + 1)\n', (8160, 8181), False, 'import tqdm\n'), ((9051, 9123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Rigid IPC examples in Bullet"""'}), "(description='Test Rigid IPC examples in Bullet')\n", (9074, 9123), False, 'import argparse\n'), ((342, 363), 'pybullet.getNumBodies', 'bullet.getNumBodies', ([], {}), '()\n', (361, 363), True, 'import pybullet as bullet\n'), ((504, 543), 'pybullet.getBasePositionAndOrientation', 'bullet.getBasePositionAndOrientation', (['i'], {}), '(i)\n', (540, 543), True, 'import pybullet as bullet\n'), ((2018, 2041), 'pybullet.removeBody', 'bullet.removeBody', (['body'], {}), '(body)\n', (2035, 2041), True, 'import pybullet as bullet\n'), ((2533, 2724), 'pybullet.createMultiBody', 'bullet.createMultiBody', ([], {'baseMass': 'mass', 'baseInertialFramePosition': '[0, 0, 0]', 'baseCollisionShapeIndex': 'collision_shape_id2', 'baseVisualShapeIndex': 'visual_shape_id2', 'basePosition': '[0, 0, 0]'}), '(baseMass=mass, baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=collision_shape_id2, baseVisualShapeIndex=\n visual_shape_id2, basePosition=[0, 0, 0])\n', (2555, 2724), True, 'import pybullet as bullet\n'), ((2904, 2955), 'pybullet.changeVisualShape', 'bullet.changeVisualShape', (['body', '(-1)'], {'rgbaColor': 'color'}), '(body, -1, rgbaColor=color)\n', (2928, 2955), True, 'import pybullet as bullet\n'), ((3769, 3795), 'pybullet.connect', 'bullet.connect', (['bullet.GUI'], {}), '(bullet.GUI)\n', (3783, 3795), True, 'import pybullet as bullet\n'), ((3804, 3869), 'pybullet.configureDebugVisualizer', 'bullet.configureDebugVisualizer', ([], {'flag': 'bullet.COV_ENABLE_Y_AXIS_UP'}), '(flag=bullet.COV_ENABLE_Y_AXIS_UP)\n', (3835, 3869), True, 'import pybullet as bullet\n'), ((3878, 3941), 'pybullet.configureDebugVisualizer', 'bullet.configureDebugVisualizer', (['bullet.COV_ENABLE_RENDERING', '(0)'], {}), '(bullet.COV_ENABLE_RENDERING, 0)\n', (3909, 3941), True, 'import 
pybullet as bullet\n'), ((3960, 3989), 'pybullet.connect', 'bullet.connect', (['bullet.DIRECT'], {}), '(bullet.DIRECT)\n', (3974, 3989), True, 'import pybullet as bullet\n'), ((4288, 4347), 'pybullet.setPhysicsEngineParameter', 'bullet.setPhysicsEngineParameter', ([], {'allowedCcdPenetration': '(0.0)'}), '(allowedCcdPenetration=0.0)\n', (4320, 4347), True, 'import pybullet as bullet\n'), ((6521, 6597), 'pybullet.changeDynamics', 'bullet.changeDynamics', (['body_id', '(-1)'], {'lateralFriction': 'mu', 'frictionAnchor': '(False)'}), '(body_id, -1, lateralFriction=mu, frictionAnchor=False)\n', (6542, 6597), True, 'import pybullet as bullet\n'), ((6860, 6894), 'pybullet.getQuaternionFromEuler', 'bullet.getQuaternionFromEuler', (['eul'], {}), '(eul)\n', (6889, 6894), True, 'import pybullet as bullet\n'), ((7159, 7220), 'pybullet.resetBasePositionAndOrientation', 'bullet.resetBasePositionAndOrientation', (['body_id', 'com_pos', 'orn'], {}), '(body_id, com_pos, orn)\n', (7197, 7220), True, 'import pybullet as bullet\n'), ((7229, 7280), 'pybullet.resetBaseVelocity', 'bullet.resetBaseVelocity', (['body_id', 'lin_vel', 'ang_vel'], {}), '(body_id, lin_vel, ang_vel)\n', (7253, 7280), True, 'import pybullet as bullet\n'), ((7383, 7446), 'pybullet.configureDebugVisualizer', 'bullet.configureDebugVisualizer', (['bullet.COV_ENABLE_RENDERING', '(1)'], {}), '(bullet.COV_ENABLE_RENDERING, 1)\n', (7414, 7446), True, 'import pybullet as bullet\n'), ((7506, 7672), 'pybullet.resetDebugVisualizerCamera', 'bullet.resetDebugVisualizerCamera', ([], {'cameraDistance': 'args.camera_distance', 'cameraYaw': 'cameraYaw', 'cameraPitch': 'cameraPitch', 'cameraTargetPosition': 'args.camera_target'}), '(cameraDistance=args.camera_distance,\n cameraYaw=cameraYaw, cameraPitch=cameraPitch, cameraTargetPosition=args\n .camera_target)\n', (7539, 7672), True, 'import pybullet as bullet\n'), ((7903, 7957), 'pybullet.addUserDebugParameter', 'bullet.addUserDebugParameter', (['"""Step"""', '(1)', '(-1)', 
'prev_step'], {}), "('Step', 1, -1, prev_step)\n", (7931, 7957), True, 'import pybullet as bullet\n'), ((7996, 8048), 'pybullet.addUserDebugParameter', 'bullet.addUserDebugParameter', (['"""Run"""', '(1)', '(-1)', 'prev_run'], {}), "('Run', 1, -1, prev_run)\n", (8024, 8048), True, 'import pybullet as bullet\n'), ((187, 210), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (208, 210), False, 'import datetime\n'), ((3370, 3405), 'pybullet.getPhysicsEngineParameters', 'bullet.getPhysicsEngineParameters', ([], {}), '()\n', (3403, 3405), True, 'import pybullet as bullet\n'), ((6648, 6720), 'pybullet.changeDynamics', 'bullet.changeDynamics', (['body_id', '(-1)'], {'ccdSweptSphereRadius': 'args.ccd_radius'}), '(body_id, -1, ccdSweptSphereRadius=args.ccd_radius)\n', (6669, 6720), True, 'import pybullet as bullet\n'), ((8222, 8242), 'pybullet.isConnected', 'bullet.isConnected', ([], {}), '()\n', (8240, 8242), True, 'import pybullet as bullet\n'), ((8355, 8393), 'pybullet.readUserDebugParameter', 'bullet.readUserDebugParameter', (['step_id'], {}), '(step_id)\n', (8384, 8393), True, 'import pybullet as bullet\n'), ((8513, 8550), 'pybullet.readUserDebugParameter', 'bullet.readUserDebugParameter', (['run_id'], {}), '(run_id)\n', (8542, 8550), True, 'import pybullet as bullet\n'), ((8723, 8746), 'pybullet.stepSimulation', 'bullet.stepSimulation', ([], {}), '()\n', (8744, 8746), True, 'import pybullet as bullet\n'), ((11889, 11901), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11898, 11901), False, 'import json\n'), ((12718, 12742), 'pybullet.resetSimulation', 'bullet.resetSimulation', ([], {}), '()\n', (12740, 12742), True, 'import pybullet as bullet\n'), ((568, 601), 'pybullet.getMatrixFromQuaternion', 'bullet.getMatrixFromQuaternion', (['q'], {}), '(q)\n', (598, 601), True, 'import pybullet as bullet\n'), ((1892, 1916), 'pybullet.getMeshData', 'bullet.getMeshData', (['body'], {}), '(body)\n', (1910, 1916), True, 'import pybullet as bullet\n'), ((2857, 
2879), 'numpy.random.random', 'numpy.random.random', (['(3)'], {}), '(3)\n', (2876, 2879), False, 'import numpy\n'), ((4813, 4835), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (4825, 4835), False, 'import pathlib\n'), ((11589, 11611), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (11601, 11611), False, 'import pathlib\n')] |
import io
import numpy as np
from tqdm import tqdm
import csv
import json
def load_vectors(fname):
    """Load fastText-style ``.vec`` word embeddings.

    The first line holds the vocabulary size and the vector dimension;
    each following line is a word followed by its vector components.

    :param fname: path to the ``.vec`` file
    :returns: (dimension, {word: numpy.ndarray of shape (dimension,)})
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        n, d = map(int, fin.readline().split())
        data = {}
        for line in tqdm(fin):
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = np.array(list(map(float, tokens[1:])))
    return d, data
# Load the pretrained fastText vectors; n is the embedding dimension.
n, data = load_vectors("wiki-news-300d-1M.vec")
# Parse the labelled corpus: column 0 is space-tokenised text, column 1
# the sentiment label (strings starting with 'p' -> 1, anything else -> 0).
texts = []
with open('normolize.csv', 'r') as f:
    reader = csv.reader(f, delimiter=",")
    for row in reader:
        text = row[0].split(' ')
        emo = row[1].strip()
        emo = 1 if emo[0] == 'p' else 0
        texts.append((text, emo))
# Average the word vectors of each text into one fixed-size embedding.
word2vec = []
for text, emo in tqdm(texts):
    tmp = dict()
    tmp['text'] = np.zeros(n)
    # NOTE(review): 'lable' is a typo for 'label', but it is a persisted
    # JSON key — fix only in tandem with whatever reads these files.
    tmp['lable'] = emo
    cnt = 0
    for word in text:
        try:
            tmp['text'] += data[word]
        except Exception as e:
            cnt += 1  # out-of-vocabulary word: count it and move on
    tmp['text'] /= len(text)
    tmp['text'] = tmp['text'].tolist()
    tmp['missing word'] = cnt
    word2vec.append(tmp)
# Fixed 30k / 10k / remainder split into train / test / check files.
print("ok, begin writing...")
with open("word2vec_train.json", "w") as f:
    f.write(json.dumps(word2vec[:30000] ,sort_keys=True, indent=4, separators=(',', ': ')))
with open("word2vec_test.json", "w") as f:
    f.write(json.dumps(word2vec[30000:40000] ,sort_keys=True, indent=4, separators=(',', ': ')))
with open("word2vec_check.json", "w") as f:
    f.write(json.dumps(word2vec[40000:] ,sort_keys=True, indent=4, separators=(',', ': ')))
| [
"json.dumps",
"tqdm.tqdm",
"io.open",
"numpy.zeros",
"csv.reader"
] | [((728, 739), 'tqdm.tqdm', 'tqdm', (['texts'], {}), '(texts)\n', (732, 739), False, 'from tqdm import tqdm\n'), ((110, 178), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (117, 178), False, 'import io\n'), ((253, 262), 'tqdm.tqdm', 'tqdm', (['fin'], {}), '(fin)\n', (257, 262), False, 'from tqdm import tqdm\n'), ((508, 536), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (518, 536), False, 'import csv\n'), ((776, 787), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (784, 787), True, 'import numpy as np\n'), ((1159, 1237), 'json.dumps', 'json.dumps', (['word2vec[:30000]'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(word2vec[:30000], sort_keys=True, indent=4, separators=(',', ': '))\n", (1169, 1237), False, 'import json\n'), ((1295, 1382), 'json.dumps', 'json.dumps', (['word2vec[30000:40000]'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(word2vec[30000:40000], sort_keys=True, indent=4, separators=(',',\n ': '))\n", (1305, 1382), False, 'import json\n'), ((1437, 1515), 'json.dumps', 'json.dumps', (['word2vec[40000:]'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(word2vec[40000:], sort_keys=True, indent=4, separators=(',', ': '))\n", (1447, 1515), False, 'import json\n')] |
import unittest
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sbvar import utils
import tellurium as te
from sbvar.experiment import TwoWayExperiment
# Antimony model string: two-step linear chain S1 -> S2 -> S3 with
# first-order rate constants k1, k2. (Defined but not used in this suite.)
ant_uni = '''
J0: S1 -> S2; k1*S1;
J1: S2 -> S3; k2*S2;
k1= 0.1; k2 = 0.2;
S1 = 10; S2 = 0; S3 = 0;
'''

# Antimony model string with a boundary-species inflow ($Xo) and first-order
# outflow; loaded in setUp via te.loada for all TwoWayExperiment tests.
ant_bi = '''
J0: $Xo -> S1; 1 + Xo*(32+(S1/0.75)^3.2)/(1 +(S1/4.3)^3.2);
J1: S1 -> $X1; k1*S1;
Xo = 0.09; X1 = 0.0;
S1 = 0.5; k1 = 3.2;
'''
class TestTwoWayExperiment(unittest.TestCase):
    """Tests for TwoWayExperiment: a two-parameter (S1 x Xo) scan over the
    `ant_bi` Antimony model."""

    def setUp(self) -> None:
        # Shared simulation settings and a 5 x 3 (param1 x param2) grid.
        self.sim_kwargs = {'start':0, 'end':100, 'points':100, 'steps':None}
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 10), 'num1':5,
            'bounds2':(0.08, 0.1), 'num2':3,
            }
        self.rr = te.loada(ant_bi)
        self.exp = TwoWayExperiment(self.rr, selections=None,
            conserved_moiety=False, **self.sim_kwargs, **self.param_kwargs)

    def test_init(self):
        """Constructor should expose model ids and scan metadata unchanged."""
        self.assertCountEqual(self.exp.species_ids, ['S1'])
        self.assertCountEqual(self.exp.boundary_ids, ["Xo", "X1"])
        self.assertCountEqual(self.exp.flux_ids, ["S1'"])
        self.assertCountEqual(self.exp.reaction_ids, ["J0", "J1"])
        self.assertEqual(self.exp.dim, 2)
        self.assertEqual(self.exp.param_list, ['S1', 'Xo'])
        self.assertEqual(self.exp.bounds_list,
            [self.param_kwargs['bounds1'], self.param_kwargs['bounds2']])
        self.assertEqual(self.exp.levels_list,
            [None, None])
        # Nothing has been simulated at construction time.
        self.assertEqual(self.exp.simulations, None)
        self.assertEqual(self.exp.steady_states, None)

    def test_set_conditions(self):
        """Conditions enumerate the grid with param1 varying fastest."""
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 10), 'num1':5,
            'bounds2':(0.08, 0.1), 'num2':3,
            }
        # 15 rows: blocks of five S1 values for each Xo level.
        expected = np.array(
            [[ 0.  ,  0.08],
             [ 2.5 ,  0.08],
             [ 5.  ,  0.08],
             [ 7.5 ,  0.08],
             [10.  ,  0.08],
             [ 0.  ,  0.09],
             [ 2.5 ,  0.09],
             [ 5.  ,  0.09],
             [ 7.5 ,  0.09],
             [10.  ,  0.09],
             [ 0.  ,  0.1 ],
             [ 2.5 ,  0.1 ],
             [ 5.  ,  0.1 ],
             [ 7.5 ,  0.1 ],
             [10.  ,  0.1 ]])
        self.exp = TwoWayExperiment(self.rr, **self.sim_kwargs, **self.param_kwargs)
        self.assertTrue(np.allclose(self.exp.conditions, expected))
        np.testing.assert_equal(self.exp.conditions_list,
            [[0, 2.5, 5, 7.5, 10], [0.08, 0.09, 0.1]])
        # Explicit levels for param2 instead of bounds2/num2.
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 10), 'num1':5,
            'levels2': [0.89, 0.9, 0.91]
            }
        expected = np.array(
            [[ 0.  ,  0.89],
             [ 2.5 ,  0.89],
             [ 5.  ,  0.89],
             [ 7.5 ,  0.89],
             [10.  ,  0.89],
             [ 0.  ,  0.9 ],
             [ 2.5 ,  0.9 ],
             [ 5.  ,  0.9 ],
             [ 7.5 ,  0.9 ],
             [10.  ,  0.9 ],
             [ 0.  ,  0.91],
             [ 2.5 ,  0.91],
             [ 5.  ,  0.91],
             [ 7.5 ,  0.91],
             [10.  ,  0.91]])
        self.exp = TwoWayExperiment(self.rr, **self.sim_kwargs, **self.param_kwargs)
        self.assertTrue(np.allclose(self.exp.conditions, expected))
        np.testing.assert_equal(self.exp.conditions_list,
            [[0, 2.5, 5, 7.5, 10], [0.89, 0.9, 0.91]])
        # Scalar levels are rejected.
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 10), 'num1':5,
            'levels2': 5
            }
        self.assertRaises(TypeError, TwoWayExperiment, self.rr,
            **self.sim_kwargs, **self.param_kwargs)
        # Scalar bounds are rejected.
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':5, 'num1':5,
            'bounds2':(0.8, 1), 'num2':3,
            }
        self.assertRaises(TypeError, TwoWayExperiment, self.rr,
            **self.sim_kwargs, **self.param_kwargs)
        # Bounds must have exactly two entries.
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 10, 15), 'num1':5,
            'bounds2':(0.8, 1), 'num2':3,
            }
        self.assertRaises(ValueError, TwoWayExperiment, self.rr,
            **self.sim_kwargs, **self.param_kwargs)
        # NOTE(review): this dict is immediately overwritten by the next
        # assignment, so the (0, 'a') bounds1 case is never exercised.
        self.param_kwargs = {
            'param1':'S1', 'param2':'Xo',
            'bounds1':(0, 'a'), 'num1':5,
            'bounds2':(0.8, 1), 'num2':3,
            }
        self.param_kwargs = {'param':'S1', 'bounds':(0, 'a'), 'num':5}
        self.assertRaises(TypeError, TwoWayExperiment, self.rr,
            **self.sim_kwargs, **self.param_kwargs)

    def test_get_conditions_df(self):
        """Conditions are also exposed as a DataFrame labeled by parameter."""
        expected = pd.DataFrame(np.array(
            [[ 0.  ,  0.08],
             [ 2.5 ,  0.08],
             [ 5.  ,  0.08],
             [ 7.5 ,  0.08],
             [10.  ,  0.08],
             [ 0.  ,  0.09],
             [ 2.5 ,  0.09],
             [ 5.  ,  0.09],
             [ 7.5 ,  0.09],
             [10.  ,  0.09],
             [ 0.  ,  0.1 ],
             [ 2.5 ,  0.1 ],
             [ 5.  ,  0.1 ],
             [ 7.5 ,  0.1 ],
             [10.  ,  0.1 ]]), columns=['S1', 'Xo'])
        pd.testing.assert_frame_equal(expected, self.exp.get_conditions_df())

    def test_conditions_to_meshes(self):
        """conditions_to_meshes should reproduce np.meshgrid of the levels."""
        x, y = [0, 2.5, 5, 7.5, 10], [0.08, 0.09, 0.1]
        expected = np.meshgrid(x, y)
        np.testing.assert_allclose(self.exp.conditions_to_meshes(), expected)

    # NOTE(review): name lacks the 'test_' prefix, so unittest never runs
    # this check. Also y uses 1. where setUp's grid uses 0.1 — confirm both
    # before renaming it into a real test.
    def vector_to_mesh(self, v):
        x, y = [0, 2.5, 5, 7.5, 10], [0.08, 0.09, 1.]
        X, Y = np.meshgrid(x, y)
        np.testing.assert_allclose(self.exp.vector_to_mesh(x), X)
        np.testing.assert_allclose(self.exp.vector_to_mesh(y), Y)

    def test_iter_conditions(self):
        """iter_conditions should set each grid point on the model in order."""
        def save_value():
            # Read back the parameter values currently set on the model.
            x = self.exp.rr[self.param_kwargs['param1']]
            y = self.exp.rr[self.param_kwargs['param2']]
            return ([x, y])
        expected = np.array(
            [[ 0.  ,  0.08],
             [ 2.5 ,  0.08],
             [ 5.  ,  0.08],
             [ 7.5 ,  0.08],
             [10.  ,  0.08],
             [ 0.  ,  0.09],
             [ 2.5 ,  0.09],
             [ 5.  ,  0.09],
             [ 7.5 ,  0.09],
             [10.  ,  0.09],
             [ 0.  ,  0.1 ],
             [ 2.5 ,  0.1 ],
             [ 5.  ,  0.1 ],
             [ 7.5 ,  0.1 ],
             [10.  ,  0.1 ]])
        values = self.exp.iter_conditions(save_value)
        np.testing.assert_allclose(expected, values)

    def test_get_mesh(self):
        """get_mesh should match manually simulated values over the grid."""
        t_init = np.empty((3, 5))
        for j, x in enumerate([0, 2.5, 5, 7.5, 10]):
            for i, y in enumerate([0.08, 0.09, 0.1]):
                self.rr.reset()
                self.rr['S1'] = x
                self.rr['Xo'] = y
                out = self.rr.simulate(**self.sim_kwargs, selections=['S1'])
                t_init[i, j] = out.flatten()[0]
        # check initial condition
        mesh = self.exp.get_mesh("S1", steady_state=False, step=0)
        np.testing.assert_allclose(mesh, t_init)
        mesh = self.exp.get_mesh("S1", steady_state=False, time=0)
        np.testing.assert_allclose(mesh, t_init)
        t_final = np.empty((3, 5))
        for j, x in enumerate([0, 2.5, 5, 7.5, 10]):
            for i, y in enumerate([0.08, 0.09, 0.1]):
                self.rr.reset()
                self.rr['S1'] = x
                self.rr['Xo'] = y
                out = self.rr.simulate(**self.sim_kwargs, selections=['S1'])
                t_final[i, j] = out.flatten()[-1]
        # check final time point
        mesh = self.exp.get_mesh("S1", steady_state=False, step=-1)
        np.testing.assert_allclose(mesh, t_final)
        end = self.sim_kwargs['end']
        mesh = self.exp.get_mesh("S1", steady_state=False, time=end)
        np.testing.assert_allclose(mesh, t_final)
        ss = np.empty((3, 5))
        for j, x in enumerate([0, 2.5, 5, 7.5, 10]):
            for i, y in enumerate([0.08, 0.09, 0.1]):
                self.rr.reset()
                self.rr['S1'] = x
                self.rr['Xo'] = y
                self.rr.steadyStateApproximate()
                out = self.rr.getSteadyStateValues()[0]
                ss[i, j] = out
        # self.exp.conserved_moiety = True
        self.exp.calc_steady_state(approximate=True)
        mesh = self.exp.get_mesh("S1", steady_state=True)
        np.testing.assert_allclose(mesh, ss)

    def test_plot_mesh(self):
        """plot_mesh should render every kind/projection combo without error."""
        self.exp.simulate()
        combos = [
            ('contourf', '2d'), ('contour', '2d'),
            ('contourf', '3d'), ('contour', '3d'),
            ('surface', '3d')]
        for kind, proj in combos:
            self.exp.plot_mesh('S1', kind=kind, projection=proj)
            plt.close()
        for kind, proj in combos:
            self.exp.plot_mesh('S1', kind=kind, projection=proj,
                steady_state=False, step=0)
            plt.close()
        for kind, proj in combos:
            self.exp.plot_mesh('S1', kind=kind, projection=proj,
                steady_state=False, time=3)
            plt.close()
        # presumably surface plots require a 3d projection — ValueError expected
        self.assertRaises(ValueError, self.exp.plot_mesh, 'S1',
            kind='surface', projection='2d')
        # presumably time and step are mutually exclusive — ValueError expected
        self.assertRaises(ValueError, self.exp.plot_mesh, 'S1',
            time=1, step=0)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| [
"sbvar.experiment.TwoWayExperiment",
"numpy.allclose",
"numpy.testing.assert_equal",
"tellurium.loada",
"numpy.testing.assert_allclose",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.empty",
"unittest.main",
"numpy.meshgrid"
] | [((9347, 9362), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9360, 9362), False, 'import unittest\n'), ((808, 824), 'tellurium.loada', 'te.loada', (['ant_bi'], {}), '(ant_bi)\n', (816, 824), True, 'import tellurium as te\n'), ((844, 955), 'sbvar.experiment.TwoWayExperiment', 'TwoWayExperiment', (['self.rr'], {'selections': 'None', 'conserved_moiety': '(False)'}), '(self.rr, selections=None, conserved_moiety=False, **self.\n sim_kwargs, **self.param_kwargs)\n', (860, 955), False, 'from sbvar.experiment import TwoWayExperiment\n'), ((1880, 2091), 'numpy.array', 'np.array', (['[[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08], [0.0, \n 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0, 0.1],\n [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]]'], {}), '([[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08],\n [0.0, 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0,\n 0.1], [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]])\n', (1888, 2091), True, 'import numpy as np\n'), ((2331, 2396), 'sbvar.experiment.TwoWayExperiment', 'TwoWayExperiment', (['self.rr'], {}), '(self.rr, **self.sim_kwargs, **self.param_kwargs)\n', (2347, 2396), False, 'from sbvar.experiment import TwoWayExperiment\n'), ((2473, 2570), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.exp.conditions_list', '[[0, 2.5, 5, 7.5, 10], [0.08, 0.09, 0.1]]'], {}), '(self.exp.conditions_list, [[0, 2.5, 5, 7.5, 10], [\n 0.08, 0.09, 0.1]])\n', (2496, 2570), True, 'import numpy as np\n'), ((2768, 2980), 'numpy.array', 'np.array', (['[[0.0, 0.89], [2.5, 0.89], [5.0, 0.89], [7.5, 0.89], [10.0, 0.89], [0.0, \n 0.9], [2.5, 0.9], [5.0, 0.9], [7.5, 0.9], [10.0, 0.9], [0.0, 0.91], [\n 2.5, 0.91], [5.0, 0.91], [7.5, 0.91], [10.0, 0.91]]'], {}), '([[0.0, 0.89], [2.5, 0.89], [5.0, 0.89], [7.5, 0.89], [10.0, 0.89],\n [0.0, 0.9], [2.5, 0.9], [5.0, 0.9], [7.5, 0.9], [10.0, 0.9], [0.0, 0.91\n ], [2.5, 0.91], [5.0, 0.91], [7.5, 0.91], [10.0, 
0.91]])\n', (2776, 2980), True, 'import numpy as np\n'), ((3219, 3284), 'sbvar.experiment.TwoWayExperiment', 'TwoWayExperiment', (['self.rr'], {}), '(self.rr, **self.sim_kwargs, **self.param_kwargs)\n', (3235, 3284), False, 'from sbvar.experiment import TwoWayExperiment\n'), ((3361, 3458), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.exp.conditions_list', '[[0, 2.5, 5, 7.5, 10], [0.89, 0.9, 0.91]]'], {}), '(self.exp.conditions_list, [[0, 2.5, 5, 7.5, 10], [\n 0.89, 0.9, 0.91]])\n', (3384, 3458), True, 'import numpy as np\n'), ((5393, 5410), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5404, 5410), True, 'import numpy as np\n'), ((5592, 5609), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5603, 5609), True, 'import numpy as np\n'), ((5970, 6181), 'numpy.array', 'np.array', (['[[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08], [0.0, \n 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0, 0.1],\n [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]]'], {}), '([[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08],\n [0.0, 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0,\n 0.1], [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]])\n', (5978, 6181), True, 'import numpy as np\n'), ((6464, 6508), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected', 'values'], {}), '(expected, values)\n', (6490, 6508), True, 'import numpy as np\n'), ((6556, 6572), 'numpy.empty', 'np.empty', (['(3, 5)'], {}), '((3, 5))\n', (6564, 6572), True, 'import numpy as np\n'), ((7015, 7055), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh', 't_init'], {}), '(mesh, t_init)\n', (7041, 7055), True, 'import numpy as np\n'), ((7131, 7171), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh', 't_init'], {}), '(mesh, t_init)\n', (7157, 7171), True, 'import numpy as np\n'), ((7191, 7207), 'numpy.empty', 'np.empty', (['(3, 5)'], {}), '((3, 
5))\n', (7199, 7207), True, 'import numpy as np\n'), ((7660, 7701), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh', 't_final'], {}), '(mesh, t_final)\n', (7686, 7701), True, 'import numpy as np\n'), ((7816, 7857), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh', 't_final'], {}), '(mesh, t_final)\n', (7842, 7857), True, 'import numpy as np\n'), ((7872, 7888), 'numpy.empty', 'np.empty', (['(3, 5)'], {}), '((3, 5))\n', (7880, 7888), True, 'import numpy as np\n'), ((8394, 8430), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh', 'ss'], {}), '(mesh, ss)\n', (8420, 8430), True, 'import numpy as np\n'), ((2421, 2463), 'numpy.allclose', 'np.allclose', (['self.exp.conditions', 'expected'], {}), '(self.exp.conditions, expected)\n', (2432, 2463), True, 'import numpy as np\n'), ((3309, 3351), 'numpy.allclose', 'np.allclose', (['self.exp.conditions', 'expected'], {}), '(self.exp.conditions, expected)\n', (3320, 3351), True, 'import numpy as np\n'), ((4744, 4955), 'numpy.array', 'np.array', (['[[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08], [0.0, \n 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0, 0.1],\n [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]]'], {}), '([[0.0, 0.08], [2.5, 0.08], [5.0, 0.08], [7.5, 0.08], [10.0, 0.08],\n [0.0, 0.09], [2.5, 0.09], [5.0, 0.09], [7.5, 0.09], [10.0, 0.09], [0.0,\n 0.1], [2.5, 0.1], [5.0, 0.1], [7.5, 0.1], [10.0, 0.1]])\n', (4752, 4955), True, 'import numpy as np\n'), ((8756, 8767), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8765, 8767), True, 'import matplotlib.pyplot as plt\n'), ((8924, 8935), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8933, 8935), True, 'import matplotlib.pyplot as plt\n'), ((9092, 9103), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9101, 9103), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import cv2
import random
# Keep every SAMPLING_RATE-th frame when scanning a video.
SAMPLING_RATE = 1
# Scale factor applied to the Gaussian noise added to frames.
NOISE_RATE = 0.5
# def get_video_frames(filepath, fix_height, fix_width):
# frame_list = []
# cap = cv2.VideoCapture()
# if cap.open(filepath):
# fps, n_frames, frame_height, frame_width = (int(cap.get(cv2.CAP_PROP_FPS)),
# int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
# int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
# int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
# for i in range(n_frames):
# _, frame = cap.read()
# if i % SAMPLING_RATE == 0:
# frame = np.array(frame, dtype=np.float32)
# if len(frame.shape) != 3:
# continue
# frame = cv2.resize(frame, (fix_width, fix_height))
# frame_list.append(frame)
# cap.release()
# noisy_frame_list = add_frame_noise(frame_list)
# frame_list = np.array(frame_list)
# noisy_frame_list = np.array(noisy_frame_list)
# return frame_list, noisy_frame_list
def get_frame_list(filelist, fix_height=128, fix_width=128, max_tries=100):
    """Sample one random, decodable color frame from each video file.

    Args:
        filelist: iterable of video file paths readable by OpenCV.
        fix_height, fix_width: output frame size after resizing.
        max_tries: maximum random positions probed per video before giving
            up on it (bug fix: the old loop retried forever, so a corrupt
            video hung the process; a failed read also returned None, which
            crashed np.array(..., dtype=np.float32)).

    Returns:
        (frames, noisy_frames): float32 arrays of the sampled frames and the
        same frames with Gaussian noise added (see add_frame_noise).
    """
    frame_list = []
    for path in filelist:
        cap = cv2.VideoCapture()
        if cap.open(path):
            try:
                n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                frame = None
                # Probe random positions until a 3-channel frame decodes;
                # bounded so unreadable files are skipped, not looped on.
                for _ in range(max_tries):
                    pos = random.randrange(n_frames)
                    cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
                    ok, raw = cap.read()
                    if not ok or raw is None:
                        continue
                    candidate = np.array(raw, dtype=np.float32)
                    if len(candidate.shape) == 3:
                        frame = candidate
                        break
                if frame is not None:
                    frame = cv2.resize(frame, (fix_width, fix_height))
                    frame_list.append(frame)
            finally:
                # Release the capture even if decoding raises.
                cap.release()
    noisy_frame_list = add_frame_noise(frame_list)
    frame_list = np.array(frame_list, dtype=np.float32)
    noisy_frame_list = np.array(noisy_frame_list, dtype=np.float32)
    return frame_list, noisy_frame_list
def add_frame_noise(inputs):
    """Add zero-mean Gaussian noise, scaled by NOISE_RATE, to frame data.

    Args:
        inputs: a numpy array, or a list of numpy arrays (one per frame).

    Returns:
        The same structure with i.i.d. Gaussian noise added; inputs are not
        modified in place.

    Raises:
        Exception: if `inputs` is neither an ndarray nor a list of ndarrays.
    """
    if isinstance(inputs, list):
        outputs = []
        for frame in inputs:
            if not isinstance(frame, np.ndarray):
                raise Exception('Inputs must be a numpy array or a list of numpy arrays.')
            # randn(*shape) supports any rank, so both grayscale (H, W) and
            # color (H, W, C) frames work.
            outputs.append(frame + NOISE_RATE * np.random.randn(*frame.shape))
        return outputs
    elif isinstance(inputs, np.ndarray):
        # Bug fix: the old code unpacked exactly two dimensions here, which
        # crashed on 3-D color frames; use the full shape instead.
        return inputs + NOISE_RATE * np.random.randn(*inputs.shape)
    else:
        raise Exception('Inputs must be a numpy array or a list of numpy arrays.')
# def get_batch(frame_list, noisy_frame_list, batch_size=64):
# frame_num = len(frame_list)
# fill_num = batch_size % frame_num
# fill_indices = random.sample(range(frame_num), fill_num)
# indices = list(range(frame_num)) * (batch_size//frame_num) + fill_indices
# batch_frame = frame_list[indices]
# batch_noisy_frame = noisy_frame_list[indices]
# return batch_frame, batch_noisy_frame
def get_batch(frame_list, noisy_frame_list, batch_size=64):
    """Draw a random mini-batch of aligned clean/noisy frames.

    Sampling is with replacement, so batch_size may exceed the number of
    available frames.
    """
    if len(frame_list) != len(noisy_frame_list):
        raise Exception('Original frame list size is not equal to noisy list.')
    picks = np.random.choice(len(frame_list), batch_size)
    return frame_list[picks], noisy_frame_list[picks]
| [
"random.randrange",
"numpy.array",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.resize",
"numpy.random.randn"
] | [((2191, 2229), 'numpy.array', 'np.array', (['frame_list'], {'dtype': 'np.float32'}), '(frame_list, dtype=np.float32)\n', (2199, 2229), True, 'import numpy as np\n'), ((2253, 2297), 'numpy.array', 'np.array', (['noisy_frame_list'], {'dtype': 'np.float32'}), '(noisy_frame_list, dtype=np.float32)\n', (2261, 2297), True, 'import numpy as np\n'), ((1284, 1302), 'cv2.VideoCapture', 'cv2.VideoCapture', ([], {}), '()\n', (1300, 1302), False, 'import cv2\n'), ((1727, 1756), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (1735, 1756), True, 'import numpy as np\n'), ((2017, 2059), 'cv2.resize', 'cv2.resize', (['frame', '(fix_width, fix_height)'], {}), '(frame, (fix_width, fix_height))\n', (2027, 2059), False, 'import cv2\n'), ((1820, 1846), 'random.randrange', 'random.randrange', (['n_frames'], {}), '(n_frames)\n', (1836, 1846), False, 'import random\n'), ((1963, 1996), 'numpy.array', 'np.array', (['frame'], {'dtype': 'np.float32'}), '(frame, dtype=np.float32)\n', (1971, 1996), True, 'import numpy as np\n'), ((3001, 3043), 'numpy.random.randn', 'np.random.randn', (['frame_height', 'frame_width'], {}), '(frame_height, frame_width)\n', (3016, 3043), True, 'import numpy as np\n'), ((2748, 2799), 'numpy.random.randn', 'np.random.randn', (['frame_height', 'frame_width', 'channel'], {}), '(frame_height, frame_width, channel)\n', (2763, 2799), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow import gfile
from tensorflow import app
from my_utils import mylog
import my_utils
#%%
FLAGS = flags.FLAGS
# Flags are only registered when this file runs as a script, so importing it
# as a module (e.g. from a worker process) does not redefine them.
if __name__ == "__main__":
    flags.DEFINE_string("input_data_pattern",
                        "input/GENERATED_DATA/f2train/*.tfrecord",
                        "files to process")
    flags.DEFINE_string("output_path", "/tmp/", "Path for generated data.")
    flags.DEFINE_integer("file_from", 11, "start from, eg., the 11th file")
    flags.DEFINE_integer("file_to", 15, "process 15 - 11 files")
    flags.DEFINE_bool("parallel", True, "parallel processing")
    flags.DEFINE_string("feature_names",
                        "video_id,labels,mean_rgb,mean_audio,num_frames,std_rgb,std_audio",
                        "features to pick")
#%%
# basic operation example
# Dead code kept for reference (guarded by `if False:`): shows how to read a
# ZLIB tfrecord, derive skew features as mean - median, and rebuild an Example.
if False:
    filename = 'input/GENERATED_DATA/f2train/Atrain__.tfrecord'
    opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    ex_iter = tf.python_io.tf_record_iterator(filename, options=opts)
    example = next(ex_iter)
    in_ex = tf.train.Example.FromString(example)
    mean_rgb = in_ex.features.feature['mean_rgb'].float_list.value
    median_rgb = in_ex.features.feature['median_rgb'].float_list.value
    skew_rgb = np.array(mean_rgb) - np.array(median_rgb)
    mean_audio = in_ex.features.feature['mean_audio'].float_list.value
    median_audio = in_ex.features.feature['median_audio'].float_list.value
    skew_audio = np.array(mean_audio) - np.array(median_audio)
    out_ex = tf.train.Example(features=tf.train.Features(feature={
        'video_id': in_ex.features.feature['video_id'],
        'labels': in_ex.features.feature['labels'],
        'mean_rgb': in_ex.features.feature['mean_rgb'],
        'mean_audio': in_ex.features.feature['mean_audio'],
        'skew_rgb': my_utils._floatlist_feature(skew_rgb),
        'skew_audio': my_utils._floatlist_feature(skew_audio),
        'std_rgb': in_ex.features.feature['std_rgb'],
        'std_audio': in_ex.features.feature['std_audio'],
        'top_1_rgb': in_ex.features.feature['top_1_rgb'],
        'top_2_rgb': in_ex.features.feature['top_2_rgb'],
        'top_3_rgb': in_ex.features.feature['top_3_rgb'],
        'top_4_rgb': in_ex.features.feature['top_4_rgb'],
        'top_5_rgb': in_ex.features.feature['top_5_rgb'],
        'num_frames': in_ex.features.feature['num_frames'],
        'std_all_rgb': in_ex.features.feature['std_all_rgb'],
        'std_all_audio': in_ex.features.feature['std_all_audio']
        } ) )
#%%
def select_features_from_tfexample(input_tfexample, feats=None):
    """Build a new tf.train.Example keeping only the requested features.

    Args:
        input_tfexample: source tf.train.Example.
        feats: iterable of feature names to keep; 'video_id' and 'labels'
            are always included. Defaults to the standard feature set.
            The derived names 'mmdf_rgb'/'mmdf_audio' are computed on the
            fly as mean - median of the corresponding source features.

    Returns:
        A tf.train.Example containing the selected features.
    """
    if feats is None:
        feats = ['video_id', 'labels', 'mean_rgb', 'mean_audio', 'num_frames',
                 'std_rgb', 'std_audio',
                 'top_1_rgb', 'top_2_rgb', 'top_3_rgb', 'top_4_rgb', 'top_5_rgb',
                 'top_1_audio', 'top_2_audio', 'top_3_audio',
                 'top_4_audio', 'top_5_audio']  # bug fix: was 'top_5_aduio'
    feats = set(['video_id', 'labels'] + list(feats))  # make sure uniqueness and vid/labs
    # Copy all plain features straight through; derived ones are handled below.
    fdict = {name: input_tfexample.features.feature[name] for name in feats
             if name not in ['mmdf_rgb', 'mmdf_audio']}
    if 'mmdf_rgb' in feats:
        mean_rgb = input_tfexample.features.feature['mean_rgb'].float_list.value
        median_rgb = input_tfexample.features.feature['median_rgb'].float_list.value
        mmdf_rgb = np.array(mean_rgb) - np.array(median_rgb)
        fdict['mmdf_rgb'] = my_utils._floatlist_feature(mmdf_rgb)
    if 'mmdf_audio' in feats:
        mean_audio = input_tfexample.features.feature['mean_audio'].float_list.value
        median_audio = input_tfexample.features.feature['median_audio'].float_list.value
        mmdf_audio = np.array(mean_audio) - np.array(median_audio)
        fdict['mmdf_audio'] = my_utils._floatlist_feature(mmdf_audio)
    return tf.train.Example(features=tf.train.Features(feature=fdict))
def pick_features_from_file(input_fn, out_fn, feats=None):
    """Copy a ZLIB-compressed tfrecord file, keeping only selected features,
    and log the throughput."""
    start_time = time.time()
    opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    record_iter = tf.python_io.tf_record_iterator(input_fn, options=opts)
    num_examples = 0
    with tf.python_io.TFRecordWriter(out_fn, options=opts) as writer:
        # Read all examples first, then write, to keep the read and write
        # passes over the record streams separate.
        selected = [select_features_from_tfexample(tf.train.Example.FromString(raw), feats)
                    for raw in record_iter]
        for example in selected:
            writer.write(example.SerializeToString())
            num_examples += 1
    seconds_per_file = time.time() - start_time
    num_examples_per_sec = num_examples / seconds_per_file
    mylog("Processed in {:.0f} sec: {}, Examples: {}, Examples/second: {:.0f}.".format(
        seconds_per_file, input_fn, num_examples, num_examples_per_sec))
def process_one_file(filenames):
    """Worker entry point: filter one (input_path, output_path) pair.

    Args:
        filenames: 2-tuple of (input tfrecord path, output tfrecord path).
    """
    input_file, output_file = filenames
    # Bug fix: the old expression was `None or FLAGS...`, and `None or x`
    # always evaluates to x, so the `None` was a misleading no-op.
    feats = FLAGS.feature_names.split(',')
    pick_features_from_file(input_file, output_file, feats)
def main(unused_argv):
    """Filter features from the selected shard range, skipping files whose
    'C'-prefixed output already exists in FLAGS.output_path."""
    print("tensorflow version: %s" % tf.__version__)
    all_frame_files = gfile.Glob(FLAGS.input_data_pattern)
    f_fullpath = all_frame_files[FLAGS.file_from : FLAGS.file_to]
    f_fns = [x.split('/')[-1] for x in f_fullpath]
    # Outputs are named by prefixing 'C'; map them back to input basenames
    # so already-processed inputs can be skipped.
    exist_files = gfile.Glob(FLAGS.output_path + "C*tfrecord")
    exist_fn = [x.split('/')[-1].replace('CAtr', 'Atr') for x in exist_files]
    yet_2_split = [x for x, y in zip(f_fullpath, f_fns) if y not in exist_fn]
    vf = [FLAGS.output_path + 'C' + x.split('/')[-1] for x in yet_2_split]
    mylog('number of files suggested: %d' % len(f_fullpath))
    mylog('number of files yet to process: %d' % len(yet_2_split))
    if FLAGS.parallel:
        from concurrent import futures
        # Bug fix: the executor was never shut down and the lazy map iterator
        # was never consumed, so worker exceptions were silently dropped.
        # Using a context manager and draining the iterator surfaces them.
        with futures.ProcessPoolExecutor(max_workers=2) as executor:
            list(executor.map(process_one_file, zip(yet_2_split, vf)))
    else:
        for filenames in zip(yet_2_split, vf):
            # mylog('processing: {}'.format(filenames))
            process_one_file(filenames)
    mylog("done")
if __name__ == "__main__":
app.run()
| [
"tensorflow.flags.DEFINE_string",
"tensorflow.python_io.tf_record_iterator",
"my_utils.mylog",
"tensorflow.flags.DEFINE_bool",
"my_utils._floatlist_feature",
"tensorflow.gfile.Glob",
"tensorflow.python_io.TFRecordWriter",
"numpy.array",
"tensorflow.train.Features",
"concurrent.futures.ProcessPoolE... | [((285, 393), 'tensorflow.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input_data_pattern"""', '"""input/GENERATED_DATA/f2train/*.tfrecord"""', '"""files to process"""'], {}), "('input_data_pattern',\n 'input/GENERATED_DATA/f2train/*.tfrecord', 'files to process')\n", (304, 393), False, 'from tensorflow import flags\n'), ((408, 479), 'tensorflow.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_path"""', '"""/tmp/"""', '"""Path for generated data."""'], {}), "('output_path', '/tmp/', 'Path for generated data.')\n", (427, 479), False, 'from tensorflow import flags\n'), ((481, 552), 'tensorflow.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""file_from"""', '(11)', '"""start from, eg., the 11th file"""'], {}), "('file_from', 11, 'start from, eg., the 11th file')\n", (501, 552), False, 'from tensorflow import flags\n'), ((555, 615), 'tensorflow.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""file_to"""', '(15)', '"""process 15 - 11 files"""'], {}), "('file_to', 15, 'process 15 - 11 files')\n", (575, 615), False, 'from tensorflow import flags\n'), ((620, 678), 'tensorflow.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""parallel"""', '(True)', '"""parallel processing"""'], {}), "('parallel', True, 'parallel processing')\n", (637, 678), False, 'from tensorflow import flags\n'), ((681, 813), 'tensorflow.flags.DEFINE_string', 'flags.DEFINE_string', (['"""feature_names"""', '"""video_id,labels,mean_rgb,mean_audio,num_frames,std_rgb,std_audio"""', '"""features to pick"""'], {}), "('feature_names',\n 'video_id,labels,mean_rgb,mean_audio,num_frames,std_rgb,std_audio',\n 'features to pick')\n", (700, 813), False, 'from tensorflow import flags\n'), ((936, 1007), 'tensorflow.python_io.TFRecordOptions', 'tf.python_io.TFRecordOptions', (['tf.python_io.TFRecordCompressionType.ZLIB'], {}), '(tf.python_io.TFRecordCompressionType.ZLIB)\n', (964, 1007), True, 'import tensorflow as tf\n'), ((1027, 1082), 
'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', (['filename'], {'options': 'opts'}), '(filename, options=opts)\n', (1058, 1082), True, 'import tensorflow as tf\n'), ((1119, 1155), 'tensorflow.train.Example.FromString', 'tf.train.Example.FromString', (['example'], {}), '(example)\n', (1146, 1155), True, 'import tensorflow as tf\n'), ((4292, 4303), 'time.time', 'time.time', ([], {}), '()\n', (4301, 4303), False, 'import time\n'), ((4315, 4386), 'tensorflow.python_io.TFRecordOptions', 'tf.python_io.TFRecordOptions', (['tf.python_io.TFRecordCompressionType.ZLIB'], {}), '(tf.python_io.TFRecordCompressionType.ZLIB)\n', (4343, 4386), True, 'import tensorflow as tf\n'), ((4401, 4456), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', (['input_fn'], {'options': 'opts'}), '(input_fn, options=opts)\n', (4432, 4456), True, 'import tensorflow as tf\n'), ((5497, 5533), 'tensorflow.gfile.Glob', 'gfile.Glob', (['FLAGS.input_data_pattern'], {}), '(FLAGS.input_data_pattern)\n', (5507, 5533), False, 'from tensorflow import gfile\n'), ((5664, 5708), 'tensorflow.gfile.Glob', 'gfile.Glob', (["(FLAGS.output_path + 'C*tfrecord')"], {}), "(FLAGS.output_path + 'C*tfrecord')\n", (5674, 5708), False, 'from tensorflow import gfile\n'), ((6379, 6392), 'my_utils.mylog', 'mylog', (['"""done"""'], {}), "('done')\n", (6384, 6392), False, 'from my_utils import mylog\n'), ((6426, 6435), 'tensorflow.app.run', 'app.run', ([], {}), '()\n', (6433, 6435), False, 'from tensorflow import app\n'), ((1306, 1324), 'numpy.array', 'np.array', (['mean_rgb'], {}), '(mean_rgb)\n', (1314, 1324), True, 'import numpy as np\n'), ((1327, 1347), 'numpy.array', 'np.array', (['median_rgb'], {}), '(median_rgb)\n', (1335, 1347), True, 'import numpy as np\n'), ((1510, 1530), 'numpy.array', 'np.array', (['mean_audio'], {}), '(mean_audio)\n', (1518, 1530), True, 'import numpy as np\n'), ((1533, 1555), 'numpy.array', 'np.array', (['median_audio'], {}), '(median_audio)\n', 
(1541, 1555), True, 'import numpy as np\n'), ((3714, 3751), 'my_utils._floatlist_feature', 'my_utils._floatlist_feature', (['mmdf_rgb'], {}), '(mmdf_rgb)\n', (3741, 3751), False, 'import my_utils\n'), ((4055, 4094), 'my_utils._floatlist_feature', 'my_utils._floatlist_feature', (['mmdf_audio'], {}), '(mmdf_audio)\n', (4082, 4094), False, 'import my_utils\n'), ((4492, 4541), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_fn'], {'options': 'opts'}), '(out_fn, options=opts)\n', (4519, 4541), True, 'import tensorflow as tf\n'), ((4965, 4976), 'time.time', 'time.time', ([], {}), '()\n', (4974, 4976), False, 'import time\n'), ((6137, 6179), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': '(2)'}), '(max_workers=2)\n', (6164, 6179), False, 'from concurrent import futures\n'), ((3644, 3662), 'numpy.array', 'np.array', (['mean_rgb'], {}), '(mean_rgb)\n', (3652, 3662), True, 'import numpy as np\n'), ((3665, 3685), 'numpy.array', 'np.array', (['median_rgb'], {}), '(median_rgb)\n', (3673, 3685), True, 'import numpy as np\n'), ((3979, 3999), 'numpy.array', 'np.array', (['mean_audio'], {}), '(mean_audio)\n', (3987, 3999), True, 'import numpy as np\n'), ((4002, 4024), 'numpy.array', 'np.array', (['median_audio'], {}), '(median_audio)\n', (4010, 4024), True, 'import numpy as np\n'), ((4153, 4185), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'fdict'}), '(feature=fdict)\n', (4170, 4185), True, 'import tensorflow as tf\n'), ((4687, 4727), 'tensorflow.train.Example.FromString', 'tf.train.Example.FromString', (['input_bytes'], {}), '(input_bytes)\n', (4714, 4727), True, 'import tensorflow as tf\n'), ((1913, 1950), 'my_utils._floatlist_feature', 'my_utils._floatlist_feature', (['skew_rgb'], {}), '(skew_rgb)\n', (1940, 1950), False, 'import my_utils\n'), ((1981, 2020), 'my_utils._floatlist_feature', 'my_utils._floatlist_feature', (['skew_audio'], {}), '(skew_audio)\n', (2008, 2020), False, 
'import my_utils\n')] |
import torch
import shutil
import os
import numpy as np
import matplotlib
# Select a non-interactive backend so figures can be saved without a display;
# must run before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import logging
import datetime
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """Sampler that draws dataset indices with class-balanced weights.

    Per-class weights follow the "effective number of samples" scheme:
    w_c = (1 - beta) / (1 - beta ** n_c), so rare classes are over-sampled
    relative to their frequency. Sampling is with replacement.

    Args:
        dataset: object exposing integer labels in ``dataset.targets``.
            The counting below indexes ``label_to_count[label]``, so labels
            are assumed to be contiguous ints 0..K-1 — TODO confirm for
            non-contiguous label sets.
        indices: optional subset of indices to sample from (default: all).
        num_samples: indices yielded per epoch (default: ``len(indices)``).
        beta: effective-number hyper-parameter; the default 0.9999 preserves
            the previously hard-coded value.
    """

    def __init__(self, dataset, indices=None, num_samples=None, beta=0.9999):
        # if indices is not provided, all elements in the dataset are used
        self.indices = list(range(len(dataset))) \
            if indices is None else indices
        # if num_samples is not provided, draw len(indices) samples per epoch
        self.num_samples = len(self.indices) \
            if num_samples is None else num_samples
        # per-class sample counts over the selected indices
        label_to_count = [0] * len(np.unique(dataset.targets))
        for idx in self.indices:
            label = self._get_label(dataset, idx)
            label_to_count[label] += 1
        # class-balanced weights: (1 - beta) / (1 - beta ** count)
        effective_num = 1.0 - np.power(beta, label_to_count)
        per_cls_weights = (1.0 - beta) / np.array(effective_num)
        # one weight per sample, taken from its class
        weights = [per_cls_weights[self._get_label(dataset, idx)]
                   for idx in self.indices]
        self.weights = torch.DoubleTensor(weights)

    def _get_label(self, dataset, idx):
        """Return the class label of sample `idx`."""
        return dataset.targets[idx]

    def __iter__(self):
        # Sample positions with replacement according to the class weights.
        return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())

    def __len__(self):
        return self.num_samples
def calc_confusion_mat(val_loader, model, args):
    """Evaluate `model` on `val_loader`, print per-class accuracy, and save
    a confusion-matrix plot under args.root_log/args.store_name."""
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for input, target in val_loader:
            # Move the batch to the requested GPU when one is configured.
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(input)
            _, pred = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
    cf = confusion_matrix(all_targets, all_preds).astype(float)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
    cls_acc = cls_hit / cls_cnt
    print('Class Accuracy : ')
    print(cls_acc)
    classes = [str(x) for x in args.cls_num_list]
    plot_confusion_matrix(all_targets, all_preds, classes)
    plt.savefig(os.path.join(args.root_log, args.store_name, 'confusion_matrix.png'))
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Render a confusion matrix with matplotlib.

    Args:
        y_true, y_pred: label sequences.
        classes: tick labels for both axes.
        normalize: if True, each row is normalized to fractions. Bug fix:
            the flag was previously accepted but never applied, even though
            the '.2f' cell format assumed it.
        title: plot title; a default is derived from `normalize`.
        cmap: matplotlib colormap.

    Returns:
        The matplotlib Axes containing the plot.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def prepare_folders(args):
    """Create the log/model output folders for this run if missing.

    Uses os.makedirs(..., exist_ok=True) so missing parent directories are
    created and concurrent runs cannot race between the existence check and
    the creation (the original os.mkdir failed on both counts).
    """
    folders_util = [args.root_log, args.root_model,
                    os.path.join(args.root_log, args.store_name),
                    os.path.join(args.root_model, args.store_name)]
    for folder in folders_util:
        if not os.path.exists(folder):
            print('creating folder ' + folder)
        os.makedirs(folder, exist_ok=True)
def prepare_folders_eval(args):
    """Create the evaluation output folders for this run if missing.

    Uses os.makedirs(..., exist_ok=True) to avoid the check-then-create race
    and the missing-parent failure of the original os.mkdir.
    """
    folders_util = [args.root_eval,
                    os.path.join(args.root_eval, args.store_name)]
    for folder in folders_util:
        if not os.path.exists(folder):
            print('creating folder ' + folder)
        os.makedirs(folder, exist_ok=True)
def save_checkpoint(logdir, state, is_best):
    """Serialize `state` to <logdir>/ckpt.pth.tar; additionally copy it to
    ckpt.best.pth.tar when `is_best` is set."""
    ckpt_path = '%s/ckpt.pth.tar' % logdir
    torch.save(state, ckpt_path)
    if is_best:
        best_path = ckpt_path.replace('pth.tar', 'best.pth.tar')
        shutil.copyfile(ckpt_path, best_path)
class AverageMeter(object):
    """Track the latest value and running average of a scalar metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. fmt=':.2f' -> 'loss 0.42 (0.40)'
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each requested k.

    Parameters
    ----------
    output : torch.Tensor
        Model scores of shape (batch, num_classes).
    target : torch.Tensor
        Ground-truth class indices of shape (batch,).
    topk : tuple of int
        The k values to report.

    Returns
    -------
    list of torch.Tensor
        One 1-element tensor per k, holding the accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): slices of `correct` are not
            # guaranteed contiguous and .view raises on non-contiguous
            # tensors in recent PyTorch versions.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
class Confidence_Diagram():
    """Accumulate per-bin / per-class confidence statistics and compute the
    Expected Calibration Error (ECE).

    Predictions are bucketed into 10 confidence bins of width 0.1; hits,
    counts and summed confidences are tracked separately for every class.
    """

    def __init__(self, n_classes):
        # self.logdir = logdir
        self.num_class = n_classes
        # Rows index the 10 confidence bins, columns index the classes.
        self.correct = np.zeros((10, n_classes))
        self.count = np.zeros((10, n_classes))
        self.confidence = np.zeros((10, n_classes))
        # self.file_name = file_name

    def aggregate_stats(self, prob, predicted, targets):
        """Accumulate statistics from one batch.

        `prob`, `predicted` and `targets` are tensors of matching shape
        holding the predicted confidence, predicted class and true class
        per element.
        """
        bins = prob // 0.1  # confidence bin index per element
        # Clamp bin index 10 (confidence hitting the upper edge) to bin 9.
        bins = bins.masked_fill(bins == 10., 9)
        for i in np.linspace(0, 9, 10):
            mask_bin = bins == i
            # BUGFIX: iterate over the actual number of classes; the original
            # hard-coded range(16) raised IndexError for n_classes < 16 and
            # silently ignored classes >= 16.
            for j in range(self.num_class):
                mask_class = targets == j
                self.correct[int(i), int(j)] += (predicted[mask_bin * mask_class] == targets[mask_bin * mask_class]).sum()
                self.count[int(i), int(j)] += (mask_bin * mask_class).sum()
                self.confidence[int(i), int(j)] += prob[mask_bin * mask_class].sum()

    def compute_ece(self):
        """Turn accumulated sums into per-class ECE and the mean ECE."""
        self.accuracy = self.correct / self.count
        self.confidence = self.confidence / self.count
        # Weight of each bin within its class (NaN-safe over empty bins).
        ratio = self.count / np.expand_dims(np.nansum(self.count, 0), 0)
        self.ece_cls = np.nansum(ratio * abs(self.accuracy - self.confidence), 0)
        self.ece = np.sum(self.ece_cls) / self.num_class

    def save(self, logdir):
        """Append the calibration summary rows to <logdir>/calibration.csv."""
        with open(os.path.join(logdir, 'calibration.csv'), mode='w') as calibration:
            writer = csv.writer(calibration, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(['mean accuracy per bin:'] + (np.nansum(self.accuracy, 1) / self.num_class).tolist())
            writer.writerow(['ece per class:'] + self.ece_cls.tolist())
            writer.writerow(['mean ece:', self.ece])

    def print(self):
        print(np.nansum(self.accuracy, 1) / self.num_class)
        print(self.ece_cls)
        print(self.ece)
def get_logger(logdir):
    """Return the shared 'imbalance' logger with a new timestamped file
    handler writing into `logdir`."""
    logger = logging.getLogger("imbalance")
    # Timestamp like 2020_01_31_12_00_00 (drop sub-second part).
    stamp = str(datetime.datetime.now()).split(".")[0].replace(" ", "_")
    stamp = stamp.replace(":", "_").replace("-", "_")
    file_path = os.path.join(logdir, "run_{}.log".format(stamp))
    handler = logging.FileHandler(file_path)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
"logging.getLogger",
"torch.max",
"numpy.array",
"torch.DoubleTensor",
"numpy.arange",
"os.path.exists",
"numpy.linspace",
"logging.FileHandler",
"os.mkdir",
"sklearn.metrics.confusion_matrix",
"matplotlib.use",
"torch.save",
"numpy.nansum",
"numpy.unique",
"torch.multinomial",
"numpy.... | [((74, 95), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (88, 95), False, 'import matplotlib\n'), ((2368, 2379), 'numpy.diag', 'np.diag', (['cf'], {}), '(cf)\n', (2375, 2379), True, 'import numpy as np\n'), ((3050, 3082), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3066, 3082), False, 'from sklearn.metrics import confusion_matrix\n'), ((3102, 3116), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3114, 3116), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4869), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (4852, 4869), False, 'import torch\n'), ((8094, 8124), 'logging.getLogger', 'logging.getLogger', (['"""imbalance"""'], {}), "('imbalance')\n", (8111, 8124), False, 'import logging\n'), ((8316, 8346), 'logging.FileHandler', 'logging.FileHandler', (['file_path'], {}), '(file_path)\n', (8335, 8346), False, 'import logging\n'), ((8363, 8421), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (8380, 8421), False, 'import logging\n'), ((1385, 1412), 'torch.DoubleTensor', 'torch.DoubleTensor', (['weights'], {}), '(weights)\n', (1403, 1412), False, 'import torch\n'), ((1814, 1829), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1827, 1829), False, 'import torch\n'), ((2589, 2657), 'os.path.join', 'os.path.join', (['args.root_log', 'args.store_name', '"""confusion_matrix.png"""'], {}), "(args.root_log, args.store_name, 'confusion_matrix.png')\n", (2601, 2657), False, 'import os\n'), ((4195, 4239), 'os.path.join', 'os.path.join', (['args.root_log', 'args.store_name'], {}), '(args.root_log, args.store_name)\n', (4207, 4239), False, 'import os\n'), ((4261, 4307), 'os.path.join', 'os.path.join', (['args.root_model', 'args.store_name'], {}), '(args.root_model, args.store_name)\n', (4273, 4307), False, 'import os\n'), 
((4551, 4596), 'os.path.join', 'os.path.join', (['args.root_eval', 'args.store_name'], {}), '(args.root_eval, args.store_name)\n', (4563, 4596), False, 'import os\n'), ((5565, 5580), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5578, 5580), False, 'import torch\n'), ((6127, 6152), 'numpy.zeros', 'np.zeros', (['(10, n_classes)'], {}), '((10, n_classes))\n', (6135, 6152), True, 'import numpy as np\n'), ((6173, 6198), 'numpy.zeros', 'np.zeros', (['(10, n_classes)'], {}), '((10, n_classes))\n', (6181, 6198), True, 'import numpy as np\n'), ((6224, 6249), 'numpy.zeros', 'np.zeros', (['(10, n_classes)'], {}), '((10, n_classes))\n', (6232, 6249), True, 'import numpy as np\n'), ((6652, 6673), 'numpy.linspace', 'np.linspace', (['(0)', '(9)', '(10)'], {}), '(0, 9, 10)\n', (6663, 6673), True, 'import numpy as np\n'), ((1122, 1152), 'numpy.power', 'np.power', (['beta', 'label_to_count'], {}), '(beta, label_to_count)\n', (1130, 1152), True, 'import numpy as np\n'), ((1194, 1217), 'numpy.array', 'np.array', (['effective_num'], {}), '(effective_num)\n', (1202, 1217), True, 'import numpy as np\n'), ((2137, 2157), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (2146, 2157), False, 'import torch\n'), ((2269, 2309), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_targets', 'all_preds'], {}), '(all_targets, all_preds)\n', (2285, 2309), False, 'from sklearn.metrics import confusion_matrix\n'), ((3263, 3285), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (3272, 3285), True, 'import numpy as np\n'), ((3305, 3327), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (3314, 3327), True, 'import numpy as np\n'), ((4356, 4378), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (4370, 4378), False, 'import os\n'), ((4439, 4455), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (4447, 4455), False, 'import os\n'), ((4645, 4667), 'os.path.exists', 'os.path.exists', (['folder'], {}), 
'(folder)\n', (4659, 4667), False, 'import os\n'), ((4728, 4744), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (4736, 4744), False, 'import os\n'), ((7391, 7411), 'numpy.sum', 'np.sum', (['self.ece_cls'], {}), '(self.ece_cls)\n', (7397, 7411), True, 'import numpy as np\n'), ((907, 933), 'numpy.unique', 'np.unique', (['dataset.targets'], {}), '(dataset.targets)\n', (916, 933), True, 'import numpy as np\n'), ((7266, 7290), 'numpy.nansum', 'np.nansum', (['self.count', '(0)'], {}), '(self.count, 0)\n', (7275, 7290), True, 'import numpy as np\n'), ((7481, 7520), 'os.path.join', 'os.path.join', (['logdir', '"""calibration.csv"""'], {}), "(logdir, 'calibration.csv')\n", (7493, 7520), False, 'import os\n'), ((7961, 7988), 'numpy.nansum', 'np.nansum', (['self.accuracy', '(1)'], {}), '(self.accuracy, 1)\n', (7970, 7988), True, 'import numpy as np\n'), ((1559, 1626), 'torch.multinomial', 'torch.multinomial', (['self.weights', 'self.num_samples'], {'replacement': '(True)'}), '(self.weights, self.num_samples, replacement=True)\n', (1576, 1626), False, 'import torch\n'), ((8138, 8161), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8159, 8161), False, 'import datetime\n'), ((7748, 7775), 'numpy.nansum', 'np.nansum', (['self.accuracy', '(1)'], {}), '(self.accuracy, 1)\n', (7757, 7775), True, 'import numpy as np\n')] |
"""
Created on Apr 2, 2014
@author: sstober
"""
import os
import glob
import logging
log = logging.getLogger(__name__)
import numpy as np
# TODO: replace imports
# from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
# from pylearn2.utils.timing import log_timing
# from pylearn2.utils import serial
# from pylearn2.format.target_format import OneHotFormatter
import librosa
# pip install librosa
# from sklearn import preprocessing
from deepthought3.util.fs_util import load
from deepthought3.datasets.rwanda2013rhythms.LabelConverter import LabelConverter
from deepthought3.util.timeseries_util import frame
class EEGDataset(DenseDesignMatrix):
    """
    Dense design matrix of EEG sequences for the Rwanda 2013 rhythm data.

    Sequences are loaded per subject from "dataset<suffix>.pklz" files,
    optionally filtered by stimulus/channel, sub-sampled, resampled,
    transformed to (normalized) spectrograms and/or split into frames.

    NOTE(review): DenseDesignMatrix, log_timing, OneHotFormatter and serial
    come from pylearn2, whose imports are commented out above (see the
    "replace imports" TODO) -- this class cannot run until they are
    restored or replaced.
    """

    class Like(object):
        """
        Helper class for lazy people to load an EEGDataset with similar parameters

        Note: This is quite a hack as __new__ should return instances of Like.
        Instead, it returns the loaded EEGDataset
        """

        def __new__(Like, base, **override):  # reference to copy initialize values from
            params = base.params.copy()
            log.debug("base params: {}".format(params))
            log.debug("override params: {}".format(override))
            for key, value in override.items():
                params[key] = value
            log.debug("merged params: {}".format(params))
            return EEGDataset(**params)

    def __init__(
        self,
        path,
        suffix="",  # required data file parameters
        subjects="all",  # optional selector (list) or 'all'
        start_sample=0,
        stop_sample=None,  # optional for selection of sub-sequences
        frame_size=-1,
        hop_size=-1,  # values > 0 will lead to windowing
        label_mode="tempo",
        name="",  # optional name
        n_fft=0,
        n_freq_bins=None,
        save_matrix_path=None,
        channels=None,
        resample=None,
        stimulus_id_filter=None,
        keep_metadata=False,
        spectrum_log_amplitude=False,
        spectrum_normalization_mode=None,
        include_phase=False,
        layout="tf",  # 2D axes layout tf=time x features or ft= features x time
    ):
        """
        Constructor

        `path` must contain one "Sub*" directory per subject with a
        "dataset<suffix>.pklz" file inside; see the parameter comments above.
        """
        # save params so that Like can re-instantiate with overrides
        self.params = locals().copy()
        del self.params["self"]
        # print self.params

        self.name = name
        self.include_phase = include_phase
        self.spectrum_normalization_mode = spectrum_normalization_mode
        self.spectrum_log_amplitude = spectrum_log_amplitude

        self.datafiles = []
        subject_paths = glob.glob(os.path.join(path, "Sub*"))
        for path in subject_paths:
            dataset_filename = os.path.join(path, "dataset" + suffix + ".pklz")
            if os.path.isfile(dataset_filename):
                log.debug("addding {}".format(dataset_filename))
                self.datafiles.append(dataset_filename)
            else:
                # log.warning: log.warn is a deprecated alias
                log.warning("file does not exists {}".format(dataset_filename))
        self.datafiles.sort()

        if subjects == "all":
            subjects = np.arange(0, len(self.datafiles))
        assert subjects is not None and len(subjects) > 0

        self.label_mode = label_mode
        self.label_converter = LabelConverter()

        if stimulus_id_filter is None:
            stimulus_id_filter = []
        self.stimulus_id_filter = stimulus_id_filter

        self.subject_partitions = []
        # used to keep track of original subjects
        self.sequence_partitions = []
        # used to keep track of original sequences
        self.trial_partitions = []
        # keeps track of original trials

        # metadata: [subject, trial_no, stimulus, channel, start, ]
        self.metadata = []

        sequences = []
        labels = []
        n_sequences = 0
        last_raw_label = -1
        for i in range(len(self.datafiles)):
            if i in subjects:
                with log_timing(log, "loading data from {}".format(self.datafiles[i])):
                    self.subject_partitions.append(n_sequences)
                    # save start of next subject
                    subject_sequences, subject_labels, channel_meta = load(
                        self.datafiles[i]
                    )
                subject_trial_no = -1
                for j in range(len(subject_sequences)):
                    l = subject_labels[j]
                    # get raw label
                    if l in stimulus_id_filter:
                        # log.debug('skipping stimulus {}'.format(l));
                        continue
                    c = channel_meta[j][0]
                    if (
                        channels is not None and not c in channels
                    ):  # apply optional channel filter
                        log.debug("skipping channel {}".format(c))
                        continue
                    self.sequence_partitions.append(n_sequences)
                    # save start of next sequence
                    if l != last_raw_label:  # if raw label changed...
                        self.trial_partitions.append(n_sequences)
                        # ...save start of next trial
                        subject_trial_no += 1
                        # increment subject_trial_no counter
                        last_raw_label = l
                    l = self.label_converter.get_label(l[0], self.label_mode)
                    # convert to label_mode view
                    s = subject_sequences[j]
                    s = s[start_sample:stop_sample]
                    # get sub-sequence in original space
                    # down-sample if requested
                    if resample is not None and resample[0] != resample[1]:
                        s = librosa.resample(s, resample[0], resample[1])
                    if n_fft is not None and n_fft > 0:  # Optionally:
                        # transform to spectogram
                        # BUGFIX: integer division -- under Python 3 the
                        # original "n_fft / 4" yields a float, which
                        # librosa.core.stft rejects as a hop_length.
                        hop_length = n_fft // 4
                        """
                        from http://theremin.ucsd.edu/~bmcfee/librosadoc/librosa.html
                        >>> # Get a power spectrogram from a waveform y
                        >>> S = np.abs(librosa.stft(y)) ** 2
                        >>> log_S = librosa.logamplitude(S)
                        """
                        # s = np.abs(librosa.core.stft(s,
                        #                 n_fft=n_fft,
                        #                 hop_length=hop_length)
                        #            )**2;
                        S = librosa.core.stft(s, n_fft=n_fft, hop_length=hop_length)
                        # mag = np.abs(S);  # magnitude spectrum
                        mag = np.abs(S) ** 2
                        # power spectrum
                        # phase = np.unwrap(np.angle(S));
                        phase = np.angle(S)
                        if n_freq_bins is not None:  # Optionally:
                            mag = mag[0:n_freq_bins, :]
                            # cut off high bands
                            phase = phase[0:n_freq_bins, :]
                        if self.spectrum_log_amplitude:
                            mag = librosa.logamplitude(mag)
                        s = mag
                        # for normalization
                        """
                        NOTE on normalization:
                        It depends on the structure of a neural network and (even more)
                        on the properties of data. There is no best normalization algorithm
                        because if there would be one, it would be used everywhere by default...

                        In theory, there is no requirement for the data to be normalized at all.
                        This is a purely practical thing because in practice convergence could
                        take forever if your input is spread out too much. The simplest would be
                        to just normalize it by scaling your data to (-1,1) (or (0,1) depending
                        on activation function), and in most cases it does work. If your
                        algorithm converges well, then this is your answer. If not, there are
                        too many possible problems and methods to outline here without knowing
                        the actual data.
                        """

                        ## normalize to mean 0, std 1
                        if self.spectrum_normalization_mode == "mean0_std1":
                            # s = preprocessing.scale(s, axis=0);
                            mean = np.mean(s)
                            std = np.std(s)
                            s = (s - mean) / std
                        ## normalize by linear transform to [0,1]
                        elif self.spectrum_normalization_mode == "linear_0_1":
                            s = s / np.max(s)
                        ## normalize by linear transform to [-1,1]
                        elif self.spectrum_normalization_mode == "linear_-1_1":
                            s = -1 + 2 * (s - np.min(s)) / (np.max(s) - np.min(s))
                        elif self.spectrum_normalization_mode is not None:
                            raise ValueError(
                                "unsupported spectrum normalization mode {}".format(
                                    self.spectrum_normalization_mode
                                )
                            )
                        # print s.mean(axis=0)
                        # print s.std(axis=0)
                        # include phase information if requested
                        if self.include_phase:
                            # normalize phase to [-1.1]
                            phase = phase / np.pi
                            s = np.vstack([s, phase])
                        # transpose to fit pylearn2 layout
                        s = np.transpose(s)
                    else:
                        # normalize to max amplitude 1
                        s = librosa.util.normalize(s)
                    s = np.asfarray(s, dtype="float32")
                    if frame_size > 0 and hop_size > 0:
                        s, l = self._split_sequence(s, l, frame_size, hop_size)
                    # print s.shape
                    n_sequences += len(s)
                    sequences.append(s)
                    labels.extend(l)
                    if keep_metadata:
                        self.metadata.append(
                            {
                                "subject": i,  # subject
                                "trial_no": subject_trial_no,  # trial_no
                                "stimulus": last_raw_label[0],  # stimulus
                                "channel": c,  # channel
                                "start": self.sequence_partitions[-1],  # start
                                "stop": n_sequences,  # stop
                            }
                        )
        # turn into numpy arrays
        sequences = np.vstack(sequences)
        print(sequences.shape)
        labels = np.hstack(labels)

        # one_hot_y = one_hot(labels)
        one_hot_formatter = OneHotFormatter(labels.max() + 1)
        one_hot_y = one_hot_formatter.format(labels)

        self.labels = labels
        # save for later

        if n_fft > 0:
            sequences = np.array([sequences])
            # re-arrange dimensions
            sequences = sequences.swapaxes(0, 1).swapaxes(1, 2).swapaxes(2, 3)
            if layout == "ft":
                sequences = sequences.swapaxes(1, 2)
            log.debug("final dataset shape: {} (b,0,1,c)".format(sequences.shape))
            print("final dataset shape: {} (b,0,1,c)".format(sequences.shape))
            super(EEGDataset, self).__init__(
                topo_view=sequences, y=one_hot_y, axes=["b", 0, 1, "c"]
            )
        else:
            # if layout == 'ft':
            #     sequences = sequences.swapaxes(1,2)
            super(EEGDataset, self).__init__(
                X=sequences, y=one_hot_y, axes=["b", 0, 1, "c"]
            )

        log.debug(
            'generated dataset "{}" with shape X={} y={} labels={} '.format(
                self.name, self.X.shape, self.y.shape, self.labels.shape
            )
        )

        if save_matrix_path is not None:
            matrix = DenseDesignMatrix(X=sequences, y=one_hot_y)
            with log_timing(
                log, "saving DenseDesignMatrix to {}".format(save_matrix_path)
            ):
                serial.save(save_matrix_path, matrix)

    def get_class_labels(self):
        """Return the class-label names for the active label_mode."""
        return self.label_converter.get_class_labels(self.label_mode)

    def _split_sequence(self, sequence, label, frame_length, hop_length):
        """Split `sequence` into frames; every frame inherits `label`.

        Returns (frames, labels) where `labels` is a single-element list
        containing the per-frame label list (callers extend() with it).
        """
        # log.debug('splitting sequence with len {} with label {} into {}-frames with hop={}'.format(
        # len(sequence), label, frame_length, hop_length));
        labels = []
        frames = frame(sequence, frame_length=frame_length, hop_length=hop_length)
        frame_labels = []
        for i in range(0, frames.shape[0]):
            frame_labels.append(label)
        labels.append(frame_labels)
        return frames, labels
| [
"logging.getLogger",
"numpy.hstack",
"deepthought3.util.timeseries_util.frame",
"numpy.array",
"deepthought3.util.fs_util.load",
"numpy.asfarray",
"librosa.resample",
"numpy.mean",
"numpy.max",
"librosa.logamplitude",
"numpy.vstack",
"numpy.min",
"deepthought3.datasets.rwanda2013rhythms.Labe... | [((94, 121), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'import logging\n'), ((3261, 3277), 'deepthought3.datasets.rwanda2013rhythms.LabelConverter.LabelConverter', 'LabelConverter', ([], {}), '()\n', (3275, 3277), False, 'from deepthought3.datasets.rwanda2013rhythms.LabelConverter import LabelConverter\n'), ((11877, 11897), 'numpy.vstack', 'np.vstack', (['sequences'], {}), '(sequences)\n', (11886, 11897), True, 'import numpy as np\n'), ((11947, 11964), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (11956, 11964), True, 'import numpy as np\n'), ((13857, 13922), 'deepthought3.util.timeseries_util.frame', 'frame', (['sequence'], {'frame_length': 'frame_length', 'hop_length': 'hop_length'}), '(sequence, frame_length=frame_length, hop_length=hop_length)\n', (13862, 13922), False, 'from deepthought3.util.timeseries_util import frame\n'), ((2608, 2634), 'os.path.join', 'os.path.join', (['path', '"""Sub*"""'], {}), "(path, 'Sub*')\n", (2620, 2634), False, 'import os\n'), ((2702, 2750), 'os.path.join', 'os.path.join', (['path', "('dataset' + suffix + '.pklz')"], {}), "(path, 'dataset' + suffix + '.pklz')\n", (2714, 2750), False, 'import os\n'), ((2766, 2798), 'os.path.isfile', 'os.path.isfile', (['dataset_filename'], {}), '(dataset_filename)\n', (2780, 2798), False, 'import os\n'), ((12221, 12242), 'numpy.array', 'np.array', (['[sequences]'], {}), '([sequences])\n', (12229, 12242), True, 'import numpy as np\n'), ((4199, 4222), 'deepthought3.util.fs_util.load', 'load', (['self.datafiles[i]'], {}), '(self.datafiles[i])\n', (4203, 4222), False, 'from deepthought3.util.fs_util import load\n'), ((10830, 10861), 'numpy.asfarray', 'np.asfarray', (['s'], {'dtype': '"""float32"""'}), "(s, dtype='float32')\n", (10841, 10861), True, 'import numpy as np\n'), ((5926, 5971), 'librosa.resample', 'librosa.resample', (['s', 'resample[0]', 'resample[1]'], {}), '(s, 
resample[0], resample[1])\n', (5942, 5971), False, 'import librosa\n'), ((6942, 6998), 'librosa.core.stft', 'librosa.core.stft', (['s'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(s, n_fft=n_fft, hop_length=hop_length)\n', (6959, 6998), False, 'import librosa\n'), ((7294, 7305), 'numpy.angle', 'np.angle', (['S'], {}), '(S)\n', (7302, 7305), True, 'import numpy as np\n'), ((10638, 10653), 'numpy.transpose', 'np.transpose', (['s'], {}), '(s)\n', (10650, 10653), True, 'import numpy as np\n'), ((10775, 10800), 'librosa.util.normalize', 'librosa.util.normalize', (['s'], {}), '(s)\n', (10797, 10800), False, 'import librosa\n'), ((7136, 7145), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (7142, 7145), True, 'import numpy as np\n'), ((7657, 7682), 'librosa.logamplitude', 'librosa.logamplitude', (['mag'], {}), '(mag)\n', (7677, 7682), False, 'import librosa\n'), ((9224, 9234), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (9231, 9234), True, 'import numpy as np\n'), ((9273, 9282), 'numpy.std', 'np.std', (['s'], {}), '(s)\n', (9279, 9282), True, 'import numpy as np\n'), ((10520, 10541), 'numpy.vstack', 'np.vstack', (['[s, phase]'], {}), '([s, phase])\n', (10529, 10541), True, 'import numpy as np\n'), ((9530, 9539), 'numpy.max', 'np.max', (['s'], {}), '(s)\n', (9536, 9539), True, 'import numpy as np\n'), ((9760, 9769), 'numpy.max', 'np.max', (['s'], {}), '(s)\n', (9766, 9769), True, 'import numpy as np\n'), ((9772, 9781), 'numpy.min', 'np.min', (['s'], {}), '(s)\n', (9778, 9781), True, 'import numpy as np\n'), ((9746, 9755), 'numpy.min', 'np.min', (['s'], {}), '(s)\n', (9752, 9755), True, 'import numpy as np\n')] |
from io import BytesIO
from typing import Any, Dict
import numpy as np
import torch
from PIL import Image
class JPEG_Compressor:
    """Callable that degrades a clean image tensor by round-tripping it
    through in-memory JPEG compression at a fixed quality."""

    def __init__(self, quality: int = 30) -> None:
        """
        Parameters
        ----------
        quality : int, optional
            Quality of compressed image, from [0, 100], by default 30
        """
        self.quality = quality

    def __call__(self, model, data: Dict[str, Any]) -> torch.ByteTensor:
        """
        Parameters
        ----------
        model : [type]
            Unused; kept for interface compatibility.
        data : Dict[str, Any]
            From dataloader; the clean image is read from data["img"].

        Returns
        -------
        torch.ByteTensor
            [C, H, W], the compressed tensor
        """
        # CHW tensor -> HWC numpy -> PIL image
        source = data["img"]
        as_pil = Image.fromarray(source.permute(1, 2, 0).numpy())
        # Round-trip through an in-memory JPEG at the configured quality.
        with BytesIO() as buffer:
            as_pil.save(buffer, format="JPEG", optimize=True, quality=self.quality)
            buffer.seek(0)
            degraded = Image.open(buffer)
            degraded.load()  # force decode before the buffer is closed
        # PIL image -> HWC numpy -> CHW tensor
        return torch.tensor(np.array(degraded)).permute(2, 0, 1)
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"io.BytesIO",
"numpy.array",
"torch.tensor"
] | [((831, 854), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img'], {}), '(np_img)\n', (846, 854), False, 'from PIL import Image\n'), ((1138, 1155), 'numpy.array', 'np.array', (['jpg_img'], {}), '(jpg_img)\n', (1146, 1155), True, 'import numpy as np\n'), ((896, 905), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (903, 905), False, 'from io import BytesIO\n'), ((1036, 1049), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1046, 1049), False, 'from PIL import Image\n'), ((1176, 1200), 'torch.tensor', 'torch.tensor', (['jpg_img_np'], {}), '(jpg_img_np)\n', (1188, 1200), False, 'import torch\n')] |
import xarray as _xr
import copy as _copy
import xgcm as _xgcm
import numpy as _np
import warnings as _warnings
import sys as _sys
from . import compute as _compute
from . import plot as _plot
from . import animate as _animate
from . import utils as _utils
from . subsample import _subsampleMethdos
from . compute import _computeMethdos
from . plot import _plotMethdos
from . animate import _animateMethdos
try:
import cartopy.crs as _ccrs
except ImportError:
pass
try:
from scipy import spatial as _spatial
except ImportError:
pass
try:
from dask.diagnostics import ProgressBar as _ProgressBar
except ImportError:
pass
# TODO: add more xgcm options. E.g., default boundary method.
# TODO: add attributes to new coordinates (XU, XV, ...)
# TODO: implement xgcm autogenerate in _set_coords, set_grid_coords, set_coords when released
# TODO: _create_grid will be useless with the future release of xgcm. We will pass dictionary in xgcm.Grid,
# and we can have the option of usining comodo attributes (currently cleaned up so switched off)
class OceanDataset:
"""
OceanDataset combines a xarray.Dataset with other objects used by OceanSpy (e.g., xgcm.Grid).
Additional objects are attached to the xarray.Dataset as global attributes.
OceanDataset adds, reads, and decodes dataset global attributes.
"""
    def __init__(self,
                 dataset):
        """
        Parameters
        ----------
        dataset: xarray.Dataset
            The multi-dimensional, in memory, array database.

        Raises
        ------
        TypeError
            If `dataset` is not a xarray.Dataset.

        References
        ----------
        http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html
        """

        # Check parameters
        if not isinstance(dataset, _xr.Dataset):
            raise TypeError("`dataset` must be a xarray.Dataset")

        # Initialize dataset
        # Copy so the caller's dataset object is not mutated.
        self._ds = dataset.copy()

        # Apply aliases
        # NOTE(review): rebinding `self` has no effect on the caller; this
        # relies on _apply_aliases returning/mutating the same instance --
        # confirm against its definition.
        self = self._apply_aliases()
def __copy__(self):
"""
Shallow copy
"""
return OceanDataset(dataset = self.dataset.copy())
def __deepcopy__(self):
"""
Deep copy
"""
return OceanDataset(dataset = self.dataset.copy(deep=True))
def __repr__(self):
main_info = ['<oceanspy.OceanDataset>']
main_info.append('\nMain attributes:')
if self.dataset is not None:
main_info.append(" .dataset: %s" % self.dataset.__repr__()[self.dataset.__repr__().find('<'):
self.dataset.__repr__().find('>')+1])
if self.grid is not None:
main_info.append(" .grid: %s" % self.grid.__repr__()[self.grid.__repr__().find('<'):
self.grid.__repr__().find('>')+1])
if self.projection is not None:
main_info.append(" .projection: %s" % self.projection.__repr__()[self.projection.__repr__().find('<'):
self.projection.__repr__().find('>')+1])
more_info = ['\n\nMore attributes:']
if self.name:
more_info.append(" .name: %s" % self.name)
if self.description:
more_info.append(" .description: %s" % self.description)
if self.parameters:
more_info.append(" .parameters: %s" % type(self.parameters))
if self.aliases:
more_info.append(" .aliases: %s" % type(self.aliases))
if self.grid_coords:
more_info.append(" .grid_coords: %s" % type(self.grid_coords))
if self.grid_periodic:
more_info.append(" .grid_periodic: %s" % type(self.grid_periodic))
info = '\n'.join(main_info)
if len(more_info)>1:
info = info+'\n'.join(more_info)
return info
# ==================================
# IMPORT (used by open_oceandataset)
# ==================================
    def _shift_averages(self):
        """
        Shift average variables to time_midp.
        Average variables must have attribute original_output = 'average'.

        Returns
        -------
        od: OceanDataset
        """

        for var in self._ds.data_vars:
            # Pop the attribute first: the drop/isel/rename chain below
            # would discard it, so it is re-attached afterwards.
            original_output = self._ds[var].attrs.pop('original_output', None)
            if original_output == 'average':
                # Drop the first snapshot time and move the variable onto
                # the midpoint time axis.
                self._ds[var] = self._ds[var].drop('time').isel(time=slice(1, None)).rename({'time': 'time_midp'})
            if original_output is not None:
                # Restore the attribute on the (possibly renamed) variable.
                self._ds[var].attrs['original_output'] = original_output
        return self
def _set_coords(self, fillna=False, coords1Dfrom2D=False, coords2Dfrom1D=False, coordsUVfromG=False):
"""
Set dataset coordinates: dimensions + 2D horizontal coordinates.
Parameters
----------
fillna: bool
If True, fill NaNs in 2D coordinates propagating backward and forward.
coords1Dfrom2D: bool
If True, compute 1D coordinates from 2D coordinates (means).
Use with rectilinear grid only!
coords2Dfrom1D: bool
If True, compute 2D coordinates from 1D coordinates (brodacast).
coordsUVfromCG: bool
If True, compute missing coords (U and V points) from G points.
"""
# Check parameters
if not isinstance(fillna, bool):
raise TypeError('`fillna` must be bool')
if not isinstance(coords1Dfrom2D, bool):
raise TypeError('`coords1Dfrom2D` must be bool')
if not isinstance(coordsUVfromG, bool):
raise TypeError('`coordsUVfromG` must be bool')
if coords1Dfrom2D and coords2Dfrom1D:
raise TypeError('`coords1Dfrom2D` and `coords2Dfrom1D` can not be both True')
# Copy because the dataset will change
self = _copy.copy(self)
# Coordinates are dimensions only
self._ds = self._ds.reset_coords()
# Fill nans (e.g., because of exch2)
if fillna:
coords = ['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV']
dims = ['X', 'Y', 'Xp1', 'Yp1', 'Xp1', 'Y', 'X', 'Yp1']
for i, (coord, dim) in enumerate(zip(coords, dims)):
if coord in self._ds.variables:
self._ds[coord] = self._ds[coord].ffill(dim).bfill(dim).persist()
# Get U and V by rolling G
if coordsUVfromG:
for i, (point_pos, dim2roll) in enumerate(zip(['U', 'V'], ['Yp1', 'Xp1'])):
for dim in ['Y', 'X']:
coord = self._ds[dim+'G'].rolling(**{dim2roll: 2}).mean().dropna(dim2roll)
coord = coord.drop(coord.coords).rename({dim2roll: dim2roll[0]})
self._ds[dim+point_pos] = coord
if 'units' in self._ds[dim+'G'].attrs:
self._ds[dim+point_pos].attrs['units'] = self._ds[dim+'G'].attrs['units']
# For cartesian grid we can use 1D coordinates
if coords1Dfrom2D:
# Take mean
self._ds['Y'] = self._ds['YC'].mean('X', keep_attrs=True).persist()
self._ds['X'] = self._ds['XC'].mean('Y', keep_attrs=True).persist()
self._ds['Yp1'] = self._ds['YG'].mean('Xp1', keep_attrs=True).persist()
self._ds['Xp1'] = self._ds['XG'].mean('Yp1', keep_attrs=True).persist()
# Get 2D coordinates broadcasting 1D
if coords2Dfrom1D:
# Broadcast
self._ds['YC'], self._ds['XC'] = _xr.broadcast(self._ds['Y'], self._ds['X'])
self._ds['YG'], self._ds['XG'] = _xr.broadcast(self._ds['Yp1'], self._ds['Xp1'])
self._ds['YU'], self._ds['XU'] = _xr.broadcast(self._ds['Y'], self._ds['Xp1'])
self._ds['YV'], self._ds['XV'] = _xr.broadcast(self._ds['Yp1'], self._ds['X'])
# Add units
for i, (D2, D1) in enumerate(zip(['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV'],
['Y', 'X', 'Yp1', 'Xp1', 'Y', 'Xp1', 'Yp1', 'X'])):
if 'units' in self._ds[D1].attrs: self._ds[D2].attrs['units'] = self._ds[D1].attrs['units']
# Set 2D coordinates
self._ds = self._ds.set_coords(['YC', 'XC',
'YG', 'XG',
'YU', 'XU',
'YV', 'XV'])
return self
def import_MITgcm_rect_nc(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with rectilinear grid and data stored in NetCDF format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(fillna=True, coords1Dfrom2D=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
def import_MITgcm_rect_bin(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with rectilinear grid and data stored in bin format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(coords2Dfrom1D=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
def import_MITgcm_curv_nc(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with curvilinear grid and data stored in NetCDF format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(coordsUVfromG=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
# ===========
# ATTRIBUTES
# ===========
# -------------------
# name
# -------------------
@property
def name(self):
"""
Name of the OceanDataset
"""
name = self._read_from_global_attr('name')
return name
@name.setter
def name(self, name):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('name'))
def set_name(self, name, overwrite=None):
"""
Set name of the OceanDataset.
Parameters
----------
name: str
Name of the OceanDataset
overwrite: bool or None
If None, raise error if name has been previously set.
If True, overwrite previous name.
If False, combine with previous name.
"""
# Check parameters
if not isinstance(name, str):
raise TypeError("`name` must be str")
# Set name
self = self._store_as_global_attr(name = 'name',
attr = name,
overwrite = overwrite)
return self
# -------------------
# description
# -------------------
@property
def description(self):
"""
Description of the OceanDataset
"""
description = self._read_from_global_attr('description')
return description
@description.setter
def description(self, description):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('description'))
def set_description(self, description, overwrite=None):
"""
Set description of the OceanDataset.
Parameters
----------
description: str
Desription of the OceanDataset
overwrite: bool or None
If None, raise error if description has been previously set.
If True, overwrite previous description.
If False, combine with previous description.
"""
# Check parameters
if not isinstance(description, str):
raise TypeError("`description` must be str")
# Set description
self = self._store_as_global_attr(name = 'description',
attr = description,
overwrite = overwrite)
return self
# -------------------
# dataset
# -------------------
@property
def dataset(self):
"""
xarray.Dataset: A multi-dimensional, in memory, array database.
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html
"""
# Show _ds with renamed variables.
dataset = self._ds.copy()
if self.aliases:
aliases = {ospy: custom for ospy, custom in self.aliases.items()
if ospy in self._ds
or ospy in self._ds.dims}
dataset = dataset.rename(aliases)
return dataset
@dataset.setter
def dataset(self, dataset):
"""
Inhibit setter
"""
raise AttributeError("Set a new dataset using `oceanspy.OceanDataset(dataset)`")
# -------------------
# aliases
# -------------------
@property
def aliases(self):
"""
A dictionary to connect custom variable names to OceanSpy reference names.
Keys are OceanSpy names, values are custom names: {'ospy_name': 'custom_name'}
"""
aliases = self._read_from_global_attr('aliases')
return aliases
@property
def _aliases_flipped(self):
"""
Flip aliases: Keys are values names, values are ospy_name names: {'ospy_name': 'custom_name'}
"""
if self.aliases:
aliases_flipped = {custom: ospy for ospy, custom in self.aliases.items()}
else: return self.aliases
return aliases_flipped
@aliases.setter
def aliases(self, aliases):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('aliases'))
def set_aliases(self, aliases, overwrite=None):
"""
Set aliases to connect custom variables names to OceanSpy reference names.
Parameters
----------
aliases: dict
Keys are OceanSpy names, values are custom names: {'ospy_name': 'custom_name'}
overwrite: bool or None
If None, raise error if aliases has been previously set.
If True, overwrite previous aliases.
If False, combine with previous aliases.
"""
# Check parameters
if not isinstance(aliases, dict):
raise TypeError("`aliases` must be dict")
# Set aliases
self = self._store_as_global_attr(name = 'aliases',
attr = aliases,
overwrite = overwrite)
# Apply aliases
self = self._apply_aliases()
return self
def _apply_aliases(self):
"""
Check if there are variables with custom name in _ds, and rename to ospy name
"""
if self._aliases_flipped:
aliases = {custom: ospy for custom, ospy in self._aliases_flipped.items()
if custom in self._ds.variables
or custom in self._ds.dims}
self._ds = self._ds.rename(aliases)
return self
# -------------------
# parameters
# -------------------
@property
def parameters(self):
"""
A dictionary defining model parameters that are used by OceanSpy.
{'parameter_name': parameter value}
If a parameter is not available, use default.
"""
from oceanspy import DEFAULT_PARAMETERS
parameters = self._read_from_global_attr('parameters')
if parameters is None:
parameters = DEFAULT_PARAMETERS
else:
parameters = {**DEFAULT_PARAMETERS, **parameters}
return parameters
@parameters.setter
def parameters(self, parameters):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('parameters'))
    def set_parameters(self, parameters):
        """
        Set model parameters used by OceanSpy (see oceanspy.DEFAULT_PARAMETERS)
        Parameters
        ----------
        parameters: dict
            {'parameter_name': parameter_value}
        Raises
        ------
        TypeError
            If `parameters` is not a dict, or a known key has the wrong type.
        ValueError
            If a known key has a value outside oceanspy.AVAILABLE_PARAMETERS.
        """
        from oceanspy import DEFAULT_PARAMETERS, AVAILABLE_PARAMETERS, TYPE_PARAMETERS
        # Check parameters
        if not isinstance(parameters, dict):
            raise TypeError("`parameters` must be dict")
        # Check parameters
        warn_params = []
        for key, value in parameters.items():
            # Unknown keys are collected and warned about below (not rejected)
            if key not in DEFAULT_PARAMETERS.keys(): warn_params = warn_params + [key]
            else:
                if not isinstance(value, TYPE_PARAMETERS[key]):
                    raise TypeError("Invalid [{}]. Check oceanspy.TYPE_PARAMETERS".format(key))
                if key in AVAILABLE_PARAMETERS.keys() and value not in AVAILABLE_PARAMETERS[key]:
                    raise ValueError("Requested [{}] not available. Check oceanspy.AVAILABLE_PARAMETERS".format(key))
        if len(warn_params)!=0:
            _warnings.warn(("{} are not OceanSpy parameters").format(warn_params), stacklevel=2)
        # Set parameters
        # overwrite=True: new parameters always replace previously stored ones
        self = self._store_as_global_attr(name = 'parameters',
                                          attr = parameters,
                                          overwrite = True)
        return self
# -------------------
# grid_coords
# -------------------
@property
def grid_coords(self):
"""
Grid coordinates used by xgcm.Grid
References
----------
https://xgcm.readthedocs.io/en/stable/grids.html#Grid-Metadata
"""
grid_coords = self._read_from_global_attr('grid_coords')
return grid_coords
@grid_coords.setter
def grid_coords(self, grid_coords):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('grid_coords'))
    def set_grid_coords(self, grid_coords, add_midp=False, overwrite=None):
        """
        Set grid coordinates used by xgcm.Grid (see oceanspy.OCEANSPY_AXES).
        Parameters
        ----------
        grid_coords: dict
            Grid coordinates used by xgcm.Grid.
            Keys are axis, and values are dict with key=dim and value=c_grid_axis_shift.
            Available c_grid_axis_shift are {0.5, None, -0.5}
        add_midp: bool
            If true, add inner dimension (mid points) to axis with outer dimension only.
            The new dimension will be called as the outer dimension + '_midp'
        overwrite: bool or None
            If None, raise error if grid_coords has been previously set.
            If True, overwrite previous grid_coords.
            If False, combine with previous grid_coords.
        References
        ----------
        https://xgcm.readthedocs.io/en/stable/grids.html#Grid-Metadata
        """
        # Check parameters
        if not isinstance(grid_coords, dict):
            raise TypeError("`grid_coords` must be dict")
        if not isinstance(add_midp, (bool, type(None))):
            raise TypeError("`add_midp` must be bool")
        # Check axes
        _check_oceanspy_axes(list(grid_coords.keys()))
        # Check shifts
        list_shift = [0.5, None, -0.5]
        for axis in grid_coords:
            if grid_coords[axis] is None: continue
            elif not isinstance(grid_coords[axis], dict):
                example_grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5}}
                raise TypeError("Invalid grid_coords. grid_coords example: {}".format(example_grid_coords))
            else:
                for dim in grid_coords[axis]:
                    if grid_coords[axis][dim] not in list_shift:
                        raise ValueError("[{}] not a valid c_grid_axis_shift."
                                         " Available options are {}".format(grid_coords[axis][dim],
                                                                            list_shift))
        # Set grid_coords
        self = self._store_as_global_attr(name = 'grid_coords',
                                          attr = grid_coords,
                                          overwrite = overwrite)
        if add_midp:
            grid_coords = {}
            for axis in self.grid_coords:
                # Only axes with exactly one (shifted) outer dimension need midpoints
                if len(self.grid_coords[axis])==1 and list(self.grid_coords[axis].values())[0] is not None:
                    # Deal with aliases
                    dim = list(self.grid_coords[axis].keys())[0]
                    if self._aliases_flipped and dim in self._aliases_flipped:
                        _dim = self._aliases_flipped[dim]
                        # Register an alias for the new midpoint dimension as well
                        self = self.set_aliases({_dim+'_midp': dim+'_midp'}, overwrite=False)
                    else: _dim = dim
                    # Midpoints are averages of outpoints
                    midp = (self._ds[_dim].values[:-1]+self._ds[_dim].diff(_dim)/2).rename({_dim: _dim+'_midp'})
                    self._ds[_dim+'_midp'] = _xr.DataArray(midp,
                                                           dims=(_dim+'_midp'))
                    if 'units' in self._ds[_dim].attrs:
                        self._ds[_dim+'_midp'].attrs['units'] = self._ds[_dim].attrs['units']
                    # Update grid_coords
                    grid_coords[axis] = {**self.grid_coords[axis], dim+'_midp': None}
            # Merge the new midpoint dims into the stored grid_coords
            self = self._store_as_global_attr(name = 'grid_coords',
                                              attr = grid_coords,
                                              overwrite = False)
        return self
# -------------------
# grid_periodic
# -------------------
@property
def grid_periodic(self):
"""
List of xgcm.Grid axes that are periodic
"""
grid_periodic = self._read_from_global_attr('grid_periodic')
if not grid_periodic:
grid_periodic = []
return grid_periodic
@grid_periodic.setter
def grid_periodic(self, grid_periodic):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('grid_periodic'))
def set_grid_periodic(self, grid_periodic, overwrite=None):
"""
Set grid axes that need to be treated as periodic by xgcm.Grid.
Axis that are not set periodic are non-periodic by default.
Note that this is opposite than xgcm, which sets periodic=True by default.
Parameters
----------
grid_periodic: list
List of periodic axes.
Available axis are {'X', 'Y', 'Z', 'time'}.
overwrite: bool or None
If None, raise error if grid_periodic has been previously set.
If True, overwrite previous grid_periodic.
If False, combine with previous grid_periodic.
"""
# Check parameters
if not isinstance(grid_periodic, list):
raise TypeError("`grid_periodic` must be list")
# Check axes
_check_oceanspy_axes(grid_periodic)
# Set grid_periodic
self = self._store_as_global_attr(name = 'grid_periodic',
attr = grid_periodic,
overwrite = overwrite)
return self
# -------------------
# grid
# -------------------
@property
def grid(self):
"""
xgcm.Grid: A collection of axis, which is a group of coordinates that all lie along the same physical dimension but describe different positions relative to a grid cell.
References
----------
https://xgcm.readthedocs.io/en/stable/api.html#Grid
"""
dataset = self.dataset.copy()
coords = self.grid_coords
periodic = self.grid_periodic
grid = _create_grid(dataset, coords, periodic)
return grid
    @property
    def _grid(self):
        """
        xgcm.Grid using aliases
        Built against the internal dataset `_ds`, with grid_coords dims
        translated through the (flipped) aliases so they match `_ds`.
        """
        aliases = self.aliases
        coords = self.grid_coords
        if aliases and coords:
            # Flip aliases
            aliases = {custom: ospy for ospy, custom in aliases.items()}
            # Rename coords
            # NOTE(review): `coords` is a fresh dict on each property access
            # (rebuilt from the stored string attribute), so mutating it here
            # does not leak into stored state. Popping/re-inserting while
            # iterating the inner dict keeps its size constant, but iterating
            # over list(coords[axis]) would be safer — confirm.
            for axis in coords:
                for dim in coords[axis]:
                    if dim in aliases:
                        coords[axis][aliases[dim]] = coords[axis].pop(dim)
        dataset = self._ds.copy()
        periodic = self.grid_periodic
        grid = _create_grid(dataset, coords, periodic)
        return grid
    @grid.setter
    def grid(self, grid):
        """
        Inhibit setter
        """
        # The grid is derived state: change grid_coords/grid_periodic instead.
        raise AttributeError("Set a new grid using .set_grid_coords and .set_periodic")
    @_grid.setter
    def _grid(self, grid):
        """
        Inhibit setter
        """
        # The grid is derived state: change grid_coords/grid_periodic instead.
        raise AttributeError("Set a new _grid using .set_grid_coords and .set_periodic")
# -------------------
# projection
# -------------------
@property
def projection(self):
"""
Projection of the OceanDataset.
"""
projection = self._read_from_global_attr('projection')
if projection:
if projection=='None':
projection = eval(projection)
else:
if 'cartopy' not in _sys.modules:
_warnings.warn(("cartopy is not available, so projection is None").format(da.name), stacklevel=2)
projection = None
else:
projection = eval('_ccrs.{}'.format(projection))
return projection
@projection.setter
def projection(self, projection):
"""
Inhibit setter
"""
raise AttributeError(_setter_error_message('projection'))
def set_projection(self, projection, **kwargs):
"""
Projection of the OceanDataset.
Parameters
----------
projection: str
cartopy projection of the OceanDataset
**kwargs:
Keyword arguments used by cartopy
E.g., central_longitude=0.0 for PlateCarree
References
----------
https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
"""
# Check parameters
if not isinstance(projection, (type(None), str)):
raise TypeError("`projection` must be str or None")
if projection is not None:
if not hasattr(_ccrs, projection):
raise TypeError("{} is not a cartopy projection".format(projection))
projection = '{}(**{})'.format(projection, kwargs)
else:
projection = str(projection)
# Set projection
self = self._store_as_global_attr(name = 'projection',
attr = projection,
overwrite = True)
return self
# ===========
# METHODS
# ===========
def create_tree(self, grid_pos = 'C'):
"""
Create a scipy.spatial.cKDTree for quick nearest-neighbor lookup.
Parameters
-----------
grid_pos: str
Grid position. Option: {'C', 'G', 'U', 'V'}
Reference grid: https://mitgcm.readthedocs.io/en/latest/algorithm/horiz-grid.html
Returns
-------
tree: scipy.spatial.cKDTree
Return tree that can be used to query a point.
References
----------
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html
"""
if 'scipy' not in _sys.modules:
raise ImportError("cKDTree can not be created because scipy is not installed")
# Check parameters
if not isinstance(grid_pos, str):
raise TypeError('`grid_pos` must be str')
grid_pos_list = ['C', 'G', 'U', 'V']
if grid_pos not in grid_pos_list:
raise ValueError(("`grid_pos` must be on of {}:"
"\nhttps://mitgcm.readthedocs.io/en/latest/algorithm/horiz-grid.html").format(grid_pos_list))
# Convert if is not cartesian
Y = self._ds['Y'+grid_pos]
X = self._ds['X'+grid_pos]
R = self.parameters['rSphere']
if R: x, y, z = _utils.spherical2cartesian(Y = Y, X = X, R = R)
else: x = X; y = Y; z = _xr.zeros_like(Y)
# Stack
x_stack = x.stack(points=x.dims).values
y_stack = y.stack(points=y.dims).values
z_stack = z.stack(points=z.dims).values
# Construct KD-tree
tree = _spatial.cKDTree(_np.column_stack((x_stack, y_stack, z_stack)))
return tree
    def merge_into_oceandataset(self, obj, overwrite=False):
        """
        Merge a dataset or DataArray into the oceandataset
        Parameters
        ----------
        obj: xarray.DataArray or xarray.Dataset
            xarray object to merge
        overwrite: bool or None
            If True, overwrite existing DataArrays with same name.
            If False, use xarray.merge
        Returns
        -------
        od: OceanDataset
            A new OceanDataset wrapping the merged dataset.
        """
        # Check and make dataset
        if not isinstance(obj, (_xr.DataArray, _xr.Dataset)):
            raise TypeError('`obj` must be xarray.DataArray or xarray.Dataset')
        # Drop the incoming coordinates: the oceandataset's own coords win
        obj = obj.drop(obj.coords)
        if isinstance(obj, _xr.DataArray):
            if obj.name is None:
                raise ValueError("xarray.DataArray doesn't have a name. Set it using da.rename()")
            else:
                obj = obj.to_dataset()
        if not isinstance(overwrite, bool):
            raise TypeError("`overwrite` must be bool")
        # Merge
        dataset = self.dataset
        # Variables that already exist in the oceandataset
        var2drop = [var for var in obj.variables if var in dataset]
        if overwrite is False:
            # Keep existing variables; warn about the ones that were skipped
            obj = obj.drop(var2drop)
            if len(var2drop)!=0: _warnings.warn('{} will not be merged.'
                                               '\nSet `overwrite=True` if you wish otherwise.'.format(var2drop), stacklevel=2)
        else:
            if len(var2drop)!=0: _warnings.warn('{} will be overwritten.'.format(var2drop), stacklevel=2)
        for var in obj.data_vars:
            dataset[var] = obj[var]
        return OceanDataset(dataset)
def set_coords(self, fillna=False, coords1Dfrom2D=False, coords2Dfrom1D=False, coordsUVfromG=False):
"""
Set dataset coordinates: dimensions + 2D horizontal coordinates.
Parameters
----------
fillna: bool
If True, fill NaNs in 2D coordinates propagating backward and forward.
coords1Dfrom2D: bool
If True, compute 1D coordinates from 2D coordinates (means).
Use with rectilinear grid only!
coords2Dfrom1D: bool
If True, compute 2D coordinates from 1D coordinates (brodacast).
coordsUVfromCG: bool
If True, compute missing coords (U and V points) from G points.
"""
# Check parameters
if not isinstance(fillna, bool):
raise TypeError('`fillna` must be bool')
if not isinstance(coords1Dfrom2D, bool):
raise TypeError('`coords1Dfrom2D` must be bool')
if not isinstance(coordsUVfromG, bool):
raise TypeError('`coordsUVfromG` must be bool')
if coords1Dfrom2D and coords2Dfrom1D:
raise TypeError('`coords1Dfrom2D` and `coords2Dfrom1D` can not be both True')
# Copy because the dataset will change
self = _copy.copy(self)
# Coordinates are dimensions only
self._ds = self._ds.reset_coords()
# Fill nans (e.g., because of exch2)
if fillna:
coords = ['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV']
dims = ['X', 'Y', 'Xp1', 'Yp1', 'Xp1', 'Y', 'X', 'Yp1']
for i, (coord, dim) in enumerate(zip(coords, dims)):
if coord in self._ds.variables:
self._ds[coord] = self._ds[coord].ffill(dim).bfill(dim).persist()
# Get U and V by rolling G
if coordsUVfromG:
for i, (point_pos, dim2roll) in enumerate(zip(['U', 'V'], ['Yp1', 'Xp1'])):
for dim in ['Y', 'X']:
coord = self._ds[dim+'G'].rolling(**{dim2roll: 2}).mean().dropna(dim2roll)
coord = coord.drop(coord.coords).rename({dim2roll: dim2roll[0]})
self._ds[dim+point_pos] = coord
if 'units' in self._ds[dim+'G'].attrs:
self._ds[dim+point_pos].attrs['units'] = self._ds[dim+'G'].attrs['units']
# For cartesian grid we can use 1D coordinates
if coords1Dfrom2D:
# Take mean
self._ds['Y'] = self._ds['YC'].mean('X', keep_attrs=True).persist()
self._ds['X'] = self._ds['XC'].mean('Y', keep_attrs=True).persist()
self._ds['Yp1'] = self._ds['YG'].mean('Xp1', keep_attrs=True).persist()
self._ds['Xp1'] = self._ds['XG'].mean('Yp1', keep_attrs=True).persist()
# Get 2D coordinates broadcasting 1D
if coords2Dfrom1D:
# Broadcast
self._ds['YC'], self._ds['XC'] = _xr.broadcast(self._ds['Y'], self._ds['X'])
self._ds['YG'], self._ds['XG'] = _xr.broadcast(self._ds['Yp1'], self._ds['Xp1'])
self._ds['YU'], self._ds['XU'] = _xr.broadcast(self._ds['Y'], self._ds['Xp1'])
self._ds['YV'], self._ds['XV'] = _xr.broadcast(self._ds['Yp1'], self._ds['X'])
# Add units
for i, (D2, D1) in enumerate(zip(['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV'],
['Y', 'X', 'Yp1', 'Xp1', 'Y', 'Xp1', 'Yp1', 'X'])):
if 'units' in self._ds[D1].attrs: self._ds[D2].attrs['units'] = self._ds[D1].attrs['units']
# Set 2D coordinates
self._ds = self._ds.set_coords(['YC', 'XC',
'YG', 'XG',
'YU', 'XU',
'YV', 'XV'])
return self
    def to_netcdf(self, path, **kwargs):
        """
        Write dataset contents to a netCDF file.
        Parameters
        ----------
        path: str
            Path to which to save this dataset.
        **kwargs:
            Keyword arguments for xarray.DataSet.to_netcdf()
        References
        ----------
        http://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html
        """
        # Check parameters
        if not isinstance(path, str):
            raise TypeError('`path` must be str')
        # to_netcdf doesn't like coordinates attribute
        dataset = self.dataset
        for var in dataset.variables:
            attrs = dataset[var].attrs
            # Stash the attribute under a private name so it survives the write
            coordinates = attrs.pop('coordinates', None)
            dataset[var].attrs = attrs
            if coordinates is not None: dataset[var].attrs['_coordinates'] = coordinates
        compute = kwargs.pop('compute', None)
        print('Writing dataset to [{}].'.format(path))
        # Default (compute None/False): write lazily and show a progress bar
        if compute is None or compute is False:
            delayed_obj = dataset.to_netcdf(path, compute=False, **kwargs)
            with _ProgressBar():
                # Triggers the deferred write; the return value is not used
                results = delayed_obj.compute()
        else:
            dataset.to_netcdf(path, compute=compute, **kwargs)
    def _store_as_global_attr(self, name, attr, overwrite):
        """
        Store an OceanSpy attribute as dataset global attribute.
        Parameters
        ----------
        name: str
            Name of the attribute. Attribute will be stored as 'OceanSpy_'+name.
        attr: str or dict
            Attribute to store
        overwrite: bool or None
            If None, raise error if attr has been previously set.
            If True, overwrite previous attributes.
            If False, combine with previous attributes.
        Returns
        -------
        od: OceanDataset
            Shallow copy of self with the attribute stored (as str) in _ds.attrs.
        """
        # Check parameters
        if not isinstance(name, str):
            raise TypeError("`name` must be str")
        if not isinstance(attr, (str, dict, list)):
            raise TypeError("`attr` must be str, dict, or list")
        if not isinstance(overwrite, (bool, type(None))):
            raise TypeError("`overwrite` must be bool or None")
        # Attribute name
        name = 'OceanSpy_'+name
        if overwrite is None and name in self._ds.attrs:
            raise ValueError("[{}] has been previously set: "
                             "`overwrite` must be bool".format(name.replace("OceanSpy_", "")))
        # Copy because attributes are added to _ds
        self = _copy.copy(self)
        # Store
        if not overwrite and name in self._ds.attrs:
            prev_attr = self._ds.attrs[name]
            # Previous value is stored as a string: detect dict/list by their
            # delimiters and merge; plain strings are concatenated with '_'.
            if prev_attr[0] == "{" and prev_attr[-1] == "}":
                attr = {**eval(prev_attr), **attr}
            elif prev_attr[0] == "[" and prev_attr[-1] == "]":
                attr = list(set(eval(prev_attr) + attr))
            else:
                attr = prev_attr + '_' + attr
        self._ds.attrs[name] = str(attr)
        return self
    def _read_from_global_attr(self, name):
        """
        Read an OceanSpy attribute stored as dataset global attribute.
        Parameters
        ----------
        name: str
            Name of the attribute. Attribute will be read from 'OceanSpy_'+name.
        Returns
        -------
        attr: str or dict or None
            Attribute that has been read (None if never stored)
        """
        if not isinstance(name, str):
            raise TypeError("`name` must be str")
        # Attribute name
        name = 'OceanSpy_'+name
        # Check if attributes exists
        if name not in self._ds.attrs:
            return None
        # Read attribute
        attr = self._ds.attrs[name]
        # Values are stored as str by _store_as_global_attr; dicts/lists are
        # recovered with eval (trusted input: only this class writes them).
        if (attr[0]=='{' and attr[-1]=='}') or (attr[0]=='[' and attr[-1]==']'):
            attr = eval(attr)
        return attr
# ===========
# SHORTCUTS
# ===========
    @property
    def subsample(self):
        """
        Access subsampling functions.
        Examples
        --------
        >>> od = ospy.open_oceandataset.get_started()
        >>> od.subsample.cutout(ZRange=[0, -100], varList=['Temp'])
        """
        # A new accessor object is constructed on every attribute access
        return _subsampleMethdos(self)
    @property
    def compute(self):
        """
        Access computing functions, and merge the computed dataset into the oceandataset.
        Set overwrite=True to overwrite DataArrays already existing in the oceandataset.
        Examples
        --------
        >>> od = ospy.open_oceandataset.get_started()
        >>> od.compute.gradient(varNameList='Temp', overwrite=True)
        """
        # A new accessor object is constructed on every attribute access
        return _computeMethdos(self)
    @property
    def plot(self):
        """
        Access plotting functions.
        Examples
        --------
        >>> od = ospy.open_oceandataset.get_started()
        >>> od.plot.TS_diagram(meanAxes=['time', 'Z'], cutout_kwargs={'ZRange': [0, -100]})
        """
        # A new accessor object is constructed on every attribute access
        return _plotMethdos(self)
    @property
    def animate(self):
        """
        Access animating functions.
        Examples
        --------
        >>> od = ospy.open_oceandataset.get_started()
        >>> od.animate.TS_diagram(meanAxes=['time', 'Z'], cutout_kwargs={'ZRange': [0, -100]})
        """
        # A new accessor object is constructed on every attribute access
        return _animateMethdos(self)
# ERROR HANDLING
def _check_oceanspy_axes(axes2check):
    """
    Raise ValueError if any entry of axes2check is not a valid OceanSpy axis.
    """
    from oceanspy import OCEANSPY_AXES
    invalid = [axis for axis in axes2check if axis not in OCEANSPY_AXES]
    if invalid:
        raise ValueError(_wrong_axes_error_message(axes2check))
def _wrong_axes_error_message(axes2check):
    """
    Compose the error message reported for invalid axes.
    """
    from oceanspy import OCEANSPY_AXES
    template = ("{} contains non-valid axes."
                " OceanSpy axes are: {}")
    return template.format(axes2check, OCEANSPY_AXES)
def _setter_error_message(attribute_name):
"""
Use the same error message for attributes
"""
return "Set new `{}` using .set_{}".format(attribute_name, attribute_name)
# USEFUL FUNCTIONS
def _create_grid(dataset, coords, periodic):
    """
    Build a xgcm.Grid from a dataset, axis->dim mappings, and periodic axes.
    Dims listed in coords but absent from the dataset trigger a warning and
    are skipped. Returns None when no axis could be constructed.
    """
    # Clean up comodo (currently force user to specify axis using set_coords).
    for dim in dataset.dims:
        for attr in ('axis', 'c_grid_axis_shift'):
            dataset[dim].attrs.pop(attr, None)
    # Re-attach comodo attributes.
    # We won't need this step in the future because future versions of xgcm will allow to pass coords in Grid.
    missing = []
    if coords:
        for axis in coords:
            for dim in coords[axis]:
                if dim not in dataset.dims:
                    missing.append(dim)
                    continue
                dataset[dim].attrs['axis'] = axis
                shift = coords[axis][dim]
                if shift:
                    dataset[dim].attrs['c_grid_axis_shift'] = str(shift)
    if len(missing) != 0:
        _warnings.warn('{} are not dimensions of the dataset and will be omitted'.format(missing), stacklevel=2)
    # Create grid
    grid = _xgcm.Grid(dataset, periodic=periodic)
    return grid if len(grid.axes) != 0 else None
| [
"oceanspy.DEFAULT_PARAMETERS.keys",
"xarray.broadcast",
"numpy.column_stack",
"oceanspy.AVAILABLE_PARAMETERS.keys",
"xarray.zeros_like",
"xgcm.Grid",
"xarray.DataArray",
"dask.diagnostics.ProgressBar",
"copy.copy"
] | [((45381, 45419), 'xgcm.Grid', '_xgcm.Grid', (['dataset'], {'periodic': 'periodic'}), '(dataset, periodic=periodic)\n', (45391, 45419), True, 'import xgcm as _xgcm\n'), ((6083, 6099), 'copy.copy', '_copy.copy', (['self'], {}), '(self)\n', (6093, 6099), True, 'import copy as _copy\n'), ((35361, 35377), 'copy.copy', '_copy.copy', (['self'], {}), '(self)\n', (35371, 35377), True, 'import copy as _copy\n'), ((40658, 40674), 'copy.copy', '_copy.copy', (['self'], {}), '(self)\n', (40668, 40674), True, 'import copy as _copy\n'), ((7813, 7856), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Y']", "self._ds['X']"], {}), "(self._ds['Y'], self._ds['X'])\n", (7826, 7856), True, 'import xarray as _xr\n'), ((7904, 7951), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['Xp1']"], {}), "(self._ds['Yp1'], self._ds['Xp1'])\n", (7917, 7951), True, 'import xarray as _xr\n'), ((7997, 8042), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Y']", "self._ds['Xp1']"], {}), "(self._ds['Y'], self._ds['Xp1'])\n", (8010, 8042), True, 'import xarray as _xr\n'), ((8090, 8135), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['X']"], {}), "(self._ds['Yp1'], self._ds['X'])\n", (8103, 8135), True, 'import xarray as _xr\n'), ((32114, 32131), 'xarray.zeros_like', '_xr.zeros_like', (['Y'], {}), '(Y)\n', (32128, 32131), True, 'import xarray as _xr\n'), ((32362, 32407), 'numpy.column_stack', '_np.column_stack', (['(x_stack, y_stack, z_stack)'], {}), '((x_stack, y_stack, z_stack))\n', (32378, 32407), True, 'import numpy as _np\n'), ((37116, 37159), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Y']", "self._ds['X']"], {}), "(self._ds['Y'], self._ds['X'])\n", (37129, 37159), True, 'import xarray as _xr\n'), ((37207, 37254), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['Xp1']"], {}), "(self._ds['Yp1'], self._ds['Xp1'])\n", (37220, 37254), True, 'import xarray as _xr\n'), ((37300, 37345), 'xarray.broadcast', '_xr.broadcast', 
(["self._ds['Y']", "self._ds['Xp1']"], {}), "(self._ds['Y'], self._ds['Xp1'])\n", (37313, 37345), True, 'import xarray as _xr\n'), ((37393, 37438), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['X']"], {}), "(self._ds['Yp1'], self._ds['X'])\n", (37406, 37438), True, 'import xarray as _xr\n'), ((19684, 19709), 'oceanspy.DEFAULT_PARAMETERS.keys', 'DEFAULT_PARAMETERS.keys', ([], {}), '()\n', (19707, 19709), False, 'from oceanspy import DEFAULT_PARAMETERS, AVAILABLE_PARAMETERS, TYPE_PARAMETERS\n'), ((39217, 39231), 'dask.diagnostics.ProgressBar', '_ProgressBar', ([], {}), '()\n', (39229, 39231), True, 'from dask.diagnostics import ProgressBar as _ProgressBar\n'), ((24377, 24417), 'xarray.DataArray', '_xr.DataArray', (['midp'], {'dims': "(_dim + '_midp')"}), "(midp, dims=_dim + '_midp')\n", (24390, 24417), True, 'import xarray as _xr\n'), ((19949, 19976), 'oceanspy.AVAILABLE_PARAMETERS.keys', 'AVAILABLE_PARAMETERS.keys', ([], {}), '()\n', (19974, 19976), False, 'from oceanspy import DEFAULT_PARAMETERS, AVAILABLE_PARAMETERS, TYPE_PARAMETERS\n')] |
import pandas as pd
import numpy as np
import aif360.datasets
# generators for biased data by models
def feature_bias(rho_a, rho_z, N, d, mu):
    '''
    Bias that occurs when different protected attributes have different means (mu)
    Parameters
    -----------
    rho_a : float
        p(a = 1)
    rho_z : float
        p(z = 1)
    N : int
        number of samples
    mu : matrix like, 2xD
        mu[0] is the mean for a=0, mu[0][0] is the mean for a=0, z=0,
        D = len(mu[0][0]) = number of features
    d : int
        total number of features
    Returns
    --------
    df : DataFrame
        a data frame with N rows and columns: a,y,z, x0:xD
    '''
    # Identity covariance shared by all groups
    cov = np.eye(d)
    # Sample group membership and target; the label equals the target
    a = np.random.choice([0, 1], p=[1 - rho_a, rho_a], size=N)
    z = np.random.choice([0, 1], p=[1 - rho_z, rho_z], size=N)
    y = z
    # Features drawn around the group/target specific mean
    features = np.asarray([np.random.multivariate_normal(mu[a_i][z_i], cov)
                            for a_i, z_i in zip(a, z)])
    table = np.concatenate([np.asarray([a, z, y]).T, features], axis=1)
    columns = ['a', 'z', 'y'] + ['x' + str(i) for i in range(d)]
    return pd.DataFrame(data=table, columns=columns)
def subspace_bias(rho_a, rho_z, N, d, d_shared, mu):
    '''
    Bias that occurs when different features are informative for different protected classes
    (d not shared)
    Parameters
    -----------
    rho_a : float
        p(a = 1)
    rho_z : float
        p(z=1)
    N : int
        number of samples
    mu : matrix like, 2xD
        mu[0] is the mean for z=0, D = len(mu[0]) = number of features
    d : int
        total number of features
    d_shared : int
        number of shared features
    Returns
    --------
    df : DataFrame
        a data frame with N rows and columns: a,y,z, x0:xD
    '''
    d_noise = d - d_shared      # noise dimensions appended per row
    d_total = d + d_noise       # final number of feature columns
    # Sample group membership and target; the label equals the target
    a = np.random.choice([0, 1], p=[1 - rho_a, rho_a], size=N)
    z = np.random.choice([0, 1], p=[1 - rho_z, rho_z], size=N)
    y = z
    # Informative features depend on z; noise is shared structure
    cov = np.eye(d)
    informative = [np.random.multivariate_normal(mu[z_i], cov) for z_i in z]
    noise = np.random.multivariate_normal([0] * d_noise, np.eye(d_noise), N)
    # Group 0 sees (informative, noise); group 1 sees a permuted layout.
    # NOTE(review): for a=1 the slice x[d_shared-1:d] yields d_noise+1 entries,
    # so rows only line up when d_shared == d_noise + 1 (i.e. odd d) — confirm.
    def _arrange(a_i, x_i, n_i):
        if a_i == 0:
            return np.concatenate((x_i, n_i))
        return np.concatenate((n_i, x_i[d_shared - 1:d], x_i[:d_noise]))
    features = np.asarray([_arrange(a_i, x_i, n_i)
                            for a_i, x_i, n_i in zip(a, informative, noise)])
    table = np.concatenate([np.asarray([a, z, y]).T, features], axis=1)
    columns = ['a', 'z', 'y'] + ['x' + str(i) for i in range(d_total)]
    return pd.DataFrame(data=table, columns=columns)
def label_bias(rho_a, rho_z, beta, N, d, mu):
    '''
    Bias where the labeling errors are correlated with the protected attribute
    Parameters
    -----------
    rho_a : float
        p(a = 1)
    rho_z : float
        p(z=1)
    beta : sequence of two floats
        per-group label error rates, indexed by the protected attribute:
        p(y != z | a) = beta[a]
        (the docstring previously said "float", but the code indexes beta[a])
    N : int
        number of samples
    d : int
        number of features
    mu : matrix like, 2xD
        mu[0] is the mean for z=0, D = len(mu[0]) = number of features
        (must equal d for the identity covariance below)
    Returns
    --------
    df : DataFrame
        a data frame with N rows and columns: a,y,z, x0:xD
    '''
    p_a = [1-rho_a, rho_a]
    p_z = [1-rho_z, rho_z]
    cov = np.eye(d)
    # Sample protected attribute and true class
    a = np.random.choice([0, 1], p=p_a, size=N)
    z = np.random.choice([0, 1], p=p_z, size=N)
    # Features depend only on the true class z
    x = [np.random.multivariate_normal(mu[z_i], cov) for z_i in z]
    # Observed label y flips z with group-specific probability beta[a]
    y = [np.random.choice([zi, 1-zi], p=[1-beta[ai], beta[ai]]) for ai, zi in zip(a, z)]
    # convert to numpy arrays and reshape
    labels_protected = np.asarray([a, z, y]).T
    x = np.asarray(x)
    # concatenate the data and p
    data = np.concatenate([labels_protected, x], axis=1)
    labels = ['a', 'z', 'y']
    # use d directly (len(mu[0]) must equal d), consistent with feature_bias
    labels.extend(['x'+str(i) for i in range(d)])
    df = pd.DataFrame(data=data, columns=labels)
    return df
def convert_to_dataset(df, label_names, protected_attribute_names):
    '''
    Converts a dataframe created by one of the above functions into a dataset usable in IBM 360 package
    Parameters
    -----------
    df : pandas dataframe
    label_names : optional, a list of strings describing each label
    protected_attribute_names : optional, a list of strings describing features corresponding to
                    protected attributes
    Returns
    --------
    aif360.datasets.BinaryLabelDataset
    '''
    # Positional args are favorable_label=1.0 and unfavorable_label=0.0,
    # matching the {0, 1} labels produced by the generators above.
    return aif360.datasets.BinaryLabelDataset(1.0, 0.0, df = df, label_names = label_names, protected_attribute_names = protected_attribute_names)
| [
"numpy.eye",
"numpy.random.choice",
"numpy.random.multivariate_normal",
"numpy.asarray",
"numpy.concatenate",
"pandas.DataFrame"
] | [((825, 834), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (831, 834), True, 'import numpy as np\n'), ((844, 883), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'p_a', 'size': 'N'}), '([0, 1], p=p_a, size=N)\n', (860, 883), True, 'import numpy as np\n'), ((891, 930), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'p_z', 'size': 'N'}), '([0, 1], p=p_z, size=N)\n', (907, 930), True, 'import numpy as np\n'), ((1032, 1045), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1042, 1045), True, 'import numpy as np\n'), ((1225, 1264), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'labels'}), '(data=data, columns=labels)\n', (1237, 1264), True, 'import pandas as pd\n'), ((1972, 1981), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1978, 1981), True, 'import numpy as np\n'), ((2077, 2116), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'p_a', 'size': 'N'}), '([0, 1], p=p_a, size=N)\n', (2093, 2116), True, 'import numpy as np\n'), ((2124, 2163), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'p_z', 'size': 'N'}), '([0, 1], p=p_z, size=N)\n', (2140, 2163), True, 'import numpy as np\n'), ((2610, 2623), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2620, 2623), True, 'import numpy as np\n'), ((2668, 2713), 'numpy.concatenate', 'np.concatenate', (['[labels_protected, x]'], {'axis': '(1)'}), '([labels_protected, x], axis=1)\n', (2682, 2713), True, 'import numpy as np\n'), ((2804, 2843), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'labels'}), '(data=data, columns=labels)\n', (2816, 2843), True, 'import pandas as pd\n'), ((3509, 3518), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3515, 3518), True, 'import numpy as np\n'), ((3528, 3567), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'p_a', 'size': 'N'}), '([0, 1], p=p_a, size=N)\n', (3544, 3567), True, 'import numpy as np\n'), ((3575, 3614), 'numpy.random.choice', 'np.random.choice', (['[0, 
1]'], {'p': 'p_z', 'size': 'N'}), '([0, 1], p=p_z, size=N)\n', (3591, 3614), True, 'import numpy as np\n'), ((3860, 3873), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3870, 3873), True, 'import numpy as np\n'), ((3918, 3963), 'numpy.concatenate', 'np.concatenate', (['[labels_protected, x]'], {'axis': '(1)'}), '([labels_protected, x], axis=1)\n', (3932, 3963), True, 'import numpy as np\n'), ((4056, 4095), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'labels'}), '(data=data, columns=labels)\n', (4068, 4095), True, 'import pandas as pd\n'), ((949, 997), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu[a_i][z_i]', 'cov'], {}), '(mu[a_i][z_i], cov)\n', (978, 997), True, 'import numpy as np\n'), ((2196, 2217), 'numpy.asarray', 'np.asarray', (['[a, z, y]'], {}), '([a, z, y])\n', (2206, 2217), True, 'import numpy as np\n'), ((2229, 2272), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu[z_i]', 'cov'], {}), '(mu[z_i], cov)\n', (2258, 2272), True, 'import numpy as np\n'), ((2338, 2353), 'numpy.eye', 'np.eye', (['d_noise'], {}), '(d_noise)\n', (2344, 2353), True, 'import numpy as np\n'), ((3623, 3666), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu[z_i]', 'cov'], {}), '(mu[z_i], cov)\n', (3652, 3666), True, 'import numpy as np\n'), ((3689, 3747), 'numpy.random.choice', 'np.random.choice', (['[zi, 1 - zi]'], {'p': '[1 - beta[ai], beta[ai]]'}), '([zi, 1 - zi], p=[1 - beta[ai], beta[ai]])\n', (3705, 3747), True, 'import numpy as np\n'), ((3830, 3851), 'numpy.asarray', 'np.asarray', (['[a, z, y]'], {}), '([a, z, y])\n', (3840, 3851), True, 'import numpy as np\n'), ((2440, 2462), 'numpy.concatenate', 'np.concatenate', (['(x, n)'], {}), '((x, n))\n', (2454, 2462), True, 'import numpy as np\n'), ((2488, 2539), 'numpy.concatenate', 'np.concatenate', (['(n, x[d_shared - 1:d], x[:d_noise])'], {}), '((n, x[d_shared - 1:d], x[:d_noise]))\n', (2502, 2539), True, 'import numpy 
as np\n'), ((1106, 1127), 'numpy.asarray', 'np.asarray', (['[a, z, y]'], {}), '([a, z, y])\n', (1116, 1127), True, 'import numpy as np\n')] |
"""
2015-2016 <NAME> <EMAIL>
"""
import numpy as np
import scipy as sp
from scipy import sparse
from lowrank_matrix import ManifoldElement
from approximator_api import AbstractApproximator
from manifold_functions import TangentVector, svd_retraction
from manifold_functions import riemannian_grad_partial, delta_on_sigma_set
from scipy.sparse import linalg, csc_matrix
EPS = 1e-9
def closed_form_initial_guess(vec, delta, sigma_set):
    """Closed-form step-length guess: |tr(N^T D) / tr(N^T N)| where N is the
    search direction evaluated on the sampling set and D the residual."""
    direction_mat = csc_matrix(vec.release().evaluate(sigma_set)).T
    numerator = direction_mat.multiply(delta.T).sum()
    denominator = direction_mat.multiply(direction_mat).sum()
    return np.abs(numerator / denominator)
class CGApproximator(AbstractApproximator):
    """Riemannian nonlinear conjugate-gradient matrix approximator.

    Minimizes 0.5 * || a[sigma] - x[sigma] ||_F^2 over matrices x of fixed
    rank r, where sigma is the set of observed entries.  Each iteration
    computes the Riemannian gradient, combines it with the previous search
    direction (nonnegative beta with a restart safeguard), performs an
    Armijo backtracking line search, and retracts back onto the rank-r
    manifold via an SVD retraction.
    """
    def __init__(self):
        AbstractApproximator.__init__(self)
        # Problem data.
        self.target_matrix = None
        self.density = None
        self.norm_bound = None
        self.sigma_set = None
        # Current and previous iterates on the manifold.
        self.x_prev, self.x = None, None
        # Negated Riemannian gradients (TangentVectors) of the last two iterations.
        self.grad_prev, self.grad = None, None
        # Conjugate search directions of the last two iterations.
        self.conj_prev, self.conj = None, None
        # Residual on the sampling set and the factored partial gradient.
        self.delta = None
        self.grad_partial = None
    def approximate(self, a, r, sigma_set=None, x0=None, maxiter=900, eps=EPS):
        """Approximate `a` by a rank-`r` matrix observed on `sigma_set`.

        Returns (x, iterations_used, error_history); see _approximate.
        """
        return self._approximate(a, r, sigma_set=sigma_set, x0=x0, maxiter=maxiter, eps=eps)
    def _approximate(self, a, r, sigma_set=None, x0=None, maxiter=900, eps=EPS):
        if a is None:
            raise ValueError("target matrix must be provided")
        self.target_matrix = a
        self.initialization(sigma_set)  # provided by AbstractApproximator
        # all_err is kept so a (currently disabled) rank-continuation warm
        # start could prepend the error histories of lower-rank runs.
        all_err = []
        x, it, err = self.cg_approximate(r=r, x0=x0, maxiter=maxiter, eps=eps)
        return x, it, all_err + err
    def cg_approximate(self, r, x0=None, maxiter=100, eps=1e-9):
        """Run at most `maxiter` CG iterations from `x0` (random if None).

        Stops early once the loss drops below norm_bound * eps.
        Returns (x, iterations_used, error_history).
        """
        self.init_condition(r, x0)
        error_history = []
        for it in range(maxiter):
            self.step()
            error_history.append(self.loss())  # loss() from AbstractApproximator
            print('it: %s, error: %s' % (it, error_history[-1]))
            if error_history[-1] < self.norm_bound * eps:
                return self.x, it, error_history
        return self.x, maxiter, error_history
    def step(self):
        """One CG iteration: refresh the gradient, then move along the conjugate direction."""
        self.cg_grad()
        self.cg_step()
    def init_condition(self, r, x0):
        """Initialize the iterate, residual, gradient and (zero) conjugate directions."""
        if x0 is None:
            x0 = ManifoldElement.rand(self.target_matrix.shape, r, norm=self.norm_bound)
        self.x_prev, self.x = ManifoldElement(x0, r), ManifoldElement(x0, r)
        self.delta = delta_on_sigma_set(self.x, self.target_matrix, self.sigma_set)
        self.grad_partial = riemannian_grad_partial(self.x, self.target_matrix, self.sigma_set,
                                                     grad=self.delta, manifold_elems=True)
        self.grad = -TangentVector(self.x, self.grad_partial)
        self.grad_prev = self.grad
        self.conj_prev, self.conj = TangentVector.zero(self.x), TangentVector.zero(self.x)
        return None
    def cg_grad(self):
        """Recompute the residual and the (negated) Riemannian gradient at the current iterate."""
        self.delta = delta_on_sigma_set(self.x, self.target_matrix, self.sigma_set)
        self.grad_partial = riemannian_grad_partial(self.x, self.target_matrix, self.sigma_set,
                                                     grad=self.delta, manifold_elems=True)
        self.grad_prev, self.grad = self.grad, -TangentVector(self.x, self.grad_partial)
        return None
    def cg_step(self):
        """Update the conjugate direction and take an Armijo backtracking step."""
        self.conj_prev, self.conj = self.conj, self.conjugate_direction()
        # Closed-form guess for the initial step length along self.conj.
        alpha = closed_form_initial_guess(self.conj, self.delta, self.sigma_set)
        self.x_prev, self.x = \
            self.x, self.armijo_backtracking(lambda x: self.cost_raw(x), alpha)[0]
        return None
    def armijo_backtracking(self, func, alpha, maxiter=20):
        """Backtracking line search along self.conj starting from step `alpha`.

        Parameters
        ----------
        func : callable
            cost function of one ManifoldElement (currently unused: the
            Armijo test below calls self.cost_raw directly)
        alpha : float
            initial step length estimate
        maxiter : int
            maximum number of step halvings

        Returns
        -------
        x_new : ManifoldElement
            accepted iterate (retracted back onto the rank-r manifold)
        step : float
            the accepted, scaled step length
        """
        scale = -0.0001 * alpha
        for i in range(maxiter):
            x_new = svd_retraction(self.x + (0.5 ** i * alpha) * self.conj.release(), self.x.r)
            bound = (0.5 ** i * scale) * self.grad.release().scalar_product(self.conj.release())
            if self.cost_raw(self.x) - self.cost_raw(x_new) >= bound:
                return x_new, 0.5 ** i * scale
        return x_new, 0.5 ** maxiter * scale
    def cost_raw(self, elem):
        """Return 0.5 * || elem[sigma] - a[sigma] ||_F^2 for the stored target and sigma set."""
        return 0.5 * sp.sparse.linalg.norm(elem.evaluate(self.sigma_set) - self.target_matrix) ** 2
    def conjugate_direction(self):
        """Return the next conjugate search direction.

        Uses a nonnegative beta built from gradient differences; when the new
        direction is nearly orthogonal to the stored gradient (normalized
        inner product <= 0.1) it restarts with the stored gradient direction.
        """
        grad_prev_trans = self.grad_prev.transport(self.x)
        conj_prev_trans = self.conj_prev.transport(self.x)
        delta = self.grad - grad_prev_trans
        beta = max(0, delta.release().scalar_product(self.grad.release()) /
                   self.grad_prev.release().frobenius_norm() ** 2)
        conj = -self.grad + beta * conj_prev_trans
        angle = self.grad.release().scalar_product(conj.release()) / \
                np.sqrt(conj.release().frobenius_norm() ** 2 *
                        self.grad.release().frobenius_norm() ** 2)
        if angle <= 0.1:
            conj = self.grad
        # BUG FIX: `return conj` used to be indented inside the `if` above,
        # so the method returned None whenever angle > 0.1 and cg_step then
        # crashed when using the direction.
        return conj
| [
"numpy.abs",
"manifold_functions.riemannian_grad_partial",
"manifold_functions.TangentVector",
"manifold_functions.TangentVector.zero",
"approximator_api.AbstractApproximator.__init__",
"lowrank_matrix.ManifoldElement.rand",
"lowrank_matrix.ManifoldElement",
"manifold_functions.delta_on_sigma_set"
] | [((605, 639), 'numpy.abs', 'np.abs', (['(trace_first / trace_second)'], {}), '(trace_first / trace_second)\n', (611, 639), True, 'import numpy as np\n'), ((718, 753), 'approximator_api.AbstractApproximator.__init__', 'AbstractApproximator.__init__', (['self'], {}), '(self)\n', (747, 753), False, 'from approximator_api import AbstractApproximator\n'), ((2607, 2669), 'manifold_functions.delta_on_sigma_set', 'delta_on_sigma_set', (['self.x', 'self.target_matrix', 'self.sigma_set'], {}), '(self.x, self.target_matrix, self.sigma_set)\n', (2625, 2669), False, 'from manifold_functions import riemannian_grad_partial, delta_on_sigma_set\n'), ((2698, 2808), 'manifold_functions.riemannian_grad_partial', 'riemannian_grad_partial', (['self.x', 'self.target_matrix', 'self.sigma_set'], {'grad': 'self.delta', 'manifold_elems': '(True)'}), '(self.x, self.target_matrix, self.sigma_set, grad=\n self.delta, manifold_elems=True)\n', (2721, 2808), False, 'from manifold_functions import riemannian_grad_partial, delta_on_sigma_set\n'), ((3109, 3171), 'manifold_functions.delta_on_sigma_set', 'delta_on_sigma_set', (['self.x', 'self.target_matrix', 'self.sigma_set'], {}), '(self.x, self.target_matrix, self.sigma_set)\n', (3127, 3171), False, 'from manifold_functions import riemannian_grad_partial, delta_on_sigma_set\n'), ((3200, 3310), 'manifold_functions.riemannian_grad_partial', 'riemannian_grad_partial', (['self.x', 'self.target_matrix', 'self.sigma_set'], {'grad': 'self.delta', 'manifold_elems': '(True)'}), '(self.x, self.target_matrix, self.sigma_set, grad=\n self.delta, manifold_elems=True)\n', (3223, 3310), False, 'from manifold_functions import riemannian_grad_partial, delta_on_sigma_set\n'), ((2437, 2508), 'lowrank_matrix.ManifoldElement.rand', 'ManifoldElement.rand', (['self.target_matrix.shape', 'r'], {'norm': 'self.norm_bound'}), '(self.target_matrix.shape, r, norm=self.norm_bound)\n', (2457, 2508), False, 'from lowrank_matrix import ManifoldElement\n'), ((2539, 2561), 
'lowrank_matrix.ManifoldElement', 'ManifoldElement', (['x0', 'r'], {}), '(x0, r)\n', (2554, 2561), False, 'from lowrank_matrix import ManifoldElement\n'), ((2563, 2585), 'lowrank_matrix.ManifoldElement', 'ManifoldElement', (['x0', 'r'], {}), '(x0, r)\n', (2578, 2585), False, 'from lowrank_matrix import ManifoldElement\n'), ((2877, 2917), 'manifold_functions.TangentVector', 'TangentVector', (['self.x', 'self.grad_partial'], {}), '(self.x, self.grad_partial)\n', (2890, 2917), False, 'from manifold_functions import TangentVector, svd_retraction\n'), ((2989, 3015), 'manifold_functions.TangentVector.zero', 'TangentVector.zero', (['self.x'], {}), '(self.x)\n', (3007, 3015), False, 'from manifold_functions import TangentVector, svd_retraction\n'), ((3017, 3043), 'manifold_functions.TangentVector.zero', 'TangentVector.zero', (['self.x'], {}), '(self.x)\n', (3035, 3043), False, 'from manifold_functions import TangentVector, svd_retraction\n'), ((3406, 3446), 'manifold_functions.TangentVector', 'TangentVector', (['self.x', 'self.grad_partial'], {}), '(self.x, self.grad_partial)\n', (3419, 3446), False, 'from manifold_functions import TangentVector, svd_retraction\n')] |
"""
create an initially equal probability map,
then use this to generate the probability to hit
a certain point. if the point is full, move to
the next radius.
"""
from PIL import Image
import numpy as np
from random import choice
w, h = 250, 250
# Start from an all-black RGBA canvas; converted to a numpy array so single
# channels can be incremented in place.
field = Image.new("RGBA", (w, h), color="black")
field = np.array(field)
# All coordinates as (y, x) pairs.  NOTE(review): y ranges over `w` and x
# over `h`; this only indexes correctly because w == h — confirm before
# changing the two dimensions independently.
pixels = [(y, x) for x in range(h) for y in range(w)]
def within(y, x):
    """Return (y, x) unchanged when it lies inside the h x w field, else False."""
    global w, h
    if 0 <= y < h and 0 <= x < w:
        return (y, x)
    return False
def find_first(yx):
    # Breadth-first search from the seed pixel `yx` for the first pixel whose
    # blue channel is not yet "full" (value != 250); gives up after 2000
    # expansions and returns (False, False).
    y, x = yx
    check, iters, done = [(y, x)], 0, []
    while len(check) != 0:
        if iters >= 2000:
            break
        y, x = check[0]
        check.append(within(y+1, x)) # down
        check.append(within(y-1, x)) # up
        check.append(within(y, x-1)) # left
        check.append(within(y, x+1)) # right
        # Drop out-of-bounds neighbours (within() returns False) and pixels
        # already expanded.  NOTE(review): `c not in done` is a linear scan,
        # so this loop is quadratic in the number of visited pixels.
        check = [c for c in check if c != False and c not in done]
        done.append((y, x))
        # field is uint8 RGBA; 250 is treated as the "full" threshold for the
        # blue channel — presumably to stay below the 255 wrap-around.
        if field[y][x][2] != 250:
            return (y, x)
        del check[0]
        iters += 1
    return (False, False)
for i in range(20000000):
    # Pick a random seed pixel (weighted by previous hits, since hit pixels
    # are re-appended to `pixels`) and walk to the nearest non-full pixel.
    y, x = find_first(choice(pixels))
    # print(y, x)
    if y == False and x == False:
        continue
    # r, g, b, a = field[y][x]
    # print(r, g, b, a)
    # Brighten the blue channel of the hit pixel; re-appending makes this
    # pixel a more likely seed in future iterations.
    field[y][x][2] += 1
    pixels.append((y, x))
    if i % 100000 == 0:
        Image.fromarray(field).save("progress.png")
        print(f"{i/100000} units")
Image.fromarray(field).show()
| [
"PIL.Image.new",
"numpy.array",
"random.choice",
"PIL.Image.fromarray"
] | [((258, 298), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(w, h)'], {'color': '"""black"""'}), "('RGBA', (w, h), color='black')\n", (267, 298), False, 'from PIL import Image\n'), ((307, 322), 'numpy.array', 'np.array', (['field'], {}), '(field)\n', (315, 322), True, 'import numpy as np\n'), ((1102, 1116), 'random.choice', 'choice', (['pixels'], {}), '(pixels)\n', (1108, 1116), False, 'from random import choice\n'), ((1404, 1426), 'PIL.Image.fromarray', 'Image.fromarray', (['field'], {}), '(field)\n', (1419, 1426), False, 'from PIL import Image\n'), ((1324, 1346), 'PIL.Image.fromarray', 'Image.fromarray', (['field'], {}), '(field)\n', (1339, 1346), False, 'from PIL import Image\n')] |
from random import shuffle, Random
import cv2
from collections import defaultdict
from vi3o import view
import numpy as np
import pickle
import os
from vi3o.image import imsave
from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph
class KltTrack:
    """One KLT feature track.

    `history` holds per-frame (frame_idx, x, y, err) tuples; the properties
    below expose the most recent entry.  The remaining attributes collect
    detections and velocity predictions associated with the track.
    """

    def __init__(self, idx, x, y):
        self.history = [(idx, x, y, None)]
        self.dets_history = []
        self.dets_history_for_post_vx = defaultdict(list)
        self.predictions = defaultdict(list)

    @property
    def idx(self):
        """Frame index of the newest tracked point."""
        return self.history[-1][0]

    @property
    def x(self):
        """x coordinate of the newest tracked point."""
        return self.history[-1][1]

    @property
    def y(self):
        """y coordinate of the newest tracked point."""
        return self.history[-1][2]

    @property
    def e(self):
        """Tracking error of the newest point (None for a fresh track)."""
        return self.history[-1][3]

    def distance_to(self, x, y):
        """Euclidean distance from the track head to (x, y)."""
        dx = self.x - x
        dy = self.y - y
        return np.sqrt(dx ** 2 + dy ** 2)
def connect(d1, d2, weight_data):
    """Add a weighted forward edge d1 -> d2 and record the backward link on d2."""
    forward_edges = d1.next_weight_data[d2]
    forward_edges.append(weight_data)
    d2.prev.add(d1)
def video_detections(scene, f0, frames, min_conf=None):
    """Yield (frame_idx, frame, detections) for `frames` consecutive frames
    starting at `f0`.

    Detections with confidence at or below the threshold are dropped; when
    `min_conf` is None the scene's default threshold is used.
    """
    threshold = scene.default_min_conf if min_conf is None else min_conf
    for frame_idx in range(f0, f0 + frames):
        image = scene.frame(frame_idx)
        kept = [
            det
            for det in scene.detections(start_frame=frame_idx, stop_frame=frame_idx)
            if det.confidence > threshold
        ]
        yield frame_idx, image, kept
def estimate_intradet_iou(detections):
    """Annotate each detection with the maximum IoU / IoA it attains against
    any other detection in the same frame (0 when there is no other)."""
    for det in detections:
        det.max_intra_iou = 0
        det.max_intra_ioa = 0
    n = len(detections)
    for i in range(n):
        first = detections[i]
        for j in range(i + 1, n):
            second = detections[j]
            iou = first.iou(second)
            ioa = first.ioa(second)
            for det in (first, second):
                if iou > det.max_intra_iou:
                    det.max_intra_iou = iou
                if ioa > det.max_intra_ioa:
                    det.max_intra_ioa = ioa
def make_graph(video_detections, fps, show=False, max_connect=5):
    """Build the detection connectivity graph for one video segment.

    `video_detections` is an iterable of (frame_idx, frame, detections)
    tuples (see video_detections()).  KLT feature points are tracked across
    frames with pyramidal Lucas-Kanade; a ('klt', ...) edge connects two
    detections that share a track point, and a ('long', ...) edge connects a
    detection to a later one that overlaps its constant-velocity prediction.
    Returns the flat list of all detection objects; the edges live in each
    detection's next_weight_data / prev attributes (see connect()).
    """
    tracks = []
    lk_params = dict( winSize  = (15, 15),
                      maxLevel = 4,
                      criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    feature_params = dict( maxCorners = 5000,
                           qualityLevel = 0.01,
                           minDistance = 10,
                           blockSize = 7 )
    col = (255,0,0)
    # Tunables derived from the frame rate.
    max_len = 3*fps               # longest klt history (and oldest usable detection)
    min_klt_per_obj = 10          # re-detect features when an object has fewer points
    velocity_history = fps//2     # number of points used for the velocity fit
    prediction_df = fps           # how many frames ahead to predict
    graph = []
    detect = True
    for frame_idx, frame, detections in video_detections:
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        height, width, _ = frame.shape
        estimate_intradet_iou(detections)
        # Attach per-detection graph bookkeeping and collect the nodes.
        for det in detections:
            det.next_weight_data = defaultdict(list)
            det.pre_vs = []
            det.post_vs = []
            det.prev = set()
            if show:
                det.draw(frame, label=det.id)
            graph.append(det)
        # Track klt points to next frame
        if len(tracks) > 0:
            interesting = []
            img0, img1 = prev_gray, frame_gray
            p0 = np.float32([(tr.x, tr.y) for tr in tracks]).reshape(-1, 1, 2)
            # See how the points have moved between the two frames;
            # forward-backward tracking rejects points that do not return
            # close (< 1 px) to where they started.
            p1, st, err1 = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag, e in zip(tracks, p1.reshape(-1, 2), good, err1.flat):
                if not good_flag:
                    continue
                if not (0 <= x < width and 0 <= y < height):
                    continue
                if e > 1e3:
                    continue
                tr.history.append((frame_idx, x, y, e))
                tr.history = tr.history[-max_len-1:]
                new_tracks.append(tr)
                if show:
                    cv2.circle(frame, (x, y), 2, (255-min(e*10, 255),0,0), -1)
                # NOTE(review): hard-coded detection id used only to populate
                # the debug-visualisation list `interesting` (overwritten below).
                for prev_dets in tr.dets_history:
                    for det in prev_dets:
                        if det.id == 2860144:
                            interesting.append(tr)
            tracks = new_tracks
            interesting = tracks
            if show:
                cv2.polylines(frame, [np.int32([(x,y) for _,x,y,_ in tr.history]) for tr in interesting ], False, col)
        # Find detections with too few klt points
        if detect:
            mask = np.zeros_like(frame_gray)
            detect = False
            min_area = float('Inf')
            for det in detections:
                cnt = 0
                for tr in tracks:
                    if det.covers(tr.x, tr.y):
                        cnt += 1
                if cnt < min_klt_per_obj:
                    det.update_mask(mask)
                    detect = True
                    min_area = min(min_area, det.area)
        # Detect new klt points
        if detect:
            # Spacing chosen so the smallest under-tracked detection can
            # still receive ~min_klt_per_obj points; existing track heads
            # are masked out to avoid duplicates.
            feature_params['minDistance'] = int(np.sqrt(min_area / min_klt_per_obj))
            for tr in tracks:
                cv2.circle(mask, (tr.x, tr.y), feature_params['minDistance']//2, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    nt = KltTrack(frame_idx, x, y)
                    tracks.append(nt)
        # Assign detections to klt points and build detection connectivity graph
        new_tracks = []
        for tr in tracks:
            vx = vy = None
            last_dets = []
            for nxt in detections:
                if nxt.covers(tr.x, tr.y):
                    last_dets.append(nxt)
                    tr.dets_history_for_post_vx[frame_idx].append(nxt)
                    # klt edge: this track links each previously covered
                    # detection to the current one.
                    for prev_dets in tr.dets_history:
                        for prv in prev_dets:
                            df = nxt.frame - prv.frame
                            klt = tr.history[-df-1:]
                            connect(prv, nxt, ('klt', klt))
                    if vx is None and len(tr.history) > velocity_history: # Predict where the detection will be in the future
                        hist = tr.history[-velocity_history:]
                        (vx, x0), rx, _, _, _ = np.polyfit(range(len(hist)), [p[1] for p in hist], 1, full=True)
                        (vy, y0), ry, _, _, _ = np.polyfit(range(len(hist)), [p[2] for p in hist], 1, full=True)
                        klt_res = [p[3] for p in hist]
                        # r bundles fit residuals plus summed/max klt errors
                        # as a crude quality measure for the velocity.
                        r = (rx[0] + ry[0], sum(klt_res), max(klt_res))
                    if vx is not None:
                        v = (vx, vy) + r
                        nxt.pre_vs.append(v)
                        for df in range(1, prediction_df):
                            d = nxt.predict(df, vx, vy)
                            d.original = nxt
                            d.prediction_v = v
                            tr.predictions[d.frame].append(d)
                        for d in tr.dets_history_for_post_vx[frame_idx - velocity_history]:
                            d.post_vs.append(v)
            if frame_idx - velocity_history in tr.dets_history_for_post_vx:
                del tr.dets_history_for_post_vx[frame_idx - velocity_history]
            if last_dets:
                tr.dets_history.append(last_dets)
                tr.dets_history = tr.dets_history[-max_connect:]
            # Drop detections that are too old to connect; tracks that no
            # longer cover any recent detection are discarded entirely.
            f = frame_idx - max_len
            tr.dets_history = [last_dets for last_dets in tr.dets_history if last_dets[0].frame > f]
            if tr.dets_history:
                new_tracks.append(tr)
        tracks = new_tracks
        # Form long term connection from predicted detections
        for tr in tracks:
            for prd in tr.predictions[frame_idx]:
                for det in detections:
                    if det not in tr.dets_history[-1] and prd.iou(det) > 0.5:
                        connect(prd.original, det, ('long', prd))
            del tr.predictions[frame_idx]
        if show:
            for det in detections:
                if det.pre_vs:
                    # vx = np.median([vx for vx, vy in det.pre_vs])
                    # vy = np.median([vy for vx, vy in det.pre_vs])
                    for vx, vy, r, _, _ in det.pre_vs:
                        df = 30
                        d = det.predict(df, vx, vy)
                        cv2.arrowedLine(frame, (int(det.cx), int(det.cy)), (int(d.cx), int(d.cy)),
                                        (0,0,max(0, 255-int(r))), 1)
        prev_gray = frame_gray
        if show:
            view(frame)
    return graph
def prep_training_graphs_worker(arg):
    """Build and cache the detection graph for one video segment.

    `arg` is a (scene, first_frame, segment_length, graph_name, part, params)
    tuple; the graph is only computed when no cached file exists yet.
    Returns (part, (graph_name, scene_name)).
    """
    scene, first_frame, segment_length, graph_name, part, params = arg
    if not os.path.exists(graph_name):
        detections = video_detections(scene, first_frame, segment_length)
        graph = make_graph(detections, scene.fps, **params)
        save_graph(graph, graph_name)
        meta = {'first_frame': first_frame, 'length': segment_length}
        save_json(meta, graph_name + '-meta.json')
    return part, (graph_name, scene.name)
def prep_training_graphs(dataset, cachedir, threads=None, segment_length_s=10, segment_overlap_s=1, limit=None,
                         limit_train_amount=None,
                         worker=prep_training_graphs_worker, worker_params=None, seed=42):
    """Cut every scene of `dataset` into overlapping segments, build one
    detection graph per segment (cached under <cachedir>/graphs) and write
    a JSON index mapping each dataset part to its graph files.

    `limit` keeps only that many non-test jobs; `limit_train_amount` keeps
    that fraction of the train jobs (mutually exclusive with `limit`).
    Jobs run through `worker` with `threads` workers via parallel().
    """
    if worker_params is None:
        worker_params = {}
    lsts = {n: [] for n in dataset.parts.keys()}
    jobs = []
    for part in lsts.keys():
        for scene_name in dataset.parts[part]:
            scene = dataset.scene(scene_name)
            segment_length = segment_length_s * scene.fps
            segment_overlap = segment_overlap_s * scene.fps
            f0 = scene.parts[part].start
            # Walk the scene in overlapping windows; the final window is
            # stretched to absorb the remainder so no tail is dropped.
            while f0 + segment_length < scene.parts[part].stop or f0 == scene.parts[part].start:
                if f0 + 2*segment_length > scene.parts[part].stop:
                    myseg = scene.parts[part].stop - f0
                else:
                    myseg = segment_length
                graph_name = os.path.join(cachedir, "graphs", "%s_graph_%s_%.8d.pck" % (dataset.name, scene_name, f0))
                jobs.append((scene, f0, myseg, graph_name, part, worker_params))
                f0 += myseg - segment_overlap
    # Sort for a deterministic base order, then shuffle with a fixed seed so
    # the limiting below selects a reproducible random subset.
    jobs.sort(key=lambda j: j[3])
    Random(seed).shuffle(jobs)
    if limit_train_amount is not None:
        assert limit is None
        train_jobs = [j for j in jobs if j[4] == 'train']
        eval_jobs = [j for j in jobs if j[4] == 'eval']
        train_jobs = train_jobs[:int(limit_train_amount * len(train_jobs))]
        jobs = train_jobs + eval_jobs
    elif limit is not None:
        jobs = [j for j in jobs if j[4] != 'test']
        jobs = jobs[:limit]
    jobs.sort(key=lambda j: j[3])
    # NOTE(review): "Preppping" typo below is a user-visible progress label
    # only; left unchanged here.
    for part, entry in parallel(worker, jobs, threads, 'Preppping training graphs'):
        lsts[part].append(entry)
    save_json(lsts, os.path.join(cachedir, "graphs", "%s_traineval.json" % dataset.name))
def graph_names(dataset, part):
    """Return the cached (graph_name, scene_name) entries for a dataset part.

    The special part name 'trainval' concatenates the 'train' and 'eval' lists.
    """
    index_file = os.path.join(dataset.cachedir, "graphs", "%s_traineval.json" % dataset.name)
    parts = load_json(index_file)
    if part != 'trainval':
        return parts[part]
    return parts['train'] + parts['eval']
def make_duke_test_video():
    """Dump a 10-frame test fixture for Duke camera 2.

    Writes one JPEG per frame plus pickled (frame_idx, filename, detections)
    and ground-truth sequences under test/data/.
    """
    # BUG FIX: Duke was previously only imported inside the __main__ guard,
    # so calling this function after a plain module import raised NameError.
    from ggdtrack.duke_dataset import Duke
    cam = 2
    seq = []
    scene = Duke('/home/hakan/src/duke').scene(cam)
    for frame_idx, frame, detections in video_detections(scene, 54373, 10):
        fn = "test/data/duke_frame_%d_%.8d.jpg" % (cam, frame_idx)
        imsave(frame, fn)
        seq.append((frame_idx, fn.replace("test/", ""), detections))
    save_pickle(seq, "test/data/duke_test_seq_cam2_10.pck")
    gt = scene.ground_truth()
    gt = {f: gt[f] for f, _, _ in seq}
    save_pickle(gt, "test/data/duke_test_seq_cam2_10_gt.pck")
if __name__ == '__main__':
    from ggdtrack.duke_dataset import Duke
    from ggdtrack.visdrone_dataset import VisDrone
    from ggdtrack.mot16_dataset import Mot16
    # Ad-hoc debugging entry point: the commented calls below are alternative
    # scenes/datasets used while developing the graph construction.
    # scene = Duke('/home/hakan/src/duke').scene(1)
    # make_graph(video_detections(scene, 124472, 100, 0), scene.fps, True, True)
    # prep_training_graphs(Duke('/home/hakan/src/duke'))
    # prep_training_graphs_worker((Duke('/home/hakan/src/duke').scene(2), 232034, 600, "cachedir/graphs/duke_graph_2_00232034.pck", "test"))
    # Duke('/home/hakan/src/duke').scene(7).frame(336553 + 1129-2)
    # prep_training_graphs_worker((Duke('/home/hakan/src/duke').scene(2), 232034, 600, "cachedir/graphs/duke_graph_2_00232034.pck", "test"))
    # prep_training_graphs_worker((Duke('/home/hakan/src/duke').scene(2), 54373, 600, "cachedir/graphs/duke_graph_2_00054373.pck", "??"))
    # prep_training_graphs_worker((Duke('/home/hakan/src/duke').scene(2), 54373, 10, "cachedir/graphs/tst.pck", "??"))
    # make_graph(video_detections(Duke('/home/hakan/src/duke').scene(2), 54373, 10), 60, True)
    # make_duke_test_video()
    make_graph(video_detections(VisDrone('/home/hakanad/src/ggdtrack/data/').scene("val__uav0000305_00000_v"), 160, 600), 25, True)
    # make_graph(video_detections(VisDrone('/home/hakan/src/ggdtrack/data/').scene("val__uav0000137_00458_v"), 1, 10000), 25, True)
    # make_graph(video_detections(VisDrone('/home/hakan/src/ggdtrack/data/').scene("train__uav0000279_00001_v"), 160, 10000), 25, True)
    # make_graph(video_detections(Mot16("/home/hakan/src/ggdtrack/data").scene('train__MOT16-13'), 1, 1000), 25, True)
| [
"vi3o.view",
"numpy.sqrt",
"ggdtrack.utils.save_graph",
"numpy.int32",
"os.path.exists",
"random.Random",
"ggdtrack.visdrone_dataset.VisDrone",
"vi3o.image.imsave",
"cv2.circle",
"cv2.cvtColor",
"ggdtrack.duke_dataset.Duke",
"ggdtrack.utils.save_pickle",
"ggdtrack.utils.parallel",
"cv2.goo... | [((10799, 10859), 'ggdtrack.utils.parallel', 'parallel', (['worker', 'jobs', 'threads', '"""Preppping training graphs"""'], {}), "(worker, jobs, threads, 'Preppping training graphs')\n", (10807, 10859), False, 'from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph\n'), ((11580, 11635), 'ggdtrack.utils.save_pickle', 'save_pickle', (['seq', '"""test/data/duke_test_seq_cam2_10.pck"""'], {}), "(seq, 'test/data/duke_test_seq_cam2_10.pck')\n", (11591, 11635), False, 'from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph\n'), ((11709, 11766), 'ggdtrack.utils.save_pickle', 'save_pickle', (['gt', '"""test/data/duke_test_seq_cam2_10_gt.pck"""'], {}), "(gt, 'test/data/duke_test_seq_cam2_10_gt.pck')\n", (11720, 11766), False, 'from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph\n'), ((430, 447), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (441, 447), False, 'from collections import defaultdict\n'), ((475, 492), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (486, 492), False, 'from collections import defaultdict\n'), ((812, 858), 'numpy.sqrt', 'np.sqrt', (['((self.x - x) ** 2 + (self.y - y) ** 2)'], {}), '((self.x - x) ** 2 + (self.y - y) ** 2)\n', (819, 858), True, 'import numpy as np\n'), ((2570, 2609), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (2582, 2609), False, 'import cv2\n'), ((8805, 8831), 'os.path.exists', 'os.path.exists', (['graph_name'], {}), '(graph_name)\n', (8819, 8831), False, 'import os\n'), ((8925, 8954), 'ggdtrack.utils.save_graph', 'save_graph', (['graph', 'graph_name'], {}), '(graph, graph_name)\n', (8935, 8954), False, 'from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph\n'), ((8963, 9037), 'ggdtrack.utils.save_json', 'save_json', (["{'first_frame': f0, 'length': myseg}", "(graph_name + '-meta.json')"], {}), 
"({'first_frame': f0, 'length': myseg}, graph_name + '-meta.json')\n", (8972, 9037), False, 'from ggdtrack.utils import parallel, save_json, save_pickle, load_json, save_graph\n'), ((11043, 11119), 'os.path.join', 'os.path.join', (['dataset.cachedir', '"""graphs"""', "('%s_traineval.json' % dataset.name)"], {}), "(dataset.cachedir, 'graphs', '%s_traineval.json' % dataset.name)\n", (11055, 11119), False, 'import os\n'), ((11489, 11506), 'vi3o.image.imsave', 'imsave', (['frame', 'fn'], {}), '(frame, fn)\n', (11495, 11506), False, 'from vi3o.image import imsave\n'), ((2758, 2775), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2769, 2775), False, 'from collections import defaultdict\n'), ((3279, 3338), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['img0', 'img1', 'p0', 'None'], {}), '(img0, img1, p0, None, **lk_params)\n', (3303, 3338), False, 'import cv2\n'), ((3366, 3425), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['img1', 'img0', 'p1', 'None'], {}), '(img1, img0, p1, None, **lk_params)\n', (3390, 3425), False, 'import cv2\n'), ((4569, 4594), 'numpy.zeros_like', 'np.zeros_like', (['frame_gray'], {}), '(frame_gray)\n', (4582, 4594), True, 'import numpy as np\n'), ((5231, 5295), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['frame_gray'], {'mask': 'mask'}), '(frame_gray, mask=mask, **feature_params)\n', (5254, 5295), False, 'import cv2\n'), ((8671, 8682), 'vi3o.view', 'view', (['frame'], {}), '(frame)\n', (8675, 8682), False, 'from vi3o import view\n'), ((10308, 10320), 'random.Random', 'Random', (['seed'], {}), '(seed)\n', (10314, 10320), False, 'from random import shuffle, Random\n'), ((10918, 10986), 'os.path.join', 'os.path.join', (['cachedir', '"""graphs"""', "('%s_traineval.json' % dataset.name)"], {}), "(cachedir, 'graphs', '%s_traineval.json' % dataset.name)\n", (10930, 10986), False, 'import os\n'), ((11298, 11326), 'ggdtrack.duke_dataset.Duke', 'Duke', (['"""/home/hakan/src/duke"""'], {}), 
"('/home/hakan/src/duke')\n", (11302, 11326), False, 'from ggdtrack.duke_dataset import Duke\n'), ((5060, 5095), 'numpy.sqrt', 'np.sqrt', (['(min_area / min_klt_per_obj)'], {}), '(min_area / min_klt_per_obj)\n', (5067, 5095), True, 'import numpy as np\n'), ((5143, 5216), 'cv2.circle', 'cv2.circle', (['mask', '(tr.x, tr.y)', "(feature_params['minDistance'] // 2)", '(0)', '(-1)'], {}), "(mask, (tr.x, tr.y), feature_params['minDistance'] // 2, 0, -1)\n", (5153, 5216), False, 'import cv2\n'), ((10052, 10145), 'os.path.join', 'os.path.join', (['cachedir', '"""graphs"""', "('%s_graph_%s_%.8d.pck' % (dataset.name, scene_name, f0))"], {}), "(cachedir, 'graphs', '%s_graph_%s_%.8d.pck' % (dataset.name,\n scene_name, f0))\n", (10064, 10145), False, 'import os\n'), ((3122, 3165), 'numpy.float32', 'np.float32', (['[(tr.x, tr.y) for tr in tracks]'], {}), '([(tr.x, tr.y) for tr in tracks])\n', (3132, 3165), True, 'import numpy as np\n'), ((12890, 12934), 'ggdtrack.visdrone_dataset.VisDrone', 'VisDrone', (['"""/home/hakanad/src/ggdtrack/data/"""'], {}), "('/home/hakanad/src/ggdtrack/data/')\n", (12898, 12934), False, 'from ggdtrack.visdrone_dataset import VisDrone\n'), ((4398, 4445), 'numpy.int32', 'np.int32', (['[(x, y) for _, x, y, _ in tr.history]'], {}), '([(x, y) for _, x, y, _ in tr.history])\n', (4406, 4445), True, 'import numpy as np\n'), ((5354, 5367), 'numpy.float32', 'np.float32', (['p'], {}), '(p)\n', (5364, 5367), True, 'import numpy as np\n')] |
# logistic regression
# minimize (1/n)sum_t ln(1 + e(-y_t beta.x_t)) + 0.5*lambda*||beta||_2^2
import math
import numpy as np
#from cvxpy import *
from sklearn.linear_model import LogisticRegression
MAX_ERR = 1000000000
# ---------------------------------------------
# Max norm of optimal hypothesis when this is the regularization param
def compute_max_norm(lamb):
    """Upper bound on ||beta*||_2 for regularization strength `lamb`: sqrt(2 ln 2 / lamb)."""
    bound_squared = 2.0 * math.log(2.0) / lamb
    return math.sqrt(bound_squared)
# Sensitivity of a query to sparse vector
def get_sv_sensitivity(max_norm, n):
    """Sensitivity of one sparse-vector query: log(1 + e^max_norm) / n."""
    loss_bound = math.log(1.0 + math.exp(max_norm))
    return loss_bound / n
# L1-sensitivity of the optimal beta
def compute_opt_sensitivity(n, dim, lamb):
    """L1-sensitivity of the optimal beta: 2*sqrt(dim) / (n * lamb)."""
    numerator = 2.0 * math.sqrt(dim)
    return numerator / (n * lamb)
# ---------------------------------------------
def compute_err(X, Y, lamb, beta):
    """Regularized logistic loss:
    (1/n) sum_i log(1 + e^{-y_i <x_i, beta>}) + 0.5 * lamb * ||beta||_2^2.

    Returns MAX_ERR when the exponential overflows.
    """
    n = len(X)
    total = 0.0
    try:
        for i in range(n):
            margin = -Y[i] * np.dot(X[i], beta)
            total += math.log(1.0 + math.exp(margin))
    except OverflowError:
        return MAX_ERR
    regularizer = 0.5 * lamb * np.dot(beta, beta)
    return total / n + regularizer
# ---------------------------------------------
def logistic_regression(X, Y, lamb):
    """Fit an L2-regularized logistic model without intercept.

    sklearn's `C` is the inverse of (lamb * n), matching the objective
    (1/n) sum_i log(1 + e^{-y_i <x_i, beta>}) + 0.5 * lamb * ||beta||_2^2.
    Returns (beta, regularized error of beta).
    """
    inverse_reg = 1.0 / (lamb * len(X))
    model = LogisticRegression(penalty="l2", C=inverse_reg, fit_intercept=False)
    model.fit(X, Y)
    beta = np.array(model.coef_[0])
    return beta, compute_err(X, Y, lamb, beta)
# Problem: this took too much memory (5+GB on a 120MB input data file)
# Input:
# X and Y are numpy arrays,
# X is dim by n
# Y is 1 by n, each entry is {-1, +1}
# lamb is regularization constant
#
# Output:
# optimal hypothesis beta
# value of its solution (optimal regularized error)
#def cvxpy_logistic_regression(X, Y, lamb):
# n = len(Y)
# d = X.shape[1]
# beta = Variable(d)
# # this version threw an error for me
# #expr1 = sum([log_sum_exp(vstack(0, -Y[i] * np.dot(X[i, :], beta))) for i in range(n)])
# expr1 = sum(logistic(X[i,:].T*beta*-Y[i]) for i in range(n))
# expr1 = conv(1.0 / float(n), sum(expr1))
# expr2 = square(norm(beta))
# expr2 = conv(lamb, expr2)
# expr_list = [expr1, expr2]
# expr = sum(expr_list)
# obj = Minimize(expr)
#
# p = Problem(obj)
# p.solve()
#
# beta_arr = np.array([b[0,0] for b in beta])
# return beta, p.value
#
| [
"math.sqrt",
"math.log",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"numpy.dot",
"math.exp"
] | [((954, 972), 'numpy.dot', 'np.dot', (['beta', 'beta'], {}), '(beta, beta)\n', (960, 972), True, 'import numpy as np\n'), ((1132, 1190), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': 'C', 'fit_intercept': '(False)'}), "(penalty='l2', C=C, fit_intercept=False)\n", (1150, 1190), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1215, 1236), 'numpy.array', 'np.array', (['lr.coef_[0]'], {}), '(lr.coef_[0])\n', (1223, 1236), True, 'import numpy as np\n'), ((644, 658), 'math.sqrt', 'math.sqrt', (['dim'], {}), '(dim)\n', (653, 658), False, 'import math\n'), ((396, 409), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (404, 409), False, 'import math\n'), ((523, 541), 'math.exp', 'math.exp', (['max_norm'], {}), '(max_norm)\n', (531, 541), False, 'import math\n'), ((860, 878), 'numpy.dot', 'np.dot', (['X[i]', 'beta'], {}), '(X[i], beta)\n', (866, 878), True, 'import numpy as np\n')] |
__all__ = ["run", "sample"]
import warnings
from functools import partial
from typing import List, Tuple
import jax.numpy as jnp
import numpy as np
from jax import lax, local_device_count, pmap, random, vmap
from jax._src.api import vmap
from jax.tree_util import tree_flatten
from rmhmc.base_types import Position, identity
from rmhmc.hmc import MCMCKernel, SamplerCarry
def sample(
kernel: MCMCKernel,
rng_key: random.KeyArray,
initial_coords: Position,
*,
num_steps: int = 1000,
num_tune: int = 1000,
num_chains: int = 2,
tune_kinetic_energy: bool = True,
initial_buffer_size: int = 75,
first_window_size: int = 25,
final_buffer_size: int = 75,
parallel: bool = True,
) -> SamplerCarry:
def sample_one_chain(
rng_key: random.KeyArray, q: Position
) -> SamplerCarry:
tune_key, sample_key = random.split(rng_key)
state = kernel.init(q)
state, _ = run(
kernel,
num_tune,
tune_key,
state,
tune=True,
tune_kinetic_energy=tune_kinetic_energy,
initial_buffer_size=initial_buffer_size,
first_window_size=first_window_size,
final_buffer_size=final_buffer_size,
)
return run(kernel, num_steps, sample_key, state, tune=False)[1]
keys = random.split(rng_key, num_chains)
if num_chains > 1:
proto_init_val = tree_flatten(initial_coords)[0][0]
if jnp.shape(proto_init_val)[0] != num_chains:
raise ValueError(
"The leading dimension of the initial parameters must match num_chains"
)
if parallel and num_chains >= local_device_count():
warnings.warn(
f"Only {local_device_count()} devices are visible to JAX; chains will be "
"sampled sequentially"
)
parallel = False
if parallel:
execute = pmap(sample_one_chain)
else:
execute = vmap(sample_one_chain)
return execute(keys, initial_coords)
def run(
kernel: MCMCKernel,
num_steps: int,
rng_key: random.KeyArray,
state: SamplerCarry,
*,
tune: bool = False,
tune_kinetic_energy: bool = True,
initial_buffer_size: int = 75,
first_window_size: int = 25,
final_buffer_size: int = 75,
) -> Tuple[SamplerCarry, SamplerCarry]:
if tune:
if tune_kinetic_energy:
schedule = jnp.asarray(
build_schedule(
num_steps,
initial_buffer_size=initial_buffer_size,
final_buffer_size=final_buffer_size,
first_window_size=first_window_size,
)
)
else:
schedule = jnp.zeros((num_steps, 2), dtype=bool)
if initial_buffer_size <= 1:
raise ValueError("'initial_buffer_size' must be >1")
schedule[initial_buffer_size - 1, 1] = True
else:
schedule = jnp.zeros((num_steps, 2), dtype=bool)
def step(
state: SamplerCarry, args: Tuple[Tuple[bool, bool], random.KeyArray]
) -> Tuple[SamplerCarry, SamplerCarry]:
(slow_update, reset), rng_key = args
state_ = kernel.step(state, rng_key)
# If we're tuning, update all the tuning parameters
if tune:
state_ = kernel.fast_update(state_)
# Update the slow parameters if requested
state_ = lax.cond(
slow_update, kernel.slow_update, identity, state_
)
# Reset the tuning parameters if requested
state_ = lax.cond(reset, kernel.reset, identity, state_)
return state_, state_
# state = system.init(q)
state, trace = lax.scan(
step, state, (schedule, random.split(rng_key, num_steps))
)
return kernel.tune_finish(state), trace
def build_schedule(
num_steps: int,
*,
initial_buffer_size: int = 75,
first_window_size: int = 25,
final_buffer_size: int = 75,
) -> List[Tuple[bool, bool]]:
num_steps = int(num_steps)
initial_buffer_size = int(initial_buffer_size)
first_window_size = int(first_window_size)
final_buffer_size = int(final_buffer_size)
if num_steps < 0:
raise ValueError("'num_steps' must be >=0")
if initial_buffer_size < 0:
raise ValueError("'initial_buffer_size' must be >=0")
if first_window_size < 1:
raise ValueError("'first_window_size' must be >=1")
if final_buffer_size < 0:
raise ValueError("'final_buffer_size' must be >=0")
# Special cases when num_steps is too small even for the hack below
if num_steps == 0:
warnings.warn("with zero tuning samples, the schedule is empty")
return []
if initial_buffer_size + first_window_size + final_buffer_size > num_steps:
warnings.warn(
"there are not enough tuning steps to accomodate the tuning "
"schedule; assigning automatically as 20%/70%/10%"
)
initial_buffer_size = np.ceil(0.2 * num_steps).astype(int)
final_buffer_size = np.ceil(0.1 * num_steps).astype(int)
first_window_size = num_steps - initial_buffer_size - final_buffer_size
# If this didn't cut it, 'num_steps' is too small (this should only happen
# when num_steps == 1) just return one step of tuning
if first_window_size <= 0:
initial_buffer_size = 0
final_buffer_size = 0
first_window_size = num_steps
t = initial_buffer_size
delta = first_window_size
if initial_buffer_size > 1:
update_steps = [(False, False)] * (initial_buffer_size - 1) + [
(False, True)
]
else:
update_steps = []
while t < num_steps - final_buffer_size:
if t + 2 * delta > num_steps - final_buffer_size:
d = num_steps - final_buffer_size - t
update_steps += [(False, False)] * (d - 1) + [(True, False)]
break
else:
update_steps += [(False, False)] * (delta - 1) + [(True, False)]
t += delta
delta = 2 * delta
if np.any(update_steps) <= 0:
raise ValueError("invalid tuning schedule")
return update_steps + [(False, False)] * final_buffer_size
| [
"jax.lax.cond",
"jax.numpy.zeros",
"numpy.ceil",
"jax.pmap",
"jax.local_device_count",
"numpy.any",
"jax.numpy.shape",
"jax._src.api.vmap",
"warnings.warn",
"jax.tree_util.tree_flatten",
"jax.random.split"
] | [((1350, 1383), 'jax.random.split', 'random.split', (['rng_key', 'num_chains'], {}), '(rng_key, num_chains)\n', (1362, 1383), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((869, 890), 'jax.random.split', 'random.split', (['rng_key'], {}), '(rng_key)\n', (881, 890), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((1926, 1948), 'jax.pmap', 'pmap', (['sample_one_chain'], {}), '(sample_one_chain)\n', (1930, 1948), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((1977, 1999), 'jax._src.api.vmap', 'vmap', (['sample_one_chain'], {}), '(sample_one_chain)\n', (1981, 1999), False, 'from jax._src.api import vmap\n'), ((2985, 3022), 'jax.numpy.zeros', 'jnp.zeros', (['(num_steps, 2)'], {'dtype': 'bool'}), '((num_steps, 2), dtype=bool)\n', (2994, 3022), True, 'import jax.numpy as jnp\n'), ((4683, 4747), 'warnings.warn', 'warnings.warn', (['"""with zero tuning samples, the schedule is empty"""'], {}), "('with zero tuning samples, the schedule is empty')\n", (4696, 4747), False, 'import warnings\n'), ((4855, 4989), 'warnings.warn', 'warnings.warn', (['"""there are not enough tuning steps to accomodate the tuning schedule; assigning automatically as 20%/70%/10%"""'], {}), "(\n 'there are not enough tuning steps to accomodate the tuning schedule; assigning automatically as 20%/70%/10%'\n )\n", (4868, 4989), False, 'import warnings\n'), ((6145, 6165), 'numpy.any', 'np.any', (['update_steps'], {}), '(update_steps)\n', (6151, 6165), True, 'import numpy as np\n'), ((1689, 1709), 'jax.local_device_count', 'local_device_count', ([], {}), '()\n', (1707, 1709), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((2751, 2788), 'jax.numpy.zeros', 'jnp.zeros', (['(num_steps, 2)'], {'dtype': 'bool'}), '((num_steps, 2), dtype=bool)\n', (2760, 2788), True, 'import jax.numpy as jnp\n'), ((3452, 3511), 'jax.lax.cond', 'lax.cond', (['slow_update', 'kernel.slow_update', 'identity', 'state_'], 
{}), '(slow_update, kernel.slow_update, identity, state_)\n', (3460, 3511), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((3619, 3666), 'jax.lax.cond', 'lax.cond', (['reset', 'kernel.reset', 'identity', 'state_'], {}), '(reset, kernel.reset, identity, state_)\n', (3627, 3666), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((3789, 3821), 'jax.random.split', 'random.split', (['rng_key', 'num_steps'], {}), '(rng_key, num_steps)\n', (3801, 3821), False, 'from jax import lax, local_device_count, pmap, random, vmap\n'), ((1432, 1460), 'jax.tree_util.tree_flatten', 'tree_flatten', (['initial_coords'], {}), '(initial_coords)\n', (1444, 1460), False, 'from jax.tree_util import tree_flatten\n'), ((1478, 1503), 'jax.numpy.shape', 'jnp.shape', (['proto_init_val'], {}), '(proto_init_val)\n', (1487, 1503), True, 'import jax.numpy as jnp\n'), ((5047, 5071), 'numpy.ceil', 'np.ceil', (['(0.2 * num_steps)'], {}), '(0.2 * num_steps)\n', (5054, 5071), True, 'import numpy as np\n'), ((5112, 5136), 'numpy.ceil', 'np.ceil', (['(0.1 * num_steps)'], {}), '(0.1 * num_steps)\n', (5119, 5136), True, 'import numpy as np\n'), ((1754, 1774), 'jax.local_device_count', 'local_device_count', ([], {}), '()\n', (1772, 1774), False, 'from jax import lax, local_device_count, pmap, random, vmap\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 20:21:27 2018
@author: Administrator
"""
import numpy as np
from MyLibrary import *
FPS=120
screenwidth=288
screenheight=512
fontsize=30
player_filename="players.png"
player_frame_width=48
player_frame_height=48
player_frame_num=4
base_filename="base.png"
base_frame_width=80
base_frame_height=15
base_frame_num=1
base_velocity_y=np.arange(-3,-8,-0.5)
stage=0
level=0
maxlevel=len(base_velocity_y)-1
timer_tick=30
interval=90
player_velocity_y=6
final_color=0,0,0
game_over=False
player_moving=False
FPSclock=pygame.time.Clock()
#创建玩家、平板的精灵组
player_group=pygame.sprite.Group()
base_group=pygame.sprite.Group()
#在屏幕下方随机位置生成板
def getRandomBase(filename,framewidth,frameheight,frameamount,velocity_y=-3,distance=100):
base=MySprite()
base.load(filename,framewidth,frameheight,frameamount)
base.position=random.randint(0,screenwidth-base.frame_width),distance+screenheight
base.velocity.y=velocity_y
base_group.add(base)
#当有板超出屏幕上方时,改变其Y坐标至屏幕下方
def chBase():
global stage
for base in base_group:
if base.Y<-base.frame_height:
stage+=1
base.Y=screenheight+interval
base.X=random.randint(0,screenwidth-base.frame_width)
#计算游戏难度、板的速度
def calcLevel():
global level
old_level=level
present_level=stage//20
if present_level>=old_level and level<maxlevel:
level+=1
return base_velocity_y[level]
#计算玩家的速度
def calcVelocity(direction,vel=1.0):
velocity=Point(0,0)
if direction==0:#not move
velocity.x=0
elif direction==1:#to the left
velocity.x=-vel
elif direction==2:#to the right
velocity.x=vel
return velocity
#当玩家按左右键时更改帧的图像
def frameChange():
if player.direction==0:#不动
player.first_frame=3
player.last_frame=3
elif player.direction==1:#向左
player.first_frame=0
player.last_frame=0
elif player.direction==2:#向右
player.first_frame=2
player.last_frame=2
if player.frame<player.first_frame:
player.frame=player.first_frame
#获取每个像素的透明度矩阵
def getHitmasks(image):
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
#如果碰撞,则返回True
def checkCrash(player,base,hitmasks):
#碰到底部
if player.Y + player.frame_height >= screenheight - 1:
return True
else:
player_rect = pygame.Rect(player.X, player.Y, player.frame_width, player.frame_height)
for base in base_group:
base_rect = pygame.Rect(base.X, base.Y, base.frame_width, base.frame_height)
player_hit_mask = hitmasks['player']
base_hit_mask = hitmasks['base']
#检测是否有像素碰撞
collide = pixelCollision(player_rect, base_rect, player_hit_mask, base_hit_mask)
if collide:
return True
return False
#检测像素是否碰撞
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in range(rect.width):
for y in range(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
class Game():
def __init__(self,screen0):
global screen, timer, player, font,player_group,base_group
pygame.init()
player_group = pygame.sprite.Group()
base_group = pygame.sprite.Group()
screen = screen0
timer = pygame.time.Clock()
player = MySprite()
player.load(player_filename, player_frame_width, player_frame_height, player_frame_num)
player.position = screenwidth // 3, screenheight // 3
player_group.add(player)
self.reset_score = True # 该标志用于确保score的重置在下一帧才进行
for i in np.arange(0, 501, 100):
getRandomBase(base_filename, base_frame_width, base_frame_height, base_frame_num, base_velocity_y[0], i)
def frameStep(self,input_actions):
if self.reset_score:
self.score=0
self.reset_score=False
pygame.event.pump()
reward=0.2
self.score+=1
terminal=False
timer.tick(timer_tick)
ticks = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
if(input_actions[0]==1):
player_moving = True
player.direction = 1
elif input_actions[2]==1:
player_moving = True
player.direction = 2
else:
player.direction = 0
global game_over,level
if game_over:
reward=-5
level = 0
terminal=True
global screen
self.__init__(screen)
game_over=False
else:
if player.direction:
player.velocity = calcVelocity(player.direction, 5)
#改变帧图像
frameChange()
#更新图像
player_group.update(ticks, 50)
#检测碰撞,以确定速度
player_moving = True
for base in base_group:
# player={}
Hitmasks = {}
Hitmasks['base'] = (getHitmasks(base.image))
Hitmasks['player'] = (getHitmasks(player.image))
iscrash = checkCrash(player, base,Hitmasks)
if iscrash:
if player.Y + player.frame_height >= base.Y and player.Y < base.Y:
if player.Y + player.frame_height < base.Y + 15:
player.Y = base.Y - player.frame_height + 2
player.velocity.y = base.velocity.y
elif player.X + player.frame_width >= base.X and player.X < base.X:
player.X = base.X - player.frame_width
player.velocity.y = player_velocity_y
elif player.X <= base.X + base.frame_width and base.X < player.X:
player.X = base.X + base.frame_width
player.velocity.y = player_velocity_y
break
else:
player.velocity.y = player_velocity_y
if player_moving:
player.X += player.velocity.x
player.Y += player.velocity.y
if player.X < 0:
player.X = 0
elif player.X > screenwidth - player.frame_width:
player.X = screenwidth - player.frame_width
if player.Y < 0 or player.Y > screenheight - player.frame_height:
game_over = True
#移动板
calcLevel()
for base in base_group:
base.velocity.y = base_velocity_y[level]
base.Y += base.velocity.y
#改变板的位置(如果碰到边界)
chBase()
screen.fill(final_color)
base_group.draw(screen)
player_group.draw(screen)
image_data = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
FPSclock.tick(FPS)
return image_data,reward,terminal,self.score | [
"numpy.arange"
] | [((403, 426), 'numpy.arange', 'np.arange', (['(-3)', '(-8)', '(-0.5)'], {}), '(-3, -8, -0.5)\n', (412, 426), True, 'import numpy as np\n'), ((4201, 4223), 'numpy.arange', 'np.arange', (['(0)', '(501)', '(100)'], {}), '(0, 501, 100)\n', (4210, 4223), True, 'import numpy as np\n')] |
from keras.applications import ResNet50
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from threading import Thread
from PIL import Image
import numpy as np
import base64
import flask
import redis
import uuid
import time
import json
import sys
import io
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
IMAGE_CHANS = 3
IMAGE_DTYPE = "float32"
BATCH_SIZE = 32
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25
IMAGE_QUEUE = "image_queue"
app = flask.Flask(__name__)
db = redis.StrictRedis(host="localhost", port=6379, db=0)
model = None
def base64_encode_image(a):
return base64.b64encode(a).decode("utf-8")
def base64_decode_image(a, dtype, shape):
if sys.version_info.major == 3:
a = bytes(a, encoding="utf-8")
a = np.frombuffer(base64.decodestring(a), dtype=dtype)
a = a.reshape(shape)
return a
def prepare_image(image, target):
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
return image
def classify_process():
print("* Loading model...")
model = ResNet50(weights="imagenet")
print("* Model loaded")
while True:
queue = db.lrange(IMAGE_QUEUE, 0, BATCH_SIZE - 1)
imageIDs = []
batch = None
for q in queue:
q = json.loads(q.decode("utf-8"))
image = base64_decode_image(q["image"], IMAGE_DTYPE,
(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS))
if batch is None:
batch = image
else:
batch = np.vstack([batch, image])
imageIDs.append(q["id"])
if len(imageIDs) > 0:
print("* Batch size: {}".format(batch.shape))
preds = model.predict(batch)
results = imagenet_utils.decode_predictions(preds)
for (imageID, resultSet) in zip(imageIDs, results):
output = []
for (imagenetID, label, prob) in resultSet:
r = {"label": label, "probability": float(prob)}
output.append(r)
db.set(imageID, json.dumps(output))
db.ltrim(IMAGE_QUEUE, len(imageIDs), -1)
time.sleep(SERVER_SLEEP)
@app.route("/predict", methods=["POST"])
def predict():
data = {"success": False}
if flask.request.method == "POST":
if flask.request.files.get("image"):
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
image = prepare_image(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
image = image.copy(order="C")
k = str(uuid.uuid4())
d = {"id": k, "image": base64_encode_image(image)}
db.rpush(IMAGE_QUEUE, json.dumps(d))
while True:
output = db.get(k)
if output is not None:
output = output.decode("utf-8")
data["predictions"] = json.loads(output)
db.delete(k)
break
time.sleep(CLIENT_SLEEP)
data["success"] = True
return flask.jsonify(data)
if __name__ == "__main__":
# load the function used to classify input images in a *separate*
# thread than the one used for main classification
print("* Starting model service...")
t = Thread(target=classify_process, args=())
t.daemon = True
t.start()
# start the web server
print("* Starting web service...")
app.run()
| [
"flask.request.files.get",
"keras.preprocessing.image.img_to_array",
"json.loads",
"flask.Flask",
"keras.applications.ResNet50",
"base64.b64encode",
"json.dumps",
"io.BytesIO",
"time.sleep",
"uuid.uuid4",
"base64.decodestring",
"redis.StrictRedis",
"numpy.expand_dims",
"numpy.vstack",
"k... | [((474, 495), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (485, 495), False, 'import flask\n'), ((501, 553), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(0)'}), "(host='localhost', port=6379, db=0)\n", (518, 553), False, 'import redis\n'), ((967, 986), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (979, 986), False, 'from keras.preprocessing.image import img_to_array\n'), ((996, 1025), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1010, 1025), True, 'import numpy as np\n'), ((1035, 1073), 'keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['image'], {}), '(image)\n', (1066, 1073), False, 'from keras.applications import imagenet_utils\n'), ((1151, 1179), 'keras.applications.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1159, 1179), False, 'from keras.applications import ResNet50\n'), ((2740, 2759), 'flask.jsonify', 'flask.jsonify', (['data'], {}), '(data)\n', (2753, 2759), False, 'import flask\n'), ((2951, 2991), 'threading.Thread', 'Thread', ([], {'target': 'classify_process', 'args': '()'}), '(target=classify_process, args=())\n', (2957, 2991), False, 'from threading import Thread\n'), ((768, 790), 'base64.decodestring', 'base64.decodestring', (['a'], {}), '(a)\n', (787, 790), False, 'import base64\n'), ((2015, 2039), 'time.sleep', 'time.sleep', (['SERVER_SLEEP'], {}), '(SERVER_SLEEP)\n', (2025, 2039), False, 'import time\n'), ((2165, 2197), 'flask.request.files.get', 'flask.request.files.get', (['"""image"""'], {}), "('image')\n", (2188, 2197), False, 'import flask\n'), ((604, 623), 'base64.b64encode', 'base64.b64encode', (['a'], {}), '(a)\n', (620, 623), False, 'import base64\n'), ((1693, 1733), 'keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['preds'], {}), 
'(preds)\n', (1726, 1733), False, 'from keras.applications import imagenet_utils\n'), ((1521, 1546), 'numpy.vstack', 'np.vstack', (['[batch, image]'], {}), '([batch, image])\n', (1530, 1546), True, 'import numpy as np\n'), ((2268, 2285), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (2278, 2285), False, 'import io\n'), ((2392, 2404), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2402, 2404), False, 'import uuid\n'), ((2485, 2498), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (2495, 2498), False, 'import json\n'), ((2681, 2705), 'time.sleep', 'time.sleep', (['CLIENT_SLEEP'], {}), '(CLIENT_SLEEP)\n', (2691, 2705), False, 'import time\n'), ((1949, 1967), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (1959, 1967), False, 'import json\n'), ((2629, 2647), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (2639, 2647), False, 'import json\n')] |
#!/usr/bin/env python
"""
Test that samples from the joint radius/energy distribution are actually
distributed like the model
"""
from icecube import icetray, MuonGun, phys_services
import numpy
from scipy import integrate
import warnings
import resource
rng = phys_services.I3GSLRandomService(0)
model = MuonGun.load_model('GaisserH4a_atmod12_SIBYLL')
# only look at bundles with at least 10 muons
model.flux.min_multiplicity = 10
def draw_bundle(rng, flux):
"""
Generate a bundle impact point, zenith angle, and multiplicity proportional
to the total flux
"""
max_flux = float(flux(1.5, 1., flux.min_multiplicity))
while True:
depth = rng.uniform(1.5, 2.5)
ct = rng.uniform(0, 1)
m = flux.min_multiplicity + rng.integer(flux.max_multiplicity-flux.min_multiplicity)
if rng.uniform(0, max_flux) < float(flux(depth, ct, m)):
return depth, ct, m
def sample_energy(edist, depth, ct, m, nsamples=10000):
# bins that will have roughly equal contents
rbins = numpy.array([0, 1, 2, 3, 4, 6, 10, 15, 250])
powerlaw = MuonGun.OffsetPowerLaw(4, 1e3, edist.min, edist.max)
ebins = powerlaw.isf(numpy.linspace(0, 1, 21)[::-1])
start = resource.getrusage(resource.RUSAGE_SELF)
samples = edist.generate(rng, depth, ct, m, nsamples)
end = resource.getrusage(resource.RUSAGE_SELF)
dt = end.ru_utime - start.ru_utime
icetray.logging.log_info("%.1f microsec/sample" % (1e6*dt/nsamples))
samples = numpy.array([(i.first, i.second) for i in samples])
bins, edges = numpy.histogramdd(samples, bins=(rbins, ebins))
assert bins.sum() == nsamples
empties = (bins < 10).sum()/float(bins.size)
if empties > 0.25:
warnings.warn('%d%% of bins have fewer than 10 entries' % (100*empties))
norm = nsamples/edist.integrate(depth, ct, m, 0, 250, edist.min, edist.max)
@numpy.vectorize
def integrate_model(rbin, ebin):
return edist.integrate(depth, ct, m, edges[0][rbin], edges[0][rbin+1], edges[1][ebin], edges[1][ebin+1])
i, j = numpy.meshgrid(range(len(rbins)-1), range(len(ebins)-1), indexing='ij')
mu = norm*integrate_model(i.T, j.T)
chi2 = (bins.T - mu)**2/mu
return samples, chi2.sum(), bins.size
for i in xrange(10):
depth, ct, m = draw_bundle(rng, model.flux)
samples, chi2, ndof = sample_energy(model.energy, depth, ct, m, nsamples=10000)
icetray.logging.log_info('depth=%.1f kmwe, cos(theta)=%.2f, m=%d, chi2/ndof = %.2f' % (depth, ct, m, chi2/ndof))
assert chi2/ndof < 1.5, "Samples follow model"
| [
"icecube.icetray.logging.log_info",
"icecube.phys_services.I3GSLRandomService",
"numpy.histogramdd",
"resource.getrusage",
"numpy.array",
"numpy.linspace",
"icecube.MuonGun.OffsetPowerLaw",
"warnings.warn",
"icecube.MuonGun.load_model"
] | [((265, 300), 'icecube.phys_services.I3GSLRandomService', 'phys_services.I3GSLRandomService', (['(0)'], {}), '(0)\n', (297, 300), False, 'from icecube import icetray, MuonGun, phys_services\n'), ((310, 357), 'icecube.MuonGun.load_model', 'MuonGun.load_model', (['"""GaisserH4a_atmod12_SIBYLL"""'], {}), "('GaisserH4a_atmod12_SIBYLL')\n", (328, 357), False, 'from icecube import icetray, MuonGun, phys_services\n'), ((1041, 1085), 'numpy.array', 'numpy.array', (['[0, 1, 2, 3, 4, 6, 10, 15, 250]'], {}), '([0, 1, 2, 3, 4, 6, 10, 15, 250])\n', (1052, 1085), False, 'import numpy\n'), ((1101, 1156), 'icecube.MuonGun.OffsetPowerLaw', 'MuonGun.OffsetPowerLaw', (['(4)', '(1000.0)', 'edist.min', 'edist.max'], {}), '(4, 1000.0, edist.min, edist.max)\n', (1123, 1156), False, 'from icecube import icetray, MuonGun, phys_services\n'), ((1228, 1268), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (1246, 1268), False, 'import resource\n'), ((1337, 1377), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (1355, 1377), False, 'import resource\n'), ((1421, 1499), 'icecube.icetray.logging.log_info', 'icetray.logging.log_info', (["('%.1f microsec/sample' % (1000000.0 * dt / nsamples))"], {}), "('%.1f microsec/sample' % (1000000.0 * dt / nsamples))\n", (1445, 1499), False, 'from icecube import icetray, MuonGun, phys_services\n'), ((1504, 1555), 'numpy.array', 'numpy.array', (['[(i.first, i.second) for i in samples]'], {}), '([(i.first, i.second) for i in samples])\n', (1515, 1555), False, 'import numpy\n'), ((1579, 1626), 'numpy.histogramdd', 'numpy.histogramdd', (['samples'], {'bins': '(rbins, ebins)'}), '(samples, bins=(rbins, ebins))\n', (1596, 1626), False, 'import numpy\n'), ((2434, 2557), 'icecube.icetray.logging.log_info', 'icetray.logging.log_info', (["('depth=%.1f kmwe, cos(theta)=%.2f, m=%d, chi2/ndof = %.2f' % (depth, ct, m,\n chi2 / ndof))"], {}), "(\n 'depth=%.1f 
kmwe, cos(theta)=%.2f, m=%d, chi2/ndof = %.2f' % (depth, ct,\n m, chi2 / ndof))\n", (2458, 2557), False, 'from icecube import icetray, MuonGun, phys_services\n'), ((1746, 1820), 'warnings.warn', 'warnings.warn', (["('%d%% of bins have fewer than 10 entries' % (100 * empties))"], {}), "('%d%% of bins have fewer than 10 entries' % (100 * empties))\n", (1759, 1820), False, 'import warnings\n'), ((1179, 1203), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(21)'], {}), '(0, 1, 21)\n', (1193, 1203), False, 'import numpy\n')] |
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import ks_2samp
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import silhouette_score
from sklearn.model_selection import KFold
def get_slope(y):
x = np.ones(y.shape).cumsum()
res = stats.linregress(x, y)
return res[0]
def pred_pressure(x, temp):
p0 = temp.iloc[0]["pressure"]
p1 = temp[(temp["time_step"] > 0.85) & (temp["time_step"] < 0.98)].mean()[
"pressure"
]
p2 = temp["pressure"].iloc[-1]
t_ref = x[3]
kp = x[0] / 10
ki = x[1] / 100
kd = x[2] / 1000
# c = x[3] * 100
err = p0 - p1
int_ = 0
de = (temp["pressure"].iloc[2] - temp["pressure"].iloc[0]) / temp["dt"].iloc[1]
error = [err]
# target = p1 - (p1 - p2) / (1 + np.exp(-c * (temp["time_step"] - t_ref)))
target = np.where(
temp["time_step"] < 1,
p1,
np.where(
temp["time_step"] < t_ref,
(p2 - t_ref * p1 + (p1 - p2) * temp["time_step"]) / (1 - t_ref),
p2,
),
)
for i in range(1, len(temp)):
dt = temp["dt"].iloc[i]
int_ = int_ + err * dt
dif_ = de / dt
de = kp * err + ki * int_ + kd * dif_ - err
err = err + de
error.append(err)
pred = target + error
return pred
def sum_squad(x, temp):
pred = pred_pressure(x, temp)
return ((pred - temp["pressure"]) ** 2).sum()
# sns.lineplot(data = temp,x='time_step', y = 'pred')
# sns.lineplot(data = temp,x='time_step', y = 'pressure')
# plt.show()
def eval_u_in_out_ks(df_model):
selected_breaths = df_model["breath_id"].unique()
temp = df_model[df_model["breath_id"].isin(selected_breaths)]
n_breaths = len(selected_breaths)
a = temp["u_in"].values.reshape(n_breaths, 80)
b = temp["u_out"].values.reshape(n_breaths, 80)
results = pd.DataFrame(
{
"breath_id": selected_breaths,
"ks_u": [ks_2samp(a[i], b[i])[0] for i in range(n_breaths)],
}
)
return results
def generate_folds(X, n_folds=5, shuffle=True, random_state=42):
X = X.copy()
kf = KFold(n_splits=n_folds, shuffle=shuffle, random_state=random_state)
folds = list(kf.split(X))
for fold_index in range(n_folds):
train_index, validation_index = folds[fold_index]
X.loc[X[X.index.isin(validation_index)].index, "fold"] = fold_index
return X["fold"].astype(int)
def get_breath_type(df, scaler, clusterer):
grouped = df.groupby("breath_id")
breaths = (
grouped.mean()
.drop(columns=["id", "time_step", "pressure"], errors="ignore")
.reset_index()
)
breaths = breaths.drop(columns="cluster", errors="ignore")
breaths_scaled = scaler.transform(breaths)
labels = clusterer.predict(breaths_scaled)
return labels
def preprocessing(df_model):
def log_exp_return(series):
return np.exp(np.log1p(series).diff(1).fillna(0))
# ---------------------------
mean_u_in = (
df_model.groupby("breath_id").agg(u_in_mean=("u_in", "mean")).reset_index()
)
# -----------------------------
ks_u = eval_u_in_out_ks(df_model)
teste = (
df_model.merge(mean_u_in, on="breath_id", how="left").merge(
ks_u, on="breath_id", how="left"
)
# .merge(slope,on="breath_id", how="left")
# .merge(pressure_init,on="breath_id", how="left")
)
# time diff
teste["time_diff"] = (
teste["time_step"].groupby(teste["breath_id"]).diff(1).fillna(0)
)
# u_in parameter
teste["u_in_ratio"] = (
teste["u_in"].groupby(teste["breath_id"]).apply(log_exp_return)
)
teste["last_value_u_in"] = (
teste["u_in"].groupby(teste["breath_id"]).transform("last")
)
teste["first_value_u_in"] = (
teste["u_in"].groupby(teste["breath_id"]).transform("first")
)
# u_in area
teste["area"] = teste["time_step"] * teste["u_in"]
teste["area"] = teste.groupby("breath_id")["area"].cumsum()
teste["u_in_cumsum"] = (teste["u_in"]).groupby(teste["breath_id"]).cumsum()
# u_in shift change
for i in np.arange(1, 5, 1):
teste[f"u_in_lag_fwrd{i}"] = (
teste["u_in"].groupby(teste["breath_id"]).shift(i).fillna(0)
)
teste[f"u_in_lag_back{i}"] = (
teste["u_in"].groupby(teste["breath_id"]).shift(int(-i)).fillna(0)
)
# R, C parameter
teste["RC"] = teste["C"] * teste["R"]
teste["R/C"] = teste["R"] / teste["C"]
teste["C/R"] = teste["C"] / teste["R"]
#teste["R"] = teste["R"].astype("category")
#teste["C"] = teste["C"].astype("category")
#teste["RC"] = teste["RC"].astype("category")
#teste["R/C"] = teste["R/C"].astype("category")
#teste["C/R"] = teste["C/R"].astype("category")
# for col in teste.dtypes[teste.dtypes == "category"].index:
# teste = pd.concat([teste.drop(columns=col,errors='ignore'),pd.get_dummies(teste[col], prefix=col)], axis=1)
return teste
class BreathClusterer:
def __init__(
self,
scaler=StandardScaler(),
n_clusters_list=range(4, 10),
verbose=1,
**kwargs,
):
self.scaler = scaler
self.n_clusters_list = n_clusters_list
self.verbose = verbose
self.kmean_parameters = kwargs
self.best_score = -1
self.clusterer = KMeans(**kwargs)
def preprocess(self, X, y=None):
grouped = X.groupby("breath_id")
breaths = (
grouped.mean()
.drop(columns=["id", "time_step", "pressure"], errors="ignore")
.reset_index()
)
breaths = breaths.drop(columns="cluster", errors="ignore")
return breaths
def transform(self, X, y=None):
breaths = self.preprocess(X)
breaths_scaled = self.scaler.transform(breaths)
return breaths_scaled
def predict(self, X, y=None):
X = X.copy()
breaths = self.preprocess(X)
breaths_scaled = self.scaler.transform(breaths)
labels_prediction = self.clusterer.predict(breaths_scaled)
breaths["breath_type"] = labels_prediction
return breaths[["breath_id", "breath_type"]]
def fit(self, X, y=None):
breaths = self.preprocess(X)
self.scaler.fit(breaths)
breaths_scaled = self.scaler.transform(breaths)
n_cluster_scores = []
for n_clusters in self.n_clusters_list:
pars = self.clusterer.get_params()
pars.update(dict(n_clusters=n_clusters))
temp_clusterer = self.clusterer.__class__(**pars)
temp_clusterer.fit(breaths_scaled)
silhouette = silhouette_score(breaths_scaled, temp_clusterer.labels_)
if silhouette > self.best_score:
self.best_score = silhouette
self.clusterer = temp_clusterer
n_cluster_scores.append({"n_cluster": n_clusters, "score": silhouette})
if self.verbose > 0:
print(
"n_cluster: {} | silhouette: {:.3f}".format(n_clusters, silhouette)
)
return self
| [
"sklearn.cluster.KMeans",
"scipy.stats.linregress",
"numpy.ones",
"numpy.where",
"scipy.stats.ks_2samp",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.KFold",
"sklearn.metrics.silhouette_score",
"numpy.arange",
"numpy.log1p"
] | [((348, 370), 'scipy.stats.linregress', 'stats.linregress', (['x', 'y'], {}), '(x, y)\n', (364, 370), False, 'from scipy import stats\n'), ((2237, 2304), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_folds', 'shuffle': 'shuffle', 'random_state': 'random_state'}), '(n_splits=n_folds, shuffle=shuffle, random_state=random_state)\n', (2242, 2304), False, 'from sklearn.model_selection import KFold\n'), ((4265, 4283), 'numpy.arange', 'np.arange', (['(1)', '(5)', '(1)'], {}), '(1, 5, 1)\n', (4274, 4283), True, 'import numpy as np\n'), ((987, 1096), 'numpy.where', 'np.where', (["(temp['time_step'] < t_ref)", "((p2 - t_ref * p1 + (p1 - p2) * temp['time_step']) / (1 - t_ref))", 'p2'], {}), "(temp['time_step'] < t_ref, (p2 - t_ref * p1 + (p1 - p2) * temp[\n 'time_step']) / (1 - t_ref), p2)\n", (995, 1096), True, 'import numpy as np\n'), ((5218, 5234), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5232, 5234), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((5518, 5534), 'sklearn.cluster.KMeans', 'KMeans', ([], {}), '(**kwargs)\n', (5524, 5534), False, 'from sklearn.cluster import KMeans\n'), ((311, 327), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (318, 327), True, 'import numpy as np\n'), ((6832, 6888), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['breaths_scaled', 'temp_clusterer.labels_'], {}), '(breaths_scaled, temp_clusterer.labels_)\n', (6848, 6888), False, 'from sklearn.metrics import silhouette_score\n'), ((2054, 2074), 'scipy.stats.ks_2samp', 'ks_2samp', (['a[i]', 'b[i]'], {}), '(a[i], b[i])\n', (2062, 2074), False, 'from scipy.stats import ks_2samp\n'), ((3035, 3051), 'numpy.log1p', 'np.log1p', (['series'], {}), '(series)\n', (3043, 3051), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Pocket SDR Python AP - Plot PSD and histgrams of digital IF data
#
# Author:
# T.TAKASU
#
# History:
# 2021-10-20 0.1 new
# 2021-12-01 1.0 rename pocket_plot.py -> pocket_psd.py
# add option -h
# 2021-12-10 1.1 improve plotting time
# 2022-01-10 1.2 add OFFSET and SIGMA in histgram plot
#
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import sdr_func
# show usage --------------------------------------------------------------------
def show_usage():
    """Print the command-line usage of pocket_psd.py and terminate."""
    usage = 'Usage: pocket_psd.py [-t tint] [-f freq] [-IQ] [-h] [-n NFFT] [-sdr sdrname] file'
    print(usage)
    exit()
# plot PSD ---------------------------------------------------------------------
def plot_psd(fig, rect, IQ, fs, fc, bc):
    """Create the PSD axes inside *rect* and return (axes, annotation text).

    IQ == 1 gives a one-sided (real sampling) frequency axis, otherwise a
    two-sided axis for complex I/Q data. fc/bc are fore/background colors.
    """
    y_range = [-80, -45]
    if IQ == 1:  # real (I) sampling: positive frequencies only
        x_range = [0, fs * 0.5]
    else:  # I/Q sampling: two-sided spectrum
        x_range = [-fs * 0.5, fs * 0.5]
    ax = fig.add_axes(rect, facecolor=bc)
    # coarser tick spacing for wide spans
    step = 1e6 if x_range[1] - x_range[0] < 15e6 else 2e6
    ticks_x = np.arange(np.floor(x_range[0] / step) * step, x_range[1] + step, step)
    ticks_y = np.arange(y_range[0], y_range[1] + 5, 5)
    plt.xticks(ticks_x, ("%.0f" % (t * 1e-6) for t in ticks_x))
    plt.yticks(ticks_y)
    ax.set_xlim(x_range)
    ax.set_ylim(y_range)
    ax.grid(True, lw=0.4)
    # annotation placeholder filled in by update_psd()
    label = ax.text(0.97, 0.97, '', ha='right', va='top', color=fc,
                    transform=ax.transAxes)
    return ax, label
# update PSD -------------------------------------------------------------------
def update_psd(ax, p, data, IQ, fs, time, N, fc):
    """Redraw the PSD of *data* on *ax* and refresh annotation *p*."""
    # drop the previously drawn PSD curve(s) before plotting the new one
    for old_line in list(ax.get_lines()):
        old_line.remove()
    nfft = N
    if IQ == 2:  # complex I/Q samples: halve the FFT size
        nfft = int(nfft / 2)
    plt.sca(ax)
    plt.psd(data, Fs=fs, NFFT=nfft, c=fc, lw=0.3)
    p.set_text('Fs = %6.3f MHz\nT= %7.3f s' % (fs / 1e6, time))
    ax.set_xlabel('Frequency (MHz)')
# plot histgram ----------------------------------------------------------------
def plot_hist_d(fig, rect, text, fc, bc):
    """Create one histogram panel ('I' or 'Q') and return (axes, stat label)."""
    ax = fig.add_axes(rect, facecolor=bc)
    ax.set_xticks(np.arange(-5, 6, 1))
    ax.set_xlim([-5, 5])
    ax.set_ylim([0, 0.5])
    ax.tick_params(labelleft=False)
    # the upper (I) panel hides x labels; the lower (Q) panel shows them
    if text == 'I':
        ax.tick_params(labelbottom=False)
    ax.grid(True, lw=0.4)
    # panel name in the top-left corner
    ax.text(0.07, 0.935, text, ha='center', va='top', color=fc,
            transform=ax.transAxes)
    # placeholder for OFFSET/SIGMA, filled in by update_hist_d()
    stat_label = ax.text(0.95, 0.935, '', ha='right', va='top', color=fc,
                         transform=ax.transAxes)
    return ax, stat_label
# plot histgrams ---------------------------------------------------------------
def plot_hist(fig, rect, fc, bc):
    """Create stacked I (upper) and Q (lower) histogram panels inside *rect*.

    Returns ((ax_i, ax_q), (label_i, label_q)).
    """
    panel_h = rect[3] / 2 - 0.02
    upper = [rect[0], rect[1] + panel_h + 0.04, rect[2], panel_h]
    lower = [rect[0], rect[1], rect[2], panel_h]
    ax_i, label_i = plot_hist_d(fig, upper, 'I', fc, bc)
    ax_q, label_q = plot_hist_d(fig, lower, 'Q', fc, bc)
    ax_q.set_xlabel('Quantized Value')
    return (ax_i, ax_q), (label_i, label_q)
# update histgram --------------------------------------------------------------
def update_hist_d(ax, p, data, fc):
    """Redraw one histogram panel with *data* and refresh the stat label *p*.

    When *data* is empty (the Q panel in real-sampling mode) the panel and
    label are simply cleared.
    """
    # remove the bars from the previous frame; iterate a copy because
    # .remove() mutates ax.patches while we walk it
    for patch in list(ax.patches):
        patch.remove()
    if len(data) > 0:
        bins = np.arange(-5.5, 6.5, 1)
        plt.sca(ax)
        plt.hist(data, bins=bins, density=True, rwidth=0.7, color=fc)
        # np.mean()/np.std() of an empty array are NaN and raise a
        # RuntimeWarning, so only compute the stats when data is present
        p.set_text('OFFSET = %.3f\nSIGMA = %.3f' % (np.mean(data), np.std(data)))
    else:
        p.set_text('')
# update histgrams -------------------------------------------------------------
def update_hist(ax, p, data, IQ, fc):
    """Refresh the I and Q histogram panels from complex sample array *data*.

    In real-sampling mode (IQ == 1) the Q panel receives no data.
    """
    i_samples = data.real
    q_samples = [] if IQ == 1 else data.imag
    update_hist_d(ax[0], p[0], i_samples, fc)
    update_hist_d(ax[1], p[1], q_samples, fc)
#-------------------------------------------------------------------------------
#
# Synopsis
#
# pocket_psd.py [-t tint] [-f freq] [-IQ] [-h] [-n NFFT] file
#
# Description
#
# Plot PSD (power spectrum density) and histgram of input digital IF data.
#
# Options ([]: default)
#
# -t tint
# Time interval for PSD and histgram in seconds. [0.01]
#
# -f freq
# Sampling frequency of digital IF data in MHz. [24.000]
#
# -IQ
# I/Q sampling type of digital IF data. [no]
#
# -h
# Enable histgram plots. [no]
#
# -n NFFT
# Number of FFT data points for PSD. [4096]
#
# -sdr sdrname
# Specify SDR name: pocketsdr or bladerf [pocketsdr]
#
#
if __name__ == '__main__':
    # default settings (see the synopsis comment above)
    window = 'PocketSDR - POWER SPECTRAL DENSITY'
    size = (9, 6)         # figure size in inches
    tint = 0.01           # plot update interval [s]
    fs = 24e6             # sampling frequency [Hz]
    IQ = 1                # 1: real (I) sampling, 2: I/Q sampling
    N = 4096              # number of FFT points for the PSD
    hist = 0              # 1: also plot I/Q histograms
    file = ''             # input digital IF data file
    fc = 'darkblue'       # foreground (plot) color
    bc = 'w'              # background color
    rect0 = [0.08, 0.09, 0.84, 0.85]   # full-width PSD axes
    rect1 = [0.08, 0.09, 0.56, 0.85]   # PSD axes when histograms are shown
    rect2 = [0.67, 0.09, 0.30, 0.85]   # histogram axes
    sdrname = 'pocketsdr'
    # parse command line options by hand (no argparse to keep deps minimal)
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == '-t':
            i += 1
            tint = float(sys.argv[i])
        elif sys.argv[i] == '-f':
            i += 1
            fs = float(sys.argv[i]) * 1e6
        elif sys.argv[i] == '-n':
            i += 1
            N = int(sys.argv[i])
        elif sys.argv[i] == '-IQ':
            IQ = 2
        elif sys.argv[i] == '-h':
            hist = 1
        elif sys.argv[i] == '-sdr':
            i += 1
            sdrname = sys.argv[i]
        elif sys.argv[i][0] == '-':
            # unknown option: print usage and exit
            show_usage()
        else:
            file = sys.argv[i];
        i += 1
    if file == '':
        print('Specify input file.')
        exit()
    mpl.rcParams['toolbar'] = 'None';
    mpl.rcParams['font.size'] = 9
    fig = plt.figure(window, figsize=size)
    ax0 = fig.add_axes(rect0)
    ax0.axis('off')
    ax0.set_title('Digital IF data: FILE = ' + file, fontsize=10)
    # layout: full-width PSD only, or PSD + histogram panels side by side
    if hist:
        ax1, p1 = plot_psd(fig, rect1, IQ, fs, fc, bc)
        ax2, p2 = plot_hist(fig, rect2, fc, bc)
    else:
        ax1, p1 = plot_psd(fig, rect0, IQ, fs, fc, bc)
    try:
        # keep reading successive tint-long chunks and redrawing until the
        # window is closed or the file runs out of full chunks
        for i in range(0, 10000000):
            data = sdr_func.read_data(file, fs, IQ, tint, toff=tint * i, sdrname=sdrname)
            if plt.figure(window) != fig: # window closed
                exit()
            if len(data) >= int(fs * tint):
                if hist:
                    update_psd(ax1, p1, data, IQ, fs, tint * i, N, fc)
                    update_hist(ax2, p2, data, IQ, fc)
                else:
                    update_psd(ax1, p1, data, IQ, fs, tint * i, N, fc)
                plt.pause(1e-3)
    except KeyboardInterrupt:
        exit()
| [
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.psd",
"numpy.floor",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"sdr_func.read_data",
"numpy.std",
"matplotlib.pyplot.pause",
"numpy.arange"
] | [((1060, 1090), 'numpy.arange', 'np.arange', (['yl[0]', '(yl[1] + 5)', '(5)'], {}), '(yl[0], yl[1] + 5, 5)\n', (1069, 1090), True, 'import numpy as np\n'), ((1095, 1145), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xt', "('%.0f' % (x * 1e-06) for x in xt)"], {}), "(xt, ('%.0f' % (x * 1e-06) for x in xt))\n", (1105, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1163), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yt'], {}), '(yt)\n', (1159, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1580), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (1576, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1627), 'matplotlib.pyplot.psd', 'plt.psd', (['data'], {'Fs': 'fs', 'NFFT': 'N', 'c': 'fc', 'lw': '(0.3)'}), '(data, Fs=fs, NFFT=N, c=fc, lw=0.3)\n', (1592, 1627), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5379), 'matplotlib.pyplot.figure', 'plt.figure', (['window'], {'figsize': 'size'}), '(window, figsize=size)\n', (5357, 5379), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1932), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (1922, 1932), True, 'import numpy as np\n'), ((2930, 2953), 'numpy.arange', 'np.arange', (['(-5.5)', '(6.5)', '(1)'], {}), '(-5.5, 6.5, 1)\n', (2939, 2953), True, 'import numpy as np\n'), ((2962, 2973), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (2969, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2982, 3043), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': 'bins', 'density': '(True)', 'rwidth': '(0.7)', 'color': 'fc'}), '(data, bins=bins, density=True, rwidth=0.7, color=fc)\n', (2990, 3043), True, 'import matplotlib.pyplot as plt\n'), ((1008, 1028), 'numpy.floor', 'np.floor', (['(xl[0] / xi)'], {}), '(xl[0] / xi)\n', (1016, 1028), True, 'import numpy as np\n'), ((5747, 5817), 'sdr_func.read_data', 'sdr_func.read_data', (['file', 'fs', 'IQ', 'tint'], {'toff': '(tint * i)', 'sdrname': 'sdrname'}), '(file, fs, IQ, tint, 
toff=tint * i, sdrname=sdrname)\n', (5765, 5817), False, 'import sdr_func\n'), ((6225, 6241), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (6234, 6241), True, 'import matplotlib.pyplot as plt\n'), ((5846, 5864), 'matplotlib.pyplot.figure', 'plt.figure', (['window'], {}), '(window)\n', (5856, 5864), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3109), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (3103, 3109), True, 'import numpy as np\n'), ((3111, 3123), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (3117, 3123), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
import pandas as pd
import sklearn.linear_model
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, \
RidgeRegression, LassoRegression
from sklearn.linear_model import Lasso
from IMLearn.metrics.loss_functions import mean_square_error
from IMLearn.desent_methods.gradient_descent import GradientDescent
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
MIN_X = -1.2
MAX_X = 2
NORMAL_MEAN = 0
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select
    the best fitting degree.

    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate
    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - generate noisy samples of
    # f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) and split into train/test portions
    train_proportion = 2 / 3
    poly = lambda t: (t + 3) * (t + 2) * (t + 1) * (t - 1) * (t - 2)
    X = np.linspace(start=MIN_X, stop=MAX_X, num=n_samples)
    noiseless_y = poly(X)
    noised_y = poly(X) + np.random.normal(loc=NORMAL_MEAN, scale=noise,
                                          size=n_samples)
    train_x, train_y, test_x, test_y = split_train_test(
        pd.DataFrame(X), pd.Series(noised_y), train_proportion)
    # flatten the pandas outputs back into 1-d numpy arrays
    train_x = train_x.to_numpy().reshape(len(train_x))
    train_y = train_y.to_numpy().reshape(len(train_y))
    test_x = test_x.to_numpy().reshape(len(test_x))
    test_y = test_y.to_numpy().reshape(len(test_y))

    fig1 = go.Figure()
    fig1.add_trace(
        go.Scatter(x=X, y=noiseless_y, mode="markers+lines", name="real"))
    fig1.add_trace(go.Scatter(x=test_x, y=test_y, mode="markers",
                              marker=dict(color='#ff6361'), name="test"))
    fig1.add_trace(go.Scatter(x=train_x, y=train_y, mode="markers",
                              marker=dict(color='#58508d'), name="train"))
    fig1.update_layout(title="Noise vs. Noiseless", xaxis_title="x",
                       yaxis_title="f(x)", height=700, width=1000)
    fig1.show()

    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    polynomial_degrees = 11
    validation_error, training_error = [], []
    for deg in range(polynomial_degrees):
        train_score, val_score = cross_validate(PolynomialFitting(deg),
                                                train_x, train_y,
                                                mean_square_error)
        validation_error.append(val_score)
        training_error.append(train_score)
    degrees = [i for i in range(polynomial_degrees)]
    fig2 = go.Figure()
    fig2.add_trace(go.Scatter(x=degrees, y=validation_error, mode="markers",
                              marker=dict(color='#ff6361'), name="validation"))
    fig2.add_trace(go.Scatter(x=degrees, y=training_error, mode="markers",
                              marker=dict(color='#58508d'), name="training"))
    fig2.update_layout(
        title="Mean Error as Function of Polynom Degree",
        xaxis_title="degree",
        yaxis_title="validation error", height=700, width=1000)
    fig2.show()

    # Question 3 - refit with the best degree and report the test error
    k_star = np.argmin(validation_error)
    print("k-star: ", k_star)
    p_reg = PolynomialFitting(k_star).fit(train_x, train_y)
    loss = p_reg.loss(test_x, test_y)
    print("loss on test set: ", loss)
def select_regularization_parameter(n_samples: int = 50,
                                    n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best
    fitting regularization parameter values for Ridge and Lasso regressions.

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples used for training (the remainder is the test set)
    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of
        the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    data = datasets.load_diabetes()
    X, y = data.data, data.target
    # BUG FIX: the split previously hard-coded 50, silently ignoring n_samples;
    # the default (50) keeps the original behavior for existing callers.
    train_x, train_y = X[:n_samples], y[:n_samples]
    test_x, test_y = X[n_samples:], y[n_samples:]

    # Question 7 - Perform CV for different values of the regularization
    # parameter for Ridge and Lasso regressions
    lamds_range = np.linspace(0, 1, n_evaluations)
    validation_error_lasso, validation_error_ridge = [], []
    training_error_lasso, training_error_ridge = [], []
    for lam in lamds_range:
        lasso = sklearn.linear_model.Lasso(lam)
        ridge = RidgeRegression(lam)
        ts_lasso, vs_lasso = cross_validate(lasso, train_x, train_y,
                                            mean_square_error)
        ts_ridge, vs_ridge = cross_validate(ridge, train_x, train_y,
                                            mean_square_error)
        validation_error_ridge.append(vs_ridge)
        training_error_ridge.append(ts_ridge)
        validation_error_lasso.append(vs_lasso)
        training_error_lasso.append(ts_lasso)

    fig7 = make_subplots(rows=1, cols=2, shared_xaxes=True,
                         subplot_titles=("Lasso", "Ridge"))
    fig7.add_trace(go.Scatter(x=lamds_range,
                              y=validation_error_lasso,
                              mode="markers+lines",
                              marker=dict(color='#ff6361'), name="validation"),
                   row=1, col=1)
    fig7.add_trace(go.Scatter(x=lamds_range,
                              y=training_error_lasso,
                              mode="markers+lines",
                              marker=dict(color='#003f5c'), name="training"),
                   row=1, col=1)
    fig7.add_trace(go.Scatter(x=lamds_range,
                              y=validation_error_ridge,
                              mode="markers+lines",
                              marker=dict(color='#ff6361'), name="validation"),
                   row=1, col=2)
    fig7.add_trace(go.Scatter(x=lamds_range,
                              y=training_error_ridge,
                              mode="markers+lines",
                              marker=dict(color='#003f5c'), name="training"),
                   row=1, col=2)
    fig7.update_layout(
        title="Scoring in Different Models",
        xaxis_title="lambda",
        yaxis_title="scoring")
    fig7.show()

    # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
    best_ridge = lamds_range[np.argmin(validation_error_ridge)]
    best_lasso = lamds_range[np.argmin(validation_error_lasso)]
    print(f"regularization best: ridge-{best_ridge}, lasso-{best_lasso}")
    lasso = sklearn.linear_model.Lasso(best_lasso)
    ridge = RidgeRegression(lam=best_ridge)
    linear = LinearRegression()
    lasso.fit(train_x, train_y)
    ridge.fit(train_x, train_y)
    linear.fit(train_x, train_y)
    lasso_loss = mean_square_error(lasso.predict(test_x), test_y)
    ridge_loss = mean_square_error(ridge.predict(test_x), test_y)
    linear_loss = linear.loss(test_x, test_y)
    print(
        f"Models errors: ridge-{ridge_loss}, lasso-{lasso_loss}, linear-{linear_loss}")
if __name__ == '__main__':
    # fix the RNG so the simulated noise (and thus the plots) is reproducible
    np.random.seed(0)
    # Q1-Q3: polynomial degree selection under different noise/sample regimes
    select_polynomial_degree()
    select_polynomial_degree(noise=0)
    select_polynomial_degree(n_samples=1500, noise=10)
    # Q6-Q8: regularization parameter selection for Ridge/Lasso
    select_regularization_parameter()
| [
"numpy.random.normal",
"pandas.Series",
"plotly.subplots.make_subplots",
"pandas.DataFrame",
"IMLearn.model_selection.cross_validate",
"IMLearn.learners.regressors.RidgeRegression",
"plotly.graph_objects.Figure",
"numpy.linspace",
"IMLearn.learners.regressors.LinearRegression",
"sklearn.datasets.l... | [((1320, 1371), 'numpy.linspace', 'np.linspace', ([], {'start': 'MIN_X', 'stop': 'MAX_X', 'num': 'n_samples'}), '(start=MIN_X, stop=MAX_X, num=n_samples)\n', (1331, 1371), True, 'import numpy as np\n'), ((2102, 2113), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2111, 2113), True, 'import plotly.graph_objects as go\n'), ((3105, 3116), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (3114, 3116), True, 'import plotly.graph_objects as go\n'), ((3911, 3938), 'numpy.argmin', 'np.argmin', (['validation_error'], {}), '(validation_error)\n', (3920, 3938), True, 'import numpy as np\n'), ((4721, 4745), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ([], {}), '()\n', (4743, 4745), False, 'from sklearn import datasets\n'), ((4984, 5016), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_evaluations'], {}), '(0, 1, n_evaluations)\n', (4995, 5016), True, 'import numpy as np\n'), ((5710, 5797), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'shared_xaxes': '(True)', 'subplot_titles': "('Lasso', 'Ridge')"}), "(rows=1, cols=2, shared_xaxes=True, subplot_titles=('Lasso',\n 'Ridge'))\n", (5723, 5797), False, 'from plotly.subplots import make_subplots\n'), ((7374, 7405), 'IMLearn.learners.regressors.RidgeRegression', 'RidgeRegression', ([], {'lam': 'best_ridge'}), '(lam=best_ridge)\n', (7389, 7405), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression, LassoRegression\n'), ((7419, 7437), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7435, 7437), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression, LassoRegression\n'), ((7847, 7864), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7861, 7864), True, 'import numpy as np\n'), ((1417, 1479), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'NORMAL_MEAN', 'scale': 
'noise', 'size': 'n_samples'}), '(loc=NORMAL_MEAN, scale=noise, size=n_samples)\n', (1433, 1479), True, 'import numpy as np\n'), ((1576, 1591), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1588, 1591), True, 'import pandas as pd\n'), ((1649, 1668), 'pandas.Series', 'pd.Series', (['noised_y'], {}), '(noised_y)\n', (1658, 1668), True, 'import pandas as pd\n'), ((2142, 2207), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X', 'y': 'noiseless_y', 'mode': '"""markers+lines"""', 'name': '"""real"""'}), "(x=X, y=noiseless_y, mode='markers+lines', name='real')\n", (2152, 2207), True, 'import plotly.graph_objects as go\n'), ((5225, 5245), 'IMLearn.learners.regressors.RidgeRegression', 'RidgeRegression', (['lam'], {}), '(lam)\n', (5240, 5245), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression, LassoRegression\n'), ((5275, 5333), 'IMLearn.model_selection.cross_validate', 'cross_validate', (['lasso', 'train_x', 'train_y', 'mean_square_error'], {}), '(lasso, train_x, train_y, mean_square_error)\n', (5289, 5333), False, 'from IMLearn.model_selection import cross_validate\n'), ((5407, 5465), 'IMLearn.model_selection.cross_validate', 'cross_validate', (['ridge', 'train_x', 'train_y', 'mean_square_error'], {}), '(ridge, train_x, train_y, mean_square_error)\n', (5421, 5465), False, 'from IMLearn.model_selection import cross_validate\n'), ((7137, 7170), 'numpy.argmin', 'np.argmin', (['validation_error_ridge'], {}), '(validation_error_ridge)\n', (7146, 7170), True, 'import numpy as np\n'), ((7201, 7234), 'numpy.argmin', 'np.argmin', (['validation_error_lasso'], {}), '(validation_error_lasso)\n', (7210, 7234), True, 'import numpy as np\n'), ((2930, 2952), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['deg'], {}), '(deg)\n', (2947, 2952), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression, LassoRegression\n'), ((3981, 4006), 
'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['k_star'], {}), '(k_star)\n', (3998, 4006), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression, LassoRegression\n')] |
import numpy as np
from sample_statistics import sample_statistics as stats
class ArtifactsVector:
    """Aggregates length/vocabulary statistics over a list of split artifacts.

    Each artifact is expected to expose ``key`` (root id + split id),
    ``source`` (audio with ``n_samples``/``n_seconds``) and ``target``
    (transcription with ``tokens``/``n_words``/``n_graphemes``).
    """

    def __init__(self, _config, _artifacts):
        self.config = _config
        # Count how many splits originate from each root recording;
        # key[0:-1] identifies the root (the last element is the split id).
        counts_per_root = {}
        for artifact in _artifacts:
            root = artifact.key[0:-1]
            counts_per_root[root] = counts_per_root.get(root, 0) + 1
        self.N_splits_per_root = list(counts_per_root.values())
        # Audio source lengths
        self.split_length_in_samples = np.array(
            [a.source.n_samples for a in _artifacts])
        self.split_length_in_seconds = np.array(
            [a.source.n_seconds for a in _artifacts])
        # Text target lengths
        self.split_words = [a.target.tokens for a in _artifacts]
        self.split_length_in_words = np.array(
            [a.target.n_words for a in _artifacts])
        self.split_length_in_graphemes = np.array(
            [a.target.n_graphemes for a in _artifacts])
        # Average audio length per word / grapheme
        self.samples_per_word = self.split_length_in_samples / self.split_length_in_words
        self.seconds_per_word = self.samples_per_word / self.config.sample_rate
        self.samples_per_grapheme = self.split_length_in_samples / self.split_length_in_graphemes
        self.seconds_per_grapheme = self.samples_per_grapheme / self.config.sample_rate
        # Vocabulary (sorted unique words across all splits)
        vocabulary = {word for sentence in self.split_words for word in sentence}
        self.all_words = sorted(vocabulary)
        self.N_all_words = len(self.all_words)
        # Grapheme inventory (sorted unique characters of the vocabulary)
        self.all_graphemes = sorted(set(''.join(self.all_words)))
        self.N_all_graphemes = len(self.all_graphemes)
        # Word lengths measured in graphemes
        self.word_lengths_in_graphemes = [len(w) for w in self.all_words]

    def sample_statistics(self):
        """Return a flat list of summary statistics for all tracked quantities."""
        report = []
        for name, measure, values in (
                ('Split Speech', 'Length in samples', self.split_length_in_samples),
                ('Split Speech', 'Length in seconds', self.split_length_in_seconds),
                ('Split Transcription', 'Length in words', self.split_length_in_words),
                ('Split Transcription', 'Length in graphemes', self.split_length_in_graphemes),
                ('Words', 'Length in samples', self.samples_per_word),
                ('Words', 'Length in seconds', self.seconds_per_word),
                ('Graphemes', 'Length in samples', self.samples_per_grapheme),
                ('Graphemes', 'Length in seconds', self.seconds_per_grapheme)):
            report.extend(stats(name, measure, values))
        return report
| [
"numpy.array",
"sample_statistics.sample_statistics"
] | [((492, 556), 'numpy.array', 'np.array', (['[artifact.source.n_samples for artifact in _artifacts]'], {}), '([artifact.source.n_samples for artifact in _artifacts])\n', (500, 556), True, 'import numpy as np\n'), ((594, 658), 'numpy.array', 'np.array', (['[artifact.source.n_seconds for artifact in _artifacts]'], {}), '([artifact.source.n_seconds for artifact in _artifacts])\n', (602, 658), True, 'import numpy as np\n'), ((793, 855), 'numpy.array', 'np.array', (['[artifact.target.n_words for artifact in _artifacts]'], {}), '([artifact.target.n_words for artifact in _artifacts])\n', (801, 855), True, 'import numpy as np\n'), ((895, 961), 'numpy.array', 'np.array', (['[artifact.target.n_graphemes for artifact in _artifacts]'], {}), '([artifact.target.n_graphemes for artifact in _artifacts])\n', (903, 961), True, 'import numpy as np\n'), ((1802, 1874), 'sample_statistics.sample_statistics', 'stats', (['"""Split Speech"""', '"""Length in samples"""', 'self.split_length_in_samples'], {}), "('Split Speech', 'Length in samples', self.split_length_in_samples)\n", (1807, 1874), True, 'from sample_statistics import sample_statistics as stats\n'), ((1893, 1965), 'sample_statistics.sample_statistics', 'stats', (['"""Split Speech"""', '"""Length in seconds"""', 'self.split_length_in_seconds'], {}), "('Split Speech', 'Length in seconds', self.split_length_in_seconds)\n", (1898, 1965), True, 'from sample_statistics import sample_statistics as stats\n'), ((1984, 2059), 'sample_statistics.sample_statistics', 'stats', (['"""Split Transcription"""', '"""Length in words"""', 'self.split_length_in_words'], {}), "('Split Transcription', 'Length in words', self.split_length_in_words)\n", (1989, 2059), True, 'from sample_statistics import sample_statistics as stats\n'), ((2078, 2166), 'sample_statistics.sample_statistics', 'stats', (['"""Split Transcription"""', '"""Length in graphemes"""', 'self.split_length_in_graphemes'], {}), "('Split Transcription', 'Length in graphemes', self.\n 
split_length_in_graphemes)\n", (2083, 2166), True, 'from sample_statistics import sample_statistics as stats\n'), ((2180, 2238), 'sample_statistics.sample_statistics', 'stats', (['"""Words"""', '"""Length in samples"""', 'self.samples_per_word'], {}), "('Words', 'Length in samples', self.samples_per_word)\n", (2185, 2238), True, 'from sample_statistics import sample_statistics as stats\n'), ((2257, 2315), 'sample_statistics.sample_statistics', 'stats', (['"""Words"""', '"""Length in seconds"""', 'self.seconds_per_word'], {}), "('Words', 'Length in seconds', self.seconds_per_word)\n", (2262, 2315), True, 'from sample_statistics import sample_statistics as stats\n'), ((2334, 2400), 'sample_statistics.sample_statistics', 'stats', (['"""Graphemes"""', '"""Length in samples"""', 'self.samples_per_grapheme'], {}), "('Graphemes', 'Length in samples', self.samples_per_grapheme)\n", (2339, 2400), True, 'from sample_statistics import sample_statistics as stats\n'), ((2419, 2485), 'sample_statistics.sample_statistics', 'stats', (['"""Graphemes"""', '"""Length in seconds"""', 'self.seconds_per_grapheme'], {}), "('Graphemes', 'Length in seconds', self.seconds_per_grapheme)\n", (2424, 2485), True, 'from sample_statistics import sample_statistics as stats\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
from torch.autograd import Function
import sys
import ctypes
lib=ctypes.cdll.LoadLibrary("/root/Pointnet_Pointnet2_pytorch/libmorton/encode.so")
lib.encode.restype=ctypes.c_uint64
def timeit(tag, t):
    """Print the seconds elapsed since *t*, labelled *tag*; return the current time."""
    elapsed = time() - t
    print("{}: {}s".format(tag, elapsed))
    return time()
def pc_normalize(pc):
    """Centre a point cloud at the origin and scale it into the unit sphere."""
    centroid = np.mean(pc, axis=0)
    centered = pc - centroid
    # furthest point distance from the centroid
    scale = np.max(np.sqrt(np.sum(centered ** 2, axis=1)))
    return centered / scale
def square_distance(src, dst):
    """
    Pairwise squared Euclidean distance between two point sets.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the whole
    computation is a single batched matmul plus broadcasts.

    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    cross = torch.matmul(src, dst.permute(0, 2, 1))          # [B, N, M]
    src_sq = torch.sum(src ** 2, -1).view(B, N, 1)
    dst_sq = torch.sum(dst ** 2, -1).view(B, 1, M)
    return src_sq + dst_sq - 2 * cross
def index_points(points, idx):
    """
    Gather points along dim 1 by (possibly multi-dimensional) index.

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] or [B, S1, S2, ...]
    Return:
        new_points: indexed points data, idx.shape + [C]
    """
    device = points.device
    B = points.shape[0]
    # batch index broadcast to the same shape as idx
    view_dims = list(idx.shape)
    view_dims[1:] = [1] * (len(view_dims) - 1)
    tile_dims = list(idx.shape)
    tile_dims[0] = 1
    batch = torch.arange(B, dtype=torch.long).to(device)
    batch = batch.view(view_dims).repeat(tile_dims)
    return points[batch, idx, :]
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    For every query point, collect up to nsample point indices within radius.

    When fewer than nsample neighbours fall inside the ball, the first
    (closest-slot) index is repeated to pad the group.

    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, 3]
        new_xyz: query points, [B, S, 3]
    Return:
        candidates: grouped points index, [B, S, nsample]
    """
    device = xyz.device
    B, N, C = xyz.shape
    _, S, _ = new_xyz.shape
    candidates = torch.arange(N, dtype=torch.long).to(device)
    candidates = candidates.view(1, 1, N).repeat([B, S, 1])
    d2 = square_distance(new_xyz, xyz)
    candidates[d2 > radius ** 2] = N          # sentinel: outside the ball
    candidates = candidates.sort(dim=-1)[0][:, :, :nsample]
    pad = candidates[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
    outside = candidates == N
    candidates[outside] = pad[outside]
    return candidates
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
    """
    FPS-sample npoint centroids, then ball-query nsample neighbours each.

    Input:
        npoint: number of centroids
        radius: grouping radius
        nsample: neighbours per centroid
        xyz: input points position data, [B, N, 3]
        points: input points data, [B, N, D] or None
    Return:
        new_xyz: sampled points position data, [B, npoint, 3]
        new_points: grouped data, [B, npoint, nsample, 3+D]
        (plus grouped_xyz and the FPS indices when returnfps is True)
    """
    B, N, C = xyz.shape
    centroid_idx = farthest_point_sample(xyz, npoint)       # [B, npoint]
    new_xyz = index_points(xyz, centroid_idx)
    neighbor_idx = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = index_points(xyz, neighbor_idx)           # [B, npoint, nsample, C]
    # express each group relative to its centroid
    grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C)
    if points is None:
        new_points = grouped_xyz_norm
    else:
        grouped_feats = index_points(points, neighbor_idx)
        new_points = torch.cat([grouped_xyz_norm, grouped_feats], dim=-1)
    if returnfps:
        return new_xyz, new_points, grouped_xyz, centroid_idx
    return new_xyz, new_points
def sample_and_group_all(xyz, points):
    """
    Group the entire cloud as one region centred on the origin.

    Input:
        xyz: input points position data, [B, N, 3]
        points: input points data, [B, N, D] or None
    Return:
        new_xyz: sampled points position data, [B, 1, 3] (zeros)
        new_points: grouped data, [B, 1, N, 3+D]
    """
    device = xyz.device
    B, N, C = xyz.shape
    new_xyz = torch.zeros(B, 1, C).to(device)
    grouped_xyz = xyz.view(B, 1, N, C)
    if points is None:
        new_points = grouped_xyz
    else:
        new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
    return new_xyz, new_points
def naive_sample(xyz, npoint: int):
    """
    Pick the npoint indices with the largest squared jump from the previous
    point in sequence order (index 0 compares to itself, so its jump is 0).
    Intended for Z-order sorted clouds, where large jumps mark region starts.

    Inputs: xyz [B, N, 3]
    Return:
        picked: sampled pointcloud index, [B, npoint]
    """
    B = xyz.shape[0]
    picked = torch.ones((B, npoint), dtype=torch.long, device=xyz.device)
    for b in range(B):
        cloud = xyz[b]
        prev = cloud.clone()
        prev[1:] = cloud[0:-1]          # prev[i] = cloud[i-1], prev[0] = cloud[0]
        gap = ((prev - cloud) ** 2).sum(dim=-1)
        picked[b] = torch.topk(gap, npoint)[1]
    return picked
def seq_continuous_group(xyz, sample_idx, nsample):
    """
    Group nsample consecutive point indices starting at each sampled index.

    Input:
        xyz: all points, [B, N, 3] (only shape and device are used)
        sample_idx: indices produced by the sampling step, [B, npoint]
        nsample: number of consecutive points per group
    Return:
        idx: grouped points index, [B, npoint, nsample]; slot 0 of each
             group holds the window's middle element (swapped with slot 0)
    """
    B, N, _ = xyz.shape
    _, npoint = sample_idx.shape
    idx = torch.zeros((B, npoint, nsample), dtype=torch.long, device=xyz.device)
    for i in range(B):
        for j in range(npoint):
            start = sample_idx[i, j]
            if (start + nsample) < N:
                idx[i, j] = torch.arange(start, start + nsample)
            else:
                # window would run past the end: clamp to the last nsample points
                idx[i, j] = torch.arange(N - nsample, N)
    # Move the window centre into slot 0.
    # BUG FIX: the previous code took tmp = idx[:, :, nsample//2] which is a
    # VIEW of idx, so the next assignment overwrote tmp and the "swap" merely
    # duplicated column 0 into the middle slot; .clone() makes it a real swap.
    tmp = idx[:, :, nsample // 2].clone()
    idx[:, :, nsample // 2] = idx[:, :, 0]
    idx[:, :, 0] = tmp
    return idx
def morton_sample_and_group(npoint, radius, nsample, xyz, points):
    """
    Sample and group over a Z-order (Morton) sorted cloud: centres are the
    points with the largest jump from their predecessor and each group is a
    run of nsample consecutive points.  (radius is unused; kept for a
    signature parallel to sample_and_group.)

    Input:
        xyz: Z-order sorted points position data, [B, N, 3]
        points: input points data, [B, N, D] or None
    Return:
        new_xyz: sampled points position data, [B, npoint, 3]
        new_points: grouped data, [B, npoint, nsample, 3+D]
    """
    B, N, C = xyz.shape
    centre_idx = naive_sample(xyz, npoint)
    new_xyz = index_points(xyz, centre_idx)
    group_idx = seq_continuous_group(xyz, centre_idx, nsample)
    grouped_xyz = index_points(xyz, group_idx)
    # express each group relative to its centre
    grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C)
    if points is None:
        new_points = grouped_xyz_norm
    else:
        grouped_feats = index_points(points, group_idx)
        new_points = torch.cat([grouped_xyz_norm, grouped_feats], dim=-1)
    return new_xyz, new_points
def z_order_encode(inputs):
    """
    Encode integer coordinates into 64-bit Morton (Z-order) codes via the
    native libmorton encoder.

    inputs: [B, N, 3] integer array; returns codes of shape [B, N, 1] cast
    to float64 (torch has no uint64 dtype to receive them).
    """
    out_shape = list(inputs.shape)
    out_shape[-1] = 1
    codes = np.ndarray(out_shape, dtype=np.uint64)
    for i in range(out_shape[0]):
        for j in range(out_shape[1]):
            x, y, z = inputs[i, j].tolist()
            codes[i, j] = lib.encode(x, y, z)
    return codes.astype(np.float64)
class Z_order_sorting(Function):
    # autograd wrapper that reorders a point cloud (and its per-point
    # normals) along a Morton / Z-order space-filling curve; the sort is
    # non-differentiable (see backward below).
    @staticmethod
    def forward(ctx,xyz,normal):
        # shift coordinates to be non-negative and quantize to integers for
        # Morton encoding (assumes xyz roughly within [-2, 2) -- TODO confirm)
        data=((xyz+2)*4096).cpu().numpy()
        data = data.astype(dtype=np.uint32)
        assert data.shape[-1] == 3
        # encode on CPU via libmorton, then sort each batch by its Z-order code
        z_order_code=torch.from_numpy(z_order_encode(data)).cuda()
        _,idx=torch.sort(z_order_code,dim=1)
        # idx has shape [B, N, 1]; the trailing dim is squeezed after gathering
        batch_idx=torch.arange(xyz.shape[0]).reshape(xyz.shape[0],1,1)
        return xyz[batch_idx,idx].squeeze(2),normal[batch_idx,idx].squeeze(2)
    @staticmethod
    def backward(ctx,grad_out):
        # NOTE(review): returns an empty tuple, so no gradients flow through
        # the sort; autograd normally expects one value per forward input
        # ((None, None) would be the conventional form) -- verify callers
        # never require gradients here.
        return ()
class PointNetSetAbstraction(nn.Module):
    """PointNet++ set-abstraction layer: sample centroids, group their
    neighbourhoods, apply a shared point-wise MLP, then max-pool per group."""

    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # build the shared MLP as 1x1 conv + batchnorm pairs
        channels = in_channel
        for width in mlp:
            self.mlp_convs.append(nn.Conv2d(channels, width, 1))
            self.mlp_bns.append(nn.BatchNorm2d(width))
            channels = width
        self.group_all = group_all

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)

        if self.group_all:
            new_xyz, new_points = sample_and_group_all(xyz, points)
        else:
            new_xyz, new_points = sample_and_group(
                self.npoint, self.radius, self.nsample, xyz, points)
        # [B, npoint, nsample, C+D] -> [B, C+D, nsample, npoint] for Conv2d
        new_points = new_points.permute(0, 3, 2, 1)
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            new_points = F.relu(bn(conv(new_points)))
        # max-pool over the nsample dimension of each local region
        new_points = torch.max(new_points, 2)[0]
        new_xyz = new_xyz.permute(0, 2, 1)
        return new_xyz, new_points
class PointNetSetAbstraction_SA(nn.Module):
    """Set-abstraction variant that replaces FPS/ball-query with Z-order
    (Morton curve) sorting followed by sequential sampling and grouping."""

    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
        super(PointNetSetAbstraction_SA, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        channels = in_channel
        for width in mlp:
            self.mlp_convs.append(nn.Conv2d(channels, width, 1))
            self.mlp_bns.append(nn.BatchNorm2d(width))
            channels = width
        self.group_all = group_all

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        # Reorder points along the Z-order curve before grouping.
        xyz, points = Z_order_sorting.apply(xyz, points)
        if self.group_all:
            new_xyz, new_points = sample_and_group_all(xyz, points)
        else:
            new_xyz, new_points = morton_sample_and_group(
                self.npoint, self.radius, self.nsample, xyz, points
            )
        new_points = new_points.permute(0, 3, 2, 1)  # [B, C+D, nsample, npoint]
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            new_points = F.relu(bn(conv(new_points)))
        new_points = torch.max(new_points, 2)[0]
        return new_xyz.permute(0, 2, 1), new_points
class ChannelAttention(nn.Module):
    """CBAM-style channel attention over [B, N, D] point features.

    The input is average- and max-pooled along dim 1, both descriptors pass
    through a shared 1 -> 16 -> 1 Conv1d bottleneck, and the summed result is
    squashed with a sigmoid to produce per-channel weights.
    """

    def __init__(self):
        super(ChannelAttention, self).__init__()
        self.fc1 = nn.Conv1d(1, 16, 1)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv1d(16, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Shared bottleneck applied to both pooled descriptors.
        def bottleneck(t):
            return self.fc2(self.relu1(self.fc1(t)))

        avg_branch = bottleneck(torch.mean(x, dim=1, keepdim=True))
        max_branch = bottleneck(torch.max(x, dim=1, keepdim=True)[0])
        return self.sigmoid(avg_branch + max_branch)
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention over [B, N, D] point features.

    The input is transposed to [B, D, N], channel-wise mean and max maps are
    stacked and passed through a single Conv1d, and the sigmoid output is
    transposed back to per-point weights of shape [B, N, 1].
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # "same" padding for the two supported kernel sizes.
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = x.permute(0, 2, 1)
        pooled = torch.cat(
            [torch.mean(x, dim=1, keepdim=True),
             torch.max(x, dim=1, keepdim=True)[0]],
            dim=1,
        )
        attention = self.sigmoid(self.conv1(pooled))
        return attention.permute(0, 2, 1)
class PointNetSetAbstraction_AM(nn.Module):
    """Set abstraction with CBAM-style channel and spatial attention applied to
    the input point features before sampling and grouping."""

    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
        super(PointNetSetAbstraction_AM, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        self.ca = ChannelAttention()
        self.sa = SpatialAttention()
        # One shared 1x1 Conv2d + BatchNorm pair per MLP layer.
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        self.group_all = group_all

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N], or None
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
            # BUGFIX: attention previously ran outside this guard and crashed
            # when points is None; sibling set-abstraction layers in this file
            # accept points=None, so only attend when features exist.
            points = points * self.ca(points)
            points = points * self.sa(points)
        if self.group_all:
            new_xyz, new_points = sample_and_group_all(xyz, points)
        else:
            new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)
        # [B, npoint, nsample, C+D] -> [B, C+D, nsample, npoint]
        new_points = new_points.permute(0, 3, 2, 1)
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(conv(new_points)))
        # Max-pool over the local neighbourhood (nsample axis).
        new_points = torch.max(new_points, 2)[0]
        new_xyz = new_xyz.permute(0, 2, 1)
        return new_xyz, new_points
class PointNetSetAbstractionMsg(nn.Module):
    """Multi-scale-grouping (MSG) set abstraction: one ball-query + shared-MLP
    branch per radius, with the per-branch features concatenated."""

    def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
        super(PointNetSetAbstractionMsg, self).__init__()
        self.npoint = npoint
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        for branch in mlp_list:
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            channels = in_channel + 3  # +3 for the concatenated relative xyz
            for width in branch:
                convs.append(nn.Conv2d(channels, width, 1))
                bns.append(nn.BatchNorm2d(width))
                channels = width
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        B, N, C = xyz.shape
        S = self.npoint
        new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
        branch_outputs = []
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            group_idx = query_ball_point(radius, K, xyz, new_xyz)
            grouped_xyz = index_points(xyz, group_idx)
            # Make neighbourhood coordinates relative to each centroid.
            grouped_xyz -= new_xyz.view(B, S, 1, C)
            if points is None:
                grouped_points = grouped_xyz
            else:
                grouped_points = torch.cat(
                    [index_points(points, group_idx), grouped_xyz], dim=-1
                )
            grouped_points = grouped_points.permute(0, 3, 2, 1)  # [B, D, K, S]
            for conv, bn in zip(self.conv_blocks[i], self.bn_blocks[i]):
                grouped_points = F.relu(bn(conv(grouped_points)))
            branch_outputs.append(torch.max(grouped_points, 2)[0])  # [B, D', S]
        return new_xyz.permute(0, 2, 1), torch.cat(branch_outputs, dim=1)
class PointNetSetAbstractionMsg_SA(nn.Module):
    """Multi-scale set abstraction using optional Z-order sorting with
    sequential sampling/grouping instead of FPS and ball query."""

    def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list, zorder_sort=True):
        super(PointNetSetAbstractionMsg_SA, self).__init__()
        self.npoint = npoint
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        self.zorder_sort = zorder_sort
        for branch in mlp_list:
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            channels = in_channel + 3  # +3 for the concatenated relative xyz
            for width in branch:
                convs.append(nn.Conv2d(channels, width, 1))
                bns.append(nn.BatchNorm2d(width))
                channels = width
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        if self.zorder_sort:
            xyz, points = Z_order_sorting.apply(xyz, points)
        B, N, C = xyz.shape
        S = self.npoint
        sample_idx = naive_sample(xyz, S)
        new_xyz = index_points(xyz, sample_idx)
        branch_outputs = []
        # `radius` drives only the branch count here; neighbourhoods are
        # sequential runs of K points, not radius queries.
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            group_idx = seq_continuous_group(xyz, sample_idx, K)
            grouped_xyz = index_points(xyz, group_idx)
            grouped_xyz -= new_xyz.view(B, S, 1, C)
            if points is None:
                grouped_points = grouped_xyz
            else:
                grouped_points = torch.cat(
                    [index_points(points, group_idx), grouped_xyz], dim=-1
                )
            grouped_points = grouped_points.permute(0, 3, 2, 1)  # [B, D, K, S]
            for conv, bn in zip(self.conv_blocks[i], self.bn_blocks[i]):
                grouped_points = F.relu(bn(conv(grouped_points)))
            branch_outputs.append(torch.max(grouped_points, 2)[0])  # [B, D', S]
        return new_xyz.permute(0, 2, 1), torch.cat(branch_outputs, dim=1)
class PointNetSetAbstractionMsg_AM(nn.Module):
    """Multi-scale (MSG) set abstraction with CBAM-style channel and spatial
    attention applied to the input point features before grouping."""

    def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
        super(PointNetSetAbstractionMsg_AM, self).__init__()
        self.npoint = npoint
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        self.ca = ChannelAttention()
        self.sa = SpatialAttention()
        for i in range(len(mlp_list)):
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            last_channel = in_channel + 3  # +3 for the concatenated relative xyz
            for out_channel in mlp_list[i]:
                convs.append(nn.Conv2d(last_channel, out_channel, 1))
                bns.append(nn.BatchNorm2d(out_channel))
                last_channel = out_channel
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N], or None
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
            # BUGFIX: attention previously ran outside this guard and crashed
            # when points is None; only attend when features exist, matching
            # the None handling of the grouping code below.
            points = points * self.ca(points)
            points = points * self.sa(points)
        B, N, C = xyz.shape
        S = self.npoint
        new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
        new_points_list = []
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            group_idx = query_ball_point(radius, K, xyz, new_xyz)
            grouped_xyz = index_points(xyz, group_idx)
            # Make neighbourhood coordinates relative to each centroid.
            grouped_xyz -= new_xyz.view(B, S, 1, C)
            if points is not None:
                grouped_points = index_points(points, group_idx)
                grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)
            else:
                grouped_points = grouped_xyz
            grouped_points = grouped_points.permute(0, 3, 2, 1)  # [B, D, K, S]
            for j in range(len(self.conv_blocks[i])):
                conv = self.conv_blocks[i][j]
                bn = self.bn_blocks[i][j]
                grouped_points = F.relu(bn(conv(grouped_points)))
            new_points = torch.max(grouped_points, 2)[0]  # [B, D', S]
            new_points_list.append(new_points)
        new_xyz = new_xyz.permute(0, 2, 1)
        new_points_concat = torch.cat(new_points_list, dim=1)
        return new_xyz, new_points_concat
class PointNetFeaturePropagation(nn.Module):
    """Feature-propagation (upsampling) layer: inverse-distance interpolation
    from the sampled set back to the dense set, followed by a shared MLP."""

    def __init__(self, in_channel, mlp):
        super(PointNetFeaturePropagation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        channels = in_channel
        for width in mlp:
            self.mlp_convs.append(nn.Conv1d(channels, width, 1))
            self.mlp_bns.append(nn.BatchNorm1d(width))
            channels = width

    def forward(self, xyz1, xyz2, points1, points2):
        """
        Input:
            xyz1: input points position data, [B, C, N]
            xyz2: sampled input points position data, [B, C, S]
            points1: input points data, [B, D, N]
            points2: input points data, [B, D, S]
        Return:
            new_points: upsampled points data, [B, D', N]
        """
        xyz1 = xyz1.permute(0, 2, 1)
        xyz2 = xyz2.permute(0, 2, 1)
        points2 = points2.permute(0, 2, 1)
        B, N, C = xyz1.shape
        S = xyz2.shape[1]
        if S == 1:
            # A single source point: broadcast its features to every target.
            interpolated_points = points2.repeat(1, N, 1)
        else:
            # Inverse-distance-weighted interpolation over the 3 nearest sources.
            dists = square_distance(xyz1, xyz2)
            dists, idx = dists.sort(dim=-1)
            dists, idx = dists[:, :, :3], idx[:, :, :3]  # [B, N, 3]
            dist_recip = 1.0 / (dists + 1e-8)
            weight = dist_recip / torch.sum(dist_recip, dim=2, keepdim=True)
            interpolated_points = torch.sum(
                index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2
            )
        if points1 is None:
            new_points = interpolated_points
        else:
            new_points = torch.cat(
                [points1.permute(0, 2, 1), interpolated_points], dim=-1
            )
        new_points = new_points.permute(0, 2, 1)
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            new_points = F.relu(bn(conv(new_points)))
        return new_points
| [
"torch.nn.ReLU",
"torch.max",
"torch.nn.BatchNorm1d",
"torch.sum",
"torch.arange",
"numpy.mean",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"ctypes.cdll.LoadLibrary",
"torch.nn.ModuleList",
"torch.mean",
"torch.randint",
"torch.sort",
"torch.topk",
"time.time",
"torch.cat",
"torch.n... | [((174, 253), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['"""/root/Pointnet_Pointnet2_pytorch/libmorton/encode.so"""'], {}), "('/root/Pointnet_Pointnet2_pytorch/libmorton/encode.so')\n", (197, 253), False, 'import ctypes\n'), ((366, 372), 'time.time', 'time', ([], {}), '()\n', (370, 372), False, 'from time import time\n'), ((431, 450), 'numpy.mean', 'np.mean', (['pc'], {'axis': '(0)'}), '(pc, axis=0)\n', (438, 450), True, 'import numpy as np\n'), ((5344, 5404), 'torch.ones', 'torch.ones', (['(B, npoint)'], {'dtype': 'torch.long', 'device': 'xyz.device'}), '((B, npoint), dtype=torch.long, device=xyz.device)\n', (5354, 5404), False, 'import torch\n'), ((6024, 6094), 'torch.zeros', 'torch.zeros', (['(B, npoint, nsample)'], {'dtype': 'torch.long', 'device': 'xyz.device'}), '((B, npoint, nsample), dtype=torch.long, device=xyz.device)\n', (6035, 6094), False, 'import torch\n'), ((7630, 7664), 'numpy.ndarray', 'np.ndarray', (['shape'], {'dtype': 'np.uint64'}), '(shape, dtype=np.uint64)\n', (7640, 7664), True, 'import numpy as np\n'), ((2510, 2546), 'torch.sum', 'torch.sum', (['((xyz - centroid) ** 2)', '(-1)'], {}), '((xyz - centroid) ** 2, -1)\n', (2519, 2546), False, 'import torch\n'), ((4304, 4357), 'torch.cat', 'torch.cat', (['[grouped_xyz_norm, grouped_points]'], {'dim': '(-1)'}), '([grouped_xyz_norm, grouped_points], dim=-1)\n', (4313, 4357), False, 'import torch\n'), ((7384, 7437), 'torch.cat', 'torch.cat', (['[grouped_xyz_norm, grouped_points]'], {'dim': '(-1)'}), '([grouped_xyz_norm, grouped_points], dim=-1)\n', (7393, 7437), False, 'import torch\n'), ((8129, 8160), 'torch.sort', 'torch.sort', (['z_order_code'], {'dim': '(1)'}), '(z_order_code, dim=1)\n', (8139, 8160), False, 'import torch\n'), ((8666, 8681), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8679, 8681), True, 'import torch.nn as nn\n'), ((8705, 8720), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8718, 8720), True, 'import torch.nn as nn\n'), 
((10423, 10438), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (10436, 10438), True, 'import torch.nn as nn\n'), ((10462, 10477), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (10475, 10477), True, 'import torch.nn as nn\n'), ((12102, 12121), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(16)', '(1)'], {}), '(1, 16, 1)\n', (12111, 12121), True, 'import torch.nn as nn\n'), ((12143, 12152), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12150, 12152), True, 'import torch.nn as nn\n'), ((12172, 12191), 'torch.nn.Conv1d', 'nn.Conv1d', (['(16)', '(1)', '(1)'], {}), '(16, 1, 1)\n', (12181, 12191), True, 'import torch.nn as nn\n'), ((12216, 12228), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (12226, 12228), True, 'import torch.nn as nn\n'), ((12279, 12313), 'torch.mean', 'torch.mean', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (12289, 12313), False, 'import torch\n'), ((12400, 12433), 'torch.max', 'torch.max', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (12409, 12433), False, 'import torch\n'), ((12837, 12894), 'torch.nn.Conv1d', 'nn.Conv1d', (['(2)', '(1)', 'kernel_size'], {'padding': 'padding', 'bias': '(False)'}), '(2, 1, kernel_size, padding=padding, bias=False)\n', (12846, 12894), True, 'import torch.nn as nn\n'), ((12918, 12930), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (12928, 12930), True, 'import torch.nn as nn\n'), ((13006, 13040), 'torch.mean', 'torch.mean', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (13016, 13040), False, 'import torch\n'), ((13062, 13095), 'torch.max', 'torch.max', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (13071, 13095), False, 'import torch\n'), ((13108, 13144), 'torch.cat', 'torch.cat', (['[avg_out, max_out]'], {'dim': '(1)'}), '([avg_out, max_out], dim=1)\n', (13117, 13144), False, 'import torch\n'), ((13549, 13564), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), 
'()\n', (13562, 13564), True, 'import torch.nn as nn\n'), ((13588, 13603), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (13601, 13603), True, 'import torch.nn as nn\n'), ((15523, 15538), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15536, 15538), True, 'import torch.nn as nn\n'), ((15564, 15579), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15577, 15579), True, 'import torch.nn as nn\n'), ((17602, 17635), 'torch.cat', 'torch.cat', (['new_points_list'], {'dim': '(1)'}), '(new_points_list, dim=1)\n', (17611, 17635), False, 'import torch\n'), ((18021, 18036), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (18034, 18036), True, 'import torch.nn as nn\n'), ((18062, 18077), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (18075, 18077), True, 'import torch.nn as nn\n'), ((20246, 20279), 'torch.cat', 'torch.cat', (['new_points_list'], {'dim': '(1)'}), '(new_points_list, dim=1)\n', (20255, 20279), False, 'import torch\n'), ((20649, 20664), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (20662, 20664), True, 'import torch.nn as nn\n'), ((20690, 20705), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (20703, 20705), True, 'import torch.nn as nn\n'), ((22926, 22959), 'torch.cat', 'torch.cat', (['new_points_list'], {'dim': '(1)'}), '(new_points_list, dim=1)\n', (22935, 22959), False, 'import torch\n'), ((23175, 23190), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (23188, 23190), True, 'import torch.nn as nn\n'), ((23214, 23229), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (23227, 23229), True, 'import torch.nn as nn\n'), ((497, 520), 'numpy.sum', 'np.sum', (['(pc ** 2)'], {'axis': '(1)'}), '(pc ** 2, axis=1)\n', (503, 520), True, 'import numpy as np\n'), ((1177, 1200), 'torch.sum', 'torch.sum', (['(src ** 2)', '(-1)'], {}), '(src ** 2, -1)\n', (1186, 1200), False, 'import torch\n'), ((1227, 1250), 'torch.sum', 'torch.sum', (['(dst ** 2)', '(-1)'], {}), '(dst ** 2, 
-1)\n', (1236, 1250), False, 'import torch\n'), ((2130, 2170), 'torch.zeros', 'torch.zeros', (['B', 'npoint'], {'dtype': 'torch.long'}), '(B, npoint, dtype=torch.long)\n', (2141, 2170), False, 'import torch\n'), ((2247, 2290), 'torch.randint', 'torch.randint', (['(0)', 'N', '(B,)'], {'dtype': 'torch.long'}), '(0, N, (B,), dtype=torch.long)\n', (2260, 2290), False, 'import torch\n'), ((2322, 2355), 'torch.arange', 'torch.arange', (['B'], {'dtype': 'torch.long'}), '(B, dtype=torch.long)\n', (2334, 2355), False, 'import torch\n'), ((2633, 2656), 'torch.max', 'torch.max', (['distance', '(-1)'], {}), '(distance, -1)\n', (2642, 2656), False, 'import torch\n'), ((4905, 4925), 'torch.zeros', 'torch.zeros', (['B', '(1)', 'C'], {}), '(B, 1, C)\n', (4916, 4925), False, 'import torch\n'), ((5585, 5613), 'torch.topk', 'torch.topk', (['distance', 'npoint'], {}), '(distance, npoint)\n', (5595, 5613), False, 'import torch\n'), ((10021, 10045), 'torch.max', 'torch.max', (['new_points', '(2)'], {}), '(new_points, 2)\n', (10030, 10045), False, 'import torch\n'), ((11868, 11892), 'torch.max', 'torch.max', (['new_points', '(2)'], {}), '(new_points, 2)\n', (11877, 11892), False, 'import torch\n'), ((15096, 15120), 'torch.max', 'torch.max', (['new_points', '(2)'], {}), '(new_points, 2)\n', (15105, 15120), False, 'import torch\n'), ((15639, 15654), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15652, 15654), True, 'import torch.nn as nn\n'), ((15673, 15688), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15686, 15688), True, 'import torch.nn as nn\n'), ((18174, 18189), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (18187, 18189), True, 'import torch.nn as nn\n'), ((18208, 18223), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (18221, 18223), True, 'import torch.nn as nn\n'), ((20839, 20854), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (20852, 20854), True, 'import torch.nn as nn\n'), ((20873, 20888), 'torch.nn.ModuleList', 
'nn.ModuleList', ([], {}), '()\n', (20886, 20888), True, 'import torch.nn as nn\n'), ((24353, 24395), 'torch.sum', 'torch.sum', (['dist_recip'], {'dim': '(2)', 'keepdim': '(True)'}), '(dist_recip, dim=2, keepdim=True)\n', (24362, 24395), False, 'import torch\n'), ((24645, 24694), 'torch.cat', 'torch.cat', (['[points1, interpolated_points]'], {'dim': '(-1)'}), '([points1, interpolated_points], dim=-1)\n', (24654, 24694), False, 'import torch\n'), ((342, 348), 'time.time', 'time', ([], {}), '()\n', (346, 348), False, 'from time import time\n'), ((2197, 2213), 'torch.ones', 'torch.ones', (['B', 'N'], {}), '(B, N)\n', (2207, 2213), False, 'import torch\n'), ((6264, 6300), 'torch.arange', 'torch.arange', (['start', '(start + nsample)'], {}), '(start, start + nsample)\n', (6276, 6300), False, 'import torch\n'), ((6539, 6567), 'torch.arange', 'torch.arange', (['(N - nsample)', 'N'], {}), '(N - nsample, N)\n', (6551, 6567), False, 'import torch\n'), ((8178, 8204), 'torch.arange', 'torch.arange', (['xyz.shape[0]'], {}), '(xyz.shape[0])\n', (8190, 8204), False, 'import torch\n'), ((8821, 8860), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (8830, 8860), True, 'import torch.nn as nn\n'), ((8894, 8921), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), '(out_channel)\n', (8908, 8921), True, 'import torch.nn as nn\n'), ((10578, 10617), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (10587, 10617), True, 'import torch.nn as nn\n'), ((10651, 10678), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), '(out_channel)\n', (10665, 10678), True, 'import torch.nn as nn\n'), ((13774, 13813), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (13783, 13813), True, 'import torch.nn as nn\n'), ((13848, 13875), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', 
(['out_channel'], {}), '(out_channel)\n', (13862, 13875), True, 'import torch.nn as nn\n'), ((17010, 17058), 'torch.cat', 'torch.cat', (['[grouped_points, grouped_xyz]'], {'dim': '(-1)'}), '([grouped_points, grouped_xyz], dim=-1)\n', (17019, 17058), False, 'import torch\n'), ((17437, 17465), 'torch.max', 'torch.max', (['grouped_points', '(2)'], {}), '(grouped_points, 2)\n', (17446, 17465), False, 'import torch\n'), ((19654, 19702), 'torch.cat', 'torch.cat', (['[grouped_points, grouped_xyz]'], {'dim': '(-1)'}), '([grouped_points, grouped_xyz], dim=-1)\n', (19663, 19702), False, 'import torch\n'), ((20081, 20109), 'torch.max', 'torch.max', (['grouped_points', '(2)'], {}), '(grouped_points, 2)\n', (20090, 20109), False, 'import torch\n'), ((22334, 22382), 'torch.cat', 'torch.cat', (['[grouped_points, grouped_xyz]'], {'dim': '(-1)'}), '([grouped_points, grouped_xyz], dim=-1)\n', (22343, 22382), False, 'import torch\n'), ((22761, 22789), 'torch.max', 'torch.max', (['grouped_points', '(2)'], {}), '(grouped_points, 2)\n', (22770, 22789), False, 'import torch\n'), ((23330, 23369), 'torch.nn.Conv1d', 'nn.Conv1d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (23339, 23369), True, 'import torch.nn as nn\n'), ((23403, 23430), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_channel'], {}), '(out_channel)\n', (23417, 23430), True, 'import torch.nn as nn\n'), ((15804, 15843), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (15813, 15843), True, 'import torch.nn as nn\n'), ((15872, 15899), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), '(out_channel)\n', (15886, 15899), True, 'import torch.nn as nn\n'), ((18339, 18378), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (18348, 18378), True, 'import torch.nn as nn\n'), ((18407, 18434), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), 
'(out_channel)\n', (18421, 18434), True, 'import torch.nn as nn\n'), ((21004, 21043), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel', 'out_channel', '(1)'], {}), '(last_channel, out_channel, 1)\n', (21013, 21043), True, 'import torch.nn as nn\n'), ((21072, 21099), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), '(out_channel)\n', (21086, 21099), True, 'import torch.nn as nn\n'), ((1702, 1735), 'torch.arange', 'torch.arange', (['B'], {'dtype': 'torch.long'}), '(B, dtype=torch.long)\n', (1714, 1735), False, 'import torch\n'), ((3087, 3120), 'torch.arange', 'torch.arange', (['N'], {'dtype': 'torch.long'}), '(N, dtype=torch.long)\n', (3099, 3120), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-06-13 10:34:43
# @Author : <NAME> & <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
import numpy as np
from pysparse.utils.const import *
import matplotlib.pyplot as plt
def showdict(D, rcsize=None, stride=None, plot=True, bgcolorv=0, cmap=None, title=None, xlabel=None, ylabel=None):
    r"""
    Trys to show dictionary atoms (image blocks) in one image.

    Parameters
    ----------
    D : array_like
        Dictionary to be shown, an M-N numpy ndarray whose N columns are the
        atoms; M must be a perfect square, since each atom is reshaped to a
        sqrt(M) x sqrt(M) image block.
    rcsize : int tuple or None, optional
        Specifies how many rows and cols of blocks that you want to show,
        e.g. (rows, cols). If not given, rcsize=(rows, cols) will be computed
        automaticly.
    stride : int tuple or None, optional
        The step size (blank pixels nums) in row and col between two blocks.
        If not given, stride=(1, 1).
    plot : bool, optional
        True for ploting, False for silent and returns a H-W-C numpy ndarray
        for showing.
    bgcolorv : float or None, optional
        The background color value, 1 for white, 0 for black. Default, 0.
    cmap, title, xlabel, ylabel : optional
        Forwarded to matplotlib when ``plot`` is True.

    Returns
    -------
    out : ndarray
        A H-W(-C) numpy ndarray for showing.

    See Also
    --------
    odctdict.

    Examples
    --------
    >>> D = pys.odctdict((M, N))
    >>> showdict(D, bgcolorv=0)
    """
    if plot is None:
        plot = True
    M, N = D.shape
    # Each of the N columns of D is one square H1 x W1 atom.
    H1 = int(np.sqrt(M))
    W1 = int(np.sqrt(M))
    D = np.reshape(D, (H1, W1, 1, N))
    if D.size == 0:
        return D
    if not (isinstance(D, np.ndarray) and D.ndim == 4):
        raise TypeError('"D" should be a pH-pW-pC-pN numpy array!')
    _, _, _, bN = D.shape
    # Grid layout: derived from the atom count unless given explicitly.
    if rcsize is None:
        # BUGFIX: was math.sqrt, but `math` is not among this module's visible
        # imports (and the rest of the function uses np.sqrt). int() floors
        # either way, so the result is identical.
        rows = int(np.sqrt(bN))
        cols = int(bN / rows)
        if bN % cols > 0:
            rows = rows + 1
    else:
        rows = rcsize[0]
        cols = rcsize[1]
    # step size
    if stride is None:
        stride = (1, 1)
    bgcolor_value = bgcolorv
    # Pad every atom with `stride` background pixels on the bottom/right and,
    # when needed, append background atoms so the grid holds rows * cols.
    if bN < rows * cols:
        A = np.pad(D,
                   ((0, stride[0]), (0, stride[1]),
                    (0, 0), (0, rows * cols - bN)),
                   'constant', constant_values=bgcolor_value)
    else:
        A = np.pad(D,
                   ((0, stride[0]), (0, stride[1]), (0, 0), (0, 0)),
                   'constant', constant_values=bgcolor_value)
    A = A[:, :, :, 0:rows * cols]
    aH, aW, aC, aN = A.shape
    # Tile the padded atoms into a rows x cols mosaic.
    A = np.transpose(A, (3, 1, 0, 2)).reshape(
        rows, cols, aH, aW, aC).swapaxes(
        1, 2).reshape(rows * aH, cols * aW, aC)
    aH, aW, aC = A.shape
    # Trim the trailing stride so the mosaic ends on an atom, not on padding.
    A = A[0:aH - stride[0], 0:aW - stride[1], :]
    if aC == 1:
        A = A[:, :, 0]
    if title is None:
        title = 'Show ' + str(bN) + ' atoms in ' + str(rows) +\
            ' rows ' + str(cols) + ' cols, with stride' + str(stride)
    if xlabel is None:
        xlabel = ''
    if ylabel is None:
        ylabel = ''
    if plot:
        plt.figure()
        plt.imshow(A, cmap=cmap)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.colorbar()
        plt.show()
    return A  # H-W-C
| [
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.pad",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((1998, 2027), 'numpy.reshape', 'np.reshape', (['D', '(H1, W1, 1, N)'], {}), '(D, (H1, W1, 1, N))\n', (2008, 2027), True, 'import numpy as np\n'), ((1953, 1963), 'numpy.sqrt', 'np.sqrt', (['M'], {}), '(M)\n', (1960, 1963), True, 'import numpy as np\n'), ((1978, 1988), 'numpy.sqrt', 'np.sqrt', (['M'], {}), '(M)\n', (1985, 1988), True, 'import numpy as np\n'), ((2681, 2802), 'numpy.pad', 'np.pad', (['D', '((0, stride[0]), (0, stride[1]), (0, 0), (0, rows * cols - bN))', '"""constant"""'], {'constant_values': 'bgcolor_value'}), "(D, ((0, stride[0]), (0, stride[1]), (0, 0), (0, rows * cols - bN)),\n 'constant', constant_values=bgcolor_value)\n", (2687, 2802), True, 'import numpy as np\n'), ((2882, 2988), 'numpy.pad', 'np.pad', (['D', '((0, stride[0]), (0, stride[1]), (0, 0), (0, 0))', '"""constant"""'], {'constant_values': 'bgcolor_value'}), "(D, ((0, stride[0]), (0, stride[1]), (0, 0), (0, 0)), 'constant',\n constant_values=bgcolor_value)\n", (2888, 2988), True, 'import numpy as np\n'), ((3750, 3762), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3760, 3762), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3795), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A'], {'cmap': 'cmap'}), '(A, cmap=cmap)\n', (3781, 3795), True, 'import matplotlib.pyplot as plt\n'), ((3804, 3820), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3813, 3820), True, 'import matplotlib.pyplot as plt\n'), ((3829, 3847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3839, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (3866, 3874), True, 'import matplotlib.pyplot as plt\n'), ((3883, 3897), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3895, 3897), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3914, 3916), True, 'import matplotlib.pyplot as 
plt\n'), ((3100, 3129), 'numpy.transpose', 'np.transpose', (['A', '(3, 1, 0, 2)'], {}), '(A, (3, 1, 0, 2))\n', (3112, 3129), True, 'import numpy as np\n')] |
"""Classification using random forest."""
import logging
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier
logger = logging.getLogger(__name__)
class RandomForest:
"""Train or classify using a RandomForest model."""
    def __init__(self, num_features, model=None):
        """Create instance of RandomForest.

        Args:
            num_features (int): Number of features to train or classify.
            model (str or sklearn.ensemble.RandomForestClassifier, optional):
                Path to a pickled model or an already-trained classifier.
                Defaults to None (untrained instance).
        """
        self.num_features = num_features
        # load_model validates against num_features; invalid input yields None.
        self.model = self.load_model(model)
def load_model(self, model):
"""Load trained sklearn.ensemble.RandomForestClassifier model.
Args:
model_path (str): path to the trained model
Returns:
sklearn.ensemble.RandomForestClassifier: Trained model, see reference for details.
"""
if model is None:
return None
# Check if the model_input is a path or an sklearn random forest model
if isinstance(model, str):
try:
model = pickle.load(open(model, "rb"))
return self.validate_model(model)
except OSError:
logger.error("Could not load RandomForestModel")
return None
elif isinstance(model, RandomForestClassifier):
# Validate model based on parameters
return self.validate_model(model)
return None
def validate_model(self, model):
"""Validate a model with the current class instantiation.
Args:
model (sklearn.ensemble.RandomForestClassifier): A trained RandomForestClassifier
Returns:
[sklearn.ensemble.RandomForestClassifier]: A valid trained RandomForestClassifier
"""
if not isinstance(model, RandomForestClassifier):
logger.error(
"Can not validate model, is not of instance sklearn.ensemble.forest.RandomForestClassifier"
)
return None
if not model.n_features_ == self.num_features:
logger.error(
"Number of features is different from model parameter. Model has: %d, input was: %d",
model.n_features_,
self.num_features,
)
return None
return model
def train(self, X, y, num_trees=100, processors=-1):
"""Train/Fit a RandomForestClassifier using the observation matrix X and class vector y.
Args:
X (np.array): 2D Matrix of feature observations.
y (np.array): 1D vector of class labels.
num_tress (int): Number of tress used in the forest.
processors (int): Number of parallel jobs used to train, -1 means all processors.
Returns:
sklearn.ensemble.RandomForestClassifier: A trained RandomForestClassifier model.
"""
# If a model is already defined, something is wrong. Does not support training multiple times in a row.
if self.model is not None:
logger.error(
"Surfclass does not support training an already existing model.."
)
return None
# validate X fits the parameters given in init
assert isinstance(X, np.ndarray), "X is not a valid numpy.ndarray"
assert (
X.ndim == 2
), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
assert y.ndim == 1, "y does not have the correct shape, should be 1D vector"
assert (
X.shape[1] == self.num_features
), "Model and input does have the same number of features"
assert (
X.shape[0] == y.shape[0]
), "Number of class observations does not match number of feature observations."
rf = RandomForestClassifier(
n_estimators=num_trees, oob_score=False, verbose=0, n_jobs=processors
)
# fit the model
rf_trained = rf.fit(X, y)
# save the model to the instanced class (useful when one want to run classify immediately after)
self.model = rf_trained
# return the trained model
return rf_trained
def classify(self, X, prob=False, processors=None):
"""Classify X using the instantiated RandomForestClassifier model.
Args:
X (np.array): 2D Matrix of feature observations.
prob (bool): If true returns tuple with classified vector and highest class probability vector
processors (int): Number of parallel jobs used to train. -1 means all processors, None means model default.
Returns:
np.array or tuple (np.array,np.array): classified vector or tuple of classified vector and probability vector
"""
assert (
self.model is not None
), "Could not find a model, please either train a model or initialise the class with a valid model path"
# TODO: This might be double-work but the model attribute can have been changed
model = self.validate_model(self.model)
if isinstance(processors, int):
model.n_jobs = processors
# Test the X input is acceptable for the given model.
assert (
X.ndim == 2
), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
assert isinstance(X, np.ndarray), "X is not a valid numpy array"
assert (
X.shape[1] == self.num_features
), "Model and input does have the same number of features"
# run the classificaiton using X
classes = self.model.classes_
class_prediction_prob = model.predict_proba(X)
class_prediction = classes[np.argmax(class_prediction_prob, axis=1)]
# return tuple with class prediction and highest class probability if prob
if prob:
return (class_prediction, np.amax(class_prediction_prob, axis=1))
return class_prediction
| [
"logging.getLogger",
"numpy.amax",
"numpy.argmax",
"sklearn.ensemble.RandomForestClassifier"
] | [((152, 179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'import logging\n'), ((3949, 4046), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'num_trees', 'oob_score': '(False)', 'verbose': '(0)', 'n_jobs': 'processors'}), '(n_estimators=num_trees, oob_score=False, verbose=0,\n n_jobs=processors)\n', (3971, 4046), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5871, 5911), 'numpy.argmax', 'np.argmax', (['class_prediction_prob'], {'axis': '(1)'}), '(class_prediction_prob, axis=1)\n', (5880, 5911), True, 'import numpy as np\n'), ((6052, 6090), 'numpy.amax', 'np.amax', (['class_prediction_prob'], {'axis': '(1)'}), '(class_prediction_prob, axis=1)\n', (6059, 6090), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from collections.abc import Iterable
# Keep console output compact: pandas prints at most 10 rows per frame.
pd.set_option("display.max_rows", 10)
class DataFrame(object):
    '''
    Custom DataFrame Class; Pandas DataFrames with methods removed.
    :Example:
    >>> df = DataFrame.from_records([[1,2,3],[4,5,6]], columns=['a', 'b', 'c'])
    >>> df.shape
    (2, 3)
    >>> df.assign(d=[1,2]).shape
    (2, 4)
    >>> df.loc[1, 'b']
    5
    '''
    def __init__(self, **kwargs):
        '''
        Create an empty DataFrame.
        '''
        # hidden pandas dataframe object
        self._pd = pd.DataFrame(**kwargs)
        # lift loc/iloc back to custom DataFrame objects
        self.loc = DataFrameIndexer(self._pd.loc)
        self.iloc = DataFrameIndexer(self._pd.iloc)
        # Properties
        self.shape = _lift_to_pd(self._pd.shape)
        self.columns = _lift_to_pd(self._pd.columns)
        self.index = _lift_to_pd(self._pd.index)
        self.values = _lift_to_pd(self._pd.values)
        self.T = _lift_to_pd(self._pd.T)
    # Formatting (delegated to the underlying pandas DataFrame)
    def __repr__(self):
        return self._pd.__repr__()
    def __str__(self):
        return self._pd.__str__()
    # return the underlying DataFrame
    def to_df(self):
        '''Return the full pandas DataFrame.'''
        return self._pd
    # Creation
    @classmethod
    def from_dict(cls, data):
        '''
        Create a DataFrame from a dictionary.
        '''
        return cls(data=data)
    @classmethod
    def from_records(cls, data, columns):
        '''
        Create a DataFrame from a sequence of records.
        '''
        return cls(data=data, columns=columns)
    # Dunder Attributes
    def _repr_html_(self):
        # rich HTML rendering for Jupyter notebooks
        f = _lift_to_pd(self._pd._repr_html_)
        return f()
    # Selection
    def take(self, indices):
        '''
        Return the elements in the given positional indices along an axis.
        :param indices: An array of ints indicating which positions to take.
        :type indices: list of ints
        :return: DataFrame with the given positional indices.
        :rtype: DataFrame
        :raises IndexError: if any `indices` are out of bounds with respect to DataFrame length.
        :example:
        >>> df = bpd.DataFrame().assign(name=['falcon', 'parrot', 'lion'],
        ...                             kind=['bird', 'bird', 'mammal'])
        >>> df
             name    kind
        0  falcon    bird
        1  parrot    bird
        2    lion  mammal
        >>> df.take([0, 2])
             name    kind
        0  falcon    bird
        2    lion  mammal
        '''
        if not isinstance(indices, Iterable):
            raise TypeError('Argument `indices` must be a list-like object')
        if not all(isinstance(x, (int, np.integer)) for x in indices):
            raise ValueError('Argument `indices` must only contain integers')
        if not all(x < self._pd.shape[0] for x in indices):
            raise IndexError('Indices are out-of-bounds')
        f = _lift_to_pd(self._pd.take)
        return f(indices=indices)
    def drop(self, columns=None):
        '''
        Drop specified labels from rows or columns.
        :param columns: Column labels to drop.
        :type columns: str label or list of str labels
        :return: DataFrame with the dropped columns.
        :rtype: DataFrame
        :raises KeyError: if `columns` not found in columns
        :example:
        >>> df = bpd.DataFrame().assign(A=[0, 4, 8],
        ...                             B=[1, 5, 9],
        ...                             C=[2, 6, 10],
        ...                             D=[3, 7, 11])
        >>> df
           A  B   C   D
        0  0  1   2   3
        1  4  5   6   7
        2  8  9  10  11
        >>> df.drop(columns=['B', 'C'])
           A   D
        0  0   3
        1  4   7
        2  8  11
        '''
        if not isinstance(columns, Iterable):
            raise TypeError('Argument `columns` must be a string label or list of string labels')
        mask = [columns not in self.columns] if isinstance(columns, str) else [x not in self.columns for x in columns]
        if any(mask):
            c = [columns] if isinstance(columns, str) else columns
            raise KeyError('{} not found in columns'.format(np.array(c)[mask]))
        f = _lift_to_pd(self._pd.drop)
        return f(columns=columns)
    def sample(self, n=None, replace=False, random_state=None):
        '''
        Return a random sample of items from an axis of object.
        :param n: Number of items from axis to return.
        :param replace: Sample with or without replacement.
        :param random_state: Seed for the random number generator
        :type n: int, optional
        :type replace: bool, default False
        :type random_state: int, optional
        :return: DataFrame with `n` randomly sampled rows.
        :rtype: DataFrame
        :raises ValueError: if a sample larger than the length of the DataFrame is taken without replacement.
        :example:
        >>> df = bpd.DataFrame().assign(letter=['a', 'b', 'c'],
        ...                             count=[9, 3, 3],
        ...                             points=[1, 2, 2])
        >>> df.sample(1, random_state=0)
          letter  count  points
        2      c      3       2
        '''
        if not isinstance(n, int) and n != None:
            raise TypeError('Argument `n` not an integer')
        if not isinstance(replace, bool):
            raise TypeError('Argument `replace` not a boolean')
        if not isinstance(random_state, int) and random_state != None:
            raise TypeError('Argument `random_state` must be an integer or None')
        if n != None and n > self._pd.shape[0] and replace == False:
            raise ValueError('Cannot take a larger sample than length of DataFrame when `replace=False`')
        f = _lift_to_pd(self._pd.sample)
        return f(n=n, replace=replace, random_state=random_state)
    def get(self, key):
        '''
        Get item from object for given key (ex: DataFrame column).
        :param key: Column label or list of column labels
        :type key: str label or list of str labels
        :return: Series with the corresponding label or DataFrame with the corresponding labels
        :rtype: Series or DataFrame
        :raises KeyError: if `key` not found in columns
        :example:
        >>> df = bpd.DataFrame().assign(letter=['a', 'b', 'c'],
        ...                             count=[9, 3, 3],
        ...                             points=[1, 2, 2])
        >>> df.get('letter')
        0    a
        1    b
        2    c
        Name: letter, dtype: object
        >>> df.get(['count', 'points'])
           count  points
        0      9       1
        1      3       2
        2      3       2
        '''
        if not isinstance(key, str) and not isinstance(key, Iterable):
            raise TypeError('Argument `key` must be a string label or list of string labels')
        mask = [key not in self.columns] if isinstance(key, str) else [x not in self.columns for x in key]
        if any(mask):
            k = [key] if isinstance(key, str) else key
            raise KeyError('{} not found in columns'.format(np.array(k)[mask]))
        f = _lift_to_pd(self._pd.get)
        return f(key=key)
    # Creation
    def assign(self, **kwargs):
        '''
        Assign new columns to a DataFrame.
        :param kwargs: Keyword column names with a list of values.
        :return: DataFrame with the additional column(s).
        :rtype: DataFrame
        :raises ValueError: if columns have different lengths or if new columns have different lengths than the existing DataFrame
        :example:
        >>> df = bpd.DataFrame().assign(flower=['sunflower', 'rose'])
        >>> df.assign(color=['yellow', 'red'])
              flower   color
        0  sunflower  yellow
        1       rose     red
        '''
        if len(set(map(len, kwargs.values()))) not in (0, 1):
            raise ValueError('Not all columns have the same length')
        if self._pd.shape[1] != 0:
            if len(list(kwargs.values())[0]) != self._pd.shape[0]:
                raise ValueError('New column does not have the same length as existing DataFrame')
        f = _lift_to_pd(self._pd.assign)
        return f(**kwargs)
    # Transformation
    def apply(self, func, axis=0):
        '''
        Apply a function along an axis of the DataFrame.
        :param func: Function to apply to each column or row.
        :param axis: Axis along which the function is applied:
            - 0 or 'index': apply function to each column.
            - 1 or 'columns': apply function to each row.
        :type func: function
        :type axis: {0 or ‘index’, 1 or ‘columns’}, default 0
        :return: Result of applying func along the given axis of the DataFrame.
        :rtype: Series or DataFrame
        :example:
        >>> def add_two(row):
        ...     return row + 2
        >>> df = bpd.DataFrame().assign(A=[1, 1],
        ...                             B=[2, 2])
        >>> df.apply(add_two)
           A  B
        0  3  4
        1  3  4
        '''
        if not callable(func):
            raise TypeError('Argument `func` must be a function')
        if axis not in [0, 1]:
            raise ValueError('Argument `axis` must be either 0 or 1')
        f = _lift_to_pd(self._pd.apply)
        return f(func=func, axis=axis)
    def sort_values(self, by, ascending=True):
        '''
        Sort by the values along either axis.
        :param by: String label or list of string labels to sort by.
        :param ascending: Sort ascending vs. descending.
        :type by: str or list of str
        :type param: bool, default True
        :return: DataFrame with sorted values.
        :rtype: DataFrame
        :raises KeyError: if `by` not found in columns
        :example:
        >>> df = bpd.DataFrame().assign(name=['Sally', 'George', 'Bill', 'Ann'],
        ...                             age=[21, 25, 18, 28],
        ...                             height_cm=[161, 168, 171, 149])
        >>> df.sort_values(by='age')
             name  age  height_cm
        2    Bill   18        171
        0   Sally   21        161
        1  George   25        168
        3     Ann   28        149
        >>> df.sort_values(by='height_cm', ascending=False)
             name  age  height_cm
        2    Bill   18        171
        1  George   25        168
        0   Sally   21        161
        3     Ann   28        149
        '''
        if not isinstance(by, Iterable):
            raise TypeError('Argument `by` must be a string label or list of string labels')
        mask = [by not in self.columns] if isinstance(by, str) else [x not in self.columns for x in by]
        if any(mask):
            b = [by] if isinstance(by, str) else by
            raise KeyError('{} not found in columns'.format(np.array(b)[mask]))
        if not isinstance(ascending, bool):
            raise TypeError('Argument `ascending` must be a boolean')
        f = _lift_to_pd(self._pd.sort_values)
        return f(by=by, ascending=ascending)
    def describe(self):
        '''
        Generate descriptive statistics that summarize the central
        tendency, dispersion and shape of a dataset’s distribution.
        :return: Summary statistics of the DataFrame provided.
        :rtype: DataFrame
        :example:
        >>> df = bpd.DataFrame().assign(A=[0, 10, 20],
        ...                             B=[1, 2, 3])
        >>> df.describe()
                  A    B
        count   3.0  3.0
        mean   10.0  2.0
        std    10.0  1.0
        min     0.0  1.0
        25%     5.0  1.5
        50%    10.0  2.0
        75%    15.0  2.5
        max    20.0  3.0
        '''
        f = _lift_to_pd(self._pd.describe)
        return f()
    def groupby(self, by=None):
        '''
        Group DataFrame or Series using a mapper or by a Series of columns.
        :param by: Used to determine the groups for the groupby.
        :type by: label, or list of labels
        :return: Groupby object that contains information about the groups.
        :rtype: DataFrameGroupBy
        :raises KeyError: if `by` not found in columns
        :example:
        >>> df = bpd.DataFrame().assign(animal=['Falcon', 'Falcon', 'Parrot', 'Parrot'],
        ...                             max_speed=[380, 370, 24, 26])
        >>> df.groupby('animal').mean()
                max_speed
        animal
        Falcon      375.0
        Parrot       25.0
        '''
        if not isinstance(by, Iterable):
            raise TypeError('Argument `by` must be a string label or list of string labels')
        mask = [by not in self.columns] if isinstance(by, str) else [x not in self.columns for x in by]
        if any(mask):
            b = [by] if isinstance(by, str) else by
            raise KeyError('{} not found in columns'.format(np.array(b)[mask]))
        f = _lift_to_pd(self._pd.groupby)
        return f(by=by)
    def reset_index(self, drop=False):
        '''
        Reset the index of the DataFrame, and use the default one
        instead. If the DataFrame has a MultiIndex, this method can
        remove one or more levels.
        :param drop: Does not insert index as a column.
        :type drop: bool, default False
        :return: DataFrame with the new index.
        :rtype: DataFrame
        :example:
        >>> df = bpd.DataFrame().assign(name=['Sally', 'George', 'Bill', 'Ann'],
        ...                             age=[21, 25, 18, 28],
        ...                             height_cm=[161, 168, 171, 149])
        >>> sorted = df.sort_values(by='age')
        >>> sorted
             name  age  height_cm
        2    Bill   18        171
        0   Sally   21        161
        1  George   25        168
        3     Ann   28        149
        >>> sorted.reset_index(drop=True)
             name  age  height_cm
        0    Bill   18        171
        1   Sally   21        161
        2  George   25        168
        3     Ann   28        149
        '''
        if not isinstance(drop, bool):
            raise TypeError('Argument `drop` must be a boolean')
        f = _lift_to_pd(self._pd.reset_index)
        return f(drop=drop)
    def set_index(self, keys, drop=True):
        '''
        Set the DataFrame index using existing columns.
        :param keys: Key(s) to set index on.
        :param drop: Delete column(s) to be used as the new index.
        :type keys: str label or list of str labels
        :type drop: bool, default True
        :return: DataFrame with changed row labels.
        :rtype: DataFrame
        :raises KeyError: if `keys` not found in columns
        :example:
        >>> df = bpd.DataFrame().assign(name=['Sally', 'George', 'Bill', 'Ann'],
        ...                             age=[21, 25, 18, 28],
        ...                             height_cm=[161, 168, 171, 149])
        >>> df.set_index('name')
                age  height_cm
        name
        Sally    21        161
        George   25        168
        Bill     18        171
        Ann      28        149
        '''
        if not isinstance(keys, Iterable):
            raise TypeError('Argument `keys` must be a string label or list of string labels')
        mask = [keys not in self.columns] if isinstance(keys, str) else [x not in self.columns for x in keys]
        if any(mask):
            k = [keys] if isinstance(keys, str) else keys
            raise KeyError('{} not found in columns'.format(np.array(k)[mask]))
        if not isinstance(drop, bool):
            raise TypeError('Argument `drop` must be a boolean')
        f = _lift_to_pd(self._pd.set_index)
        return f(keys=keys, drop=drop)
    # Combining
    def merge(self, right, how='inner', on=None, left_on=None, right_on=None):
        '''
        Merge DataFrame or named Series objects with a database-style join.
        :param right: Object to merge with
        :param how: Type of merge to be performed.
            - left: use only keys from left frame, similar to a SQL left outer join; preserve key order.
            - right: use only keys from right frame, similar to a SQL right outer join; preserve key order.
            - outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically.
            - inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys.
        :param on: Column or index level names to join on. These must be found in both DataFrames.
        :param left_on: Column or index level names to join on in the left DataFrame.
        :param right_on: Column or index level names to join on in the right DataFrame.
        :type right: DataFrame or named Series
        :type how: {'left', 'right', 'outer', 'inner'}, default 'inner'
        :type on: label or list of labels
        :type left_on: label or list of labels
        :type right_on: label or list of labels
        :return: A DataFrame of the two merged objects.
        :rtype: DataFrame
        :raises KeyError: if any input labels are not found in the corresponding DataFrame's columns
        :example:
        >>> df1 = bpd.DataFrame().assign(pet=['dog', 'cat', 'lizard', 'turtle'],
        ...                              kind=['mammal', 'mammal', 'reptile', 'reptile'])
        >>> df2 = bpd.DataFrame().assign(kind=['mammal', 'reptile', 'amphibian'],
        ...                              abr=['m', 'r', 'a'])
        >>> df1.merge(df2, on='kind')
              pet     kind abr
        0     dog   mammal   m
        1     cat   mammal   m
        2  lizard  reptile   r
        3  turtle  reptile   r
        '''
        if not isinstance(right, DataFrame):
            raise TypeError('Argument `right` must by a DataFrame')
        if how not in ['left', 'right', 'outer', 'inner']:
            raise ValueError('Argument `how` must be either \'left\', \'right\', \'outer\', or \'inner\'')
        if (on not in self._pd.columns or on not in right.columns) and on != None:
            raise KeyError('Label \'{}\' not found in both DataFrames'.format(on))
        if (left_on == None and right_on != None) or (left_on != None and right_on == None):
            raise KeyError('Both `left_on` and `right_on` must be column labels')
        if left_on != None and right_on != None:
            if left_on not in self._pd.columns:
                raise KeyError('Label \'{}\' not found in left DataFrame'.format(left_on))
            if right_on not in right.columns:
                raise KeyError('Label \'{}\' not found in right DataFrame'.format(right_on))
        f = _lift_to_pd(self._pd.merge)
        return f(right=right, how=how, on=on, left_on=left_on, right_on=right_on)
    def append(self, other, ignore_index=False):
        '''
        Append rows of other to the end of caller, returning a new object.
        :param other: The data to append.
        :type other: DataFrame or Series/dict-like object, or list of these
        :return: DataFrame with appended rows.
        :rtype: DataFrame
        :example:
        >>> df1 = bpd.DataFrame().assign(letter=['a', 'b'])
        >>> df2 = bpd.DataFrame().assign(letter=['c'])
        >>> df1.append(df2, ignore_index=True)
          letter
        0      a
        1      b
        2      c
        '''
        if not isinstance(other, DataFrame):
            raise TypeError('Argument `other` must by a DataFrame')
        if not isinstance(ignore_index, bool):
            raise TypeError('Argument `ignore_index` must be a boolean')
        f = _lift_to_pd(self._pd.append)
        return f(other=other, ignore_index=ignore_index)
    # Plotting
    def plot(self, *args, **kwargs):
        '''
        Plot the data in the DataFrame.
        All positional and keyword arguments are forwarded to
        ``pandas.DataFrame.plot``.
        '''
        f = _lift_to_pd(self._pd.plot)
        return f(*args, **kwargs)
    # IO
    def to_csv(self, path_or_buf=None, index=True):
        '''
        Write object to a comma-separated values (csv) file.
        :param path_or_buf: File path or object, if None is provided the result is returned as a string.
        :param index: Write row names (index).
        :type path_or_buf: str or file handle, default None
        :type index: bool, default True
        :return: If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None.
        :rtype: None or str
        '''
        if not isinstance(index, bool):
            raise TypeError('Argument `index` must be a boolean')
        f = _lift_to_pd(self._pd.to_csv)
        return f(path_or_buf=path_or_buf, index=index)
    def to_numpy(self):
        '''
        Convert the DataFrame to a NumPy array.
        :return: DataFrame as a NumPy array.
        :rtype: NumPy array
        '''
        f = _lift_to_pd(self._pd.to_numpy)
        return f()
class Series(object):
    '''
    Custom Series class; Pandas Series with methods removed.
    '''
    def __init__(self, **kwargs):
        '''
        Create an empty Series.
        '''
        # hidden pandas Series object
        self._pd = pd.Series(**kwargs)
        # lift loc/iloc back to custom Series objects
        self.loc = DataFrameIndexer(self._pd.loc)
        self.iloc = DataFrameIndexer(self._pd.iloc)
        # Properties
        self.shape = _lift_to_pd(self._pd.shape)
        self.index = _lift_to_pd(self._pd.index)
        self.values = _lift_to_pd(self._pd.values)
    # Formatting (delegated to the underlying pandas Series)
    def __repr__(self):
        return self._pd.__repr__()
    def __str__(self):
        return self._pd.__str__()
    # Selection
    def take(self, indices):
        '''
        Return the elements in the given positional indices along an axis.
        :param indices: An array of ints indicating which positions to take.
        :type indices: list of ints
        :return: Series with the given positional indices.
        :raises IndexError: if any `indices` are out of bounds with respect to DataFrame length.
        :example:
        >>> s = bpd.Series(data=[1, 2, 3], index=['A', 'B', 'C'])
        >>> s.take([0, 2])
        A    1
        C    3
        dtype: int64
        >>> s.take(np.arange(2))
        A    1
        B    2
        dtype: int64
        '''
        if not isinstance(indices, Iterable):
            raise TypeError('Argument `indices` must be a list-like object')
        if not all(isinstance(x, (int, np.integer)) for x in indices):
            raise ValueError('Argument `indices` must only contain integers')
        if not all(x < self._pd.shape[0] for x in indices):
            raise IndexError('Indices are out-of-bounds')
        f = _lift_to_pd(self._pd.take)
        return f(indices)
    def sample(self, n=None, replace=False, random_state=None):
        '''
        Return a random sample of items from an axis of object.
        :param n: Number of items from axis to return.
        :param replace: Sample with or without replacement.
        :param random_state: Seed for the random number generator
        :type n: int, optional
        :type replace: bool, default False
        :type random_state: int, optional
        :return: Series with `n` randomly sampled items.
        :rtype: Series
        :raises ValueError: if a sample larger than the length of the DataFrame is taken without replacement.
        :example:
        >>> s = bpd.Series(data=[1, 2, 3, 4, 5])
        >>> s.sample(3, random_state=0)
        2    3
        0    1
        1    2
        dtype: int64
        >>> s.sample(7, replace=True, random_state=10)
        1    2
        4    5
        0    1
        1    2
        3    4
        4    5
        1    2
        dtype: int64
        '''
        if not isinstance(n, int) and n != None:
            raise TypeError('Argument `n` not an integer')
        if not isinstance(replace, bool):
            raise TypeError('Argument `replace` not a boolean')
        if not isinstance(random_state, int) and random_state != None:
            raise TypeError('Argument `random_state` must be an integer or None')
        if n != None and n > self._pd.shape[0] and replace == False:
            raise ValueError('Cannot take a larger sample than length of DataFrame when `replace=False`')
        f = _lift_to_pd(self._pd.sample)
        return f(n=n, replace=replace, random_state=random_state)
    # Transformation
    def apply(self, func):
        '''
        Invoke function on values of Series.
        :param func: Function to apply.
        :type func: function
        :return: Result of applying func to the Series.
        :rtype: Series
        :example:
        >>> def cut_off_5(val):
        ...     if val > 5:
        ...         return 5
        ...     else:
        ...         return val
        >>> s = bpd.Series(data=[1, 3, 5, 7, 9]
        >>> s.apply(cut_off_5)
        0    1
        1    3
        2    5
        3    5
        4    5
        dtype: int64
        '''
        if not callable(func):
            raise TypeError('Argument `func` must be a function')
        f = _lift_to_pd(self._pd.apply)
        return f(func=func)
    def sort_values(self, ascending=True):
        '''
        Sort by the values
        :param ascending: Sort ascending vs. descending.
        :type ascending: bool, default True
        :return: Series with sorted values.
        :rtype: Series
        :example:
        >>> s = bpd.Series(data=[6, 4, 3, 9, 5])
        >>> s.sort_values()
        2    3
        1    4
        4    5
        0    6
        3    9
        dtype: int64
        >>> s.sort_values(ascending=False)
        3    9
        0    6
        4    5
        1    4
        2    3
        dtype: int64
        '''
        if not isinstance(ascending, bool):
            raise TypeError('Argument `ascending` must be a boolean')
        f = _lift_to_pd(self._pd.sort_values)
        return f(ascending=ascending)
    def describe(self):
        '''
        Generate descriptive statistics that summarize the central tendency,
        dispersion and shape of a dataset’s distribution.
        :return: Summary statistics of the Series provided.
        :rtype: Series
        :example:
        >>> s = bpd.Series(data=[6, 7, 7, 5, 9, 5, 1])
        >>> s.describe()
        count    7.000000
        mean     5.714286
        std      2.497618
        min      1.000000
        25%      5.000000
        50%      6.000000
        75%      7.000000
        max      9.000000
        dtype: float64
        '''
        f = _lift_to_pd(self._pd.describe)
        return f()
    def reset_index(self, drop=False):
        '''
        Generate a new DataFrame or Series with the index reset.
        :param drop: Does not insert index as a column.
        :type drop: bool, default False
        :return: When drop is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When drop is True, a Series is returned.
        :rtype: Series or DataFrame
        :example:
        >>> s = bpd.Series([6, 4, 3, 9, 5])
        >>> sorted = s.sort_values()
        >>> sorted.reset_index()
           index  0
        0      2  3
        1      1  4
        2      4  5
        3      0  6
        4      3  9
        >>> sorted.reset_index(drop=True)
        0    3
        1    4
        2    5
        3    6
        4    9
        dtype: int64
        '''
        if not isinstance(drop, bool):
            raise TypeError('Argument `drop` must be a boolean')
        f = _lift_to_pd(self._pd.reset_index)
        return f(drop=drop)
    # Plotting
    def plot(self, *args, **kwargs):
        '''
        Plot the data in the DataFrame.
        All positional and keyword arguments are forwarded to
        ``pandas.Series.plot``.
        '''
        f = _lift_to_pd(self._pd.plot)
        return f(*args, **kwargs)
    # IO
    def to_csv(self, path_or_buf=None, index=True):
        '''
        Write object to a comma-separated values (csv) file.
        :param path_or_buf: File path or object, if None is provided the result is returned as a string.
        :param index: Write row names (index).
        :type path_or_buf: str or file handle, default None
        :type index: bool, default True
        :return: If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None.
        :rtype: None or str
        '''
        if not isinstance(index, bool):
            raise TypeError('Argument `index` must be a boolean')
        f = _lift_to_pd(self._pd.to_csv)
        return f(path_or_buf=path_or_buf, index=index)
    def to_numpy(self):
        '''
        A NumPy ndarray representing the values in this Series or Index.
        :return: Series as a NumPy array.
        :rtype: NumPy array
        '''
        f = _lift_to_pd(self._pd.to_numpy)
        return f()
    # Calculations
    def count(self):
        '''
        Return number of observations in the Series
        '''
        f = _lift_to_pd(self._pd.count)
        return f()
    def mean(self):
        '''
        Return the mean of the values for the requested axis.
        '''
        f = _lift_to_pd(self._pd.mean)
        return f()
    def median(self):
        '''
        Return the median of the values for the requested axis.
        '''
        f = _lift_to_pd(self._pd.median)
        return f()
    def min(self):
        '''
        Return the minimum of the values for the requested axis.
        '''
        f = _lift_to_pd(self._pd.min)
        return f()
    def max(self):
        '''
        Return the maximum of the values for the requested axis.
        '''
        f = _lift_to_pd(self._pd.max)
        return f()
    def sum(self):
        '''
        Return the sum of the values for the requested axis.
        '''
        f = _lift_to_pd(self._pd.sum)
        return f()
    def abs(self):
        '''
        Return a Series with absolute numeric value of each element.
        '''
        f = _lift_to_pd(self._pd.abs)
        return f()
    # Arithmetic
    def __add__(self, other):
        f = _lift_to_pd(self._pd.__add__)
        return f(other)
    def __mul__(self, other):
        f = _lift_to_pd(self._pd.__mul__)
        return f(other)
    def __rmul__(self, other):
        f = _lift_to_pd(self._pd.__rmul__)
        return f(other)
    def __pow__(self, other):
        f = _lift_to_pd(self._pd.__pow__)
        return f(other)
    def __sub__(self, other):
        f = _lift_to_pd(self._pd.__sub__)
        return f(other)
    def __truediv__(self, other):
        f = _lift_to_pd(self._pd.__truediv__)
        return f(other)
    def __mod__(self, other):
        f = _lift_to_pd(self._pd.__mod__)
        return f(other)
    # comparison
    def __eq__(self, other):
        f = _lift_to_pd(self._pd.__eq__)
        return f(other)
    def __ne__(self, other):
        f = _lift_to_pd(self._pd.__ne__)
        return f(other)
    def __gt__(self, other):
        f = _lift_to_pd(self._pd.__gt__)
        return f(other)
    def __lt__(self, other):
        f = _lift_to_pd(self._pd.__lt__)
        return f(other)
    def __ge__(self, other):
        f = _lift_to_pd(self._pd.__ge__)
        return f(other)
    def __le__(self, other):
        f = _lift_to_pd(self._pd.__le__)
        return f(other)
    # other dunder methods
    def __len__(self):
        return self._pd.__len__()
    # array interface (for applying numpy functions)
    def __array__(self, *vargs, **kwargs):
        return self._pd.__array__(*vargs, **kwargs)
    # return the underlying Series
    def to_ser(self):
        '''return the full pandas series'''
        return self._pd
class DataFrameGroupBy(object):
    '''
    Wrapper around a pandas groupby object; aggregation results are
    lifted back to the custom DataFrame/Series classes.
    '''
    def __init__(self, groupby):
        # hidden pandas groupby object
        self._pd = groupby
    # return the underlying groupby object
    def to_gb(self):
        '''return the full pandas dataframe'''
        return self._pd
    def aggregate(self, func):
        '''
        Aggregate using the given function on the grouped data.
        :param func: Function to apply to each group.
        :type func: function
        :return: Aggregated result, lifted back to a babypandas object.
        :raises TypeError: if `func` is not callable
        '''
        if not callable(func):
            # TypeError with the same message style as the rest of the
            # library (previously a bare Exception was raised here).
            raise TypeError('Argument `func` must be a function')
        # lift the result like every other method of this class
        # (previously the raw pandas object was returned)
        f = _lift_to_pd(self._pd.aggregate)
        return f(func)
    # Calculations
    def count(self):
        '''
        Compute count of group.
        '''
        f = _lift_to_pd(self._pd.count)
        return f()
    def mean(self):
        '''
        Compute mean of group.
        '''
        f = _lift_to_pd(self._pd.mean)
        return f()
    def median(self):
        '''
        Compute median of group.
        '''
        f = _lift_to_pd(self._pd.median)
        return f()
    def min(self):
        '''
        Compute min of group.
        '''
        f = _lift_to_pd(self._pd.min)
        return f()
    def max(self):
        '''
        Compute max of group.
        '''
        f = _lift_to_pd(self._pd.max)
        return f()
    def sum(self):
        '''
        Compute sum of group.
        '''
        f = _lift_to_pd(self._pd.sum)
        return f()
    def size(self):
        '''
        Compute group sizes.
        '''
        f = _lift_to_pd(self._pd.size)
        return f()
class DataFrameIndexer(object):
    '''
    Thin wrapper around a pandas ``loc``/``iloc`` indexer that lifts
    pandas results back into the custom DataFrame/Series classes.
    '''
    def __init__(self, indexer):
        # the raw pandas indexer being wrapped
        self.idx = indexer
    def __getitem__(self, item):
        # unwrap baby-pandas keys into their pandas equivalents
        key = getattr(item, '_pd', item)
        # TODO: restrict what item can be? (e.g. boolean array)
        result = self.idx[key]
        if isinstance(result, pd.DataFrame):
            return DataFrame(data=result)
        if isinstance(result, pd.Series):
            return Series(data=result)
        return result
def _lift_to_pd(func):
'''checks output-type of function and if output is a
Pandas object, lifts the output to a babypandas class'''
if not callable(func):
return func
types = (DataFrame, DataFrameGroupBy, Series)
def closure(*vargs, **kwargs):
vargs = [x._pd if isinstance(x, types) else x for x in vargs]
kwargs = {k: x._pd if isinstance(x, types) else x
for (k, x) in kwargs.items()}
a = func(*vargs, **kwargs)
if isinstance(a, pd.DataFrame):
return DataFrame(data=a)
elif isinstance(a, pd.Series):
return Series(data=a)
elif isinstance(a, pd.core.groupby.generic.DataFrameGroupBy):
return DataFrameGroupBy(a)
else:
return a
closure.__doc__ = func.__doc__
return closure
def read_csv(filepath, **kwargs):
    '''Read a csv file into a custom DataFrame; extra keyword arguments
    are forwarded to ``pandas.read_csv``.'''
    raw = pd.read_csv(filepath, **kwargs)
    return DataFrame(data=raw)
| [
"pandas.Series",
"pandas.read_csv",
"pandas.set_option",
"numpy.array",
"pandas.DataFrame"
] | [((77, 114), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(10)'], {}), "('display.max_rows', 10)\n", (90, 114), True, 'import pandas as pd\n'), ((34587, 34618), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (34598, 34618), True, 'import pandas as pd\n'), ((575, 597), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '(**kwargs)\n', (587, 597), True, 'import pandas as pd\n'), ((21044, 21063), 'pandas.Series', 'pd.Series', ([], {}), '(**kwargs)\n', (21053, 21063), True, 'import pandas as pd\n'), ((4294, 4305), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (4302, 4305), True, 'import numpy as np\n'), ((7261, 7272), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (7269, 7272), True, 'import numpy as np\n'), ((10989, 11000), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (10997, 11000), True, 'import numpy as np\n'), ((12999, 13010), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (13007, 13010), True, 'import numpy as np\n'), ((15637, 15648), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (15645, 15648), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import division
import numpy as np
import lfd.rapprentice.math_utils as mu
from lfd.environment.simulation import DynamicSimulationRobotWorld
from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject
from lfd.environment import sim_util
from lfd.demonstration.demonstration import AugmentedTrajectory
from lfd.environment import environment
from lfd.transfer import planning
def create_cylinder_grid(cyl_pos0, cyl_pos1, cyl_pos2, cyl_radius, cyl_height):
sample_grid = np.array(np.meshgrid(np.linspace(0,1,5), np.linspace(0,1,5))).T.reshape((-1,2))
cyl_positions = cyl_pos0 + sample_grid[:,0][:,None].dot(cyl_pos1[None,:] - cyl_pos0[None,:]) + sample_grid[:,1][:,None].dot(cyl_pos2[None,:] - cyl_pos0[None,:])
cyl_sim_objs = []
for (i,cyl_pos) in enumerate(cyl_positions):
cyl_sim_objs.append(CylinderSimulationObject("cyl%i"%i, cyl_pos, cyl_radius, cyl_height, dynamic=True))
return cyl_sim_objs
def color_cylinders(cyl_sim_objs):
for sim_obj in cyl_sim_objs:
color = np.random.random(3)
for bt_obj in sim_obj.get_bullet_objects():
for link in bt_obj.GetKinBody().GetLinks():
for geom in link.GetGeometries():
geom.SetDiffuseColor(color)
def create_rope(rope_poss, capsule_height=.02):
rope_pos_dists = np.linalg.norm(np.diff(rope_poss, axis=0), axis=1)
xp = np.r_[0, np.cumsum(rope_pos_dists/capsule_height)]
init_rope_nodes = mu.interp2d(np.arange(xp[-1]+1), xp, rope_poss)
rope_sim_obj = RopeSimulationObject("rope", init_rope_nodes)
return rope_sim_obj
def create_augmented_traj(robot, pick_pos, drop_pos, pick_R, drop_R, move_height, pos_displacement_per_step=.02):
pos_traj = np.array([pick_pos + np.r_[0,0,move_height],
pick_pos,
pick_pos + np.r_[0,0,move_height],
drop_pos + np.r_[0,0,move_height],
drop_pos,
drop_pos + np.r_[0,0,move_height]])
R_traj = np.array([pick_R, pick_R, pick_R, drop_R, drop_R, drop_R])
ee_traj = np.empty((len(pos_traj), 4, 4))
ee_traj[:] = np.eye(4)
ee_traj[:,:3,3] = pos_traj
ee_traj[:,:3,:3] = R_traj
open_finger_traj = np.array([False, False, False, False, True, False])
close_finger_traj = np.array([False, True, False, False, False, False])
open_finger_value = sim_util.get_binary_gripper_angle(True)
closed_finger_value = sim_util.get_binary_gripper_angle(False)
finger_traj = np.array([open_finger_value, open_finger_value, closed_finger_value, closed_finger_value, closed_finger_value, open_finger_value])[:,None]
lr = 'r' # use right arm/gripper
aug_traj = AugmentedTrajectory(lr2ee_traj={lr: ee_traj}, lr2finger_traj={lr: finger_traj}, lr2open_finger_traj={lr: open_finger_traj}, lr2close_finger_traj={lr: close_finger_traj})
# resample augmented trajectory according to the position displacement
pos_dists = np.linalg.norm(np.diff(pos_traj, axis=0), axis=1)
new_times = np.r_[0, np.cumsum(pos_dists/pos_displacement_per_step)]
timesteps_rs = np.interp(np.arange(new_times[-1]+1), new_times, np.arange(len(new_times)))
aug_traj_rs = aug_traj.get_resampled_traj(timesteps_rs)
# do motion planning for aug_traj_rs
manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
ee_link_name = "%s_gripper_tool_frame"%lr
ee_link = robot.GetLink(ee_link_name)
dof_vals = robot.GetManipulator(manip_name).GetArmDOFValues()
init_traj = np.tile(dof_vals, (aug_traj_rs.n_steps,1))
arm_traj, _, _ = planning.plan_follow_traj(robot, manip_name, ee_link, aug_traj_rs.lr2ee_traj['r'], init_traj, no_collision_cost_first=True)
aug_traj_rs.lr2arm_traj[lr] = arm_traj
return aug_traj_rs
def main():
# define simulation objects
table_height = 0.77
cyl_radius = 0.025
cyl_height = 0.3
cyl_pos0 = np.r_[.7, -.15, table_height+cyl_height/2]
cyl_pos1 = np.r_[.7, .15, table_height+cyl_height/2]
cyl_pos2 = np.r_[.4, -.15, table_height+cyl_height/2]
rope_poss = np.array([[.2, -.2, table_height+0.006],
[.8, -.2, table_height+0.006],
[.8, .2, table_height+0.006],
[.2, .2, table_height+0.006]])
sim_objs = []
sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False))
cyl_sim_objs = create_cylinder_grid(cyl_pos0, cyl_pos1, cyl_pos2, cyl_radius, cyl_height)
sim_objs.extend(cyl_sim_objs)
rope_sim_obj = create_rope(rope_poss)
sim_objs.append(rope_sim_obj)
# initialize simulation world and environment
sim = DynamicSimulationRobotWorld()
sim.add_objects(sim_objs)
sim.create_viewer()
sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
sim_util.reset_arms_to_side(sim)
color_cylinders(cyl_sim_objs)
env = environment.LfdEnvironment(sim, sim)
# define augmented trajectory
pick_pos = rope_poss[0] + .1 * (rope_poss[1] - rope_poss[0])
drop_pos = rope_poss[3] + .1 * (rope_poss[2] - rope_poss[3]) + np.r_[0, .2, 0]
pick_R = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
drop_R = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])
move_height = .2
aug_traj = create_augmented_traj(sim.robot, pick_pos, drop_pos, pick_R, drop_R, move_height)
env.execute_augmented_trajectory(aug_traj)
if __name__ == '__main__':
main()
| [
"numpy.tile",
"numpy.eye",
"lfd.environment.environment.LfdEnvironment",
"numpy.random.random",
"lfd.environment.sim_util.reset_arms_to_side",
"lfd.environment.sim_util.get_binary_gripper_angle",
"lfd.environment.simulation_object.RopeSimulationObject",
"numpy.diff",
"lfd.environment.simulation_obje... | [((1625, 1670), 'lfd.environment.simulation_object.RopeSimulationObject', 'RopeSimulationObject', (['"""rope"""', 'init_rope_nodes'], {}), "('rope', init_rope_nodes)\n", (1645, 1670), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject\n'), ((1825, 2012), 'numpy.array', 'np.array', (['[pick_pos + np.r_[0, 0, move_height], pick_pos, pick_pos + np.r_[0, 0,\n move_height], drop_pos + np.r_[0, 0, move_height], drop_pos, drop_pos +\n np.r_[0, 0, move_height]]'], {}), '([pick_pos + np.r_[0, 0, move_height], pick_pos, pick_pos + np.r_[0,\n 0, move_height], drop_pos + np.r_[0, 0, move_height], drop_pos, \n drop_pos + np.r_[0, 0, move_height]])\n', (1833, 2012), True, 'import numpy as np\n'), ((2139, 2197), 'numpy.array', 'np.array', (['[pick_R, pick_R, pick_R, drop_R, drop_R, drop_R]'], {}), '([pick_R, pick_R, pick_R, drop_R, drop_R, drop_R])\n', (2147, 2197), True, 'import numpy as np\n'), ((2261, 2270), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2267, 2270), True, 'import numpy as np\n'), ((2355, 2406), 'numpy.array', 'np.array', (['[False, False, False, False, True, False]'], {}), '([False, False, False, False, True, False])\n', (2363, 2406), True, 'import numpy as np\n'), ((2431, 2482), 'numpy.array', 'np.array', (['[False, True, False, False, False, False]'], {}), '([False, True, False, False, False, False])\n', (2439, 2482), True, 'import numpy as np\n'), ((2507, 2546), 'lfd.environment.sim_util.get_binary_gripper_angle', 'sim_util.get_binary_gripper_angle', (['(True)'], {}), '(True)\n', (2540, 2546), False, 'from lfd.environment import sim_util\n'), ((2573, 2613), 'lfd.environment.sim_util.get_binary_gripper_angle', 'sim_util.get_binary_gripper_angle', (['(False)'], {}), '(False)\n', (2606, 2613), False, 'from lfd.environment import sim_util\n'), ((2828, 3005), 'lfd.demonstration.demonstration.AugmentedTrajectory', 
'AugmentedTrajectory', ([], {'lr2ee_traj': '{lr: ee_traj}', 'lr2finger_traj': '{lr: finger_traj}', 'lr2open_finger_traj': '{lr: open_finger_traj}', 'lr2close_finger_traj': '{lr: close_finger_traj}'}), '(lr2ee_traj={lr: ee_traj}, lr2finger_traj={lr:\n finger_traj}, lr2open_finger_traj={lr: open_finger_traj},\n lr2close_finger_traj={lr: close_finger_traj})\n', (2847, 3005), False, 'from lfd.demonstration.demonstration import AugmentedTrajectory\n'), ((3641, 3684), 'numpy.tile', 'np.tile', (['dof_vals', '(aug_traj_rs.n_steps, 1)'], {}), '(dof_vals, (aug_traj_rs.n_steps, 1))\n', (3648, 3684), True, 'import numpy as np\n'), ((3705, 3833), 'lfd.transfer.planning.plan_follow_traj', 'planning.plan_follow_traj', (['robot', 'manip_name', 'ee_link', "aug_traj_rs.lr2ee_traj['r']", 'init_traj'], {'no_collision_cost_first': '(True)'}), "(robot, manip_name, ee_link, aug_traj_rs.\n lr2ee_traj['r'], init_traj, no_collision_cost_first=True)\n", (3730, 3833), False, 'from lfd.transfer import planning\n'), ((4203, 4361), 'numpy.array', 'np.array', (['[[0.2, -0.2, table_height + 0.006], [0.8, -0.2, table_height + 0.006], [0.8,\n 0.2, table_height + 0.006], [0.2, 0.2, table_height + 0.006]]'], {}), '([[0.2, -0.2, table_height + 0.006], [0.8, -0.2, table_height + \n 0.006], [0.8, 0.2, table_height + 0.006], [0.2, 0.2, table_height + 0.006]]\n )\n', (4211, 4361), True, 'import numpy as np\n'), ((4903, 4932), 'lfd.environment.simulation.DynamicSimulationRobotWorld', 'DynamicSimulationRobotWorld', ([], {}), '()\n', (4930, 4932), False, 'from lfd.environment.simulation import DynamicSimulationRobotWorld\n'), ((5089, 5121), 'lfd.environment.sim_util.reset_arms_to_side', 'sim_util.reset_arms_to_side', (['sim'], {}), '(sim)\n', (5116, 5121), False, 'from lfd.environment import sim_util\n'), ((5176, 5212), 'lfd.environment.environment.LfdEnvironment', 'environment.LfdEnvironment', (['sim', 'sim'], {}), '(sim, sim)\n', (5202, 5212), False, 'from lfd.environment import environment\n'), ((5413, 
5457), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [-1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])\n', (5421, 5457), True, 'import numpy as np\n'), ((5471, 5516), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, -1], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n', (5479, 5516), True, 'import numpy as np\n'), ((1129, 1148), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (1145, 1148), True, 'import numpy as np\n'), ((1440, 1466), 'numpy.diff', 'np.diff', (['rope_poss'], {'axis': '(0)'}), '(rope_poss, axis=0)\n', (1447, 1466), True, 'import numpy as np\n'), ((1570, 1591), 'numpy.arange', 'np.arange', (['(xp[-1] + 1)'], {}), '(xp[-1] + 1)\n', (1579, 1591), True, 'import numpy as np\n'), ((2632, 2766), 'numpy.array', 'np.array', (['[open_finger_value, open_finger_value, closed_finger_value,\n closed_finger_value, closed_finger_value, open_finger_value]'], {}), '([open_finger_value, open_finger_value, closed_finger_value,\n closed_finger_value, closed_finger_value, open_finger_value])\n', (2640, 2766), True, 'import numpy as np\n'), ((3109, 3134), 'numpy.diff', 'np.diff', (['pos_traj'], {'axis': '(0)'}), '(pos_traj, axis=0)\n', (3116, 3134), True, 'import numpy as np\n'), ((3246, 3274), 'numpy.arange', 'np.arange', (['(new_times[-1] + 1)'], {}), '(new_times[-1] + 1)\n', (3255, 3274), True, 'import numpy as np\n'), ((4462, 4526), 'lfd.environment.simulation_object.XmlSimulationObject', 'XmlSimulationObject', (['"""robots/pr2-beta-static.zae"""'], {'dynamic': '(False)'}), "('robots/pr2-beta-static.zae', dynamic=False)\n", (4481, 4526), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject\n'), ((4548, 4642), 'lfd.environment.simulation_object.BoxSimulationObject', 'BoxSimulationObject', (['"""table"""', '[1, 0, table_height - 0.1]', '[0.85, 0.85, 0.1]'], {'dynamic': '(False)'}), "('table', [1, 0, table_height - 0.1], [0.85, 0.85, 
0.1],\n dynamic=False)\n", (4567, 4642), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject\n'), ((936, 1024), 'lfd.environment.simulation_object.CylinderSimulationObject', 'CylinderSimulationObject', (["('cyl%i' % i)", 'cyl_pos', 'cyl_radius', 'cyl_height'], {'dynamic': '(True)'}), "('cyl%i' % i, cyl_pos, cyl_radius, cyl_height,\n dynamic=True)\n", (960, 1024), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject\n'), ((1494, 1536), 'numpy.cumsum', 'np.cumsum', (['(rope_pos_dists / capsule_height)'], {}), '(rope_pos_dists / capsule_height)\n', (1503, 1536), True, 'import numpy as np\n'), ((3169, 3217), 'numpy.cumsum', 'np.cumsum', (['(pos_dists / pos_displacement_per_step)'], {}), '(pos_dists / pos_displacement_per_step)\n', (3178, 3217), True, 'import numpy as np\n'), ((613, 633), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (624, 633), True, 'import numpy as np\n'), ((633, 653), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (644, 653), True, 'import numpy as np\n')] |
import pickle
import nltk
import numpy as np
import torch
import os
import copy
from IRNet.preprocess import utils
from src.rule import semQL
from api.adapters.IRNet.constants import PATH_TO_CONCEPTNET
from api.paths import SCHEMAS_DIR
from api.setup_util import schemas, no_stdout
from IRNet.sem2SQL import transform
from .parse_args import init_arg_parser
from IRNet.src.models.model import IRNet
from IRNet.src.rule.sem_utils import wordnet_lemmatizer
from IRNet.src.utils import to_batch_seq
from api import setup_util
class IRNetAdapter:
def __init__(self):
self.params = init_arg_parser()
self.params.schemas_dir = SCHEMAS_DIR
# load model
grammar = semQL.Grammar()
self.model = IRNet(self.params, grammar)
if self.params.cuda:
self.model.cuda()
pretrained_model = torch.load(self.params.load_model, map_location=lambda storage, loc: storage)
else:
pretrained_model = torch.load(self.params.load_model, map_location='cpu')
pretrained_modeled = copy.deepcopy(pretrained_model)
for k in pretrained_model.keys():
if k not in self.model.state_dict().keys():
del pretrained_modeled[k]
self.model.load_state_dict(pretrained_modeled)
# load glove
self.model.word_emb = setup_util.glove_embeddings
# load table data
self.schemas = schemas
def epoch_acc(self, batch_size, sql_data, table_data, beam_size=3):
self.model.eval()
sql_data_list = []
sql_data_list.append(sql_data)
perm = list(range(len(sql_data_list)))
examples = to_batch_seq(sql_data_list, self.schemas, perm, 0, len(perm),
is_train=False)
example = examples[0]
results_all = self.model.parse(example, beam_size=beam_size)
results = results_all[0]
list_preds = []
try:
pred = " ".join([str(x) for x in results[0].actions])
for x in results:
list_preds.append(" ".join(str(x.actions)))
except Exception as e:
pred = ""
pre_sql = example.sql_json['pre_sql']
pre_sql['sketch_result'] = " ".join(str(x) for x in results_all[1])
pre_sql['model_result'] = pred
return pre_sql
def preprocessData(self, data, schema):
with open(os.path.join(PATH_TO_CONCEPTNET, 'english_RelatedTo.pkl'), 'rb') as f:
english_RelatedTo = pickle.load(f)
with open(os.path.join(PATH_TO_CONCEPTNET, 'english_IsA.pkl'), 'rb') as f:
english_IsA = pickle.load(f)
# copy of the origin question_toks
data["origin_question_toks"] = data["question_toks"]
data['question_toks'] = utils.symbol_filter(data['question_toks'])
origin_question_toks = utils.symbol_filter([x for x in data['origin_question_toks'] if x.lower() != 'the'])
question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in data['question_toks'] if x.lower() != 'the']
data['question_toks'] = question_toks
header_toks = []
header_toks_list = []
num_toks = len(question_toks)
idx = 0
tok_concol = []
type_concol = []
nltk_result = nltk.pos_tag(question_toks)
while idx < num_toks:
# fully header
end_idx, header = utils.fully_part_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for table
end_idx, tname = utils.group_header(question_toks, idx, num_toks, schema['table_names'])
if tname:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["table"])
idx = end_idx
continue
# check for column
end_idx, header = utils.group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for partial column
end_idx, tname = utils.partial_header(question_toks, idx, header_toks_list)
if tname:
tok_concol.append(tname)
type_concol.append(["col"])
idx = end_idx
continue
# check for aggregation
end_idx, agg = utils.group_header(question_toks, idx, num_toks, utils.AGG)
if agg:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["agg"])
idx = end_idx
continue
if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
tok_concol.append([question_toks[idx]])
type_concol.append(['MORE'])
idx += 1
continue
if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
tok_concol.append([question_toks[idx]])
type_concol.append(['MOST'])
idx += 1
continue
# string match for Time Format
if utils.num2year(question_toks[idx]):
question_toks[idx] = 'year'
end_idx, header = utils.group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
def get_concept_result(toks, graph):
for begin_id in range(0, len(toks)):
for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
tmp_query = "_".join(toks[begin_id:r_ind])
if tmp_query in graph:
mi = graph[tmp_query]
for col in data['col_set']:
if col in mi:
return col
end_idx, symbol = utils.group_symbol(question_toks, idx, num_toks)
if symbol:
tmp_toks = [x for x in question_toks[idx: end_idx]]
assert len(tmp_toks) > 0, print(symbol, question_toks)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
end_idx, values = utils.group_values(origin_question_toks, idx, num_toks)
if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
result = utils.group_digital(question_toks, idx)
if result is True:
tok_concol.append(question_toks[idx: idx + 1])
type_concol.append(["value"])
idx += 1
continue
if question_toks[idx] == ['ha']:
question_toks[idx] = ['have']
tok_concol.append([question_toks[idx]])
type_concol.append(['NONE'])
idx += 1
continue
data['question_arg'] = tok_concol
data['question_arg_type'] = type_concol
data['nltk_pos'] = nltk_result
return data
@no_stdout
def translate(self, nl_question, db_id):
data = self.createSqlData(nl_question, db_id)
schema = self.schemas[data['db_id']]
myArray = np.asarray(schema['column_names'])
data['col_set'] = myArray[:, 1].tolist()
data['table_names'] = schema['table_names']
processData = self.preprocessData(data, schema)
list = []
list.append(processData)
result = self.epoch_acc(1, processData, schema)
result['model_result_replace'] = result['model_result']
processedResult = transform(result, schema)
print(processedResult)
return processedResult[0]
def createSqlData(self, nl_question, db_id):
nl_question = nl_question.replace(" .", ".")
nl_question = nl_question.replace(" !", "!")
nl_question = nl_question.replace(" ?", "?")
data = {}
data['db_id'] = db_id
data['question'] = nl_question
data['query'] = ''
question_tokens = data['question']
question_tokens = question_tokens.split()
question_tokens.append(question_tokens[-1][-1])
question_tokens[-2] = question_tokens[-2][:-1]
data["question_toks"] = question_tokens
return data
| [
"nltk.pos_tag",
"IRNet.preprocess.utils.num2year",
"IRNet.preprocess.utils.group_digital",
"IRNet.src.models.model.IRNet",
"torch.load",
"numpy.asarray",
"pickle.load",
"IRNet.preprocess.utils.partial_header",
"os.path.join",
"IRNet.sem2SQL.transform",
"IRNet.preprocess.utils.group_header",
"I... | [((696, 711), 'src.rule.semQL.Grammar', 'semQL.Grammar', ([], {}), '()\n', (709, 711), False, 'from src.rule import semQL\n'), ((733, 760), 'IRNet.src.models.model.IRNet', 'IRNet', (['self.params', 'grammar'], {}), '(self.params, grammar)\n', (738, 760), False, 'from IRNet.src.models.model import IRNet\n'), ((1059, 1090), 'copy.deepcopy', 'copy.deepcopy', (['pretrained_model'], {}), '(pretrained_model)\n', (1072, 1090), False, 'import copy\n'), ((2774, 2816), 'IRNet.preprocess.utils.symbol_filter', 'utils.symbol_filter', (["data['question_toks']"], {}), "(data['question_toks'])\n", (2793, 2816), False, 'from IRNet.preprocess import utils\n'), ((3279, 3306), 'nltk.pos_tag', 'nltk.pos_tag', (['question_toks'], {}), '(question_toks)\n', (3291, 3306), False, 'import nltk\n'), ((8725, 8759), 'numpy.asarray', 'np.asarray', (["schema['column_names']"], {}), "(schema['column_names'])\n", (8735, 8759), True, 'import numpy as np\n'), ((9115, 9140), 'IRNet.sem2SQL.transform', 'transform', (['result', 'schema'], {}), '(result, schema)\n', (9124, 9140), False, 'from IRNet.sem2SQL import transform\n'), ((851, 928), 'torch.load', 'torch.load', (['self.params.load_model'], {'map_location': '(lambda storage, loc: storage)'}), '(self.params.load_model, map_location=lambda storage, loc: storage)\n', (861, 928), False, 'import torch\n'), ((974, 1028), 'torch.load', 'torch.load', (['self.params.load_model'], {'map_location': '"""cpu"""'}), "(self.params.load_model, map_location='cpu')\n", (984, 1028), False, 'import torch\n'), ((2497, 2511), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2508, 2511), False, 'import pickle\n'), ((2622, 2636), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2633, 2636), False, 'import pickle\n'), ((3396, 3462), 'IRNet.preprocess.utils.fully_part_header', 'utils.fully_part_header', (['question_toks', 'idx', 'num_toks', 'header_toks'], {}), '(question_toks, idx, num_toks, header_toks)\n', (3419, 3462), False, 'from IRNet.preprocess 
import utils\n'), ((3708, 3779), 'IRNet.preprocess.utils.group_header', 'utils.group_header', (['question_toks', 'idx', 'num_toks', "schema['table_names']"], {}), "(question_toks, idx, num_toks, schema['table_names'])\n", (3726, 3779), False, 'from IRNet.preprocess import utils\n'), ((4028, 4089), 'IRNet.preprocess.utils.group_header', 'utils.group_header', (['question_toks', 'idx', 'num_toks', 'header_toks'], {}), '(question_toks, idx, num_toks, header_toks)\n', (4046, 4089), False, 'from IRNet.preprocess import utils\n'), ((4344, 4402), 'IRNet.preprocess.utils.partial_header', 'utils.partial_header', (['question_toks', 'idx', 'header_toks_list'], {}), '(question_toks, idx, header_toks_list)\n', (4364, 4402), False, 'from IRNet.preprocess import utils\n'), ((4629, 4688), 'IRNet.preprocess.utils.group_header', 'utils.group_header', (['question_toks', 'idx', 'num_toks', 'utils.AGG'], {}), '(question_toks, idx, num_toks, utils.AGG)\n', (4647, 4688), False, 'from IRNet.preprocess import utils\n'), ((5388, 5422), 'IRNet.preprocess.utils.num2year', 'utils.num2year', (['question_toks[idx]'], {}), '(question_toks[idx])\n', (5402, 5422), False, 'from IRNet.preprocess import utils\n'), ((6295, 6343), 'IRNet.preprocess.utils.group_symbol', 'utils.group_symbol', (['question_toks', 'idx', 'num_toks'], {}), '(question_toks, idx, num_toks)\n', (6313, 6343), False, 'from IRNet.preprocess import utils\n'), ((7037, 7092), 'IRNet.preprocess.utils.group_values', 'utils.group_values', (['origin_question_toks', 'idx', 'num_toks'], {}), '(origin_question_toks, idx, num_toks)\n', (7055, 7092), False, 'from IRNet.preprocess import utils\n'), ((7939, 7978), 'IRNet.preprocess.utils.group_digital', 'utils.group_digital', (['question_toks', 'idx'], {}), '(question_toks, idx)\n', (7958, 7978), False, 'from IRNet.preprocess import utils\n'), ((2394, 2451), 'os.path.join', 'os.path.join', (['PATH_TO_CONCEPTNET', '"""english_RelatedTo.pkl"""'], {}), "(PATH_TO_CONCEPTNET, 
'english_RelatedTo.pkl')\n", (2406, 2451), False, 'import os\n'), ((2531, 2582), 'os.path.join', 'os.path.join', (['PATH_TO_CONCEPTNET', '"""english_IsA.pkl"""'], {}), "(PATH_TO_CONCEPTNET, 'english_IsA.pkl')\n", (2543, 2582), False, 'import os\n'), ((5502, 5563), 'IRNet.preprocess.utils.group_header', 'utils.group_header', (['question_toks', 'idx', 'num_toks', 'header_toks'], {}), '(question_toks, idx, num_toks, header_toks)\n', (5520, 5563), False, 'from IRNet.preprocess import utils\n'), ((7210, 7241), 'IRNet.src.rule.sem_utils.wordnet_lemmatizer.lemmatize', 'wordnet_lemmatizer.lemmatize', (['x'], {}), '(x)\n', (7238, 7241), False, 'from IRNet.src.rule.sem_utils import wordnet_lemmatizer\n')] |
#!/usr/bin/env python3
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import log_loss
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size")
parser.add_argument(
"--classes", default=10, type=int, help="Number of classes to use"
)
parser.add_argument(
"--hidden_layer", default=20, type=int, help="Hidden layer size"
)
parser.add_argument(
"--iterations", default=50, type=int, help="Number of iterations over the data"
)
parser.add_argument(
"--learning_rate", default=0.01, type=float, help="Learning rate"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
parser.add_argument("--test_size", default=797, type=int, help="Test set size")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Use the digits dataset
data, target = sklearn.datasets.load_digits(n_class=args.classes, return_X_y=True)
# Append a constant feature with value 1 to the end of every input data
data = np.pad(data, ((0, 0), (0, 1)), constant_values=1)
# Split the data randomly to train and test using `sklearn.model_selection.train_test_split`,
# with `test_size=args.test_size` and `random_state=args.seed`.
train_data, test_data, train_target, test_target = sklearn.model_selection.train_test_split(
data, target, stratify=target, test_size=args.test_size, random_state=args.seed
)
# Generate initial model weights
weights = [
np.random.uniform(
size=[train_data.shape[1], args.hidden_layer], low=-0.1, high=0.1
),
np.random.uniform(size=[args.hidden_layer, args.classes], low=-0.1, high=0.1),
]
relu = lambda x: np.maximum(x, 0)
def forward(inputs):
# TODO: Implement forward propagation, returning *both* the value of the hidden
# layer and the value of the output layer.
#
# We assume a neural network with a single hidden layer of size `args.hidden_layer`
# and ReLU activation, where ReLU(x) = max(x, 0), and an output layer with softmax
# activation.
#
# The value of the hidden layer is computed as ReLU(inputs times weights[0]).
# The value of the output layer is computed as softmax(hidden_layer times weights[1]).
#
# Note that you need to be careful when computing softmax, because the exponentiation
# in softmax can easily overflow. To avoid it, you can use the fact that
# softmax(z) = softmax(z + any_constant) and compute softmax(z) = softmax(z - maximum_of_z).
# That way we only exponentiate values which are non-positive, and overflow does not occur.
Z = inputs @ weights[0]
A = relu(Z)
output = A @ weights[1]
return softmax(output, axis=1), A, Z
for iteration in range(args.iterations):
permutation = np.random.permutation(train_data.shape[0])
permuted_x_train, permuted_y_train = (
train_data[permutation],
train_target[permutation],
)
batch_count = int(train_data.shape[0] / args.batch_size)
for batch_x, batch_y in zip(
np.split(permuted_x_train, batch_count),
np.split(permuted_y_train, batch_count),
):
probs, A, Z = forward(batch_x)
batch_y = np.eye(args.classes)[batch_y]
dZ2 = probs - batch_y
dW2 = 1 / args.batch_size * A.T @ dZ2
dZ1 = dZ2 @ weights[1].T * (Z >= 0).astype(np.int8)
dW1 = 1 / args.batch_size * batch_x.T @ dZ1
weights[1] -= args.learning_rate * dW2
weights[0] -= args.learning_rate * dW1
train_probs, _, _ = forward(train_data)
test_probs, _, _ = forward(test_data)
predictions_train = np.argmax(train_probs, axis=1)
predictions_test = np.argmax(test_probs, axis=1)
print(
"After iteration {}: train acc {:.1f}%, test acc {:.1f}%".format(
iteration + 1,
100
* sklearn.metrics.accuracy_score(
train_target, predictions_train
), # Training accuracy,
100
* sklearn.metrics.accuracy_score(
test_target, predictions_test
), # Test accuracy,
)
)
| [
"numpy.eye",
"argparse.ArgumentParser",
"numpy.argmax",
"scipy.special.softmax",
"numpy.pad",
"numpy.split",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.maximum",
"numpy.random.permutation"
] | [((292, 317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (315, 317), False, 'import argparse\n'), ((1051, 1076), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1065, 1076), True, 'import numpy as np\n'), ((1282, 1331), 'numpy.pad', 'np.pad', (['data', '((0, 0), (0, 1))'], {'constant_values': '(1)'}), '(data, ((0, 0), (0, 1)), constant_values=1)\n', (1288, 1331), True, 'import numpy as np\n'), ((1752, 1840), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[train_data.shape[1], args.hidden_layer]', 'low': '(-0.1)', 'high': '(0.1)'}), '(size=[train_data.shape[1], args.hidden_layer], low=-0.1,\n high=0.1)\n', (1769, 1840), True, 'import numpy as np\n'), ((1868, 1945), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[args.hidden_layer, args.classes]', 'low': '(-0.1)', 'high': '(0.1)'}), '(size=[args.hidden_layer, args.classes], low=-0.1, high=0.1)\n', (1885, 1945), True, 'import numpy as np\n'), ((1975, 1991), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (1985, 1991), True, 'import numpy as np\n'), ((3146, 3188), 'numpy.random.permutation', 'np.random.permutation', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (3167, 3188), True, 'import numpy as np\n'), ((4068, 4098), 'numpy.argmax', 'np.argmax', (['train_probs'], {'axis': '(1)'}), '(train_probs, axis=1)\n', (4077, 4098), True, 'import numpy as np\n'), ((4126, 4155), 'numpy.argmax', 'np.argmax', (['test_probs'], {'axis': '(1)'}), '(test_probs, axis=1)\n', (4135, 4155), True, 'import numpy as np\n'), ((3048, 3071), 'scipy.special.softmax', 'softmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (3055, 3071), False, 'from scipy.special import softmax\n'), ((3436, 3475), 'numpy.split', 'np.split', (['permuted_x_train', 'batch_count'], {}), '(permuted_x_train, batch_count)\n', (3444, 3475), True, 'import numpy as np\n'), ((3489, 3528), 'numpy.split', 'np.split', (['permuted_y_train', 'batch_count'], 
{}), '(permuted_y_train, batch_count)\n', (3497, 3528), True, 'import numpy as np\n'), ((3606, 3626), 'numpy.eye', 'np.eye', (['args.classes'], {}), '(args.classes)\n', (3612, 3626), True, 'import numpy as np\n')] |
import numpy as np
from tensorflow.keras import Input
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.models import load_model, Model
from attention import Attention
def main():
    """Train a tiny LSTM+Attention regressor on random data and verify that
    predictions are unchanged after an HDF5 save/load round trip."""
    # Random inputs and targets: the model has nothing meaningful to learn.
    n_samples, seq_len, n_features, n_outputs = 100, 10, 1, 1
    inputs = np.random.uniform(size=(n_samples, seq_len, n_features))
    targets = np.random.uniform(size=(n_samples, n_outputs))
    # Assemble the network: LSTM -> Attention -> Dense regression head.
    inp = Input(shape=(seq_len, n_features))
    hidden = LSTM(64, return_sequences=True)(inp)
    context = Attention(units=32)(hidden)
    out = Dense(1)(context)
    model = Model(inp, out)
    model.compile(loss='mae', optimizer='adam')
    model.summary()
    # Fit briefly on the dummy data.
    model.fit(inputs, targets, epochs=10)
    # Round-trip through HDF5 and confirm identical predictions.
    before = model.predict(inputs)
    model.save('test_model.h5')
    restored = load_model('test_model.h5', custom_objects={'Attention': Attention})
    after = restored.predict(inputs)
    np.testing.assert_almost_equal(before, after)
    print('Success.')
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numpy.testing.assert_almost_equal",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.Input",
"numpy.random.uniform",
"tensorflow.keras.models.Model",
"attention.Attention"
] | [((344, 404), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_samples, time_steps, input_dim)'}), '(size=(num_samples, time_steps, input_dim))\n', (361, 404), True, 'import numpy as np\n'), ((418, 467), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_samples, output_dim)'}), '(size=(num_samples, output_dim))\n', (435, 467), True, 'import numpy as np\n'), ((519, 555), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(time_steps, input_dim)'}), '(shape=(time_steps, input_dim))\n', (524, 555), False, 'from tensorflow.keras import Input\n'), ((672, 693), 'tensorflow.keras.models.Model', 'Model', (['model_input', 'x'], {}), '(model_input, x)\n', (677, 693), False, 'from tensorflow.keras.models import load_model, Model\n'), ((929, 997), 'tensorflow.keras.models.load_model', 'load_model', (['"""test_model.h5"""'], {'custom_objects': "{'Attention': Attention}"}), "('test_model.h5', custom_objects={'Attention': Attention})\n", (939, 997), False, 'from tensorflow.keras.models import load_model, Model\n'), ((1039, 1083), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred1', 'pred2'], {}), '(pred1, pred2)\n', (1069, 1083), True, 'import numpy as np\n'), ((564, 595), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (568, 595), False, 'from tensorflow.keras.layers import Dense, LSTM\n'), ((617, 636), 'attention.Attention', 'Attention', ([], {'units': '(32)'}), '(units=32)\n', (626, 636), False, 'from attention import Attention\n'), ((648, 656), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (653, 656), False, 'from tensorflow.keras.layers import Dense, LSTM\n')] |
#!/usr/bin/env python3
import numpy as np
from Core import RISM_Obj
from dataclasses import dataclass, field
import Util
from scipy.special import spherical_jn
@dataclass
class DRISM(object):
    """Dielectrically consistent RISM (DRISM) solver.

    Solves the solvent-solvent (vv) and optional solute-solvent (uv)
    RISM equations with the dielectric bridge correction ``chi``
    computed in :meth:`D_matrix`.

    Attributes
    ----------
    data_vv : RISM_Obj
        Solvent-solvent problem data (grid, correlation arrays, species).
    diel : float
        Target dielectric constant used to derive ``h_c0``.
    adbcor : float
        Smoothing parameter of the Gaussian envelope applied to ``chi``.
    data_uv : RISM_Obj, optional
        Solute-solvent problem data; required by :meth:`compute_uv`.
    chi, h_c0, y
        Derived quantities, filled in by ``__post_init__``.
    """

    data_vv: RISM_Obj
    diel: float
    adbcor: float
    data_uv: RISM_Obj = None
    chi: np.ndarray = field(init=False)
    h_c0: float = field(init=False)
    y: float = field(init=False)

    def compute_vv(self):
        """One solvent-solvent RISM iteration: c(r) -> h(k) -> t(r)."""
        I = np.eye(self.data_vv.ns1, M=self.data_vv.ns2, dtype=np.float64)
        ck = np.zeros((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
        w_bar = np.zeros((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
        # Forward transform the short-range direct correlation and subtract
        # the long-range Coulomb part in k-space.
        for i, j in np.ndindex(self.data_vv.ns1, self.data_vv.ns2):
            ck[:, i, j] = self.data_vv.grid.dht(self.data_vv.c[:, i, j])
            ck[:, i, j] -= self.data_vv.B * self.data_vv.uk_lr[:, i, j]
        for i in range(self.data_vv.grid.npts):
            chi = self.chi
            # Intramolecular correlation augmented with the DRISM bridge term.
            w_bar[i] = (self.data_vv.w[i] + self.data_vv.p @ chi[i])
            iwcp = np.linalg.inv(I - w_bar[i] @ ck[i] @ self.data_vv.p)
            wcw = (w_bar[i] @ ck[i] @ w_bar[i])
            self.data_vv.h[i] = (iwcp @ wcw) + (chi[i])
        # Back-transform h - c and subtract the long-range real-space tail
        # to obtain the indirect correlation t(r).
        for i, j in np.ndindex(self.data_vv.ns1, self.data_vv.ns2):
            self.data_vv.t[:, i, j] = self.data_vv.grid.idht(self.data_vv.h[:, i, j] - ck[:, i, j]) - (
                    self.data_vv.B * self.data_vv.ur_lr[:, i, j])

    def compute_uv(self):
        """One solute-solvent RISM iteration.

        Raises
        ------
        RuntimeError
            If no ``data_uv`` object was supplied at construction.
        """
        if self.data_uv is not None:
            I = np.eye(self.data_uv.ns1, M=self.data_uv.ns2)
            ck_uv = np.zeros((self.data_uv.npts, self.data_uv.ns1, self.data_uv.ns2), dtype=np.float64)
            for i, j in np.ndindex(self.data_uv.ns1, self.data_uv.ns2):
                ck_uv[:, i, j] = self.data_uv.grid.dht(self.data_uv.c[:, i, j])
                ck_uv[:, i, j] -= self.data_uv.B * self.data_uv.uk_lr[:, i, j]
            for i in range(self.data_uv.grid.npts):
                self.data_uv.h[i] = (self.data_uv.w[i] @ ck_uv[i]) @ (self.data_vv.w[i] + self.data_vv.p @ self.data_vv.h[i])
            for i, j in np.ndindex(self.data_uv.ns1, self.data_uv.ns2):
                self.data_uv.t[:, i, j] = self.data_uv.grid.idht(self.data_uv.h[:, i, j] - ck_uv[:, i, j]) - (
                        self.data_uv.B * self.data_uv.ur_lr[:, i, j])
        else:
            raise RuntimeError("uv dataclass not defined")

    def calculate_DRISM_params(self):
        """Derive ``y`` and ``h_c0`` from the dipole moment and densities."""
        total_density = 0
        Util.align_dipole(self.data_vv)
        dm, _ = Util.dipole_moment(self.data_vv)
        for isp in self.data_vv.species:
            total_density += isp.dens
        dmdensity = total_density * dm * dm
        ptxv = self.data_vv.species[0].dens / total_density
        self.y = 4.0 * np.pi * dmdensity / 9.0
        self.h_c0 = (((self.diel - 1.0) / self.y) - 3.0) / (total_density * ptxv)

    def D_matrix(self):
        """Build the dielectric bridge correction matrix ``chi``."""
        # BUG FIX: dtype=np.float (a deprecated alias) was removed in
        # NumPy 1.24 and raises AttributeError; use np.float64.
        d0x = np.zeros((self.data_vv.ns1), dtype=np.float64)
        d0y = np.zeros((self.data_vv.ns1), dtype=np.float64)
        d1z = np.zeros((self.data_vv.ns1), dtype=np.float64)
        for ki, k in enumerate(self.data_vv.grid.ki):
            # Gaussian envelope damps the correction at large k.
            hck = self.h_c0 * np.exp(-np.power((self.adbcor * k / 2.0), 2))
            i = -1
            for isp in self.data_vv.species:
                for iat in isp.atom_sites:
                    i += 1
                    k_coord = k*iat.coords
                    # Guard the x->0 limits of the Bessel functions explicitly.
                    if k_coord[0] == 0.0:
                        d0x[i] = 1.0
                    else:
                        d0x[i] = Util.j0(k_coord[0])
                    if k_coord[1] == 0.0:
                        d0y[i] = 1.0
                    else:
                        d0y[i] = Util.j0(k_coord[1])
                    if k_coord[2] == 0.0:
                        d1z[i] = 0.0
                    else:
                        d1z[i] = Util.j1(k_coord[2])
            for i, j in np.ndindex((self.data_vv.ns1, self.data_vv.ns2)):
                self.chi[ki, i, j] = d0x[i] * d0y[i] * d1z[i] * hck * d0x[j] * d0y[j] * d1z[j]

    def __post_init__(self):
        self.calculate_DRISM_params()
        # BUG FIX: np.float removed in NumPy 1.24 -> np.float64.
        self.chi = np.zeros((self.data_vv.grid.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
        self.D_matrix()

    # NOTE(review): the two stubs below take no `self`; they will raise
    # TypeError if ever called on an instance. Kept as-is for interface
    # compatibility — confirm whether they are dead code.
    def vv_impl():
        pass

    def uv_impl():
        pass
| [
"numpy.eye",
"Util.align_dipole",
"numpy.power",
"numpy.ndindex",
"numpy.zeros",
"numpy.linalg.inv",
"Util.j1",
"Util.dipole_moment",
"Util.j0",
"dataclasses.field"
] | [((302, 319), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (307, 319), False, 'from dataclasses import dataclass, field\n'), ((338, 355), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (343, 355), False, 'from dataclasses import dataclass, field\n'), ((371, 388), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (376, 388), False, 'from dataclasses import dataclass, field\n'), ((428, 490), 'numpy.eye', 'np.eye', (['self.data_vv.ns1'], {'M': 'self.data_vv.ns2', 'dtype': 'np.float64'}), '(self.data_vv.ns1, M=self.data_vv.ns2, dtype=np.float64)\n', (434, 490), True, 'import numpy as np\n'), ((504, 592), 'numpy.zeros', 'np.zeros', (['(self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2)'], {'dtype': 'np.float64'}), '((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.\n float64)\n', (512, 592), True, 'import numpy as np\n'), ((604, 692), 'numpy.zeros', 'np.zeros', (['(self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2)'], {'dtype': 'np.float64'}), '((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.\n float64)\n', (612, 692), True, 'import numpy as np\n'), ((805, 851), 'numpy.ndindex', 'np.ndindex', (['self.data_vv.ns1', 'self.data_vv.ns2'], {}), '(self.data_vv.ns1, self.data_vv.ns2)\n', (815, 851), True, 'import numpy as np\n'), ((1338, 1384), 'numpy.ndindex', 'np.ndindex', (['self.data_vv.ns1', 'self.data_vv.ns2'], {}), '(self.data_vv.ns1, self.data_vv.ns2)\n', (1348, 1384), True, 'import numpy as np\n'), ((2616, 2647), 'Util.align_dipole', 'Util.align_dipole', (['self.data_vv'], {}), '(self.data_vv)\n', (2633, 2647), False, 'import Util\n'), ((2664, 2696), 'Util.dipole_moment', 'Util.dipole_moment', (['self.data_vv'], {}), '(self.data_vv)\n', (2682, 2696), False, 'import Util\n'), ((3049, 3091), 'numpy.zeros', 'np.zeros', (['self.data_vv.ns1'], {'dtype': 'np.float'}), '(self.data_vv.ns1, dtype=np.float)\n', (3057, 3091), True, 'import numpy as 
np\n'), ((3108, 3150), 'numpy.zeros', 'np.zeros', (['self.data_vv.ns1'], {'dtype': 'np.float'}), '(self.data_vv.ns1, dtype=np.float)\n', (3116, 3150), True, 'import numpy as np\n'), ((3167, 3209), 'numpy.zeros', 'np.zeros', (['self.data_vv.ns1'], {'dtype': 'np.float'}), '(self.data_vv.ns1, dtype=np.float)\n', (3175, 3209), True, 'import numpy as np\n'), ((4249, 4339), 'numpy.zeros', 'np.zeros', (['(self.data_vv.grid.npts, self.data_vv.ns1, self.data_vv.ns2)'], {'dtype': 'np.float'}), '((self.data_vv.grid.npts, self.data_vv.ns1, self.data_vv.ns2),\n dtype=np.float)\n', (4257, 4339), True, 'import numpy as np\n'), ((1161, 1213), 'numpy.linalg.inv', 'np.linalg.inv', (['(I - w_bar[i] @ ck[i] @ self.data_vv.p)'], {}), '(I - w_bar[i] @ ck[i] @ self.data_vv.p)\n', (1174, 1213), True, 'import numpy as np\n'), ((1663, 1707), 'numpy.eye', 'np.eye', (['self.data_uv.ns1'], {'M': 'self.data_uv.ns2'}), '(self.data_uv.ns1, M=self.data_uv.ns2)\n', (1669, 1707), True, 'import numpy as np\n'), ((1728, 1816), 'numpy.zeros', 'np.zeros', (['(self.data_uv.npts, self.data_uv.ns1, self.data_uv.ns2)'], {'dtype': 'np.float64'}), '((self.data_uv.npts, self.data_uv.ns1, self.data_uv.ns2), dtype=np.\n float64)\n', (1736, 1816), True, 'import numpy as np\n'), ((1836, 1882), 'numpy.ndindex', 'np.ndindex', (['self.data_uv.ns1', 'self.data_uv.ns2'], {}), '(self.data_uv.ns1, self.data_uv.ns2)\n', (1846, 1882), True, 'import numpy as np\n'), ((2245, 2291), 'numpy.ndindex', 'np.ndindex', (['self.data_uv.ns1', 'self.data_uv.ns2'], {}), '(self.data_uv.ns1, self.data_uv.ns2)\n', (2255, 2291), True, 'import numpy as np\n'), ((4017, 4065), 'numpy.ndindex', 'np.ndindex', (['(self.data_vv.ns1, self.data_vv.ns2)'], {}), '((self.data_vv.ns1, self.data_vv.ns2))\n', (4027, 4065), True, 'import numpy as np\n'), ((3304, 3338), 'numpy.power', 'np.power', (['(self.adbcor * k / 2.0)', '(2)'], {}), '(self.adbcor * k / 2.0, 2)\n', (3312, 3338), True, 'import numpy as np\n'), ((3657, 3676), 'Util.j0', 'Util.j0', 
(['k_coord[0]'], {}), '(k_coord[0])\n', (3664, 3676), False, 'import Util\n'), ((3815, 3834), 'Util.j0', 'Util.j0', (['k_coord[1]'], {}), '(k_coord[1])\n', (3822, 3834), False, 'import Util\n'), ((3973, 3992), 'Util.j1', 'Util.j1', (['k_coord[2]'], {}), '(k_coord[2])\n', (3980, 3992), False, 'import Util\n')] |
import math
import numpy as np
import torch
from sklearn.metrics import average_precision_score, roc_auc_score, f1_score
def recall(node, rank, top_k):
    """Fraction of queries whose true node appears in the top-k ranking.

    `node[i]` is the ground-truth target for query i and `rank[i]` is
    that query's ranked candidate list.
    """
    top = rank[:, :top_k]
    hits = np.array([node[i] in row for i, row in enumerate(top)])
    return hits.sum() / hits.size
def MRR(node, rank):
    """Mean reciprocal rank of the true targets.

    `rank` is assumed to be a torch tensor (``.cpu()`` is called on it);
    `node[i]` is the ground-truth target of query i — TODO confirm with
    callers. ``np.where`` finds the 0-based position(s) of the target in
    each ranked row; the reciprocal of (position + 1) is averaged.
    NOTE(review): this presumably expects exactly one match per row —
    rows with zero or multiple matches change the array shape; verify.
    """
    rank = rank.cpu()
    mrr = np.array([(np.where(a == node[i])) for i, a in enumerate(rank)])
    mrr = (1 / (mrr + 1)).mean()
    return mrr
def get_target(src_embedding, dest_embedding, src_batch):
    """Score every destination embedding against the selected sources.

    Returns the similarity scores sorted in descending order together
    with the corresponding destination indices.
    """
    scores = torch.matmul(src_embedding[src_batch], dest_embedding.T)
    sorted_scores, order = torch.sort(scores, descending=True)
    return sorted_scores, order
def eval_edge_prediction(model, neg_edge_sampler, data, n_neighbors, batch_size=200, use_recall=False):
    """Evaluate temporal link prediction in mini-batches.

    For each batch, positive edges from `data` and negative edges drawn
    from `neg_edge_sampler` are scored by the model; AP / AUC / F1 are
    accumulated per batch and averaged at the end. When `use_recall` is
    true, recall@20/50 and MRR are also computed from the model memory;
    otherwise those slots are filled with zeros.

    Returns
    -------
    tuple of 8 floats:
        (AP, macro AUC, micro AUC, macro F1, micro F1, MRR,
         recall@20, recall@50), each averaged over batches.
    """
    # A fixed seed makes the negative samples (and thus the metrics)
    # reproducible across evaluation runs.
    assert neg_edge_sampler.seed is not None
    neg_edge_sampler.reset_random_state()
    val_ap, val_macro_auc, val_micro_auc, val_macro_f1, val_micro_f1 = [], [], [], [], []
    val_mrr, val_recall_20, val_recall_50 = [], [], []
    with torch.no_grad():
        model = model.eval()
        TEST_BATCH_SIZE = batch_size
        num_test_instance = len(data.sources)
        num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE)
        for k in range(num_test_batch):
            start_idx = k * TEST_BATCH_SIZE
            end_idx = min(num_test_instance, start_idx + TEST_BATCH_SIZE)
            size = end_idx - start_idx
            src_batch = data.sources[start_idx:end_idx]
            dest_batch = data.destinations[start_idx:end_idx]
            edge_idx_batch = data.edge_idxs[start_idx:end_idx]
            timestamp_batch = data.timestamps[start_idx:end_idx]
            # One sampled negative destination per positive edge.
            _, neg_batch = neg_edge_sampler.sample(size)
            pos_prob, neg_prob = model.compute_edge_probabilities(source_nodes=src_batch,
                                                                  destination_nodes=dest_batch,
                                                                  negative_nodes=neg_batch,
                                                                  edge_times=timestamp_batch,
                                                                  edge_idxs=edge_idx_batch,
                                                                  n_neighbors=n_neighbors,
                                                                  is_test=False)
            # src_embedding = src_embedding.detach()
            # dest_embedding = dest_embedding.detach()
            # Both ranking directions use the model's memory as embeddings.
            src_embedding = dest_embedding = model.memory.memory
            # Positives labelled 1, sampled negatives labelled 0.
            pred_label = np.concatenate([(pos_prob).cpu().numpy(), (neg_prob).cpu().numpy()])
            true_label = np.concatenate([np.ones(size), np.zeros(size)])
            val_ap.append(average_precision_score(true_label, pred_label))
            val_macro_auc.append(roc_auc_score(true_label, pred_label, average='macro'))
            val_micro_auc.append(roc_auc_score(true_label, pred_label, average='micro'))
            # F1 uses a fixed 0.5 decision threshold.
            val_macro_f1.append(f1_score(true_label, np.array(pred_label >= 0.5, dtype=int), average='macro'))
            val_micro_f1.append(f1_score(true_label, np.array(pred_label >= 0.5, dtype=int), average='micro'))
            if use_recall:
                # Rank in both directions (src->dest and dest->src) and average.
                cos_similarity, dest_rank = get_target(src_embedding, dest_embedding, src_batch)
                cos_similarity, src_rank = get_target(dest_embedding, src_embedding, dest_batch)
                recall_20 = (recall(dest_batch, dest_rank, 20) + recall(src_batch, src_rank, 20)) / 2
                recall_50 = (recall(dest_batch, dest_rank, 50) + recall(src_batch, src_rank, 50)) / 2
                mrr = (MRR(dest_batch, dest_rank) + MRR(src_batch, src_rank)) / 2
                val_mrr.append(mrr)
                val_recall_20.append(recall_20)
                val_recall_50.append(recall_50)
            else:
                val_mrr.append(0)
                val_recall_20.append(0)
                val_recall_50.append(0)
    return np.mean(val_ap), np.mean(val_macro_auc), np.mean(val_micro_auc), np.mean(val_macro_f1), np.mean(
        val_micro_f1), np.mean(val_mrr), np.mean(val_recall_20), np.mean(val_recall_50)
def eval_node_classification(tgn, decoder, data, edge_idxs, batch_size, n_neighbors):
    """Evaluate binary node classification on top of TGN embeddings.

    Source-node temporal embeddings are computed batch by batch, passed
    through `decoder`, and the sigmoid outputs are scored against
    `data.labels` with ROC-AUC.

    Returns
    -------
    float
        ROC-AUC over all instances in `data`.
    """
    pred_prob = np.zeros(len(data.sources))
    num_instance = len(data.sources)
    num_batch = math.ceil(num_instance / batch_size)
    with torch.no_grad():
        decoder.eval()
        tgn.eval()
        for k in range(num_batch):
            s_idx = k * batch_size
            e_idx = min(num_instance, s_idx + batch_size)
            sources_batch = data.sources[s_idx: e_idx]
            destinations_batch = data.destinations[s_idx: e_idx]
            timestamps_batch = data.timestamps[s_idx:e_idx]
            edge_idxs_batch = edge_idxs[s_idx: e_idx]
            # NOTE: destinations are passed twice (also in the "negatives"
            # slot); only the source embeddings are used below.
            source_embedding, destination_embedding, _ = tgn.compute_temporal_embeddings(sources_batch,
                                                                                         destinations_batch,
                                                                                         destinations_batch,
                                                                                         timestamps_batch,
                                                                                         edge_idxs_batch,
                                                                                         n_neighbors)
            pred_prob_batch = decoder(source_embedding).sigmoid()
            pred_prob[s_idx: e_idx] = pred_prob_batch.cpu().numpy()
    auc_roc = roc_auc_score(data.labels, pred_prob)
    return auc_roc
def eval_edge_classification(model, decoder, data, n_neighbors, batch_size=200):
    """Evaluate multi-class edge classification in mini-batches.

    The concatenated (source, destination) temporal embeddings are fed
    to `decoder`; macro AUC (one-vs-one) and macro/micro F1 are computed
    per batch against `data.labels` and averaged.

    Returns
    -------
    tuple of 3 floats:
        (macro AUC, macro F1, micro F1), each averaged over batches.
    """
    val_macro_auc, val_macro_f1, val_micro_f1 = [], [], []
    with torch.no_grad():
        model = model.eval()
        decoder = decoder.eval()
        TEST_BATCH_SIZE = batch_size
        num_test_instance = len(data.sources)
        num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE)
        for k in range(num_test_batch):
            start_idx = k * TEST_BATCH_SIZE
            end_idx = min(num_test_instance, start_idx + TEST_BATCH_SIZE)
            size = end_idx - start_idx  # NOTE: computed but unused below
            src_batch = data.sources[start_idx:end_idx]
            dest_batch = data.destinations[start_idx:end_idx]
            edge_idx_batch = data.edge_idxs[start_idx:end_idx]
            timestamp_batch = data.timestamps[start_idx:end_idx]
            label_batch = data.labels[start_idx:end_idx]
            # Destination batch is also passed in the "negatives" slot.
            source_embedding, destination_embedding, _ = model.compute_temporal_embeddings(src_batch,
                                                                                           dest_batch,
                                                                                           dest_batch,
                                                                                           timestamp_batch,
                                                                                           edge_idx_batch,
                                                                                           n_neighbors)
            true_label = label_batch
            # Decoder scores the concatenated pair embedding per edge.
            pred_label = decoder(torch.cat([source_embedding, destination_embedding], dim=1))
            pred_label = pred_label.cpu().numpy()
            val_macro_auc.append(roc_auc_score(true_label, pred_label, average='macro', multi_class='ovo'))
            val_macro_f1.append(f1_score(true_label, pred_label.argmax(1), average='macro'))
            val_micro_f1.append(f1_score(true_label, pred_label.argmax(1), average='micro'))
    return np.mean(val_macro_auc), np.mean(val_macro_f1), np.mean(val_micro_f1)
| [
"torch.sort",
"numpy.mean",
"math.ceil",
"numpy.ones",
"numpy.where",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"numpy.zeros",
"torch.matmul",
"torch.no_grad",
"torch.cat"
] | [((555, 611), 'torch.matmul', 'torch.matmul', (['src_embedding[src_batch]', 'dest_embedding.T'], {}), '(src_embedding[src_batch], dest_embedding.T)\n', (567, 611), False, 'import torch\n'), ((638, 681), 'torch.sort', 'torch.sort', (['cos_similarity'], {'descending': '(True)'}), '(cos_similarity, descending=True)\n', (648, 681), False, 'import torch\n'), ((4374, 4410), 'math.ceil', 'math.ceil', (['(num_instance / batch_size)'], {}), '(num_instance / batch_size)\n', (4383, 4410), False, 'import math\n'), ((5630, 5667), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['data.labels', 'pred_prob'], {}), '(data.labels, pred_prob)\n', (5643, 5667), False, 'from sklearn.metrics import average_precision_score, roc_auc_score, f1_score\n'), ((1062, 1077), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1075, 1077), False, 'import torch\n'), ((1216, 1262), 'math.ceil', 'math.ceil', (['(num_test_instance / TEST_BATCH_SIZE)'], {}), '(num_test_instance / TEST_BATCH_SIZE)\n', (1225, 1262), False, 'import math\n'), ((4004, 4019), 'numpy.mean', 'np.mean', (['val_ap'], {}), '(val_ap)\n', (4011, 4019), True, 'import numpy as np\n'), ((4021, 4043), 'numpy.mean', 'np.mean', (['val_macro_auc'], {}), '(val_macro_auc)\n', (4028, 4043), True, 'import numpy as np\n'), ((4045, 4067), 'numpy.mean', 'np.mean', (['val_micro_auc'], {}), '(val_micro_auc)\n', (4052, 4067), True, 'import numpy as np\n'), ((4069, 4090), 'numpy.mean', 'np.mean', (['val_macro_f1'], {}), '(val_macro_f1)\n', (4076, 4090), True, 'import numpy as np\n'), ((4092, 4113), 'numpy.mean', 'np.mean', (['val_micro_f1'], {}), '(val_micro_f1)\n', (4099, 4113), True, 'import numpy as np\n'), ((4124, 4140), 'numpy.mean', 'np.mean', (['val_mrr'], {}), '(val_mrr)\n', (4131, 4140), True, 'import numpy as np\n'), ((4142, 4164), 'numpy.mean', 'np.mean', (['val_recall_20'], {}), '(val_recall_20)\n', (4149, 4164), True, 'import numpy as np\n'), ((4166, 4188), 'numpy.mean', 'np.mean', (['val_recall_50'], {}), '(val_recall_50)\n', 
(4173, 4188), True, 'import numpy as np\n'), ((4421, 4436), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4434, 4436), False, 'import torch\n'), ((5839, 5854), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5852, 5854), False, 'import torch\n'), ((6026, 6072), 'math.ceil', 'math.ceil', (['(num_test_instance / TEST_BATCH_SIZE)'], {}), '(num_test_instance / TEST_BATCH_SIZE)\n', (6035, 6072), False, 'import math\n'), ((7682, 7704), 'numpy.mean', 'np.mean', (['val_macro_auc'], {}), '(val_macro_auc)\n', (7689, 7704), True, 'import numpy as np\n'), ((7706, 7727), 'numpy.mean', 'np.mean', (['val_macro_f1'], {}), '(val_macro_f1)\n', (7713, 7727), True, 'import numpy as np\n'), ((7729, 7750), 'numpy.mean', 'np.mean', (['val_micro_f1'], {}), '(val_micro_f1)\n', (7736, 7750), True, 'import numpy as np\n'), ((372, 394), 'numpy.where', 'np.where', (['(a == node[i])'], {}), '(a == node[i])\n', (380, 394), True, 'import numpy as np\n'), ((2770, 2817), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['true_label', 'pred_label'], {}), '(true_label, pred_label)\n', (2793, 2817), False, 'from sklearn.metrics import average_precision_score, roc_auc_score, f1_score\n'), ((2852, 2906), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_label', 'pred_label'], {'average': '"""macro"""'}), "(true_label, pred_label, average='macro')\n", (2865, 2906), False, 'from sklearn.metrics import average_precision_score, roc_auc_score, f1_score\n'), ((2941, 2995), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_label', 'pred_label'], {'average': '"""micro"""'}), "(true_label, pred_label, average='micro')\n", (2954, 2995), False, 'from sklearn.metrics import average_precision_score, roc_auc_score, f1_score\n'), ((7264, 7323), 'torch.cat', 'torch.cat', (['[source_embedding, destination_embedding]'], {'dim': '(1)'}), '([source_embedding, destination_embedding], dim=1)\n', (7273, 7323), False, 'import torch\n'), ((7409, 7482), 
'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_label', 'pred_label'], {'average': '"""macro"""', 'multi_class': '"""ovo"""'}), "(true_label, pred_label, average='macro', multi_class='ovo')\n", (7422, 7482), False, 'from sklearn.metrics import average_precision_score, roc_auc_score, f1_score\n'), ((2711, 2724), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2718, 2724), True, 'import numpy as np\n'), ((2726, 2740), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (2734, 2740), True, 'import numpy as np\n'), ((3050, 3088), 'numpy.array', 'np.array', (['(pred_label >= 0.5)'], {'dtype': 'int'}), '(pred_label >= 0.5, dtype=int)\n', (3058, 3088), True, 'import numpy as np\n'), ((3161, 3199), 'numpy.array', 'np.array', (['(pred_label >= 0.5)'], {'dtype': 'int'}), '(pred_label >= 0.5, dtype=int)\n', (3169, 3199), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Functions to calculate rankings.
@author: Scott
"""
# %% Setup
import pandas as pd
import numpy as np
import json
# %% Functions
def find_teams(results, debug=False):
    """
    Create a list of all unique teams in the results database.

    Parameters
    ----------
    results : pandas.DataFrame
        Game results with 'Home' and 'Away' columns.
    debug : bool, optional
        Print progress information. The default is False.

    Returns
    -------
    allTeams : numpy.ndarray
        Sorted array of unique team names across both columns.
    """
    if debug:
        print('find_teams in Debug mode.')
    allTeams = pd.Series(results['Home']).unique()
    if debug:
        # BUG FIX: 'str' + int raised TypeError whenever debug was on;
        # wrap the count in str() (three occurrences in this function).
        print('Home teams: ' + str(len(allTeams)))
    allTeams = np.append(allTeams, pd.Series(results['Away']).unique())
    if debug:
        print('Home and Away teams: ' + str(len(allTeams)))
    allTeams = np.unique(allTeams)
    if debug:
        print('Unique teams: ' + str(len(allTeams)))
    print(str(len(allTeams)) + ' unique teams.')
    return allTeams
def rankings_init(allTeams, ratingCoeff, rankingTypes, debug=False):
    """
    Initialize rankings for all teams.

    Parameters
    ----------
    allTeams : numpy list
        List of unique teams in database.
    ratingCoeff : dict
        Dict of parameters for each possible ranking system.
    rankingTypes : list
        List of which ranking systems to initialize for.

    Returns
    -------
    rankingDict : dict
        Each team and their current ranking for each ranking system.
    """
    if debug:
        print('rankings_init in Debug mode.')
    rankingDict = {}
    for idx, team in np.ndenumerate(allTeams):
        if debug == 'verbose':
            print('x: ' + str(idx))
            print('value: ' + str(team))
        # Every team starts at the system's average rating with fresh counters.
        entry = {'gameCount': 0, 'yearCount': 1}
        for method in rankingTypes:
            entry[method] = ratingCoeff[method]['avgRating']
        rankingDict[team] = entry
    if debug:
        print('rankingDict shape: ' + str(len(rankingDict)))
        if debug == 'verbose':
            print(rankingDict)
    return rankingDict
def season_start(results, rankingDict, ratingCoeff, rankingTypes, season,
                 allTeams, debug=False):
    """
    Update every team's ratings at the start of a season.

    Teams that play in `season` have each rating regressed toward the
    system's average by the 'regress' fraction and their year counter
    incremented; teams that do not play are reset to 'initRating'.

    Parameters
    ----------
    results : pandas.DataFrame
        Game results with a 'Season' column.
    rankingDict : dict
        Current per-team rating state; mutated in place.
    ratingCoeff : dict
        Per-method coefficients ('regress', 'avgRating', 'initRating').
    rankingTypes : list
        Ranking systems to update.
    season : int
        Season being initialized.
    allTeams : iterable
        Every team known to the database.

    Returns
    -------
    rankingDict : dict
        The (mutated) rating state.
    """
    if debug:
        print('season_start in debug mode.')
    seasonGames = results[results.Season == season]
    seasonTeams = pd.concat(
        [seasonGames['Home'], seasonGames['Away']]).unique()
    if debug == 'verbose':
        print(seasonGames)
        print(seasonTeams)
    for team in allTeams:
        plays = team in seasonTeams
        if plays:
            rankingDict[team]['yearCount'] += 1
        for method in rankingTypes:
            avgRating = ratingCoeff[method]['avgRating']
            if plays:
                # Active team: pull the rating toward the league average.
                regress = ratingCoeff[method]['regress']
                old = rankingDict[team][method]
                new = round(old - regress * (old - avgRating), 2)
                rankingDict[team][method] = new
                if debug:
                    print(team + ' played in ' + str(season) +
                          '. Regressed from ' + str(old) +
                          ' to ' + str(new))
            else:
                # Inactive team: revert to the initial rating.
                rankingDict[team][method] = ratingCoeff[method]['initRating']
                if debug:
                    print(team + " reverted to " +
                          str(ratingCoeff[method]['initRating']))
    return rankingDict
# %% Ranking Formulas
def elo_simple(homeElo, awayElo, goalDiff, k, debug=False):
    """
    Elo ranking system based only on wins.

    No homefield advantage, goal differential, or season regression.

    Parameters
    ----------
    homeElo : float
        Elo rating of the home team.
    awayElo : float
        Elo rating of the away team.
    goalDiff : int
        Goal difference of the game (Home - Away).
    k : int
        Elo k-factor.

    Returns
    -------
    homeElo_adj : float
        Home team's new Elo rating.
    awayElo_adj : float
        Away team's new Elo rating.
    predictError : float
        Prediction error as a Brier score for the event.
    """
    if debug:
        print('elo_simple in debug mode')
    # Match outcome from the home team's perspective (draw = 0.5).
    if goalDiff > 0:
        result = 1
    elif goalDiff < 0:
        result = 0
    else:
        result = 0.5
    # Standard Elo expected score.
    qHome = 10 ** (homeElo / 400)
    qAway = 10 ** (awayElo / 400)
    expHome = qHome / (qHome + qAway)
    expAway = qAway / (qHome + qAway)
    # Rating shift is symmetric: home gains what away loses.
    deltaElo = round(k * (result - expHome), 2)
    predictError = (result - expHome) ** 2
    homeElo_adj = round(homeElo + deltaElo, 2)
    awayElo_adj = round(awayElo - deltaElo, 2)
    if debug:
        print('Qa: ', qHome,
              ' Qb: ', qAway,
              ' Ea: ', expHome,
              ' Eb: ', expAway,
              ' homeElo_adj: ', homeElo_adj,
              ' awayElo_adj: ', awayElo_adj)
    return (homeElo_adj, awayElo_adj, predictError)
def rating_elo(homeElo, awayElo, goalDiff, ratingCoeffMethod, debug=False):
    """
    Elo ranking system including homefield and home-ice advantage.

    Parameters
    ----------
    homeElo : float
        Elo rating of the home team.
    awayElo : float
        Elo rating of the away team.
    goalDiff : int
        Goal difference of the game (Home - Away).
    ratingCoeffMethod : dict
        Coefficients: 'kRating', 'hfAdvantage', 'hiAdvantage'.

    Returns
    -------
    homeElo_adj : float
        Home team's new Elo rating.
    awayElo_adj : float
        Away team's new Elo rating.
    predictError : float
        Prediction error as a Brier score for the event.
    """
    if debug:
        print('rating_elo in debug mode.')
        print(ratingCoeffMethod)
    k = ratingCoeffMethod['kRating']
    homeField = ratingCoeffMethod['hfAdvantage']  # Home Team
    homeIce = ratingCoeffMethod['hiAdvantage']  # Home Ice
    # Match outcome from the home team's perspective (draw = 0.5).
    if goalDiff > 0:
        result = 1
    elif goalDiff < 0:
        result = 0
    else:
        result = 0.5
    if debug:
        print("home Elo: " + type(homeElo).__name__)
        print("hf Adv: " + type(homeField).__name__)
        print("hi Adv: " + type(homeIce).__name__)
    # Expected score; both advantages shift the home side's effective rating.
    qHome = 10 ** ((homeElo + homeField + homeIce) / 400)
    qAway = 10 ** (awayElo / 400)
    expHome = qHome / (qHome + qAway)
    expAway = qAway / (qHome + qAway)
    deltaElo = round(k * (result - expHome), 2)
    predictError = (result - expHome) ** 2
    homeElo_adj = round(homeElo + deltaElo, 2)
    awayElo_adj = round(awayElo - deltaElo, 2)
    if debug:
        print('Qa: ', qHome,
              ' Qb: ', qAway,
              ' Ea: ', expHome,
              ' Eb: ', expAway,
              ' homeElo_adj: ', homeElo_adj,
              ' awayElo_adj: ', awayElo_adj)
    return (homeElo_adj, awayElo_adj, predictError)
def game_ranking(results, ratingCoeff, rankingType,
                 debug=False, saveResults=True):
    """
    Calculate rankings for each match.

    Parameters
    ----------
    results : pandas.DataFrame
        Game results with 'Season', 'Home', 'Away', 'Home_Score' and
        'Away_Score' columns. Mutated in place: three new columns are
        added per ranking method ('_Away', '_Home', '_Error').
    ratingCoeff : dict
        Coefficients for each ranking method.
    rankingType : list
        Names of the ranking methods to run (each must contain 'Elo').
    debug : bool or str, optional
        Truthy for debug prints; 'verbose' adds per-game prints.
    saveResults : bool, optional
        Write Results_Rankings.csv and Ranking_Dict.txt. Default True.

    Returns
    -------
    results : pandas.DataFrame
        The results table with per-game ratings and prediction error.
    rankingDict : dict
        Final rating state for every team.

    Raises
    ------
    ValueError
        If a ranking method name is not recognized.
    """
    if debug == 'verbose':
        debugVerbose = True
        debug = True
    else:
        debugVerbose = False
    if debug:
        print('game_ranking in debug mode.')
        sep = ", "
        print("Ranking systems to run: " + sep.join(rankingType))
    # Get list of all teams in results
    allTeams = find_teams(results)
    # Create columns for ranking results for each ranking system
    for rankType in rankingType:
        results[rankType + '_Away'] = np.nan
        results[rankType + '_Home'] = np.nan
        results[rankType + '_Error'] = np.nan
    # Evaluate each game for given ranking methods
    print('Start scoring each game')
    for index, row in enumerate(results.itertuples(index=True)):
        season = row.Season
        if debugVerbose:
            print('Index: ' + str(index) + ' Season: ' + str(season))
            print(row)
        # Initialize first season
        if index == 0:
            rankingDict = rankings_init(allTeams, ratingCoeff,
                                         rankingType)  # , debug)
            seasonLast = season
            if debug:
                print('First season initialized.')
        # Initialize new seasons (regress/reset ratings at each rollover)
        elif (season - seasonLast) > 0:
            rankingDict = season_start(results, rankingDict, ratingCoeff,
                                       rankingType, season, allTeams, debug)
            seasonLast = season
            if debug:
                print(str(season) + ' season initialized')
        for rankingMethod in rankingType:
            if debug:
                print(rankingMethod)
            # Home and Away teams
            teamAway = row.Away
            teamHome = row.Home
            # Home and Away teams' current ratings
            eloAway = rankingDict.get(teamAway, {}).get(rankingMethod)
            eloHome = rankingDict.get(teamHome, {}).get(rankingMethod)
            goalDiff = row.Home_Score - row.Away_Score
            if debug:
                print("Away: " + teamAway + " Elo: " + str(eloAway))
                print("Home: " + teamHome + " Elo: " + str(eloHome))
            # Choose ranking function based on method
            if 'Elo' in rankingMethod:
                rateCoeff = ratingCoeff[rankingMethod]
                [eloHome, eloAway, predictError] = rating_elo(eloHome,
                                                              eloAway,
                                                              goalDiff,
                                                              rateCoeff)
            else:
                raise ValueError('Unknown Ranking Method.')
            # Update current ranking tracker
            rankingDict[teamAway][rankingMethod] = eloAway
            rankingDict[teamHome][rankingMethod] = eloHome
            # Add updated Elo to results table
            results.at[row.Index, rankingMethod + '_Away'] = eloAway
            results.at[row.Index, rankingMethod + '_Home'] = eloHome
            results.at[row.Index, rankingMethod + '_Error'] = predictError
        # Increment game counter
        awayCount = rankingDict[teamAway]['gameCount'] + 1
        homeCount = rankingDict[teamHome]['gameCount'] + 1
        rankingDict[teamAway]['gameCount'] = awayCount
        rankingDict[teamHome]['gameCount'] = homeCount
    # Write to CSV
    if saveResults:
        path_or_buf = 'Results_Rankings.csv'
        # BUG FIX: index='False' is a truthy *string*, so the index column
        # was written despite the intent; pass the boolean False.
        results.to_csv(path_or_buf=path_or_buf, index=False)
        print('Results saved to ' + path_or_buf)
        with open('Ranking_Dict.txt', 'w') as file:
            # use `json.loads` to do the reverse
            file.write(json.dumps(rankingDict))
    return (results, rankingDict)
# %% Results analysis
def team_games(results, team='Northeastern'):
    """
    Collect all games played by the given team, indexed by date.

    Parameters
    ----------
    results : pandas.DataFrame
        Game results with 'Date', 'Season', 'Home', 'Away' columns and
        per-method rating columns suffixed '_Home' / '_Away'.
    team : str, optional
        Team to collect games for. The default is 'Northeastern'.

    Returns
    -------
    teamGames : pandas.DataFrame
        This team's home and away games, rating columns with the
        '_Home'/'_Away' suffix stripped, sorted and indexed by 'Date'.
    """
    # Get columns
    awayCol = ([col for col in results.columns if '_Away' in col])
    homeCol = ([col for col in results.columns if '_Home' in col])
    commonCols = ['Date', 'Season']

    awayGames = results.loc[results['Away'] == team, commonCols + awayCol]
    homeGames = results.loc[results['Home'] == team, commonCols + homeCol]

    # FIX: DataFrame.drop_suffix is not a pandas method and
    # DataFrame.append was removed in pandas 2.0; strip the suffixes with
    # rename() and stack the two frames with pd.concat().
    awayGames = awayGames.rename(
        columns=lambda c: c[:-len('_Away')] if c.endswith('_Away') else c)
    homeGames = homeGames.rename(
        columns=lambda c: c[:-len('_Home')] if c.endswith('_Home') else c)
    teamGames = pd.concat([awayGames, homeGames])

    teamGames['Date'] = pd.to_datetime(teamGames['Date'], format='%Y-%m-%d')
    teamGames = teamGames.sort_values(by='Date')
    teamGames = teamGames.set_index('Date')
    return teamGames
def team_season_metrics(results, rankingDict):
    """
    Add season summary stats for each team to rankingDict.

    For every team, per-season mean/max/min of its rating columns are
    computed from its games and stored under 'seasonMean', 'seasonMax'
    and 'seasonMin'.

    Parameters
    ----------
    results : pandas.DataFrame
        Game results with rating columns.
    rankingDict : dict
        Per-team rating state; mutated in place.

    Returns
    -------
    rankingDict : dict
        The (mutated) rating state with season summaries attached.
    """
    for team in rankingDict:
        bySeason = team_games(results, team).groupby(['Season'])
        rankingDict[team]['seasonMean'] = bySeason.mean()
        rankingDict[team]['seasonMax'] = bySeason.max()
        rankingDict[team]['seasonMin'] = bySeason.min()
    return rankingDict
def overall_metrics(rankDict):
    """
    Get rating summaries for all teams for each season.

    Builds a (Season, Team) multi-indexed DataFrame from the per-team
    'seasonMean'/'seasonMax'/'seasonMin' frames attached by
    team_season_metrics, appends a per-season 'Average' row, and writes
    the result to Overall_Metrics.h5 (requires the pytables package).

    Parameters
    ----------
    rankDict : dict
        Per-team rating state including season summary DataFrames.

    Returns
    -------
    overallMetrics : pandas.DataFrame
        Season summaries for all teams plus per-season aggregate rows.
    """
    # Parameters
    minGameThresh = 0  # teams at or below this game count are excluded
    # Prep
    overallMetrics = pd.DataFrame()
    # Convert rankingDict to multi-level dataframe
    # Columns: Season, Team, {rating [mean, max, min]}
    for team, value in rankDict.items():
        teamMetrics = pd.DataFrame()
        for metric, df in value.items():
            # Pull data from columns look at season summary stats
            if 'season' in metric:
                dfMetrics = df.copy()  # otherwise modifies input df in dict
                # Second column level tags which summary stat this frame holds.
                dfMetrics.columns = pd.MultiIndex.from_product(
                    [dfMetrics.columns, [metric]])
                # Build dataframe with all ranking methods and their season
                # summaries, for each season
                if teamMetrics.empty:
                    teamMetrics = dfMetrics
                else:
                    teamMetrics = pd.merge(
                        teamMetrics, dfMetrics, left_on='Season',
                        right_index=True, how='left', sort=False)
        # Clean up dataframe indicies
        teamMetrics.sort_index(axis=1, inplace=True)
        teamMetrics['Team'] = team
        teamMetrics.set_index(['Team'], append=True, inplace=True)
        # Add secondary info
        teamMetrics['gameCount'] = value['gameCount']
        # Add team's data to top level dataframe
        if overallMetrics.empty:
            overallMetrics = teamMetrics
        else:
            overallMetrics = overallMetrics.append(teamMetrics)
    # Sort idicies
    overallMetrics.sort_index(axis=0, inplace=True)
    # Filter out low game count teams
    filteredMetrics = overallMetrics[(
        overallMetrics.gameCount > minGameThresh)]
    # Create output metrics
    # For each season, aggregate across teams: max of maxes, min of mins,
    # mean of means — stored under the synthetic team name 'Average'.
    for season, seasonData in filteredMetrics.groupby(level=0):
        for col in seasonData:
            if 'Max' in col[1]:
                summaryData = seasonData.loc[:, col].max()
            if 'Min' in col[1]:
                summaryData = seasonData.loc[:, col].min()
            if 'Mean' in col[1]:
                summaryData = seasonData.loc[:, col].mean()
            overallMetrics.loc[(season, 'Average'), col] = summaryData
    overallMetrics.to_hdf('Overall_Metrics.h5', key='overallMetrics', mode='w')
    return overallMetrics
| [
"pandas.Series",
"pandas.MultiIndex.from_product",
"numpy.unique",
"pandas.merge",
"json.dumps",
"numpy.ndenumerate",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] | [((770, 789), 'numpy.unique', 'np.unique', (['allTeams'], {}), '(allTeams)\n', (779, 789), True, 'import numpy as np\n'), ((1583, 1607), 'numpy.ndenumerate', 'np.ndenumerate', (['allTeams'], {}), '(allTeams)\n', (1597, 1607), True, 'import numpy as np\n'), ((12912, 12964), 'pandas.to_datetime', 'pd.to_datetime', (["teamGames['Date']"], {'format': '"""%Y-%m-%d"""'}), "(teamGames['Date'], format='%Y-%m-%d')\n", (12926, 12964), True, 'import pandas as pd\n'), ((14138, 14152), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14150, 14152), True, 'import pandas as pd\n'), ((14323, 14337), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14335, 14337), True, 'import pandas as pd\n'), ((516, 542), 'pandas.Series', 'pd.Series', (["results['Home']"], {}), "(results['Home'])\n", (525, 542), True, 'import pandas as pd\n'), ((2738, 2791), 'pandas.concat', 'pd.concat', (["[seasonGames['Home'], seasonGames['Away']]"], {}), "([seasonGames['Home'], seasonGames['Away']])\n", (2747, 2791), True, 'import pandas as pd\n'), ((648, 674), 'pandas.Series', 'pd.Series', (["results['Away']"], {}), "(results['Away'])\n", (657, 674), True, 'import pandas as pd\n'), ((12007, 12030), 'json.dumps', 'json.dumps', (['rankingDict'], {}), '(rankingDict)\n', (12017, 12030), False, 'import json\n'), ((14593, 14650), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[dfMetrics.columns, [metric]]'], {}), '([dfMetrics.columns, [metric]])\n', (14619, 14650), True, 'import pandas as pd\n'), ((14931, 15028), 'pandas.merge', 'pd.merge', (['teamMetrics', 'dfMetrics'], {'left_on': '"""Season"""', 'right_index': '(True)', 'how': '"""left"""', 'sort': '(False)'}), "(teamMetrics, dfMetrics, left_on='Season', right_index=True, how=\n 'left', sort=False)\n", (14939, 15028), True, 'import pandas as pd\n')] |
import logging
import math
from cv2 import split
import numpy as np
from PIL import Image
from torchvision import datasets, transforms
from .transforms.randaugment import RandAugmentMC
logger = logging.getLogger(__name__)
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
cifar100_mean = (0.5071, 0.4867, 0.4408)
cifar100_std = (0.2675, 0.2565, 0.2761)
# normal_mean = (0.5, 0.5, 0.5)
# normal_std = (0.5, 0.5, 0.5)
# cifar10_mean = (0.4914, 0.4822, 0.4465)
# cifar10_std = (0.2471, 0.2435, 0.2616)
# cifar100_mean = (0.5071, 0.4867, 0.4408)
# cifar100_std = (0.2675, 0.2565, 0.2761)
"""
prepare CIFAR10/CIFAR100 for semi-superised learning
code in this file is adpated from
https://github.com/kekmodel/FixMatch-pytorch/blob/master/dataset/cifar.py
thanks!
"""
# prepare CIFAR10/CIFAR100 for semi-superised learning
# code in this file is adpated from
# https://github.com/kekmodel/FixMatch-pytorch/blob/master/dataset/cifar.py
# thanks!
# split CIFAR10 into labeled/unlabeled/val set
def get_cifar10(args, root):
    """Build the CIFAR-10 datasets for semi-supervised training.

    Returns a (train_labeled, train_unlabeled, test) dataset triple.
    """
    # Weak augmentation + normalization for the labeled branch.
    labeled_tf = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32 * 0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    # Evaluation only needs tensor conversion + normalization.
    val_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])

    base_dataset = datasets.CIFAR10(root, train=True, download=True)
    labeled_idxs, unlabeled_idxs = x_u_split(args, base_dataset.targets)

    labeled_ds = CIFAR10SSL(root, labeled_idxs, train=True,
                            transform=labeled_tf)
    # The unlabeled branch gets the FixMatch weak/strong transform pair.
    unlabeled_ds = CIFAR10SSL(root, unlabeled_idxs, train=True,
                              transform=TransformFixMatch(mean=cifar10_mean,
                                                          std=cifar10_std))
    test_ds = datasets.CIFAR10(root, train=False, transform=val_tf,
                               download=False)
    return labeled_ds, unlabeled_ds, test_ds
# split CIFAR100 into labeled/unlabeled/val set
def get_cifar100(args, root):
    """Build the CIFAR-100 datasets for semi-supervised training.

    Returns a (train_labeled, train_unlabeled, test) dataset triple.
    """
    # Weak augmentation + normalization for the labeled branch.
    labeled_tf = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32 * 0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])
    # Evaluation only needs tensor conversion + normalization.
    val_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])

    base_dataset = datasets.CIFAR100(root, train=True, download=True)
    # split data into labeled and unlabeled by idxs
    labeled_idxs, unlabeled_idxs = x_u_split(args, base_dataset.targets)

    labeled_ds = CIFAR100SSL(root, labeled_idxs, train=True,
                             transform=labeled_tf)
    unlabeled_ds = CIFAR100SSL(root, unlabeled_idxs, train=True,
                               transform=TransformFixMatch(mean=cifar100_mean,
                                                           std=cifar100_std))
    test_ds = datasets.CIFAR100(root, train=False, transform=val_tf,
                                download=False)
    return labeled_ds, unlabeled_ds, test_ds
def x_u_split(args, labels):
    """Split example indices into a labeled subset and the full unlabeled pool.

    In "sup" mode every index is returned for both splits.  Otherwise
    ``args.num_labeled`` indices are sampled class-balanced without
    replacement, optionally replicated to fill a batch, and shuffled.
    """
    # Fully supervised mode: everything is labeled (and "unlabeled").
    if isinstance(args.num_labeled, str) and args.num_labeled == "sup":
        all_idx = list(range(len(labels)))
        return all_idx, list(all_idx)
    per_class = args.num_labeled // args.num_classes
    targets = np.array(labels)
    # unlabeled pool = all data (https://github.com/kekmodel/FixMatch-pytorch/issues/10)
    unlabeled_idx = np.array(range(len(targets)))
    picked = []
    for cls in range(args.num_classes):
        candidates = np.where(targets == cls)[0]
        picked.extend(np.random.choice(candidates, per_class, False))
    labeled_idx = np.array(picked)
    assert len(labeled_idx) == args.num_labeled
    # Replicate the labeled indices when they cannot fill a full batch.
    if args.get("expand_labels", False) or args.num_labeled < args.batch_size:
        repeats = math.ceil(
            args.batch_size * args.eval_step / args.num_labeled)
        labeled_idx = np.hstack([labeled_idx] * repeats)
    np.random.shuffle(labeled_idx)
    return labeled_idx, unlabeled_idx
# FixMatch applies weak and strong transforms to images
class TransformFixMatch(object):
    """FixMatch augmentation pair.

    Calling an instance returns (weak, strong): the same image put through
    a weak flip/crop pipeline and a strong RandAugment pipeline, each
    followed by tensor conversion and normalization.
    """

    def __init__(self, mean, std):
        def _flip_and_crop():
            # Fresh transform instances for each pipeline.
            return [
                transforms.RandomHorizontalFlip(),
                transforms.RandomCrop(size=32,
                                      padding=int(32 * 0.125),
                                      padding_mode='reflect'),
            ]

        self.weak = transforms.Compose(_flip_and_crop())
        self.strong = transforms.Compose(
            _flip_and_crop() + [RandAugmentMC(n=2, m=10)])
        self.normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])

    def __call__(self, x):
        return self.normalize(self.weak(x)), self.normalize(self.strong(x))
# prepare CIFAR10 for SSL
class CIFAR10SSL(datasets.CIFAR10):
    """CIFAR-10 dataset restricted to a subset of indices for SSL.

    If ``anno_file`` is given it takes highest priority: data and targets
    are loaded from that .npy file instead (and tiled up to >= 64 samples).
    """

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False,
                 anno_file=None):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        # Keep only the selected examples (labeled or unlabeled split).
        if indexs is not None:
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

        # The highest priority: load data directly from an annotation file.
        if anno_file is not None:
            logging.info("Loading from file {}".format(anno_file))
            # assumes the .npy stores a dict with "data" and "label" entries
            anno_data = np.load(anno_file, allow_pickle=True).item()
            self.data = np.array(anno_data["data"])
            self.targets = np.array(anno_data["label"])
            # Tile tiny annotation sets so a batch of 64 can be formed.
            if len(self.data) < 64:
                repeat_num = 64//len(self.data) + 1
                self.data = np.tile(self.data, [repeat_num, 1, 1, 1])
                self.targets = np.tile(self.targets, repeat_num)

    def __getitem__(self, index):
        # Returns (transformed PIL image, target) like torchvision datasets.
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
# prepare CIFAR100 for SSL
class CIFAR100SSL(datasets.CIFAR100):
    """CIFAR-100 dataset restricted to a subset of indices for SSL.

    NOTE(review): ``anno_file`` is accepted for signature parity with
    CIFAR10SSL but is never used here - confirm whether that is intended.
    """

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False, anno_file=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        # Keep only the selected examples (labeled or unlabeled split).
        if indexs is not None:
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        # Returns (transformed PIL image, target) like torchvision datasets.
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
# Maps dataset name -> loader returning (labeled, unlabeled, test) datasets.
DATASET_GETTERS = {'cifar10': get_cifar10,
                   'cifar100': get_cifar100}
| [
"logging.getLogger",
"numpy.tile",
"PIL.Image.fromarray",
"torchvision.datasets.CIFAR100",
"math.ceil",
"numpy.random.choice",
"numpy.where",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.array",
"torchvision.transforms.Normalize",
"torchvision.datasets.CIFAR10",
"torchvision.transform... | [((197, 224), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (214, 224), False, 'import logging\n'), ((1549, 1598), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['root'], {'train': '(True)', 'download': '(True)'}), '(root, train=True, download=True)\n', (1565, 1598), False, 'from torchvision import datasets, transforms\n'), ((2002, 2078), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['root'], {'train': '(False)', 'transform': 'transform_val', 'download': '(False)'}), '(root, train=False, transform=transform_val, download=False)\n', (2018, 2078), False, 'from torchvision import datasets, transforms\n'), ((2734, 2784), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', (['root'], {'train': '(True)', 'download': '(True)'}), '(root, train=True, download=True)\n', (2751, 2784), False, 'from torchvision import datasets, transforms\n'), ((3252, 3329), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', (['root'], {'train': '(False)', 'transform': 'transform_val', 'download': '(False)'}), '(root, train=False, transform=transform_val, download=False)\n', (3269, 3329), False, 'from torchvision import datasets, transforms\n'), ((3654, 3670), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3662, 3670), True, 'import numpy as np\n'), ((4016, 4037), 'numpy.array', 'np.array', (['labeled_idx'], {}), '(labeled_idx)\n', (4024, 4037), True, 'import numpy as np\n'), ((4345, 4375), 'numpy.random.shuffle', 'np.random.shuffle', (['labeled_idx'], {}), '(labeled_idx)\n', (4362, 4375), True, 'import numpy as np\n'), ((3920, 3965), 'numpy.random.choice', 'np.random.choice', (['idx', 'label_per_class', '(False)'], {}), '(idx, label_per_class, False)\n', (3936, 3965), True, 'import numpy as np\n'), ((4189, 4251), 'math.ceil', 'math.ceil', (['(args.batch_size * args.eval_step / args.num_labeled)'], {}), '(args.batch_size * args.eval_step / args.num_labeled)\n', (4198, 4251), False, 'import math\n'), 
((6588, 6608), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6603, 6608), False, 'from PIL import Image\n'), ((7469, 7489), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (7484, 7489), False, 'from PIL import Image\n'), ((1101, 1134), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1132, 1134), False, 'from torchvision import datasets, transforms\n'), ((1291, 1312), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1310, 1312), False, 'from torchvision import datasets, transforms\n'), ((1322, 1378), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'cifar10_mean', 'std': 'cifar10_std'}), '(mean=cifar10_mean, std=cifar10_std)\n', (1342, 1378), False, 'from torchvision import datasets, transforms\n'), ((1435, 1456), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1454, 1456), False, 'from torchvision import datasets, transforms\n'), ((1466, 1522), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'cifar10_mean', 'std': 'cifar10_std'}), '(mean=cifar10_mean, std=cifar10_std)\n', (1486, 1522), False, 'from torchvision import datasets, transforms\n'), ((2290, 2323), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2321, 2323), False, 'from torchvision import datasets, transforms\n'), ((2480, 2501), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2499, 2501), False, 'from torchvision import datasets, transforms\n'), ((2511, 2569), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'cifar100_mean', 'std': 'cifar100_std'}), '(mean=cifar100_mean, std=cifar100_std)\n', (2531, 2569), False, 'from torchvision import datasets, transforms\n'), ((2622, 2643), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2641, 2643), False, 'from torchvision import 
datasets, transforms\n'), ((2653, 2711), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'cifar100_mean', 'std': 'cifar100_std'}), '(mean=cifar100_mean, std=cifar100_std)\n', (2673, 2711), False, 'from torchvision import datasets, transforms\n'), ((3881, 3902), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (3889, 3902), True, 'import numpy as np\n'), ((6172, 6199), 'numpy.array', 'np.array', (["anno_data['data']"], {}), "(anno_data['data'])\n", (6180, 6199), True, 'import numpy as np\n'), ((6227, 6255), 'numpy.array', 'np.array', (["anno_data['label']"], {}), "(anno_data['label'])\n", (6235, 6255), True, 'import numpy as np\n'), ((4591, 4624), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4622, 4624), False, 'from torchvision import datasets, transforms\n'), ((4841, 4874), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4872, 4874), False, 'from torchvision import datasets, transforms\n'), ((5132, 5153), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5151, 5153), False, 'from torchvision import datasets, transforms\n'), ((5167, 5207), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (5187, 5207), False, 'from torchvision import datasets, transforms\n'), ((5888, 5910), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (5896, 5910), True, 'import numpy as np\n'), ((6372, 6413), 'numpy.tile', 'np.tile', (['self.data', '[repeat_num, 1, 1, 1]'], {}), '(self.data, [repeat_num, 1, 1, 1])\n', (6379, 6413), True, 'import numpy as np\n'), ((6445, 6478), 'numpy.tile', 'np.tile', (['self.targets', 'repeat_num'], {}), '(self.targets, repeat_num)\n', (6452, 6478), True, 'import numpy as np\n'), ((7329, 7351), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (7337, 7351), True, 
'import numpy as np\n'), ((6103, 6140), 'numpy.load', 'np.load', (['anno_file'], {'allow_pickle': '(True)'}), '(anno_file, allow_pickle=True)\n', (6110, 6140), True, 'import numpy as np\n')] |
"""
This file contains helper functions for evaluate_segmentation.py script.
"""
import os
import csv
import xml.etree.cElementTree as ET
import numpy as np
import config
from utils import get_paths
# compare segmentations based on the EvaluateSegmentation software of Taha
def segment_comparison(
    goldstandard_path,
    segmentation_path,
    executable_path,
    eval_result_path,
    threshold,
    measures,
):
    """Run EvaluateSegmentation on one (gold standard, segmentation) pair.

    The external tool writes its metric report as XML to ``eval_result_path``.
    """
    print(measures)
    print("goldstandard path: ", goldstandard_path)
    print("segmentation path: ", segmentation_path)
    print("executable path: ", executable_path)
    print("eval result path: ", eval_result_path)

    command_string = (
        f"{executable_path} {goldstandard_path} {segmentation_path}"
        f" -use {measures} -xml {eval_result_path}"
        f" -thd {threshold} -unit millimeter"
    )
    print("command string: ", command_string)
    # NOTE(review): os.system with an interpolated string; paths are
    # internal here, but shell metacharacters in a path would break the call.
    os.system(command_string)
# parse the metric XML produced by EvaluateSegmentation and append one row
# per call to a CSV file (the header is written on first use)
def parse_xml_to_csv(xml_path, csv_path, run_params=None):
    """Append the metrics of one evaluation XML as a row of ``csv_path``.

    Parameters
    ----------
    xml_path : path of the XML report written by EvaluateSegmentation.
    csv_path : CSV file to append to; a header row (run-parameter names +
        metric symbols) is written when the file is new or empty.
    run_params : optional dict of run settings stored with the metrics.
    """
    if run_params is None:
        run_params = {}
    symbols = []
    values = []
    root = ET.parse(xml_path).getroot()
    # Each child of a <metrics> element carries symbol/value attributes.
    for child in root.findall(".//metrics/*"):
        symbols.append(child.attrib["symbol"])
        values.append(child.attrib["value"])
    # Fix: newline="" keeps csv.writer from emitting blank lines on Windows.
    with open(csv_path, "a+", newline="") as f1:
        writer = csv.writer(f1)
        if os.path.isfile(csv_path) and os.path.getsize(csv_path) == 0:
            writer.writerow(list(run_params.keys()) + symbols)
        writer.writerow(list(run_params.values()) + values)
# sensibility = (1 - FP / (TP + FN)) * 100, from the confusion counts
def calculate_sensibility(metrics_dic):
    """Return the "sensibility" score in percent, or NaN when undefined.

    NaN results when a count is missing from ``metrics_dic`` or TP + FN == 0.
    """
    try:
        false_pos = int(metrics_dic["FP"])
        positives = int(metrics_dic["TP"]) + int(metrics_dic["FN"])
        score = (1 - false_pos / positives) * 100
    except (KeyError, ZeroDivisionError):
        score = np.nan
    return round(score, 6)
# conformity = (1 - (FP + FN) / TP) * 100, from the confusion counts
def calculate_conformity(metrics_dic):
    """Return the conformity score in percent, or NaN when undefined.

    NaN results when a count is missing from ``metrics_dic`` or TP == 0.
    """
    try:
        errors = int(metrics_dic["FP"]) + int(metrics_dic["FN"])
        true_pos = int(metrics_dic["TP"])
        score = (1 - errors / true_pos) * 100
    except (KeyError, ZeroDivisionError):
        score = np.nan
    return round(score, 6)
# build {metric tag: value} from an EvaluateSegmentation XML report
def create_dict_from_xml(xml_path, metrics_list=None):
    """Return a dict mapping each requested metric tag to its value string.

    Only tags listed in ``metrics_list`` (default: the confusion counts
    TP/FP/TN/FN) are read from the ``<metrics>`` section.  Metrics absent
    from the file are simply missing from the result.
    """
    if metrics_list is None:
        metrics_list = ["TP", "FP", "TN", "FN"]
    wanted = set(metrics_list)
    root = ET.parse(xml_path).getroot()
    # Fix: map tag -> value directly.  The old zip() against metrics_list
    # silently mis-assigned values whenever the XML listed the tags in a
    # different order than the list (or omitted some of them).
    return {child.tag: child.attrib["value"]
            for child in root.findall(".//metrics/*")
            if child.tag in wanted}
def sensibility_conformity_to_xml(xml_path):
    """
    Insert Sensibility and Conformity values into Evaluation xml.

    Reads the confusion counts back from the file, computes the two derived
    metrics and writes them as extra elements into the metrics section.

    :param xml_path: path to xml file generated by EvaluateSegmentation.exe
    """
    print("ADDING SENSBIL and CFM to:", xml_path)
    # The file is parsed twice: once here and once in create_dict_from_xml.
    tree = ET.parse(xml_path)
    root = tree.getroot()
    metrics_dic = create_dict_from_xml(xml_path)
    valsensibility = calculate_sensibility(metrics_dic)
    valconformity = calculate_conformity(metrics_dic)
    sensibility_attributes = {
        "name": "sensibility",
        "value": str(valsensibility),
        "symbol": "SENSBIL",
        "type": "similarity",
        "unit": "voxel",
    }
    SENSBIL = ET.Element("SENSBIL", attrib=sensibility_attributes)
    conformity_attributes = {
        "name": "conformity",
        "value": str(valconformity),
        "symbol": "CFM",
        "type": "similarity",
        "unit": "voxel",
    }
    CFM = ET.Element("CFM", attrib=conformity_attributes)
    # NOTE(review): assumes root's third child is the <metrics> section -
    # confirm against the XML layout EvaluateSegmentation actually emits.
    root[2].insert(2, SENSBIL)
    root[2].insert(3, CFM)
    # Overwrite the original report in place.
    tree.write(xml_path)
def parse_xml_to_csv_avg_for_patients(xml_paths, csv_path, run_params):
    """Average the metrics of several patient XML reports into one CSV row.

    Parameters
    ----------
    xml_paths : XML reports (one per patient) from EvaluateSegmentation.
    csv_path : CSV file to append the averaged row to (header on first use).
    run_params : dict of run settings stored alongside the averages.

    Returns
    -------
    (mean, std) : per-metric average and standard deviation (np.float32).
    """
    all_values = []
    measures_symbols = []
    for i, path in enumerate(xml_paths):
        values = []
        root = ET.parse(path).getroot()
        for child in root.findall(".//metrics/*"):
            # Column names are taken from the first patient's report only.
            if i == 0:
                measures_symbols.append(child.attrib["symbol"])
            values.append(child.attrib["value"])
        if not values:
            # Sentinel row when a report holds no metrics (e.g. all-zero image).
            # NOTE(review): assumes exactly two measures per report - confirm.
            all_values.append(["-100.001", "-100.001"])
        else:
            all_values.append(values)
    # count average for each metric
    print("measures values all patients: ", all_values)
    measures_values_avg = np.mean(
        np.asarray(all_values, dtype=np.float32), axis=0
    )
    measures_values_sd = np.std(
        np.asarray(all_values, dtype=np.float32), axis=0
    )
    print(measures_values_avg)
    print(measures_values_avg.shape)
    print(measures_values_avg.dtype)
    # Fix: newline="" keeps csv.writer from emitting blank lines on Windows.
    with open(csv_path, "a+", newline="") as f1:
        writer = csv.writer(f1)
        if os.path.isfile(csv_path) and os.path.getsize(csv_path) == 0:
            writer.writerow(list(run_params.keys()) + measures_symbols)
        writer.writerow(list(run_params.values()) + measures_values_avg.tolist())
    return measures_values_avg, measures_values_sd
def evaluate_segmentation(
    patients_segm,
    dataset,
    epoch,
    batchsize,
    lr,
    dropout,
    augm,
    train_input,
    measures,
    csv_path,
    csv_path_per_patient,
    executable_path,
    realonly=False,
    ext_run="",
    e=0,
):
    """Evaluate the segmentations of several patients against ground truth.

    Runs EvaluateSegmentation per patient, appends one CSV row per patient
    to ``csv_path_per_patient`` and one averaged row to ``csv_path``.

    Returns:
        (avg, sd): per-metric mean and standard deviation over patients.

    NOTE(review): ``realonly``, ``ext_run`` and ``e`` are accepted but never
    used inside this function - confirm whether they can be dropped.
    """
    # create the name of current run
    run_name = get_paths.get_run_name(epoch, batchsize, lr, dropout, augm, train_input)
    print(run_name)
    xml_paths = []
    for patient in patients_segm:
        print("_______________________________________________________________")
        # load labels and segmentations
        label_path = (
            get_paths.get_original_data_path(dataset) + str(patient) + "_label.nii.gz"
        )
        segmentation_path = get_paths.get_prob_path(patient, run_name, dataset)
        # for saving results of evaluate segmentation to xml and to csv
        xml_path_patient = get_paths.get_result_xml_path(patient, run_name, dataset)
        # single-quote the path for the shell command built in segment_comparison
        xml_path_patient_quotes = "'" + xml_path_patient + "'"
        xml_paths.append(xml_path_patient)
        # compare the segmentation with ground truth and save the xml file in
        # the results folder
        segment_comparison(
            label_path,
            segmentation_path,
            executable_path,
            xml_path_patient_quotes,
            config.threshold_unet,
            measures,
        )
        print("xml path patient: ", xml_path_patient)
        # print("Is file?" + str(os.path.isfile(xml_path_patient)))
        # parse the generated xmls and insert two more metrics: Sensibility
        # and Conformity
        # sensibility_conformity_to_xml(xml_path_patient)
        # parse the xml files in each folder, do stats and save the dataframes
        # as csvs with the parse_xml function
        run_params = {
            "patch_size": config.PATCH_SIZE,
            "num_epochs": epoch,
            "batch_size": batchsize,
            "dropout": dropout,
            "lr": lr,
            "patient": patient,
            "nr_patches": config.NUM_PATCHES,
            "augm": augm,
            "train_input": train_input,
            "dataset": dataset,
        }
        parse_xml_to_csv(xml_path_patient, csv_path_per_patient, run_params)
    # NOTE(review): this dict still carries the *last* loop patient under
    # "patient" although the row describes the average over all patients.
    run_params = {
        "patch_size": config.PATCH_SIZE,
        "num_epochs": epoch,
        "batch_size": batchsize,
        "dropout": dropout,
        "lr": lr,
        "patient": patient,
        "nr_patches": config.NUM_PATCHES,
        "augm": augm,
        "train_input": train_input,
        "dataset": dataset,
    }
    avg, sd = parse_xml_to_csv_avg_for_patients(xml_paths, csv_path, run_params)
    return avg, sd
| [
"os.path.getsize",
"utils.get_paths.get_run_name",
"utils.get_paths.get_original_data_path",
"utils.get_paths.get_result_xml_path",
"csv.writer",
"numpy.asarray",
"utils.get_paths.get_prob_path",
"os.path.isfile",
"xml.etree.cElementTree.parse",
"xml.etree.cElementTree.Element",
"os.system"
] | [((991, 1016), 'os.system', 'os.system', (['command_string'], {}), '(command_string)\n', (1000, 1016), False, 'import os\n'), ((1342, 1360), 'xml.etree.cElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (1350, 1360), True, 'import xml.etree.cElementTree as ET\n'), ((2742, 2760), 'xml.etree.cElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (2750, 2760), True, 'import xml.etree.cElementTree as ET\n'), ((3283, 3301), 'xml.etree.cElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (3291, 3301), True, 'import xml.etree.cElementTree as ET\n'), ((3694, 3746), 'xml.etree.cElementTree.Element', 'ET.Element', (['"""SENSBIL"""'], {'attrib': 'sensibility_attributes'}), "('SENSBIL', attrib=sensibility_attributes)\n", (3704, 3746), True, 'import xml.etree.cElementTree as ET\n'), ((3940, 3987), 'xml.etree.cElementTree.Element', 'ET.Element', (['"""CFM"""'], {'attrib': 'conformity_attributes'}), "('CFM', attrib=conformity_attributes)\n", (3950, 3987), True, 'import xml.etree.cElementTree as ET\n'), ((5958, 6030), 'utils.get_paths.get_run_name', 'get_paths.get_run_name', (['epoch', 'batchsize', 'lr', 'dropout', 'augm', 'train_input'], {}), '(epoch, batchsize, lr, dropout, augm, train_input)\n', (5980, 6030), False, 'from utils import get_paths\n'), ((1621, 1635), 'csv.writer', 'csv.writer', (['f1'], {}), '(f1)\n', (1631, 1635), False, 'import csv\n'), ((4331, 4345), 'xml.etree.cElementTree.parse', 'ET.parse', (['path'], {}), '(path)\n', (4339, 4345), True, 'import xml.etree.cElementTree as ET\n'), ((5009, 5067), 'numpy.asarray', 'np.asarray', (['measures_values_all_patients'], {'dtype': 'np.float32'}), '(measures_values_all_patients, dtype=np.float32)\n', (5019, 5067), True, 'import numpy as np\n'), ((5123, 5181), 'numpy.asarray', 'np.asarray', (['measures_values_all_patients'], {'dtype': 'np.float32'}), '(measures_values_all_patients, dtype=np.float32)\n', (5133, 5181), True, 'import numpy as np\n'), ((5356, 5370), 
'csv.writer', 'csv.writer', (['f1'], {}), '(f1)\n', (5366, 5370), False, 'import csv\n'), ((6377, 6428), 'utils.get_paths.get_prob_path', 'get_paths.get_prob_path', (['patient', 'run_name', 'dataset'], {}), '(patient, run_name, dataset)\n', (6400, 6428), False, 'from utils import get_paths\n'), ((6529, 6586), 'utils.get_paths.get_result_xml_path', 'get_paths.get_result_xml_path', (['patient', 'run_name', 'dataset'], {}), '(patient, run_name, dataset)\n', (6558, 6586), False, 'from utils import get_paths\n'), ((1647, 1671), 'os.path.isfile', 'os.path.isfile', (['csv_path'], {}), '(csv_path)\n', (1661, 1671), False, 'import os\n'), ((5382, 5406), 'os.path.isfile', 'os.path.isfile', (['csv_path'], {}), '(csv_path)\n', (5396, 5406), False, 'import os\n'), ((1676, 1701), 'os.path.getsize', 'os.path.getsize', (['csv_path'], {}), '(csv_path)\n', (1691, 1701), False, 'import os\n'), ((5411, 5436), 'os.path.getsize', 'os.path.getsize', (['csv_path'], {}), '(csv_path)\n', (5426, 5436), False, 'import os\n'), ((6264, 6305), 'utils.get_paths.get_original_data_path', 'get_paths.get_original_data_path', (['dataset'], {}), '(dataset)\n', (6296, 6305), False, 'from utils import get_paths\n')] |
#!/usr/bin/env python
import sys
sys.path.append('..')
from libs.helpers import get_input
from collections import Counter
import numpy as np
def simulate(grid, neighbours, maxocc):
    """Step the seating automaton until no seat changes; return the fixed grid."""
    step = 0
    neighbour_cache = {}  # (x, y) -> neighbour positions, shared across steps
    while True:
        step += 1
        print(f'Step {step}')
        grid, dirty = change(grid, neighbours, maxocc, neighbour_cache)
        if not dirty:
            return grid
def neighbourssimple(grid, x, y):
    """Return the in-bounds positions of the 8 cells adjacent to (x, y)."""
    width, height = len(grid[0]), len(grid)
    candidates = [(x + dx, y + dy)
                  for dy in (-1, 0, 1)
                  for dx in (-1, 0, 1)
                  if (dx, dy) != (0, 0)]
    return [(px, py) for px, py in candidates
            if 0 <= px < width and 0 <= py < height]
def change(grid, neighbours, maxocc, cache=None):
    """Apply one round of the seating rules and return (new_grid, changed).

    A free seat ('L') with no occupied neighbour becomes occupied ('#');
    an occupied seat with at least ``maxocc`` occupied neighbours frees up.
    ``cache`` memoizes neighbour positions per seat; pass the same dict
    across rounds of one simulation to avoid recomputing them.
    """
    # Bug fix: the cache used to be a mutable default argument, so calls
    # that omitted it shared stale neighbour positions across *different*
    # grids.  A fresh dict is now created per call instead.
    if cache is None:
        cache = {}
    ng = [[c for c in row] for row in grid]
    changed = False
    for y in range(len(grid)):
        for x in range(len(grid[y])):
            if grid[y][x] == '.':
                continue
            if (x, y) not in cache:
                cache[(x, y)] = neighbours(grid, x, y)
            occupancy = Counter(grid[py][px] for px, py in cache[(x, y)])
            if grid[y][x] == 'L' and occupancy['#'] == 0:
                ng[y][x] = '#'
                changed = True
            elif grid[y][x] == '#' and 0 < occupancy['#'] >= maxocc:
                ng[y][x] = 'L'
                changed = True
    return (ng, changed)
def printgrid(grid):
    """Print the grid, one row per line."""
    for line in map(''.join, grid):
        print(line)
# The eight ray directions (dx, dy) used for line-of-sight scanning.
vecs = [
    [-1, -1], [0, -1], [+1, -1],
    [-1, 0], [+1, 0],
    [-1, +1], [0, +1], [+1, +1]]
vectors = [np.array(v) for v in vecs]

def nbscomplex(grid, x, y):
    """Return the first visible seat ('L' or '#') from (x, y) along each ray."""
    seen = []
    limit = max(len(grid), len(grid[0]))
    for direction in vectors:
        for dist in range(1, limit):
            dx, dy = direction * dist
            nx, ny = x + dx, y + dy
            # Stop scanning this ray at the edge of the grid.
            if nx not in range(len(grid[0])) or ny not in range(len(grid)):
                break
            cell = grid[ny][nx]
            if cell in ('L', '#'):
                seen.append((nx, ny))
                break
    return seen
lines = get_input('input.txt')
grid = [[c for c in l] for l in lines]
# Part 1: adjacent neighbours, seats empty at >= 4 occupied.
g = simulate(grid, neighbourssimple, 4)
s = sum(sum(1 if c == '#' else 0 for c in l) for l in g)
print(s)
# Part 2: line-of-sight neighbours, seats empty at >= 5 occupied.
# (change() returns a new grid, so the original grid is reusable here.)
g = simulate(grid, nbscomplex, 5)
s = sum(sum(1 if c == '#' else 0 for c in l) for l in g)
print(s)
| [
"collections.Counter",
"numpy.array",
"sys.path.append",
"libs.helpers.get_input"
] | [((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((2088, 2110), 'libs.helpers.get_input', 'get_input', (['"""input.txt"""'], {}), "('input.txt')\n", (2097, 2110), False, 'from libs.helpers import get_input\n'), ((1617, 1628), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1625, 1628), True, 'import numpy as np\n'), ((1075, 1087), 'collections.Counter', 'Counter', (['nbs'], {}), '(nbs)\n', (1082, 1087), False, 'from collections import Counter\n')] |
import numpy as np
from ._wavedata import Wavedata
import scipy.signal as signal
import matplotlib.pyplot as plt
'''Wavedata filter module: a collection of digital filters for Wavedata objects.'''
class Filter(object):
    """Base filter: the identity filter (passes data through unchanged).

    A custom processing function ``process(data, sRate) -> (data, sRate)``
    may be supplied to the constructor to override the default behaviour.
    """

    def __init__(self, process=None):
        super(Filter, self).__init__()
        # An instance attribute shadows the class-level method when given.
        if process is not None:
            self.process = process

    def process(self, data, sRate):
        """Processing hook; both input and output are (data, sRate) pairs."""
        return data, sRate

    def filt(self, w):
        """Filter a Wavedata object and return the filtered Wavedata."""
        assert isinstance(w, Wavedata)
        filtered, rate = self.process(w.data, w.sRate)
        return Wavedata(filtered, rate)

    def series(*arg):
        """Chain several filters: the output of one feeds the next."""
        def chained(data, sRate):
            for flt in arg:
                data, sRate = flt.process(data, sRate)
            return data, sRate
        return Filter(chained)

    def parallel(*arg):
        """Run several filters on the same input and average their outputs."""
        def averaged(data, sRate):
            outputs = [flt.process(data, sRate)[0] for flt in arg]
            mean = np.array(outputs).sum(axis=0) / len(arg)
            return mean, sRate
        return Filter(averaged)
class WGN(Filter):
    """White-Gaussian-noise adder: mixes noise at ``snr`` dB into the signal."""

    def __init__(self, snr):
        # Target signal-to-noise ratio in dB.
        self.snr = snr

    def process(self, data, sRate):
        signal_power = np.sum(data ** 2) / len(data)
        noise_power = signal_power / 10 ** (self.snr / 10.0)
        noise = np.random.randn(len(data)) * np.sqrt(noise_power)
        return data + noise, sRate
class baFilter(Filter):
    """Digital filter built from a scipy.signal design function returning (b, a).

    ``name`` is the scipy.signal designer to use (e.g. 'butter', 'ellip');
    the keyword arguments are forwarded to it and must include ``fs``.
    """

    def __init__(self, name='', **kw):
        kw.update(output='ba', analog=False)
        self.dict = kw  # must contain the sampling rate 'fs'
        designer = getattr(signal, name)
        self.ba = designer(**self.dict)

    def process(self, data, sRate):
        # The data must be sampled at the rate the filter was designed for.
        assert sRate == self.dict['fs']
        b, a = self.ba
        return signal.filtfilt(b, a, data), sRate

    def freqz(self):
        """Return the digital filter's frequency response (w, h)."""
        return signal.freqz(*self.ba, fs=self.dict['fs'])

    def plot(self):
        """Plot the magnitude response."""
        w, h = self.freqz()
        plt.plot(w, np.abs(h))
        plt.xlabel('Frequency')
        plt.ylabel('factor')
class IIRFilter(baFilter):
    """IIR filter via scipy.signal.iirfilter.

    Defaults give a 49-51 MHz elliptic band-pass at a 1 GHz sampling rate.
    """

    # Note: baFilter.__init__ is deliberately not called; only the other
    # inherited methods (process/freqz/plot) are reused.
    def __init__(self, N=2, Wn=(49e6, 51e6), rp=0.01, rs=100, btype='band',
                 ftype='ellip', fs=1e9):
        # Bug fix: Wn's default used to be a mutable list shared across
        # instances; a tuple is equivalent for scipy and cannot be mutated.
        # Configuration dict; output='ba' and analog=False are fixed.
        self.dict = dict(N=N, Wn=Wn, rp=rp, rs=rs, btype=btype,
                         analog=False, ftype=ftype, output='ba', fs=fs)
        self.ba = signal.iirfilter(**self.dict)
class BesselFilter(baFilter):
    """Low-pass Bessel filter via scipy.signal.bessel.

    Defaults give a 100 MHz 2nd-order low-pass at a 1 GHz sampling rate.
    """

    # Note: baFilter.__init__ is deliberately not called; only the other
    # inherited methods (process/freqz/plot) are reused.
    def __init__(self, N=2, Wn=100e6, btype='low',
                 norm='phase', fs=1e9):
        # Configuration dict; output='ba' and analog=False are fixed.
        config = dict(N=N, Wn=Wn, btype=btype,
                      analog=False, output='ba', norm=norm, fs=fs)
        self.dict = config
        self.ba = signal.bessel(**config)
| [
"numpy.abs",
"numpy.sqrt",
"scipy.signal.iirfilter",
"matplotlib.pyplot.ylabel",
"scipy.signal.filtfilt",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"numpy.array",
"scipy.signal.bessel",
"scipy.signal.freqz"
] | [((1912, 1939), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1927, 1939), True, 'import scipy.signal as signal\n'), ((2030, 2072), 'scipy.signal.freqz', 'signal.freqz', (['*self.ba'], {'fs': "self.dict['fs']"}), "(*self.ba, fs=self.dict['fs'])\n", (2042, 2072), True, 'import scipy.signal as signal\n'), ((2199, 2222), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency"""'], {}), "('Frequency')\n", (2209, 2222), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2251), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""factor"""'], {}), "('factor')\n", (2241, 2251), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2750), 'scipy.signal.iirfilter', 'signal.iirfilter', ([], {}), '(**self.dict)\n', (2737, 2750), True, 'import scipy.signal as signal\n'), ((3180, 3206), 'scipy.signal.bessel', 'signal.bessel', ([], {}), '(**self.dict)\n', (3193, 3206), True, 'import scipy.signal as signal\n'), ((1327, 1341), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1333, 1341), True, 'import numpy as np\n'), ((1415, 1430), 'numpy.sqrt', 'np.sqrt', (['npower'], {}), '(npower)\n', (1422, 1430), True, 'import numpy as np\n'), ((2180, 2189), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (2186, 2189), True, 'import numpy as np\n'), ((989, 1005), 'numpy.array', 'np.array', (['d_list'], {}), '(d_list)\n', (997, 1005), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pandas as pd
def plot_clusters(data):
    """Scatter-plot 2-D points coloured by their cluster assignment.

    ``data`` is an iterable of (x, y, cluster) triples.
    """
    xs, ys, clusters = zip(*data)
    plt.scatter(xs, ys, c=clusters, cmap=cm.Dark2)
    k = len(np.unique(clusters))
    plt.title("Clustering, k = {}".format(k))
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.show()
def plot_sse(data):
    """Plot the sum of squared errors against the number of clusters k.

    ``data`` maps k -> SSE.
    """
    ks, errors = zip(*data.items())
    plt.plot(ks, errors)
    plt.title("Sum of Squared Error")
    plt.xlabel("k")
    plt.ylabel("SSE")
    plt.show()
def plot_linear(x, y):
    """Plot a fitted line, hiding the axis ticks."""
    plt.plot(x, y, color='blue', linewidth=3)
    # Suppress tick marks on both axes.
    plt.yticks(())
    plt.xticks(())
    plt.show()
def plot_roc(title, label, proba):
    """Plot the ROC curve for a binary classifier.

    Arguments:
        title {str} -- plot title
        label {np.ndarray} -- true labels (0/1)
        proba {np.ndarray} -- predicted probability of the positive class
    """
    paired = pd.concat((pd.DataFrame(proba), pd.DataFrame(label)), axis=1)
    paired.columns = ("proba", "target")
    ordered = paired.sort_values(paired.columns[0], ascending=False).values
    # One (TPR, FPR) point per observed probability, highest first.
    points = pd.DataFrame([_calc_rates(ordered, row[0]) for row in ordered])
    plt.plot(points[1].values, points[0].values)
    plt.ylim([0, 1])
    plt.xlim([0, 1])
    plt.xticks(np.arange(0, 1.1, 0.1))
    plt.yticks(np.arange(0, 1.1, 0.1))
    plt.title(title)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive')
    plt.show()
# Helper Functions
def _calc_rates(matrix, thresh):
    """Compute TPR and FPR at one decision threshold.

    Arguments:
        matrix {np.ndarray} -- rows of (predicted probability, true label)
        thresh {float} -- probabilities >= thresh count as predicted positive

    Returns:
        (float, float) -- true positive rate, false positive rate
    """
    tp = fp = tn = fn = 0
    for prob, truth in matrix:
        predicted_positive = prob >= thresh
        if predicted_positive and truth == 1:
            tp += 1
        elif predicted_positive:
            fp += 1
        elif truth == 0:
            tn += 1
        else:
            fn += 1
    return tp / (tp + fn), fp / (fp + tn)
| [
"numpy.unique",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"numpy.arang... | [((159, 202), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'cluster', 'cmap': 'cm.Dark2'}), '(x, y, c=cluster, cmap=cm.Dark2)\n', (170, 202), True, 'import matplotlib.pyplot as plt\n'), ((277, 293), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {}), "('x1')\n", (287, 293), True, 'import matplotlib.pyplot as plt\n'), ((298, 314), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x2"""'], {}), "('x2')\n", (308, 314), True, 'import matplotlib.pyplot as plt\n'), ((319, 329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (327, 329), True, 'import matplotlib.pyplot as plt\n'), ((387, 403), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 'sse'], {}), '(k, sse)\n', (395, 403), True, 'import matplotlib.pyplot as plt\n'), ((408, 441), 'matplotlib.pyplot.title', 'plt.title', (['"""Sum of Squared Error"""'], {}), "('Sum of Squared Error')\n", (417, 441), True, 'import matplotlib.pyplot as plt\n'), ((446, 461), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (456, 461), True, 'import matplotlib.pyplot as plt\n'), ((466, 483), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (476, 483), True, 'import matplotlib.pyplot as plt\n'), ((488, 498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (496, 498), True, 'import matplotlib.pyplot as plt\n'), ((546, 587), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""blue"""', 'linewidth': '(3)'}), "(x, y, color='blue', linewidth=3)\n", (554, 587), True, 'import matplotlib.pyplot as plt\n'), ((593, 607), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (603, 607), True, 'import matplotlib.pyplot as plt\n'), ((612, 626), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (622, 626), True, 'import matplotlib.pyplot as plt\n'), ((632, 642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (640, 642), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1190), 'pandas.DataFrame', 
'pd.DataFrame', (['rates'], {}), '(rates)\n', (1183, 1190), True, 'import pandas as pd\n'), ((1195, 1229), 'matplotlib.pyplot.plot', 'plt.plot', (['r[1].values', 'r[0].values'], {}), '(r[1].values, r[0].values)\n', (1203, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1234, 1250), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (1242, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1270), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (1262, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1367), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1360, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (1382, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive"""'], {}), "('True Positive')\n", (1420, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1442, 1452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1450, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1285, 1307), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1294, 1307), True, 'import numpy as np\n'), ((1324, 1346), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1333, 1346), True, 'import numpy as np\n'), ((878, 897), 'pandas.DataFrame', 'pd.DataFrame', (['proba'], {}), '(proba)\n', (890, 897), True, 'import pandas as pd\n'), ((899, 918), 'pandas.DataFrame', 'pd.DataFrame', (['label'], {}), '(label)\n', (911, 918), True, 'import pandas as pd\n'), ((251, 269), 'numpy.unique', 'np.unique', (['cluster'], {}), '(cluster)\n', (260, 269), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PROGRAMMER: <NAME>
# DATE CREATED: Feb. 23, 2019
# PURPOSE: Predict flower name from an image with predict.py along with the probability of that name.
# Can also predict the Top K classes and use category name mapping to show the real names.
import numpy as np
import torch
import torchvision
from torchvision import models, utils
import torch.nn.functional as F
import helpers as hlp
import checkpoint_manager
from get_input_args import get_input_args_predict
def main():
    """Entry point: parse CLI arguments, load a checkpoint, run inference,
    optionally map class ids to category names, and print the topK results.
    """
    # Get command line arguments. (The original pre-initialized five locals
    # to None/False and then immediately overwrote them here - dead stores,
    # removed.)
    in_args = get_input_args_predict()
    c_img_path = in_args.img_path
    c_chk_path = in_args.checkpoint
    c_top_k = in_args.top_k
    c_mapping_path = in_args.category_names
    c_use_gpu = in_args.gpu
    print("Running predict.py with the following arguments: ")
    print("Image Path: {}\nCheckpoint: {}\nTop K: {}\nMapping: {}\nGPU: {}".format(c_img_path, c_chk_path, c_top_k,
                                                                                  c_mapping_path, c_use_gpu))
    # load the checkpoint
    model = checkpoint_manager.load_checkpoint(c_chk_path)
    # set the device (gpu or cpu); cuda only when available AND requested
    device = torch.device("cuda" if torch.cuda.is_available() and c_use_gpu else "cpu")
    # call the predict function and get the topK (c_top_k) classes and probabilities
    probs, classes = predict(c_img_path, model, device, c_top_k)
    # map class ids to real category names when a mapping file was supplied
    if c_mapping_path is not None:
        cat_to_name = hlp.open_label_mapping_file(c_mapping_path)
        classes = map_cat_to_real_names(classes, cat_to_name)
    # print the results
    print_results(probs, classes)
def predict(image_path, model, device, topk):
    """
    Predict the class (or classes) of an image using a trained deep learning model.
    Parameters:
     - image_path: path to the image for which we will predict the class(es)
     - model: the model to be used
     - device to be used: gpu or cpu
     - topk: the number of K most likely classes we want to calculate/return
    Returns:
     - top_probs: the top probabilities
     - classes: the top classes
    """
    # inference mode on the requested device
    model.eval()
    model.to(device)
    # preprocess the image and add a leading batch dimension
    batch = np.expand_dims(hlp.process_image(image_path), 0)
    batch_tensor = torch.from_numpy(batch).to(device)
    # gradients are not needed for a forward pass
    with torch.no_grad():
        logits = model.forward(batch_tensor)
        # convert logits to class probabilities
        probabilities = F.softmax(logits, dim=1)
        # the K largest probabilities and their indices
        top = probabilities.topk(topk)
    top_probs = top[0].cpu().detach().numpy().tolist()[0]
    top_indices = top[1].cpu().detach().numpy().tolist()[0]
    # Invert the class->index mapping to recover the class labels
    # (https://knowledge.udacity.com/questions/31597)
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}
    classes = [idx_to_class[idx] for idx in top_indices]
    return top_probs, classes
def map_cat_to_real_names(classes, cat_to_name):
    """
    Maps class categories to real names
    Parameters:
     - classes: the classes (list of ids)
     - cat_to_name: dictionary mapping the integer encoded categories to the actual names of the flowers
    Returns:
     - labels: the classes mapped to their actual names
    """
    return [cat_to_name[cls] for cls in classes]
def print_results(probs, classes):
    """
    Prints the results of predict.py
    Parameters:
     - probs: the probabilities to print
     - classes: the classes to print
    Returns:
     - None
    """
    print()
    print("Prediction Results: ")
    print("=================================")
    # one "class, probability" line per prediction
    for i, prob in enumerate(probs):
        print("Class: {}, Probability: {}".format(classes[i], prob))
# Standard entry-point guard: run main() only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
| [
"checkpoint_manager.load_checkpoint",
"get_input_args.get_input_args_predict",
"torch.from_numpy",
"torch.cuda.is_available",
"helpers.process_image",
"numpy.expand_dims",
"torch.no_grad",
"helpers.open_label_mapping_file",
"torch.nn.functional.softmax"
] | [((744, 768), 'get_input_args.get_input_args_predict', 'get_input_args_predict', ([], {}), '()\n', (766, 768), False, 'from get_input_args import get_input_args_predict\n'), ((1195, 1241), 'checkpoint_manager.load_checkpoint', 'checkpoint_manager.load_checkpoint', (['c_chk_path'], {}), '(c_chk_path)\n', (1229, 1241), False, 'import checkpoint_manager\n'), ((2497, 2526), 'helpers.process_image', 'hlp.process_image', (['image_path'], {}), '(image_path)\n', (2514, 2526), True, 'import helpers as hlp\n'), ((2539, 2563), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (2553, 2563), True, 'import numpy as np\n'), ((2586, 2609), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2602, 2609), False, 'import torch\n'), ((2945, 2969), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (2954, 2969), True, 'import torch.nn.functional as F\n'), ((1688, 1731), 'helpers.open_label_mapping_file', 'hlp.open_label_mapping_file', (['c_mapping_path'], {}), '(c_mapping_path)\n', (1715, 1731), True, 'import helpers as hlp\n'), ((2710, 2725), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2723, 2725), False, 'import torch\n'), ((1313, 1338), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1336, 1338), False, 'import torch\n')] |
from typing import Dict, List, Optional, Tuple
from datetime import datetime, timedelta
from cachetools import TTLCache
from pandas import DataFrame, Series
import numpy as np
## Indicator libs
import talib.abstract as ta
from finta import TA as fta
import technical.indicators as ftt
from technical.indicators import hull_moving_average
from technical.indicators import PMAX, zema
from technical.indicators import cmf
## FT stuffs
from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter
import freqtrade.vendor.qtpylib.indicators as qtpylib
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.persistence import Trade
from skopt.space import Dimension
### @Rallipanos mod
"""
NOTE:
docker-compose run --rm freqtrade hyperopt -c user_data/config-backtesting.json --strategy IchimokuHaulingV8a --hyperopt-loss SortinoHyperOptLossDaily --spaces roi buy sell --timerange=1624940400-1630447200 -j 4 -e 1000
"""
class MacheteV8bRallimod2(IStrategy):
    """Ichimoku/SSL hybrid freqtrade strategy with hyperoptable buy/sell
    offsets, a trend-aware dynamic ROI table and an indicator-driven custom
    stoploss.

    Buying is delegated to two switchable sub-signals (SMA-offset and
    BB/RSI); selling triggers on an SSL cross-down on the 1h informative
    timeframe combined with Ichimoku / offset exit conditions.

    Fixes vs. the original: removed two duplicated (identical) indicator-copy
    assignments in populate_indicators and an unused local in
    min_roi_reached_dynamic. No behavioural change.
    """
    # Buy hyperspace params:
    buy_params = {
        "buy_should_use_get_buy_signal_offset_strategy": True,
        "buy_should_use_get_buy_signal_bbrsi_strategy": False,
        "ewo_high": 2.327,
        "rsi_buy": 45,
        "base_nb_candles_buy": 14,
        "low_offset": 0.965
    }
    # NOTE(review): buy_params["low_offset"] has no matching hyperopt
    # parameter and is never read in this class - confirm intent.
    # Sell hyperspace params:
    sell_params = {
        "cstp_bail_how": "roc",
        "cstp_bail_roc": -0.032,
        "cstp_bail_time": 1108,
        "cstp_bb_trailing_input": "bb_lowerband_neutral_inf",
        "cstp_threshold": -0.036,
        "cstp_trailing_max_stoploss": 0.054,
        "cstp_trailing_only_offset_is_reached": 0.09,
        "cstp_trailing_stop_profit_devider": 2,
        "droi_pullback": True,
        "droi_pullback_amount": 0.01,
        "droi_pullback_respect_table": False,
        "droi_trend_type": "any",
        "base_nb_candles_sell": 24,
        "high_offset": 0.991,
        "high_offset_2": 0.995
    }
    # ROI table:
    minimal_roi = {
        "0": 0.279,
        "92": 0.109,
        "245": 0.059,
        "561": 0.02
    }
    # Stoploss:
    stoploss = -0.05#-0.046
    # Trailing stop:
    trailing_stop = False
    #trailing_stop_positive = 0.0247
    #trailing_stop_positive_offset = 0.0248
    #trailing_only_offset_is_reached = True
    use_custom_stoploss = False
    # buy signal
    buy_should_use_get_buy_signal_offset_strategy = CategoricalParameter([True, False], default=buy_params['buy_should_use_get_buy_signal_offset_strategy'], space='buy', optimize=True)
    buy_should_use_get_buy_signal_bbrsi_strategy = CategoricalParameter([True, False], default=buy_params['buy_should_use_get_buy_signal_bbrsi_strategy'], space='buy', optimize=True)
    # Dynamic ROI
    droi_trend_type = CategoricalParameter(['rmi', 'ssl', 'candle', 'any'], default=sell_params['droi_trend_type'], space='sell', optimize=True)
    droi_pullback = CategoricalParameter([True, False], default=sell_params['droi_pullback'], space='sell', optimize=True)
    droi_pullback_amount = DecimalParameter(0.005, 0.02, default=sell_params['droi_pullback_amount'], space='sell')
    droi_pullback_respect_table = CategoricalParameter([True, False], default=sell_params['droi_pullback_respect_table'], space='sell', optimize=True)
    # Custom Stoploss
    cstp_threshold = DecimalParameter(-0.05, 0, default=sell_params['cstp_threshold'], space='sell')
    cstp_bail_how = CategoricalParameter(['roc', 'time', 'any'], default=sell_params['cstp_bail_how'], space='sell', optimize=True)
    cstp_bail_roc = DecimalParameter(-0.05, -0.01, default=sell_params['cstp_bail_roc'], space='sell')
    cstp_bail_time = IntParameter(720, 1440, default=sell_params['cstp_bail_time'], space='sell')
    cstp_trailing_only_offset_is_reached = DecimalParameter(0.01, 0.06, default=sell_params['cstp_trailing_only_offset_is_reached'], space='sell')
    cstp_trailing_stop_profit_devider = IntParameter(2, 4, default=sell_params['cstp_trailing_stop_profit_devider'], space='sell')
    cstp_trailing_max_stoploss = DecimalParameter(0.02, 0.08, default=sell_params['cstp_trailing_max_stoploss'], space='sell')
    cstp_bb_trailing_input = CategoricalParameter(['bb_lowerband_trend', 'bb_lowerband_trend_inf', 'bb_lowerband_neutral', 'bb_lowerband_neutral_inf', 'bb_upperband_neutral_inf'], default=sell_params['cstp_bb_trailing_input'], space='sell', optimize=True)
    fast_ewo = 50
    slow_ewo = 200
    ewo_high = DecimalParameter(2.0, 12.0, default=buy_params['ewo_high'], space='buy', optimize=True)
    rsi_buy = IntParameter(30, 70, default=buy_params['rsi_buy'], space='buy', optimize=True)
    base_nb_candles_buy = IntParameter(5, 80, default=buy_params['base_nb_candles_buy'], space='buy', optimize=True)
    base_nb_candles_sell = IntParameter(5, 80, default=sell_params['base_nb_candles_sell'], space='sell', optimize=True)
    high_offset = DecimalParameter(0.95, 1.1, default=sell_params['high_offset'], space='sell', optimize=True)
    high_offset_2 = DecimalParameter(0.99, 1.5, default=sell_params['high_offset_2'], space='sell', optimize=True)
    # nested hyperopt class
    class HyperOpt:
        # defining as dummy, so that no error is thrown about missing
        # sell indicator space when hyperopting for all spaces
        @staticmethod
        def indicator_space() -> List[Dimension]:
            return []
    custom_trade_info = {}
    custom_current_price_cache: TTLCache = TTLCache(maxsize=100, ttl=300) # 5 minutes
    # run "populate_indicators" only for new candle
    process_only_new_candles = False
    # Experimental settings (configuration will overide these if set)
    use_sell_signal = True
    sell_profit_only = False
    ignore_roi_if_buy_signal = False
    startup_candle_count = 200#149
    use_dynamic_roi = True
    timeframe = '5m'
    informative_timeframe = '1h'
    # Optional order type mapping
    order_types = {
        'buy': 'limit',
        'sell': 'limit',
        'stoploss': 'market',
        'stoploss_on_exchange': False
    }
    def informative_pairs(self):
        """Request 1h informative candles for every whitelisted pair."""
        pairs = self.dp.current_whitelist()
        informative_pairs = [(pair, self.informative_timeframe) for pair in pairs]
        return informative_pairs
    #
    # Processing indicators
    #
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Attach 5m buy indicators plus merged 1h informative columns (suffixed _inf)."""
        self.custom_trade_info[metadata['pair']] = self.populate_trades(metadata['pair'])
        if not self.dp:
            return dataframe
        dataframe = self.get_buy_signal_indicators(dataframe, metadata)
        informative_tmp = self.dp.get_pair_dataframe(pair=metadata['pair'], timeframe=self.informative_timeframe)
        informative = self.get_market_condition_indicators(informative_tmp.copy(), metadata)
        informative = self.get_custom_stoploss_indicators(informative, metadata)
        dataframe = merge_informative_pair(dataframe, informative, self.timeframe, self.informative_timeframe, ffill=True)
        dataframe.rename(columns=lambda s: s.replace("_{}".format(self.informative_timeframe), "_inf"), inplace=True)
        # Slam some indicators into the trade_info dict so we can dynamic roi and custom stoploss in backtest
        if self.dp.runmode.value in ('backtest', 'hyperopt'):
            # Fixed: the original assigned the bb_lowerband_trend_inf and
            # bb_lowerband_neutral_inf entries twice each (identical no-ops);
            # the duplicates were dropped.
            self.custom_trade_info[metadata['pair']]['roc_inf'] = dataframe[['date', 'roc_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['atr_inf'] = dataframe[['date', 'atr_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['sroc_inf'] = dataframe[['date', 'sroc_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['ssl-dir_inf'] = dataframe[['date', 'ssl-dir_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['rmi-up-trend_inf'] = dataframe[['date', 'rmi-up-trend_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['candle-up-trend_inf'] = dataframe[['date', 'candle-up-trend_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['bb_lowerband_trend_inf'] = dataframe[['date', 'bb_lowerband_trend_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['bb_lowerband_neutral_inf'] = dataframe[['date', 'bb_lowerband_neutral_inf']].copy().set_index('date')
            self.custom_trade_info[metadata['pair']]['bb_upperband_neutral_inf'] = dataframe[['date', 'bb_upperband_neutral_inf']].copy().set_index('date')
        return dataframe
    def get_buy_signal_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """5m indicators consumed by the two buy sub-signals."""
        dataframe['sma_9'] = ta.SMA(dataframe, timeperiod=9)
        dataframe['EWO'] = EWO(dataframe, self.fast_ewo, self.slow_ewo)
        for val in self.base_nb_candles_buy.range:
            dataframe[f'ma_buy_{val}'] = ta.EMA(dataframe, timeperiod=val)
        for val in self.base_nb_candles_sell.range:
            dataframe[f'ma_sell_{val}'] = ta.EMA(dataframe, timeperiod=val)
        dataframe['rsi_fast'] = ta.RSI(dataframe, timeperiod=4)
        dataframe['rsi_slow'] = ta.RSI(dataframe, timeperiod=20)
        dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
        bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
        dataframe['bb_lowerband'] = bollinger['lower']
        dataframe['hma_5'] = hull_moving_average(dataframe, 5, 'close')
        dataframe['ema_25'] = ta.EMA(dataframe, timeperiod=25)
        dataframe['ema_60'] = ta.EMA(dataframe, timeperiod=60)
        dataframe['uptrend_5m'] = dataframe['ema_25'] > dataframe['ema_60']
        return dataframe
    def get_market_condition_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Ichimoku/SSL trend indicators computed on the informative timeframe."""
        displacement = 30
        ichimoku = ftt.ichimoku(dataframe, conversion_line_period=20, base_line_periods=60, laggin_span=120, displacement=displacement)
        dataframe['chikou_span'] = ichimoku['chikou_span']
        dataframe['tenkan_sen'] = ichimoku['tenkan_sen']
        dataframe['kijun_sen'] = ichimoku['kijun_sen']
        dataframe['senkou_a'] = ichimoku['senkou_span_a']
        dataframe['senkou_b'] = ichimoku['senkou_span_b']
        dataframe['leading_senkou_span_a'] = ichimoku['leading_senkou_span_a']
        dataframe['leading_senkou_span_b'] = ichimoku['leading_senkou_span_b']
        dataframe['cloud_green'] = ichimoku['cloud_green'] * 1
        dataframe['cloud_red'] = ichimoku['cloud_red'] * -1
        ssl = SSLChannels_ATR(dataframe, 10)
        dataframe['sslDown'] = ssl[0]
        dataframe['sslUp'] = ssl[1]
        #dataframe['vfi'] = fta.VFI(dataframe, period=14)
        # Summary indicators
        dataframe['future_green'] = ichimoku['cloud_green'].shift(displacement).fillna(0).astype('int') * 2
        dataframe['chikou_high'] = ((dataframe['chikou_span'] > dataframe['senkou_a']) & (dataframe['chikou_span'] > dataframe['senkou_b'])).shift(displacement).fillna(0).astype('int')
        dataframe['go_long'] = ((dataframe['tenkan_sen'] > dataframe['kijun_sen']) & (dataframe['close'] > dataframe['leading_senkou_span_a']) & (dataframe['close'] > dataframe['leading_senkou_span_b']) & (dataframe['future_green'] > 0) & (dataframe['chikou_high'] > 0)).fillna(0).astype('int') * 3
        dataframe['max'] = dataframe['high'].rolling(3).max()
        dataframe['min'] = dataframe['low'].rolling(6).min()
        dataframe['upper'] = np.where(dataframe['max'] > dataframe['max'].shift(),1,0)
        dataframe['lower'] = np.where(dataframe['min'] < dataframe['min'].shift(),1,0)
        dataframe['up_trend'] = np.where(dataframe['upper'].rolling(5, min_periods=1).sum() != 0,1,0)
        dataframe['dn_trend'] = np.where(dataframe['lower'].rolling(5, min_periods=1).sum() != 0,1,0)
        return dataframe
    def get_custom_stoploss_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Bollinger/ATR/ROC/RMI inputs consumed by the custom stoploss and dynamic ROI."""
        bollinger_neutral = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=1)
        dataframe['bb_lowerband_neutral'] = bollinger_neutral['lower']
        dataframe['bb_middleband_neutral'] = bollinger_neutral['mid']
        dataframe['bb_upperband_neutral'] = bollinger_neutral['upper']
        bollinger_trend = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
        dataframe['bb_lowerband_trend'] = bollinger_trend['lower']
        dataframe['bb_middleband_trend'] = bollinger_trend['mid']
        dataframe['bb_upperband_trend'] = bollinger_trend['upper']
        dataframe['atr'] = ta.ATR(dataframe, timeperiod=14)
        dataframe['roc'] = ta.ROC(dataframe, timeperiod=9)
        dataframe['rmi'] = RMI(dataframe, length=24, mom=5)
        ssldown, sslup = SSLChannels_ATR(dataframe, length=21)
        dataframe['sroc'] = SROC(dataframe, roclen=21, emalen=13, smooth=21)
        dataframe['ssl-dir'] = np.where(sslup > ssldown,'up','down')
        dataframe['rmi-up'] = np.where(dataframe['rmi'] >= dataframe['rmi'].shift(),1,0)
        dataframe['rmi-up-trend'] = np.where(dataframe['rmi-up'].rolling(5).sum() >= 3,1,0)
        dataframe['candle-up'] = np.where(dataframe['close'] >= dataframe['close'].shift(),1,0)
        dataframe['candle-up-trend'] = np.where(dataframe['candle-up'].rolling(5).sum() >= 3,1,0)
        return dataframe
    #
    # Processing buy signals
    #
    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Buy when either of the two (hyperopt-switchable) sub-signals fires."""
        dataframe.loc[
            (
                (self.get_buy_signal_offset_strategy(dataframe) == True)
                |
                (self.get_buy_signal_bbrsi_strategy(dataframe) == True)
            )
            #(dataframe['sslUp_inf'] > dataframe['sslDown_inf'])
            ,'buy'] = 1
        return dataframe
    def get_buy_signal_offset_strategy(self, dataframe: DataFrame):
        """EWO/RSI buy signal below an offset EMA (enabled via hyperopt toggle)."""
        # NOTE(review): this uses a hard-coded 0.970 offset instead of
        # buy_params['low_offset'] (0.965) - confirm which is intended.
        signal = (
            (self.buy_should_use_get_buy_signal_offset_strategy.value == True) &
            (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&
            (dataframe['rsi_fast']< dataframe['rsi_slow'])&
            (dataframe['rsi_fast'] <35)&
            (dataframe['rsi_fast'] >4)&
            (dataframe['EWO'] > self.ewo_high.value) &
            (dataframe['close'] < ta.EMA(dataframe['close'], timeperiod = 14) * 0.970) &
            (dataframe['rsi'] < self.rsi_buy.value) &
            (dataframe['volume'] > 0)
        )
        return signal
    def get_buy_signal_bbrsi_strategy(self, dataframe: DataFrame):
        """Bollinger-lowerband/RSI dip-buy while the 1h SSL is up (hyperopt toggle)."""
        signal = (
            (self.buy_should_use_get_buy_signal_bbrsi_strategy.value == True) &
            (dataframe['sslUp_inf'] > dataframe['sslDown_inf'])&
            (dataframe['uptrend_5m'] == 0)&
            (dataframe['rsi'] < 40) &
            (dataframe['rsi_fast']< dataframe['rsi_slow'])&
            (dataframe['close'].shift(1) < dataframe['bb_lowerband']*1)&
            (dataframe['EWO'] > self.ewo_high.value) &
            (dataframe['volume'] > 0)
        )
        return signal
    #
    # Processing sell signals
    #
    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Sell on a 1h SSL cross-down combined with an Ichimoku or offset exit."""
        dataframe.loc[
            (qtpylib.crossed_above(dataframe['sslDown_inf'], dataframe['sslUp_inf']))
            & (
                (qtpylib.crossed_below(dataframe['tenkan_sen_inf'], dataframe['kijun_sen_inf']))
                |(qtpylib.crossed_below(dataframe['close_inf'], dataframe['kijun_sen_inf']))|
                (
                    (dataframe['close']>dataframe['sma_9'])&
                    (dataframe['close'] > (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset_2.value)) &
                    #(dataframe['rsi']>150)&
                    (dataframe['volume'] > 0)&
                    (dataframe['rsi_fast']>dataframe['rsi_slow'])
                )
            ) #&
            # NOTE: I keep the volume checks of feels like it has not much benifit when trading leverage tokens, maybe im wrong!?
            #(dataframe['vfi'] < 0.0) &
            #(dataframe['volume'] > 0)
            ,'sell'] = 1
        return dataframe
    #
    # Custom Stoploss
    #
    def custom_stoploss(self, pair: str, trade: 'Trade', current_time: datetime, current_rate: float, current_profit: float, **kwargs) -> float:
        """Indicator-driven stoploss: SROC/time bailouts, a Bollinger trailing
        floor before the trailing offset is reached, then a profit-scaled trail."""
        trade_dur = int((current_time.timestamp() - trade.open_date_utc.timestamp()) // 60)
        if self.config['runmode'].value in ('live', 'dry_run'):
            dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
            sroc = dataframe['sroc_inf'].iat[-1]
            bb_trailing = dataframe[self.cstp_bb_trailing_input.value].iat[-1]
        # If in backtest or hyperopt, get the indicator values out of the trades dict (Thanks @JoeSchr!)
        else:
            sroc = self.custom_trade_info[trade.pair]['sroc_inf'].loc[current_time]['sroc_inf']
            bb_trailing = self.custom_trade_info[trade.pair][self.cstp_bb_trailing_input.value].loc[current_time][self.cstp_bb_trailing_input.value]
        if current_profit < self.cstp_threshold.value:
            if self.cstp_bail_how.value == 'roc' or self.cstp_bail_how.value == 'any':
                # Dynamic bailout based on rate of change
                if (sroc/100) <= self.cstp_bail_roc.value:
                    return 0.001
            if self.cstp_bail_how.value == 'time' or self.cstp_bail_how.value == 'any':
                # Dynamic bailout based on time
                if trade_dur > self.cstp_bail_time.value:
                    return 0.001
        if current_profit < self.cstp_trailing_only_offset_is_reached.value:
            if current_rate <= bb_trailing:
                return 0.001
            else:
                return -1
        desired_stoploss = current_profit / self.cstp_trailing_stop_profit_devider.value
        return max(min(desired_stoploss, self.cstp_trailing_max_stoploss.value), 0.025)
    #
    # Dynamic ROI
    #
    def min_roi_reached_dynamic(self, trade: Trade, current_profit: float, current_time: datetime, trade_dur: int) -> Tuple[Optional[int], Optional[float]]:
        """Return (trade_dur, min_roi): the table ROI, forced to 100 while the
        1h trend is up, optionally relaxed again on a pullback from the peak."""
        # (dropped an unused local that aliased self.minimal_roi here)
        _, table_roi = self.min_roi_reached_entry(trade_dur)
        # see if we have the data we need to do this, otherwise fall back to the standard table
        if self.custom_trade_info and trade and trade.pair in self.custom_trade_info:
            if self.config['runmode'].value in ('live', 'dry_run'):
                dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=trade.pair, timeframe=self.timeframe)
                rmi_trend = dataframe['rmi-up-trend_inf'].iat[-1]
                candle_trend = dataframe['candle-up-trend_inf'].iat[-1]
                ssl_dir = dataframe['ssl-dir_inf'].iat[-1]
            # If in backtest or hyperopt, get the indicator values out of the trades dict (Thanks @JoeSchr!)
            else:
                rmi_trend = self.custom_trade_info[trade.pair]['rmi-up-trend_inf'].loc[current_time]['rmi-up-trend_inf']
                candle_trend = self.custom_trade_info[trade.pair]['candle-up-trend_inf'].loc[current_time]['candle-up-trend_inf']
                ssl_dir = self.custom_trade_info[trade.pair]['ssl-dir_inf'].loc[current_time]['ssl-dir_inf']
            min_roi = table_roi
            max_profit = trade.calc_profit_ratio(trade.max_rate)
            pullback_value = (max_profit - self.droi_pullback_amount.value)
            in_trend = False
            if self.droi_trend_type.value == 'rmi' or self.droi_trend_type.value == 'any':
                if rmi_trend == 1:
                    in_trend = True
            if self.droi_trend_type.value == 'ssl' or self.droi_trend_type.value == 'any':
                if ssl_dir == 'up':
                    in_trend = True
            if self.droi_trend_type.value == 'candle' or self.droi_trend_type.value == 'any':
                if candle_trend == 1:
                    in_trend = True
            # Force the ROI value high if in trend
            if (in_trend == True):
                min_roi = 100
                # If pullback is enabled, allow to sell if a pullback from peak has happened regardless of trend
                if self.droi_pullback.value == True and (current_profit < pullback_value):
                    if self.droi_pullback_respect_table.value == True:
                        min_roi = table_roi
                    else:
                        min_roi = current_profit / 1.5
        else:
            min_roi = table_roi
        return trade_dur, min_roi
    # Change here to allow loading of the dynamic_roi settings
    def min_roi_reached(self, trade: Trade, current_profit: float, current_time: datetime) -> bool:
        # NOTE(review): trade duration is divided by 120 (2-minute units)
        # instead of the conventional 60, which halves the effective ROI
        # table timings - confirm this "Rallimod" tweak is intentional.
        trade_dur = int((current_time.timestamp() - trade.open_date_utc.timestamp()) // 120)
        if self.use_dynamic_roi:
            _, roi = self.min_roi_reached_dynamic(trade, current_profit, current_time, trade_dur)
        else:
            _, roi = self.min_roi_reached_entry(trade_dur)
        if roi is None:
            return False
        else:
            return current_profit > roi
    # Get the current price from the exchange (or local cache)
    def get_current_price(self, pair: str, refresh: bool) -> float:
        if not refresh:
            rate = self.custom_current_price_cache.get(pair)
            # Check if cache has been invalidated
            if rate:
                return rate
        ask_strategy = self.config.get('ask_strategy', {})
        if ask_strategy.get('use_order_book', False):
            ob = self.dp.orderbook(pair, 1)
            rate = ob[f"{ask_strategy['price_side']}s"][0][0]
        else:
            ticker = self.dp.ticker(pair)
            rate = ticker['last']
        self.custom_current_price_cache[pair] = rate
        return rate
    #
    # Custom trade info
    #
    def populate_trades(self, pair: str) -> dict:
        """Refresh min/max rates for any open trade on this pair (live/dry only)."""
        # Initialize the trades dict if it doesn't exist, persist it otherwise
        if not pair in self.custom_trade_info:
            self.custom_trade_info[pair] = {}
        # init the temp dicts and set the trade stuff to false
        trade_data = {}
        trade_data['active_trade'] = False
        # active trade stuff only works in live and dry, not backtest
        if self.config['runmode'].value in ('live', 'dry_run'):
            # find out if we have an open trade for this pair
            active_trade = Trade.get_trades([Trade.pair == pair, Trade.is_open.is_(True),]).all()
            # if so, get some information
            if active_trade:
                # get current price and update the min/max rate
                current_rate = self.get_current_price(pair, True)
                active_trade[0].adjust_min_max_rates(current_rate, current_rate)
        return trade_data
#
# Custom indicators
#
def RMI(dataframe, *, length=20, mom=5):
    """
    Relative Momentum Index.
    Source: https://github.com/freqtrade/technical/blob/master/technical/indicators/indicators.py#L912
    """
    frame = dataframe.copy()
    # momentum gains/losses over `mom` candles, clipped at zero
    frame['maxup'] = (frame['close'] - frame['close'].shift(mom)).clip(lower=0)
    frame['maxdown'] = (frame['close'].shift(mom) - frame['close']).clip(lower=0)
    frame.fillna(0, inplace=True)
    # EMA-smoothed gains and losses, combined RSI-style
    frame["emaInc"] = ta.EMA(frame, price='maxup', timeperiod=length)
    frame["emaDec"] = ta.EMA(frame, price='maxdown', timeperiod=length)
    frame['RMI'] = np.where(frame['emaDec'] == 0, 0, 100 - 100 / (1 + frame["emaInc"] / frame["emaDec"]))
    return frame["RMI"]
def SSLChannels_ATR(dataframe, length=7):
    """
    SSL Channels with ATR: https://www.tradingview.com/script/SKHqWzql-SSL-ATR-channel/
    Credit to @JimmyNixx for python

    Returns (sslDown, sslUp) series. Fixed: np.NAN was removed in NumPy 2.0;
    use the canonical np.nan spelling instead.
    """
    df = dataframe.copy()
    df['ATR'] = ta.ATR(df, timeperiod=14)
    df['smaHigh'] = df['high'].rolling(length).mean() + df['ATR']
    df['smaLow'] = df['low'].rolling(length).mean() - df['ATR']
    # +1 above the upper band, -1 below the lower band, carried forward otherwise
    df['hlv'] = np.where(df['close'] > df['smaHigh'], 1, np.where(df['close'] < df['smaLow'], -1, np.nan))
    df['hlv'] = df['hlv'].ffill()
    df['sslDown'] = np.where(df['hlv'] < 0, df['smaHigh'], df['smaLow'])
    df['sslUp'] = np.where(df['hlv'] < 0, df['smaLow'], df['smaHigh'])
    return df['sslDown'], df['sslUp']
def SROC(dataframe, roclen=21, emalen=13, smooth=21):
    """Smoothed rate of change: ROC (period `smooth`) of an EMA (period `emalen`) of close.

    Note: the original also computed ta.ROC(df, timeperiod=roclen) into an
    unused local; that dead computation was removed. The roclen parameter is
    kept for interface compatibility with existing callers.
    """
    df = dataframe.copy()
    ema = ta.EMA(df, timeperiod=emalen)
    sroc = ta.ROC(ema, timeperiod=smooth)
    return sroc
def EWO(dataframe, ema_length=5, ema2_length=35):
    """Elliott Wave Oscillator: fast/slow EMA spread normalized by the low price, in percent."""
    frame = dataframe.copy()
    fast = ta.EMA(frame, timeperiod=ema_length)
    slow = ta.EMA(frame, timeperiod=ema2_length)
    return (fast - slow) / frame['low'] * 100
"freqtrade.strategy.DecimalParameter",
"freqtrade.vendor.qtpylib.indicators.typical_price",
"technical.indicators.ichimoku",
"freqtrade.vendor.qtpylib.indicators.crossed_below",
"freqtrade.persistence.Trade.is_open.is_",
"numpy.where",
"technical.indicators.hull_moving_average",
"talib.abstract.ATR",
... | [((2514, 2656), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (['[True, False]'], {'default': "buy_params['buy_should_use_get_buy_signal_offset_strategy']", 'space': '"""buy"""', 'optimize': '(True)'}), "([True, False], default=buy_params[\n 'buy_should_use_get_buy_signal_offset_strategy'], space='buy', optimize\n =True)\n", (2534, 2656), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((2699, 2840), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (['[True, False]'], {'default': "buy_params['buy_should_use_get_buy_signal_bbrsi_strategy']", 'space': '"""buy"""', 'optimize': '(True)'}), "([True, False], default=buy_params[\n 'buy_should_use_get_buy_signal_bbrsi_strategy'], space='buy', optimize=True\n )\n", (2719, 2840), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((2875, 3002), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (["['rmi', 'ssl', 'candle', 'any']"], {'default': "sell_params['droi_trend_type']", 'space': '"""sell"""', 'optimize': '(True)'}), "(['rmi', 'ssl', 'candle', 'any'], default=sell_params[\n 'droi_trend_type'], space='sell', optimize=True)\n", (2895, 3002), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3019, 3125), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (['[True, False]'], {'default': "sell_params['droi_pullback']", 'space': '"""sell"""', 'optimize': '(True)'}), "([True, False], default=sell_params['droi_pullback'],\n space='sell', optimize=True)\n", (3039, 3125), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3150, 3242), 
'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(0.005)', '(0.02)'], {'default': "sell_params['droi_pullback_amount']", 'space': '"""sell"""'}), "(0.005, 0.02, default=sell_params['droi_pullback_amount'],\n space='sell')\n", (3166, 3242), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3274, 3395), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (['[True, False]'], {'default': "sell_params['droi_pullback_respect_table']", 'space': '"""sell"""', 'optimize': '(True)'}), "([True, False], default=sell_params[\n 'droi_pullback_respect_table'], space='sell', optimize=True)\n", (3294, 3395), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3438, 3517), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(-0.05)', '(0)'], {'default': "sell_params['cstp_threshold']", 'space': '"""sell"""'}), "(-0.05, 0, default=sell_params['cstp_threshold'], space='sell')\n", (3454, 3517), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3539, 3655), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (["['roc', 'time', 'any']"], {'default': "sell_params['cstp_bail_how']", 'space': '"""sell"""', 'optimize': '(True)'}), "(['roc', 'time', 'any'], default=sell_params[\n 'cstp_bail_how'], space='sell', optimize=True)\n", (3559, 3655), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3672, 3759), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(-0.05)', '(-0.01)'], {'default': "sell_params['cstp_bail_roc']", 'space': '"""sell"""'}), "(-0.05, -0.01, default=sell_params['cstp_bail_roc'], space=\n 'sell')\n", 
(3688, 3759), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3777, 3853), 'freqtrade.strategy.IntParameter', 'IntParameter', (['(720)', '(1440)'], {'default': "sell_params['cstp_bail_time']", 'space': '"""sell"""'}), "(720, 1440, default=sell_params['cstp_bail_time'], space='sell')\n", (3789, 3853), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((3898, 4006), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(0.01)', '(0.06)'], {'default': "sell_params['cstp_trailing_only_offset_is_reached']", 'space': '"""sell"""'}), "(0.01, 0.06, default=sell_params[\n 'cstp_trailing_only_offset_is_reached'], space='sell')\n", (3914, 4006), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4043, 4137), 'freqtrade.strategy.IntParameter', 'IntParameter', (['(2)', '(4)'], {'default': "sell_params['cstp_trailing_stop_profit_devider']", 'space': '"""sell"""'}), "(2, 4, default=sell_params['cstp_trailing_stop_profit_devider'],\n space='sell')\n", (4055, 4137), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4168, 4266), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(0.02)', '(0.08)'], {'default': "sell_params['cstp_trailing_max_stoploss']", 'space': '"""sell"""'}), "(0.02, 0.08, default=sell_params[\n 'cstp_trailing_max_stoploss'], space='sell')\n", (4184, 4266), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4292, 4531), 'freqtrade.strategy.CategoricalParameter', 'CategoricalParameter', (["['bb_lowerband_trend', 'bb_lowerband_trend_inf', 
'bb_lowerband_neutral',\n 'bb_lowerband_neutral_inf', 'bb_upperband_neutral_inf']"], {'default': "sell_params['cstp_bb_trailing_input']", 'space': '"""sell"""', 'optimize': '(True)'}), "(['bb_lowerband_trend', 'bb_lowerband_trend_inf',\n 'bb_lowerband_neutral', 'bb_lowerband_neutral_inf',\n 'bb_upperband_neutral_inf'], default=sell_params[\n 'cstp_bb_trailing_input'], space='sell', optimize=True)\n", (4312, 4531), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4576, 4667), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(2.0)', '(12.0)'], {'default': "buy_params['ewo_high']", 'space': '"""buy"""', 'optimize': '(True)'}), "(2.0, 12.0, default=buy_params['ewo_high'], space='buy',\n optimize=True)\n", (4592, 4667), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4679, 4758), 'freqtrade.strategy.IntParameter', 'IntParameter', (['(30)', '(70)'], {'default': "buy_params['rsi_buy']", 'space': '"""buy"""', 'optimize': '(True)'}), "(30, 70, default=buy_params['rsi_buy'], space='buy', optimize=True)\n", (4691, 4758), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4786, 4880), 'freqtrade.strategy.IntParameter', 'IntParameter', (['(5)', '(80)'], {'default': "buy_params['base_nb_candles_buy']", 'space': '"""buy"""', 'optimize': '(True)'}), "(5, 80, default=buy_params['base_nb_candles_buy'], space='buy',\n optimize=True)\n", (4798, 4880), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((4907, 5005), 'freqtrade.strategy.IntParameter', 'IntParameter', (['(5)', '(80)'], {'default': "sell_params['base_nb_candles_sell']", 'space': '"""sell"""', 
'optimize': '(True)'}), "(5, 80, default=sell_params['base_nb_candles_sell'], space=\n 'sell', optimize=True)\n", (4919, 5005), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((5020, 5117), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(0.95)', '(1.1)'], {'default': "sell_params['high_offset']", 'space': '"""sell"""', 'optimize': '(True)'}), "(0.95, 1.1, default=sell_params['high_offset'], space=\n 'sell', optimize=True)\n", (5036, 5117), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((5134, 5233), 'freqtrade.strategy.DecimalParameter', 'DecimalParameter', (['(0.99)', '(1.5)'], {'default': "sell_params['high_offset_2']", 'space': '"""sell"""', 'optimize': '(True)'}), "(0.99, 1.5, default=sell_params['high_offset_2'], space=\n 'sell', optimize=True)\n", (5150, 5233), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((5587, 5617), 'cachetools.TTLCache', 'TTLCache', ([], {'maxsize': '(100)', 'ttl': '(300)'}), '(maxsize=100, ttl=300)\n', (5595, 5617), False, 'from cachetools import TTLCache\n'), ((24259, 24303), 'talib.abstract.EMA', 'ta.EMA', (['df'], {'price': '"""maxup"""', 'timeperiod': 'length'}), "(df, price='maxup', timeperiod=length)\n", (24265, 24303), True, 'import talib.abstract as ta\n'), ((24324, 24370), 'talib.abstract.EMA', 'ta.EMA', (['df'], {'price': '"""maxdown"""', 'timeperiod': 'length'}), "(df, price='maxdown', timeperiod=length)\n", (24330, 24370), True, 'import talib.abstract as ta\n'), ((24390, 24467), 'numpy.where', 'np.where', (["(df['emaDec'] == 0)", '(0)', "(100 - 100 / (1 + df['emaInc'] / df['emaDec']))"], {}), "(df['emaDec'] == 0, 0, 100 - 100 / (1 + df['emaInc'] / df['emaDec']))\n", (24398, 24467), True, 'import 
numpy as np\n'), ((24727, 24752), 'talib.abstract.ATR', 'ta.ATR', (['df'], {'timeperiod': '(14)'}), '(df, timeperiod=14)\n', (24733, 24752), True, 'import talib.abstract as ta\n'), ((25049, 25101), 'numpy.where', 'np.where', (["(df['hlv'] < 0)", "df['smaHigh']", "df['smaLow']"], {}), "(df['hlv'] < 0, df['smaHigh'], df['smaLow'])\n", (25057, 25101), True, 'import numpy as np\n'), ((25121, 25173), 'numpy.where', 'np.where', (["(df['hlv'] < 0)", "df['smaLow']", "df['smaHigh']"], {}), "(df['hlv'] < 0, df['smaLow'], df['smaHigh'])\n", (25129, 25173), True, 'import numpy as np\n'), ((25312, 25341), 'talib.abstract.ROC', 'ta.ROC', (['df'], {'timeperiod': 'roclen'}), '(df, timeperiod=roclen)\n', (25318, 25341), True, 'import talib.abstract as ta\n'), ((25353, 25382), 'talib.abstract.EMA', 'ta.EMA', (['df'], {'timeperiod': 'emalen'}), '(df, timeperiod=emalen)\n', (25359, 25382), True, 'import talib.abstract as ta\n'), ((25395, 25425), 'talib.abstract.ROC', 'ta.ROC', (['ema'], {'timeperiod': 'smooth'}), '(ema, timeperiod=smooth)\n', (25401, 25425), True, 'import talib.abstract as ta\n'), ((25537, 25570), 'talib.abstract.EMA', 'ta.EMA', (['df'], {'timeperiod': 'ema_length'}), '(df, timeperiod=ema_length)\n', (25543, 25570), True, 'import talib.abstract as ta\n'), ((25583, 25617), 'talib.abstract.EMA', 'ta.EMA', (['df'], {'timeperiod': 'ema2_length'}), '(df, timeperiod=ema2_length)\n', (25589, 25617), True, 'import talib.abstract as ta\n'), ((7080, 7187), 'freqtrade.strategy.merge_informative_pair', 'merge_informative_pair', (['dataframe', 'informative', 'self.timeframe', 'self.informative_timeframe'], {'ffill': '(True)'}), '(dataframe, informative, self.timeframe, self.\n informative_timeframe, ffill=True)\n', (7102, 7187), False, 'from freqtrade.strategy import IStrategy, merge_informative_pair, stoploss_from_open, IntParameter, DecimalParameter, CategoricalParameter\n'), ((9202, 9233), 'talib.abstract.SMA', 'ta.SMA', (['dataframe'], {'timeperiod': '(9)'}), '(dataframe, 
timeperiod=9)\n', (9208, 9233), True, 'import talib.abstract as ta\n'), ((9626, 9657), 'talib.abstract.RSI', 'ta.RSI', (['dataframe'], {'timeperiod': '(4)'}), '(dataframe, timeperiod=4)\n', (9632, 9657), True, 'import talib.abstract as ta\n'), ((9691, 9723), 'talib.abstract.RSI', 'ta.RSI', (['dataframe'], {'timeperiod': '(20)'}), '(dataframe, timeperiod=20)\n', (9697, 9723), True, 'import talib.abstract as ta\n'), ((9752, 9784), 'talib.abstract.RSI', 'ta.RSI', (['dataframe'], {'timeperiod': '(14)'}), '(dataframe, timeperiod=14)\n', (9758, 9784), True, 'import talib.abstract as ta\n'), ((9973, 10015), 'technical.indicators.hull_moving_average', 'hull_moving_average', (['dataframe', '(5)', '"""close"""'], {}), "(dataframe, 5, 'close')\n", (9992, 10015), False, 'from technical.indicators import hull_moving_average\n'), ((10047, 10079), 'talib.abstract.EMA', 'ta.EMA', (['dataframe'], {'timeperiod': '(25)'}), '(dataframe, timeperiod=25)\n', (10053, 10079), True, 'import talib.abstract as ta\n'), ((10111, 10143), 'talib.abstract.EMA', 'ta.EMA', (['dataframe'], {'timeperiod': '(60)'}), '(dataframe, timeperiod=60)\n', (10117, 10143), True, 'import talib.abstract as ta\n'), ((10399, 10519), 'technical.indicators.ichimoku', 'ftt.ichimoku', (['dataframe'], {'conversion_line_period': '(20)', 'base_line_periods': '(60)', 'laggin_span': '(120)', 'displacement': 'displacement'}), '(dataframe, conversion_line_period=20, base_line_periods=60,\n laggin_span=120, displacement=displacement)\n', (10411, 10519), True, 'import technical.indicators as ftt\n'), ((13198, 13230), 'talib.abstract.ATR', 'ta.ATR', (['dataframe'], {'timeperiod': '(14)'}), '(dataframe, timeperiod=14)\n', (13204, 13230), True, 'import talib.abstract as ta\n'), ((13259, 13290), 'talib.abstract.ROC', 'ta.ROC', (['dataframe'], {'timeperiod': '(9)'}), '(dataframe, timeperiod=9)\n', (13265, 13290), True, 'import talib.abstract as ta\n'), ((13526, 13565), 'numpy.where', 'np.where', (['(sslup > ssldown)', '"""up"""', 
'"""down"""'], {}), "(sslup > ssldown, 'up', 'down')\n", (13534, 13565), True, 'import numpy as np\n'), ((24943, 24991), 'numpy.where', 'np.where', (["(df['close'] < df['smaLow'])", '(-1)', 'np.NAN'], {}), "(df['close'] < df['smaLow'], -1, np.NAN)\n", (24951, 24991), True, 'import numpy as np\n'), ((9413, 9446), 'talib.abstract.EMA', 'ta.EMA', (['dataframe'], {'timeperiod': 'val'}), '(dataframe, timeperiod=val)\n', (9419, 9446), True, 'import talib.abstract as ta\n'), ((9545, 9578), 'talib.abstract.EMA', 'ta.EMA', (['dataframe'], {'timeperiod': 'val'}), '(dataframe, timeperiod=val)\n', (9551, 9578), True, 'import talib.abstract as ta\n'), ((9832, 9864), 'freqtrade.vendor.qtpylib.indicators.typical_price', 'qtpylib.typical_price', (['dataframe'], {}), '(dataframe)\n', (9853, 9864), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((12595, 12627), 'freqtrade.vendor.qtpylib.indicators.typical_price', 'qtpylib.typical_price', (['dataframe'], {}), '(dataframe)\n', (12616, 12627), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((12914, 12946), 'freqtrade.vendor.qtpylib.indicators.typical_price', 'qtpylib.typical_price', (['dataframe'], {}), '(dataframe)\n', (12935, 12946), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((15956, 16027), 'freqtrade.vendor.qtpylib.indicators.crossed_above', 'qtpylib.crossed_above', (["dataframe['sslDown_inf']", "dataframe['sslUp_inf']"], {}), "(dataframe['sslDown_inf'], dataframe['sslUp_inf'])\n", (15977, 16027), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((14969, 15010), 'talib.abstract.EMA', 'ta.EMA', (["dataframe['close']"], {'timeperiod': '(14)'}), "(dataframe['close'], timeperiod=14)\n", (14975, 15010), True, 'import talib.abstract as ta\n'), ((16064, 16142), 'freqtrade.vendor.qtpylib.indicators.crossed_below', 'qtpylib.crossed_below', (["dataframe['tenkan_sen_inf']", "dataframe['kijun_sen_inf']"], {}), "(dataframe['tenkan_sen_inf'], 
dataframe['kijun_sen_inf'])\n", (16085, 16142), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((16163, 16236), 'freqtrade.vendor.qtpylib.indicators.crossed_below', 'qtpylib.crossed_below', (["dataframe['close_inf']", "dataframe['kijun_sen_inf']"], {}), "(dataframe['close_inf'], dataframe['kijun_sen_inf'])\n", (16184, 16236), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((23480, 23503), 'freqtrade.persistence.Trade.is_open.is_', 'Trade.is_open.is_', (['(True)'], {}), '(True)\n', (23497, 23503), False, 'from freqtrade.persistence import Trade\n')] |
#!/usr/bin/env python
"""
Repartition a parquet file into tracts
Author: <NAME>
"""
import os
import multiprocessing as mp
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
import pandas as pd
from tqdm import tqdm
import lsst.geom
from lsst.daf.persistence import Butler
import desc_dc2_dm_data
__all__ = ["get_tract_patch", "repartition_into_tracts"]
def get_number_of_workers(input_n_cores=None):
    """Return a positive worker count for multiprocessing.

    Parameters
    ----------
    input_n_cores : any, optional
        Requested number of workers.  Anything ``int()`` accepts is honored
        (clamped to at least 1).  Otherwise fall back to the number of CPUs
        available to this process, or 1 when that cannot be determined.

    Returns
    -------
    int
        A worker count >= 1.
    """
    try:
        n = int(input_n_cores)
    except (TypeError, ValueError):
        try:
            # CPUs this process is allowed to run on (not available on all platforms)
            n = len(os.sched_getaffinity(0))
        except AttributeError:
            # os.cpu_count() may return None when the count is undetermined
            n = os.cpu_count() or 1
    # never hand back fewer than one worker
    return max(n, 1)
def get_tract_patch(skymap, ra, dec):
    """Return (tract_id, "i,j" patch string) for a sky position given in degrees."""
    point = lsst.geom.SpherePoint(ra, dec, lsst.geom.degrees)
    tract_info = skymap.findTract(point)
    patch_info = tract_info.findPatch(point)
    return tract_info.getId(), "{},{}".format(*patch_info.getIndex())
def get_tract_patch_arrays(skymap, ra_arr, dec_arr, disable_tqdm=None):
    """Return parallel lists (tracts, patches) for each (ra, dec) pair, with a progress bar."""
    pairs = [
        get_tract_patch(skymap, ra, dec)
        for ra, dec in tqdm(zip(ra_arr, dec_arr), total=len(ra_arr), disable=disable_tqdm)
    ]
    tracts = [t for t, _ in pairs]
    patches = [p for _, p in pairs]
    return tracts, patches
def repartition_into_tracts(
    input_files,
    output_root_dir,
    skymap_source_repo,
    ra_label="ra",
    dec_label="dec",
    n_cores=None,
    silent=False,
    **kwargs
):
    """ Take a parquet catalog and split it into tracts according to a given skymap, and write to disk

    Parameters
    ----------
    input_files : list of str
        List of paths to the input parquet file.
    output_root_dir : str
        Path to the output directory. The output files will have the following filename
        <output_root_dir>/<tract>/<input_file_basename>
    skymap_source_repo : str
        Path or existing key in desc_dc2_dm_data.REPOS to indicate the butler repo for loading skymap

    Optional Parameters
    -------------------
    ra_label : str, optional
        Column name for RA, default to 'ra'. The unit is assumed to be degrees.
    dec_label : str, optional
        Column name for Dec, default to 'dec'. The unit is assumed to be degrees.
    n_cores : int, optional
        Number of worker processes; when None, use all available cores (at least 1).
    silent : bool, optional (default: False)
        If true, turn off most printout.
    **kwargs
        Ignored; accepted so an argparse namespace can be passed through unchanged.
    """
    my_print = (lambda *x: None) if silent else print  # no-op printer when silent
    tqdm_disable = silent or None  # None lets tqdm auto-detect whether to show a bar
    repo = desc_dc2_dm_data.REPOS.get(skymap_source_repo, skymap_source_repo)
    my_print("Obtain skymap from", repo)
    skymap = Butler(repo).get("deepCoadd_skyMap")
    for input_file in input_files:
        my_print("Loading input parquet file", input_file)
        df = pd.read_parquet(input_file)
        # Add tract, patch columns to df (i.e., input)
        n_cores = get_number_of_workers(n_cores)
        my_print("Finding tract and patch for each row, using", n_cores, "cores")
        # split the coordinate arrays into one chunk per worker
        skymap_arr = [skymap] * n_cores
        ra_arr = np.array_split(df[ra_label].values, n_cores)
        dec_arr = np.array_split(df[dec_label].values, n_cores)
        # only the first worker may show a progress bar; the rest are disabled
        tqdm_arr = [True] * n_cores
        tqdm_arr[0] = tqdm_disable
        with mp.Pool(n_cores) as pool:
            tractpatch = pool.starmap(get_tract_patch_arrays, zip(skymap_arr, ra_arr, dec_arr, tqdm_arr))
        # reassemble the per-worker (tract, patch) chunks into full columns
        df["tract"] = np.concatenate([tp[0] for tp in tractpatch])
        df["patch"] = np.concatenate([tp[1] for tp in tractpatch])
        del skymap_arr, ra_arr, dec_arr, tqdm_arr, tractpatch
        my_print("Writing out parquet file for each tract in", output_root_dir)
        # one output file per tract: <output_root_dir>/<tract>/<basename(input_file)>
        for tract, df_this_tract in tqdm(df.groupby("tract"), total=df["tract"].nunique(False), disable=tqdm_disable):
            output_dir = os.path.join(output_root_dir, str(tract))
            os.makedirs(output_dir, exist_ok=True)
            output_path = os.path.join(output_dir, os.path.basename(input_file))
            df_this_tract.to_parquet(output_path, index=False)
        my_print("Done with", input_file)
def main():
    """Command-line entry point: parse arguments and run the repartitioning."""
    usage = """Take a parquet catalog and split it into tracts according to a given skymap, and write to disk
For example, to repartition a truth catalog, you can run
python %(prog)s truth_summary_hp10068.parquet -o $CSCRATCH/truth_repartition
The output files will be put into directories:
$CSCRATCH/truth_repartition/3259/truth_summary_hp10068.parquet
$CSCRATCH/truth_repartition/3260/truth_summary_hp10068.parquet
...
Files within each tract directory can be then merged to produce a single file (use `merge_truth_per_tract.py`)
    """
    parser = ArgumentParser(description=usage, formatter_class=RawTextHelpFormatter)
    parser.add_argument("input_files", nargs="+", help="Parquet file(s) to read.")
    parser.add_argument("-o", '--output-root-dir', default='.', help="Output root directory.")
    parser.add_argument("--skymap-source-repo", default="2.2i_dr6_wfd")
    parser.add_argument("--silent", action="store_true")
    parser.add_argument("--n-cores", "--cores", dest="n_cores", type=int)
    options = vars(parser.parse_args())
    repartition_into_tracts(**options)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"lsst.daf.persistence.Butler",
"pandas.read_parquet",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.array_split",
"desc_dc2_dm_data.REPOS.get",
"os.sched_getaffinity",
"multiprocessing.Pool",
"numpy.concatenate",
"os.path.basename",
"os.cpu_count"
] | [((2516, 2582), 'desc_dc2_dm_data.REPOS.get', 'desc_dc2_dm_data.REPOS.get', (['skymap_source_repo', 'skymap_source_repo'], {}), '(skymap_source_repo, skymap_source_repo)\n', (2542, 2582), False, 'import desc_dc2_dm_data\n'), ((4657, 4728), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'usage', 'formatter_class': 'RawTextHelpFormatter'}), '(description=usage, formatter_class=RawTextHelpFormatter)\n', (4671, 4728), False, 'from argparse import ArgumentParser, RawTextHelpFormatter\n'), ((2782, 2809), 'pandas.read_parquet', 'pd.read_parquet', (['input_file'], {}), '(input_file)\n', (2797, 2809), True, 'import pandas as pd\n'), ((3054, 3098), 'numpy.array_split', 'np.array_split', (['df[ra_label].values', 'n_cores'], {}), '(df[ra_label].values, n_cores)\n', (3068, 3098), True, 'import numpy as np\n'), ((3117, 3162), 'numpy.array_split', 'np.array_split', (['df[dec_label].values', 'n_cores'], {}), '(df[dec_label].values, n_cores)\n', (3131, 3162), True, 'import numpy as np\n'), ((3401, 3445), 'numpy.concatenate', 'np.concatenate', (['[tp[0] for tp in tractpatch]'], {}), '([tp[0] for tp in tractpatch])\n', (3415, 3445), True, 'import numpy as np\n'), ((3468, 3512), 'numpy.concatenate', 'np.concatenate', (['[tp[1] for tp in tractpatch]'], {}), '([tp[1] for tp in tractpatch])\n', (3482, 3512), True, 'import numpy as np\n'), ((2637, 2649), 'lsst.daf.persistence.Butler', 'Butler', (['repo'], {}), '(repo)\n', (2643, 2649), False, 'from lsst.daf.persistence import Butler\n'), ((3247, 3263), 'multiprocessing.Pool', 'mp.Pool', (['n_cores'], {}), '(n_cores)\n', (3254, 3263), True, 'import multiprocessing as mp\n'), ((3854, 3892), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (3865, 3892), False, 'import os\n'), ((3944, 3972), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (3960, 3972), False, 'import os\n'), ((543, 566), 'os.sched_getaffinity', 
'os.sched_getaffinity', (['(0)'], {}), '(0)\n', (563, 566), False, 'import os\n'), ((640, 654), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (652, 654), False, 'import os\n')] |
import os
import torch
import numpy
import json
import skimage
import skimage.transform
import skimage.io
import skimage.color
from tqdm import tqdm # 显示进度条
class Load_data:
    """Loader for the text / sentence / annotation / image / dependency-tree datasets.

    Each ``load_*`` method iterates over a fixed pair of split files
    (validation first, then test/train), reads at most ``self.datasize``
    records per split where a cap is applied, and returns a list with one
    entry per split.
    """
    def __init__(self, data_path_text, data_path_annotation, data_path_image, datasize):
        self.data_path_text = data_path_text  # directory holding the text datasets
        self.data_path_annotation = data_path_annotation  # directory holding the image-annotation JSON files
        self.data_path_image = data_path_image  # root path of the image folders
        self.datasize = datasize  # max number of records to read per split
    def load_text(self):  # read each article as one cleaned string
        """Return [val_texts, test_texts]; each element is a list of article strings."""
        data_path = self.data_path_text
        datasets_text = ['val.txt', 'test.txt']  # dataset split files
        text_data = []  # one list entry per split; each entry is a list of articles
        for data in datasets_text:
            filepath = os.path.join(data_path, data)
            newdata = []
            print('*' * 80)
            print('Start process textdata :' + os.path.realpath(filepath))
            datasize = 0
            with open(filepath, 'r', encoding='utf-8') as f_t:
                file = f_t.readlines()
                for filerow in tqdm(file):  # show progress
                    if( datasize == self.datasize ):  # cap the dataset size
                        break
                    # strip sentence-boundary markers and the trailing newline
                    filerow = filerow.replace('<s>', '')
                    filerow = filerow.replace('<s>', '')
                    filerow = filerow.replace('</s>', '')
                    filerow = filerow.replace('\n', '')
                    newdata.append(filerow)  # keep the cleaned line
                    datasize += 1
                    pass
                text_data.append(newdata)
                print('The number of total text: ', len(newdata))
                print('*' * 80, '\n')
            f_t.close()  # redundant: the with-block already closes the file
        return text_data
    def load_sentence(self):
        """Return [val_sentences, test_sentences]; each element holds per-article sentence lists."""
        data_path = self.data_path_text
        datasets_sentence = ['val_sentence.json', 'test_sentence.json']  # sentence-split dataset files
        sentence_data = []  # one list entry per split; each entry holds per-article sentence lists
        for data in datasets_sentence:
            filepath = os.path.join(data_path, data)
            newdata = []
            print('*' * 80)
            print('Start process sentencedata :' + os.path.realpath(filepath))
            datasize = 0
            sum_sentence = 0  # total sentence count for this split
            with open(filepath, 'r', encoding='utf-8') as f_s:
                file = f_s.readlines()
                for filerow in tqdm(file):  # show progress
                    if (datasize == self.datasize):
                        break
                    sentnce = json.loads(filerow)
                    newdata.append(sentnce['sentences'])  # keep the parsed sentence list
                    sum_sentence += len(sentnce['sentences'])
                    datasize += 1
                    pass
                sentence_data.append(newdata)
                print('The number of total sentence: ', sum_sentence)
                print('*' * 80, '\n')
            f_s.close()  # redundant: the with-block already closes the file
        return sentence_data
    def load_annotation(self):
        """Return [val_annotations, train_annotations] read from the caption JSON files.

        NOTE(review): unlike the other loaders, no ``self.datasize`` cap is
        applied here -- every row of each file is read.
        """
        data_path = self.data_path_annotation
        datasets = ['processed_captions_val2017.json', 'processed_captions_train2017.json']
        annotation_data = []
        for data in datasets:
            file_path = os.path.join(data_path, data)
            newdata = []
            print('*' * 80)
            print('Start process text ' + os.path.realpath(file_path))
            with open(file_path, 'r', encoding='utf-8') as f_a:  # read this split's annotations
                file = f_a.readlines()
                for filerow in tqdm(file):
                    annotation = json.loads(filerow)
                    newdata.append(annotation['annotation'])
                    pass
                annotation_data.append(newdata)
            print('*' * 80, '\n')
            f_a.close()
        return annotation_data
    def load_image(self):
        """Return [val_images, test_images]; each element is a list of CxHxW float tensors."""
        data_path = self.data_path_image
        datasets = ['new_val2017', 'new_test2017']
        image_data = []
        for data in datasets:
            filepath = data_path + data
            datasize = 0
            print('*' * 80)
            print('Start process imagedata:' + os.path.realpath(filepath))
            new_data = []
            list_file = os.listdir(filepath)
            for filename in tqdm(list_file):
                if datasize == self.datasize:
                    break
                image = skimage.io.imread(filepath + '/' + filename)
                if (image.shape[-1] != 3):  # convert grayscale images to RGB
                    image = skimage.color.gray2rgb(image)
                # HWC -> CHW, as a float tensor
                newimage = torch.tensor(numpy.transpose(image, (2, 0, 1)), dtype=torch.float)
                new_data.append(newimage)
                datasize += 1
            image_data.append(new_data)
            print('*' * 80, '\n')
        return image_data
    def load_dependencytree(self):
        """Return [val_parents, test_parents]: dependency-tree parent indices per record."""
        data_path = self.data_path_text
        datasets = ['val_parents.json', 'test_parents.json']
        dependency_data = []
        for data in datasets:
            filepath = os.path.join(data_path, data)
            newdata = []
            datasize = 0
            print('*' * 80)
            print('Start process dependency sentence:' + os.path.realpath(filepath))
            with open(filepath, 'r', encoding='utf-8') as f_d:
                file = f_d.readlines()
                for parents in tqdm(file):
                    if datasize == self.datasize:
                        break
                    parent = json.loads(parents)
                    newdata.append(parent['parent'])
                    datasize += 1
                    pass
                dependency_data.append(newdata)
                print('*' * 80, '\n')
            f_d.close()
        return dependency_data
| [
"json.loads",
"os.listdir",
"tqdm.tqdm",
"os.path.join",
"os.path.realpath",
"skimage.io.imread",
"skimage.color.gray2rgb",
"numpy.transpose"
] | [((754, 783), 'os.path.join', 'os.path.join', (['data_path', 'data'], {}), '(data_path, data)\n', (766, 783), False, 'import os\n'), ((1066, 1076), 'tqdm.tqdm', 'tqdm', (['file'], {}), '(file)\n', (1070, 1076), False, 'from tqdm import tqdm\n'), ((1946, 1975), 'os.path.join', 'os.path.join', (['data_path', 'data'], {}), '(data_path, data)\n', (1958, 1975), False, 'import os\n'), ((2300, 2310), 'tqdm.tqdm', 'tqdm', (['file'], {}), '(file)\n', (2304, 2310), False, 'from tqdm import tqdm\n'), ((3077, 3106), 'os.path.join', 'os.path.join', (['data_path', 'data'], {}), '(data_path, data)\n', (3089, 3106), False, 'import os\n'), ((3382, 3392), 'tqdm.tqdm', 'tqdm', (['file'], {}), '(file)\n', (3386, 3392), False, 'from tqdm import tqdm\n'), ((4045, 4065), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (4055, 4065), False, 'import os\n'), ((4094, 4109), 'tqdm.tqdm', 'tqdm', (['list_file'], {}), '(list_file)\n', (4098, 4109), False, 'from tqdm import tqdm\n'), ((4853, 4882), 'os.path.join', 'os.path.join', (['data_path', 'data'], {}), '(data_path, data)\n', (4865, 4882), False, 'import os\n'), ((5175, 5185), 'tqdm.tqdm', 'tqdm', (['file'], {}), '(file)\n', (5179, 5185), False, 'from tqdm import tqdm\n'), ((2422, 2441), 'json.loads', 'json.loads', (['filerow'], {}), '(filerow)\n', (2432, 2441), False, 'import json\n'), ((3423, 3442), 'json.loads', 'json.loads', (['filerow'], {}), '(filerow)\n', (3433, 3442), False, 'import json\n'), ((4207, 4251), 'skimage.io.imread', 'skimage.io.imread', (["(filepath + '/' + filename)"], {}), "(filepath + '/' + filename)\n", (4224, 4251), False, 'import skimage\n'), ((5284, 5303), 'json.loads', 'json.loads', (['parents'], {}), '(parents)\n', (5294, 5303), False, 'import json\n'), ((884, 910), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (900, 910), False, 'import os\n'), ((2080, 2106), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (2096, 2106), False, 'import 
os\n'), ((3203, 3230), 'os.path.realpath', 'os.path.realpath', (['file_path'], {}), '(file_path)\n', (3219, 3230), False, 'import os\n'), ((3967, 3993), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (3983, 3993), False, 'import os\n'), ((4338, 4367), 'skimage.color.gray2rgb', 'skimage.color.gray2rgb', (['image'], {}), '(image)\n', (4360, 4367), False, 'import skimage\n'), ((4408, 4441), 'numpy.transpose', 'numpy.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (4423, 4441), False, 'import numpy\n'), ((5018, 5044), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (5034, 5044), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 18:08:00 2019
@author: ben
"""
import pointCollection as pc
#import ATL11
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import scipy.stats as sps
import argparse
def get_map(map_file, bounds=None):
    """Display a geotiff basemap in grayscale, contrast-stretched to the 16-84 percentile range."""
    backgrnd = pc.grid.data().from_geotif(map_file, bounds=bounds)
    if backgrnd.z.ndim > 2:
        # keep only the first band
        backgrnd.z = backgrnd.z[:, :, 0].astype(float)
    # finite pixels above the minimum value set the stretch limits
    valid = np.isfinite(backgrnd.z) & (backgrnd.z > np.min(backgrnd.z))
    cr = sps.scoreatpercentile(backgrnd.z[valid], [16, 84])
    spread = np.diff(cr)
    backgrnd.show(cmap='gray', vmin=cr[0] - spread, vmax=cr[1] + spread)
class IndexPicker:
    """Interactive map of geoIndex bins.

    Double-clicking the map queries all data within a W x W box around the
    clicked point and opens a TrackPicker window for the result.
    """
    def __init__(self, fig, index_file, map_file=None, W=4e4, datatype='ATL06'):
        # bin centers of the geoIndex, as (x_array, y_array)
        self.xy=pc.geoIndex().from_file(index_file).bins_as_array()
        self.index_file=index_file
        self.map_file=map_file
        self.fig=fig
        self.datatype=datatype
        # Bug fix: the W argument used to be ignored (self.W was hard-coded
        # to 4.e4).  Honor the caller's value; the default is unchanged.
        self.W=W
        if fig is None:
            self.fig=plt.figure()
        if map_file is not None:
            get_map(map_file, None)
        print("about to plot %d points" % len(self.xy[0]))
        plt.plot(self.xy[0], self.xy[1],'.')
        print("starting picker")
        self.cid=self.fig.canvas.mpl_connect('button_press_event', self.click)
        plt.show(block=True)
    def click(self, event):
        """Handle a double-click: query the index around the click and open a TrackPicker."""
        if not event.dblclick:
            return
        print('click', event)
        print(self)
        # fields to read for each supported data type
        if self.datatype=='ATL06':
            field_dict={None:['delta_time','h_li','h_li_sigma','latitude','longitude','atl06_quality_summary','segment_id','sigma_geo_h'],
                    'fit_statistics':['dh_fit_dx'],
                    'ground_track':['x_atc', 'sigma_geo_xt','sigma_geo_at'],
                    'geophysical' : ['dac','tide_ocean','r_eff'],
                    'orbit_info':['rgt','cycle_number'],
                    'derived':['valid','matlab_time','LR','BP','spot','rss_along_track_dh']}
        elif self.datatype=='ATL11':
            field_dict={\
                    'corrected_h':['latitude','longitude','delta_time','h_corr','h_corr_sigma','ref_pt'],\
                    'cycle_stats':['ATL06_summary_zero_count'],\
                    'crossing_track_data':['delta_time','h_corr','h_corr_sigma','ref_pt','rgt',\
                                           'spot_crossing', 'along_track_rss',\
                                           'atl06_quality_summary','cycle_number'],
                        'ref_surf':['x_atc','y_atc']}
        elif self.datatype=='CS2':
            field_dict={'None':['x','y','time','h']}
        W=self.W
        # mark the clicked location on the map
        self.fig.gca().plot(event.xdata, event.ydata,'m*')
        self.fig.canvas.draw()
        xy0=np.round(np.array([event.xdata, event.ydata])/1.e4)*1.e4
        print(xy0)
        # read all data within a W x W box centered on the click
        D=pc.geoIndex().from_file(self.index_file).query_xy_box(\
            event.xdata+np.array([-W/2, W/2]), event.ydata+np.array([-W/2, W/2]), fields=field_dict)
        print(f"click: index_file is {self.index_file}")
        TrackPicker(D, self.map_file, self.index_file, self.datatype)
class TrackPicker:
    """Scatter-plot of queried data; clicking a point plots the nearest track/file."""
    def __init__(self, D, map_file, index_file, datatype):
        self.files=[]
        self.datatype=datatype
        self.map_file=map_file
        self.index_file=index_file
        print(f"TrackPicker: index_file is {index_file}")
        # projection string stored in the geoIndex attributes
        srs_proj4=pc.geoIndex().from_file(index_file).attrs['SRS_proj4']
        if datatype == 'ATL06':
            # tag each granule's points with its position in self.files via 'file_num'
            for ii, Di in enumerate(D):
                Di.get_xy(srs_proj4)
                Di.assign({'file_num':np.zeros_like(Di.x)+ii})
                self.files += [Di.filename]
            self.D_all=pc.data().from_list(D)
        elif datatype == 'ATL11':
            D_list=[]
            for ii, Di in enumerate(D):
                self.files += [Di.filename]
                Di.get_xy(srs_proj4)
                # keep only x, y, file number, and the last h_corr column
                D_list.append(pc.data().from_dict({
                    'x':Di.x, 'y':Di.y, 'file_num':np.zeros_like(Di.x)+ii,
                    'h_corr':Di.corrected_h.h_corr[:,-1].ravel()}))
            self.D_all=pc.data().from_list(D_list)
        else:
            self.D_all=pc.data().from_list(D)
        self.D=D
        # data bounding box, used to crop the background map
        XR=[np.nanmin(self.D_all.x), np.nanmax(self.D_all.x)]
        YR=[np.nanmin(self.D_all.y), np.nanmax(self.D_all.y)]
        self.fig=plt.figure()
        if map_file is not None:
            get_map(map_file, bounds=[XR, YR])
        # color the scatter by a datatype-appropriate quantity
        if self.datatype=='ATL06':
            plt.scatter(self.D_all.x, self.D_all.y, 6, c=self.D_all.r_eff, linewidth=0, vmax=1, vmin=0)
        elif self.datatype=='ATL11':
            plt.scatter(self.D_all.x, self.D_all.y, 6, c=self.D_all.h_corr); plt.colorbar()
        elif self.datatype=='ATM_waveform_fit':
            plt.scatter(self.D_all.x, self.D_all.y, 6, c=np.log10(self.D_all.K0))
        elif self.datatype=='CS2':
            plt.scatter(self.D_all.x, self.D_all.y, 6, c=self.D_all.h); plt.colorbar()
        # inset histogram of times rescaled to approximate calendar years
        # (offset 730486 looks like the matlab datenum for 2000-01-01 -- confirm)
        hax=plt.axes([0.7, 0.05, 0.25, 0.25])
        hax.hist((self.D_all.time-730486)/365.25+2000, 100)
        self.cid=self.fig.canvas.mpl_connect('button_press_event', self)
        plt.show()
    def __call__(self, event):
        """Click handler: plot the track whose point is closest to the click."""
        xyp=event.xdata+1j*event.ydata
        # nearest data point, measured in the complex plane
        best = np.argmin(np.abs(self.D_all.x+1j*self.D_all.y - xyp))
        this = int(self.D_all.file_num[best])
        if self.datatype=='ATL06':
            self.ATL06_plot(self.D[this], this)
        else:
            self.ATL11_plot(self.D[this], this)
    def ATL06_plot(self, D, this):
        """Plot one ATL06 granule: h_li vs x_atc, colored by log10(r_eff)."""
        fig=plt.figure()
        plt.scatter(D.x_atc, D.h_li, c=np.log10(D.r_eff), cmap='rainbow', vmin=np.log10(0.25), vmax=np.log10(4))
        plt.colorbar()
        plt.title(self.files[this])
        print(self.files[this])
        fig.canvas.draw()
        plt.show()
    def ATL11_plot(self, D, this):
        """Plot one ATL11 file: h_corr[:,2] vs ref_pt*20, plus crossover heights."""
        print(self.files[this])
        fig=plt.figure()
        plt.subplot(211)
        plt.scatter(D.corrected_h.ref_pt*20, D.corrected_h.h_corr[:,2], 4, D.corrected_h.h_corr_sigma[:,2]); plt.colorbar()
        plt.subplot(212)
        plt.plot(D.corrected_h.ref_pt*20, D.corrected_h.h_corr[:,2], 'k')
        # crossover locations and height differences from the ATL11 file
        ref, xo, delta=D.get_xovers()
        print(delta)
        plt.scatter(ref.ref_pt*20, xo.h, 12, c=delta.h, linewidth=0); plt.colorbar()
        plt.plot()
        fig.canvas.draw()
        plt.show()
def main():
    """Command-line entry point: build an IndexPicker for a geoIndex file."""
    # no pre-existing figure; IndexPicker creates its own
    fig = None
    # NOTE: the first positional argument of ArgumentParser is ``prog`` (the
    # program name shown in the usage line), not the description.  Pass the
    # explanatory text as ``description=`` so ``--help`` renders correctly.
    parser = argparse.ArgumentParser(
        description="IndexPicker: class to make clickable maps of a geoIndex")
    parser.add_argument("index_file", type=str)
    parser.add_argument("data_type", type=str)
    parser.add_argument("--map_file", '-m', type=str)
    args = parser.parse_args()
    IndexPicker(fig, args.index_file, map_file=args.map_file, datatype=args.data_type)
    #plt.show(block=True)
# run the CLI entry point only when executed as a script
if __name__=='__main__':
    main()
| [
"numpy.log10",
"numpy.array",
"numpy.isfinite",
"numpy.nanmin",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"numpy.diff",
"numpy.nanmax",
"matplotlib.pyplot.scatter",
"numpy.min",
"pointCollection.data",
"numpy.abs",
"matplotlib.pyplot.axes",
"pointCollection... | [((6794, 6881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""IndexPicker: class to make clickable maps of a geoIndex"""'], {}), "(\n 'IndexPicker: class to make clickable maps of a geoIndex')\n", (6817, 6881), False, 'import argparse\n'), ((1446, 1466), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1454, 1466), True, 'import matplotlib.pyplot as plt\n'), ((4663, 4675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4673, 4675), True, 'import matplotlib.pyplot as plt\n'), ((5531, 5541), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5539, 5541), True, 'import matplotlib.pyplot as plt\n'), ((5921, 5933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5931, 5933), True, 'import matplotlib.pyplot as plt\n'), ((6055, 6069), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6067, 6069), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6105), 'matplotlib.pyplot.title', 'plt.title', (['self.files[this]'], {}), '(self.files[this])\n', (6087, 6105), True, 'import matplotlib.pyplot as plt\n'), ((6172, 6182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6180, 6182), True, 'import matplotlib.pyplot as plt\n'), ((6264, 6276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6274, 6276), True, 'import matplotlib.pyplot as plt\n'), ((6285, 6301), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6296, 6301), True, 'import matplotlib.pyplot as plt\n'), ((6310, 6418), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(D.corrected_h.ref_pt * 20)', 'D.corrected_h.h_corr[:, 2]', '(4)', 'D.corrected_h.h_corr_sigma[:, 2]'], {}), '(D.corrected_h.ref_pt * 20, D.corrected_h.h_corr[:, 2], 4, D.\n corrected_h.h_corr_sigma[:, 2])\n', (6321, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6411, 6425), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6423, 6425), True, 'import matplotlib.pyplot as 
plt\n'), ((6434, 6450), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (6445, 6450), True, 'import matplotlib.pyplot as plt\n'), ((6459, 6527), 'matplotlib.pyplot.plot', 'plt.plot', (['(D.corrected_h.ref_pt * 20)', 'D.corrected_h.h_corr[:, 2]', '"""k"""'], {}), "(D.corrected_h.ref_pt * 20, D.corrected_h.h_corr[:, 2], 'k')\n", (6467, 6527), True, 'import matplotlib.pyplot as plt\n'), ((6592, 6654), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(ref.ref_pt * 20)', 'xo.h', '(12)'], {'c': 'delta.h', 'linewidth': '(0)'}), '(ref.ref_pt * 20, xo.h, 12, c=delta.h, linewidth=0)\n', (6603, 6654), True, 'import matplotlib.pyplot as plt\n'), ((6654, 6668), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6666, 6668), True, 'import matplotlib.pyplot as plt\n'), ((6677, 6687), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (6685, 6687), True, 'import matplotlib.pyplot as plt\n'), ((6722, 6732), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6730, 6732), True, 'import matplotlib.pyplot as plt\n'), ((444, 458), 'pointCollection.grid.data', 'pc.grid.data', ([], {}), '()\n', (456, 458), True, 'import pointCollection as pc\n'), ((1120, 1132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1130, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1285, 1322), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xy[0]', 'self.xy[1]', '"""."""'], {}), "(self.xy[0], self.xy[1], '.')\n", (1293, 1322), True, 'import matplotlib.pyplot as plt\n'), ((4534, 4557), 'numpy.nanmin', 'np.nanmin', (['self.D_all.x'], {}), '(self.D_all.x)\n', (4543, 4557), True, 'import numpy as np\n'), ((4559, 4582), 'numpy.nanmax', 'np.nanmax', (['self.D_all.x'], {}), '(self.D_all.x)\n', (4568, 4582), True, 'import numpy as np\n'), ((4596, 4619), 'numpy.nanmin', 'np.nanmin', (['self.D_all.y'], {}), '(self.D_all.y)\n', (4605, 4619), True, 'import numpy as np\n'), ((4621, 4644), 'numpy.nanmax', 'np.nanmax', (['self.D_all.y'], {}), 
'(self.D_all.y)\n', (4630, 4644), True, 'import numpy as np\n'), ((4863, 4958), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.D_all.x', 'self.D_all.y', '(6)'], {'c': 'self.D_all.r_eff', 'linewidth': '(0)', 'vmax': '(1)', 'vmin': '(0)'}), '(self.D_all.x, self.D_all.y, 6, c=self.D_all.r_eff, linewidth=0,\n vmax=1, vmin=0)\n', (4874, 4958), True, 'import matplotlib.pyplot as plt\n'), ((5638, 5686), 'numpy.abs', 'np.abs', (['(self.D_all.x + 1.0j * self.D_all.y - xyp)'], {}), '(self.D_all.x + 1.0j * self.D_all.y - xyp)\n', (5644, 5686), True, 'import numpy as np\n'), ((615, 638), 'numpy.isfinite', 'np.isfinite', (['backgrnd.z'], {}), '(backgrnd.z)\n', (626, 638), True, 'import numpy as np\n'), ((730, 741), 'numpy.diff', 'np.diff', (['cr'], {}), '(cr)\n', (737, 741), True, 'import numpy as np\n'), ((754, 765), 'numpy.diff', 'np.diff', (['cr'], {}), '(cr)\n', (761, 765), True, 'import numpy as np\n'), ((3105, 3130), 'numpy.array', 'np.array', (['[-W / 2, W / 2]'], {}), '([-W / 2, W / 2])\n', (3113, 3130), True, 'import numpy as np\n'), ((3140, 3165), 'numpy.array', 'np.array', (['[-W / 2, W / 2]'], {}), '([-W / 2, W / 2])\n', (3148, 3165), True, 'import numpy as np\n'), ((5004, 5067), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.D_all.x', 'self.D_all.y', '(6)'], {'c': 'self.D_all.h_corr'}), '(self.D_all.x, self.D_all.y, 6, c=self.D_all.h_corr)\n', (5015, 5067), True, 'import matplotlib.pyplot as plt\n'), ((5069, 5083), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5081, 5083), True, 'import matplotlib.pyplot as plt\n'), ((5973, 5990), 'numpy.log10', 'np.log10', (['D.r_eff'], {}), '(D.r_eff)\n', (5981, 5990), True, 'import numpy as np\n'), ((6013, 6027), 'numpy.log10', 'np.log10', (['(0.25)'], {}), '(0.25)\n', (6021, 6027), True, 'import numpy as np\n'), ((6034, 6045), 'numpy.log10', 'np.log10', (['(4)'], {}), '(4)\n', (6042, 6045), True, 'import numpy as np\n'), ((655, 673), 'numpy.min', 'np.min', (['backgrnd.z'], {}), '(backgrnd.z)\n', 
(661, 673), True, 'import numpy as np\n'), ((2941, 2977), 'numpy.array', 'np.array', (['[event.xdata, event.ydata]'], {}), '([event.xdata, event.ydata])\n', (2949, 2977), True, 'import numpy as np\n'), ((3999, 4008), 'pointCollection.data', 'pc.data', ([], {}), '()\n', (4006, 4008), True, 'import pointCollection as pc\n'), ((885, 898), 'pointCollection.geoIndex', 'pc.geoIndex', ([], {}), '()\n', (896, 898), True, 'import pointCollection as pc\n'), ((3018, 3031), 'pointCollection.geoIndex', 'pc.geoIndex', ([], {}), '()\n', (3029, 3031), True, 'import pointCollection as pc\n'), ((3705, 3718), 'pointCollection.geoIndex', 'pc.geoIndex', ([], {}), '()\n', (3716, 3718), True, 'import pointCollection as pc\n'), ((4417, 4426), 'pointCollection.data', 'pc.data', ([], {}), '()\n', (4424, 4426), True, 'import pointCollection as pc\n'), ((4482, 4491), 'pointCollection.data', 'pc.data', ([], {}), '()\n', (4489, 4491), True, 'import pointCollection as pc\n'), ((5261, 5319), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.D_all.x', 'self.D_all.y', '(6)'], {'c': 'self.D_all.h'}), '(self.D_all.x, self.D_all.y, 6, c=self.D_all.h)\n', (5272, 5319), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5335), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5333, 5335), True, 'import matplotlib.pyplot as plt\n'), ((5352, 5385), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.7, 0.05, 0.25, 0.25]'], {}), '([0.7, 0.05, 0.25, 0.25])\n', (5360, 5385), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3926), 'numpy.zeros_like', 'np.zeros_like', (['Di.x'], {}), '(Di.x)\n', (3920, 3926), True, 'import numpy as np\n'), ((5189, 5212), 'numpy.log10', 'np.log10', (['self.D_all.K0'], {}), '(self.D_all.K0)\n', (5197, 5212), True, 'import numpy as np\n'), ((4224, 4233), 'pointCollection.data', 'pc.data', ([], {}), '()\n', (4231, 4233), True, 'import pointCollection as pc\n'), ((4300, 4319), 'numpy.zeros_like', 'np.zeros_like', (['Di.x'], {}), '(Di.x)\n', (4313, 4319), True, 'import 
numpy as np\n')] |
import numpy as np
def laguerre_eval_zero(deg, pts):
    """Evaluate the single Laguerre polynomial L_deg at ``pts``.

    The coefficient vector is ``deg`` zeros followed by a single one, so only
    the degree-``deg`` term contributes.  ``deg`` is coerced to int because
    callers pass float indices such as ``k / 2`` and ``np.zeros`` rejects
    float shapes on modern NumPy.
    """
    deg = int(deg)  # np.zeros requires an integer shape
    return np.polynomial.laguerre.lagval(pts, np.append(np.zeros(deg), 1))
def laguerre_eval_one(deg, pts):
    """Evaluate the Laguerre series with all coefficients one, degrees 0..deg.

    ``deg`` is coerced to int because callers pass float indices such as
    ``(k - 1) / 2`` and ``np.ones`` rejects float shapes on modern NumPy.
    """
    deg = int(deg)  # np.ones requires an integer shape
    return np.polynomial.laguerre.lagval(pts, np.ones(deg + 1))
class RadialBasis:
    """Laguerre-based radial basis with Gaussian weight exp(-r^2 / beta)."""

    def __init__(self, beta):
        self.beta = float(beta)

    def eval(self, k, r, weighted=True):
        """Evaluate the k-th radial basis function at radii ``r``.

        Even ``k`` evaluates the single Laguerre polynomial of degree k/2 at
        r^2; odd ``k`` multiplies an extra factor sqrt(2/(k+1)) * r into the
        all-ones Laguerre series of degree (k-1)/2 at r^2.  With ``weighted``
        the Gaussian weight exp(-r^2 / beta) is applied.
        """
        # floor division keeps the Laguerre degree an integer; the former
        # true division handed float shapes to np.zeros / np.ones, which
        # modern NumPy rejects
        if k % 2 == 0:
            B = laguerre_eval_zero(k // 2, r ** 2)
        else:
            B = np.sqrt(2.0) / np.sqrt(k + 1) * r * laguerre_eval_one((k - 1) // 2, r ** 2)
        if weighted:
            return B * np.exp(-r**2 / self.beta)
        return B

    def eval_all(self, C, r, weighted=True):
        """Sum the real parts of coefficients ``C`` against the basis at ``r``."""
        result = np.zeros_like(r)
        for k, c in enumerate(C):
            result += np.real(c) * self.eval(k, r, weighted)
        return result
def laguerre_approx(K, L, beta, a, q, f):
    """
    Approximate a radially symmetric ``f`` in the Laguerre radial basis.

    Keyword Arguments:
    K    -- number of coefficients
    L    -- number of angular modes
    beta -- basis exponent
    a    -- f = poly(f)*exp(-a r^2)
    q    -- quadrature rule, a functional of alpha, the exponent
    f    -- function

    Returns (C, rel_err): the (K, L) complex coefficient array (only the
    l = 0 column of even k is filled) and the relative l2 approximation
    error.
    """
    # ``np.complex`` was a deprecated alias removed in NumPy 1.24; the
    # builtin ``complex`` is the supported spelling of the same dtype.
    C = np.zeros((K, L), dtype=complex)
    # compute the total exponent seen by the quadrature rule
    nu = 1 - 1. / beta + a
    quad = q(nu)
    b = RadialBasis(beta)
    # radially symmetric input: project onto the even-k, l = 0 modes only
    for k in range(0, K, 2):
        C[k, 0] = 2 * np.pi * sum(f(quad.pts) * b.eval(k, quad.pts, False) * quad.wts) / np.pi
    # # compute approximation error in l2-norm
    #  || f - fapprox || = \int -2*f*fapprox + fapprox^2 + f^2 r dr
    #                             ^            ^          ^
    # exponents:                 nu12         nu11        nu22
    # factor out the largest common exponent so one quadrature rule serves
    # all three terms
    gamma = min(min(a + 1. / beta, 2 * a), 2. / beta)
    nu12 = a + 1. / beta - gamma
    nu11 = 2. / beta - gamma
    nu22 = 2 * a - gamma
    quad = q(gamma)
    pts = quad.pts
    wts = quad.wts
    fapprox = b.eval_all(C[:, 0], pts, weighted=False)
    fex = f(pts)
    mu = np.exp(-pts * pts)
    l2_err2 = np.sum((-2 * fex * fapprox * mu ** nu12 + fapprox ** 2 * mu ** nu11 + fex ** 2 * mu ** nu22) * wts)
    l2_norm = np.sqrt(np.sum(fex ** 2 * mu ** nu22 * wts))
    return C, np.sqrt(np.abs(l2_err2)) / l2_norm
class Basis:
    """2-D polar basis: Laguerre radial functions times Fourier angular modes.

    Coefficients live in an (nK, nL) array indexed by radial index k and a
    wrapped angular storage slot q; q_to_l / l_to_q translate between the
    slot and the signed angular mode number l.
    """
    def __init__(self, nK, nL, beta):
        self.nK = nK      # number of radial modes
        self.nL = nL      # number of angular storage slots
        self.beta = beta  # Gaussian weight exponent: exp(-r^2 / beta)
    def q_to_l(self, k, q):
        """Map storage slot ``q`` (0 <= q < nL) to the signed angular mode l.

        Slots in the lower half hold non-negative modes, the upper half
        negative ones (FFT-style wrap); odd ``k`` shifts the mode by one.
        """
        l = 2 * q if q < self.nL / 2 else 2 * (q - self.nL)
        return l + (k % 2)
    def l_to_q(self, k, l):
        """Inverse of q_to_l: map a signed mode ``l`` back to a storage slot.

        Uses floor division: for valid (k, l) pairs, l - (k % 2) is even, so
        the result is exact and stays an int.  The former true division
        returned a float, which breaks ``coeffs[k, q]`` indexing.
        """
        q = (l - (k % 2)) // 2
        return q if q >= 0 else q + self.nL
    def eval(self, l, k, r, phi, eval_weight=True):
        """
        Evaluate basis function (l, k) at polar coordinates (r, phi).

        Even k evaluates the single Laguerre polynomial of degree k/2 at
        r^2; odd k multiplies in sqrt(2/(k+1)) * r and the all-ones Laguerre
        series of degree (k-1)/2.  The Gaussian weight exp(-r^2 / beta) is
        applied when eval_weight is True.
        """
        # floor division keeps the Laguerre degree an integer; np.zeros /
        # np.ones reject float shapes on modern NumPy
        if k % 2 == 0:
            B = laguerre_eval_zero(k // 2, r ** 2) * np.exp(1j * float(l) * phi)
            if eval_weight == True:
                B *= np.exp(-r ** 2 / self.beta)
            return B
        else:
            B = np.sqrt(2.0) / np.sqrt(k + 1) * r * laguerre_eval_one(
                (k - 1) // 2, r ** 2) * np.exp(1j * float(l) * phi)
            if eval_weight == True:
                B *= np.exp(-r ** 2 / self.beta)
            return B
    def eval_cart(self, coeffs, X, Y, eval_weight=True):
        """Evaluate the full (nK x nL) expansion on Cartesian grids X, Y."""
        result = np.zeros(X.shape, dtype=complex)
        R = np.sqrt(X * X + Y * Y)
        PHI = np.arctan2(Y, X)
        for q in range(self.nL):
            for k in range(self.nK):
                l = self.q_to_l(k, q)
                result += self.eval(l, k, R, PHI, eval_weight) * coeffs[k, q]
        return result
    def eval_cartKL(self, K, L, coeffs, X, Y, eval_weight=True):
        """
        Evaluate a truncated expansion (radial index < K, |angular mode| < L)
        on Cartesian grids.
        Keyword Arguments:
        K -- even integer
        L -- even integer
        coeffs -- !!! coeffs must have dimension self.nK x self.nL
        X --
        Y --
        eval_weight -- (default True)
        """
        assert coeffs.shape[0] == self.nK
        assert coeffs.shape[1] == self.nL
        result = np.zeros(X.shape, dtype=complex)
        R = np.sqrt(X * X + Y * Y)
        PHI = np.arctan2(Y, X)
        for k in range(K):
            # angular modes share the parity of k
            for l in range(-L + (k % 2), L, 2):
                q = self.l_to_q(k, l)
                result += self.eval(l, k, R, PHI, eval_weight) * coeffs[k, q]
        return result
    def eval_KL(self, K, L, coeffs, R, PHI, eval_weight=True):
        """
        Same truncated (K, L) expansion as eval_cartKL, evaluated directly on
        polar grids R, PHI.
        Keyword Arguments:
        K -- even integer
        L -- even integer
        coeffs -- !!! coeffs must have dimension self.nK x self.nL
        eval_weight -- (default True)
        """
        assert coeffs.shape[0] == self.nK
        assert coeffs.shape[1] == self.nL
        result = np.zeros(R.shape, dtype=complex)
        for k in range(K):
            for l in range(-L + (k % 2), L, 2):
                q = self.l_to_q(k, l)
                result += self.eval(l, k, R, PHI, eval_weight) * coeffs[k, q]
        return result
    def eval_grid(self, coeffs, vmin, vmax, npts=100):
        """Evaluate the full expansion on an npts x npts square grid.

        Returns (U, V, values) with U, V from np.mgrid over [vmin, vmax].
        """
        U, V = np.mgrid[vmin:vmax:1j * npts, vmin:vmax:1j * npts]
        return U, V, self.eval_cart(coeffs, U, V, eval_weight=True)
    def eval_gridKL(self, K, L, coeffs, vmin, vmax, npts=100):
        """Evaluate the truncated (K, L) expansion on an npts x npts square grid."""
        U, V = np.mgrid[vmin:vmax:1j * npts, vmin:vmax:1j * npts]
        return U, V, self.eval_cartKL(K, L, coeffs, U, V, eval_weight=True)
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.real",
"numpy.arctan2",
"numpy.zeros_like"
] | [((1146, 1180), 'numpy.zeros', 'np.zeros', (['(K, L)'], {'dtype': 'np.complex'}), '((K, L), dtype=np.complex)\n', (1154, 1180), True, 'import numpy as np\n'), ((1954, 1972), 'numpy.exp', 'np.exp', (['(-pts * pts)'], {}), '(-pts * pts)\n', (1960, 1972), True, 'import numpy as np\n'), ((1985, 2088), 'numpy.sum', 'np.sum', (['((-2 * fex * fapprox * mu ** nu12 + fapprox ** 2 * mu ** nu11 + fex ** 2 * \n mu ** nu22) * wts)'], {}), '((-2 * fex * fapprox * mu ** nu12 + fapprox ** 2 * mu ** nu11 + fex **\n 2 * mu ** nu22) * wts)\n', (1991, 2088), True, 'import numpy as np\n'), ((211, 227), 'numpy.ones', 'np.ones', (['(deg + 1)'], {}), '(deg + 1)\n', (218, 227), True, 'import numpy as np\n'), ((701, 717), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (714, 717), True, 'import numpy as np\n'), ((2085, 2120), 'numpy.sum', 'np.sum', (['(fex ** 2 * mu ** nu22 * wts)'], {}), '(fex ** 2 * mu ** nu22 * wts)\n', (2091, 2120), True, 'import numpy as np\n'), ((3226, 3258), 'numpy.zeros', 'np.zeros', (['X.shape'], {'dtype': 'complex'}), '(X.shape, dtype=complex)\n', (3234, 3258), True, 'import numpy as np\n'), ((3271, 3293), 'numpy.sqrt', 'np.sqrt', (['(X * X + Y * Y)'], {}), '(X * X + Y * Y)\n', (3278, 3293), True, 'import numpy as np\n'), ((3308, 3324), 'numpy.arctan2', 'np.arctan2', (['Y', 'X'], {}), '(Y, X)\n', (3318, 3324), True, 'import numpy as np\n'), ((4020, 4052), 'numpy.zeros', 'np.zeros', (['X.shape'], {'dtype': 'complex'}), '(X.shape, dtype=complex)\n', (4028, 4052), True, 'import numpy as np\n'), ((4065, 4087), 'numpy.sqrt', 'np.sqrt', (['(X * X + Y * Y)'], {}), '(X * X + Y * Y)\n', (4072, 4087), True, 'import numpy as np\n'), ((4102, 4118), 'numpy.arctan2', 'np.arctan2', (['Y', 'X'], {}), '(Y, X)\n', (4112, 4118), True, 'import numpy as np\n'), ((4817, 4849), 'numpy.zeros', 'np.zeros', (['R.shape'], {'dtype': 'complex'}), '(R.shape, dtype=complex)\n', (4825, 4849), True, 'import numpy as np\n'), ((111, 124), 'numpy.zeros', 'np.zeros', (['deg'], {}), 
'(deg)\n', (119, 124), True, 'import numpy as np\n'), ((577, 604), 'numpy.exp', 'np.exp', (['(-r ** 2 / self.beta)'], {}), '(-r ** 2 / self.beta)\n', (583, 604), True, 'import numpy as np\n'), ((774, 784), 'numpy.real', 'np.real', (['c'], {}), '(c)\n', (781, 784), True, 'import numpy as np\n'), ((2137, 2152), 'numpy.abs', 'np.abs', (['l2_err2'], {}), '(l2_err2)\n', (2143, 2152), True, 'import numpy as np\n'), ((2843, 2870), 'numpy.exp', 'np.exp', (['(-r ** 2 / self.beta)'], {}), '(-r ** 2 / self.beta)\n', (2849, 2870), True, 'import numpy as np\n'), ((3101, 3128), 'numpy.exp', 'np.exp', (['(-r ** 2 / self.beta)'], {}), '(-r ** 2 / self.beta)\n', (3107, 3128), True, 'import numpy as np\n'), ((458, 470), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (465, 470), True, 'import numpy as np\n'), ((473, 487), 'numpy.sqrt', 'np.sqrt', (['(k + 1)'], {}), '(k + 1)\n', (480, 487), True, 'import numpy as np\n'), ((2922, 2934), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (2929, 2934), True, 'import numpy as np\n'), ((2937, 2951), 'numpy.sqrt', 'np.sqrt', (['(k + 1)'], {}), '(k + 1)\n', (2944, 2951), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import click
import numpy as np
from htsohm import load_config_file, db
from htsohm.db import Material, VoidFraction
from htsohm.htsohm_run import load_restart_db
from sqlalchemy.orm import joinedload
@click.command()
@click.argument('config-path', type=click.Path())
@click.argument('database-path', type=click.Path())
@click.option('--generation', '-g', type=int, default=None)
@click.option('-o', '--output-path', type=click.Path(), default="-")
def num_materials_per_bin(config_path, database_path, generation=None, output_path="-"):
    """outputs materials per bin for cover visualization script in blender"""
    config = load_config_file(config_path)
    prop1range = config['prop1range']
    prop2range = config['prop2range']
    num_bins = config['number_of_convergence_bins']

    VoidFraction.set_column_for_void_fraction(config['void_fraction_subtype'])
    engine, session = db.init_database(config["database_connection_string"])

    # honor the --generation option; previously the value was unconditionally
    # overwritten with 500, which silently ignored the CLI flag.  Keep 500 as
    # the fallback so the old default behavior is unchanged.
    if generation is None:
        generation = 500

    _, _, bin_counts, _, _, _ = load_restart_db(generation, num_bins, prop1range, prop2range, session)

    # "-" (the default) makes click write to stdout
    with click.open_file(output_path, 'w') as f:
        np.savetxt(f, bin_counts, "%d", delimiter=",")
# click handles CLI parsing and dispatch when run as a script
if __name__ == '__main__':
    num_materials_per_bin()
| [
"htsohm.db.init_database",
"htsohm.load_config_file",
"click.option",
"htsohm.db.VoidFraction.set_column_for_void_fraction",
"click.Path",
"numpy.savetxt",
"htsohm.htsohm_run.load_restart_db",
"click.command",
"click.open_file"
] | [((229, 244), 'click.command', 'click.command', ([], {}), '()\n', (242, 244), False, 'import click\n'), ((348, 406), 'click.option', 'click.option', (['"""--generation"""', '"""-g"""'], {'type': 'int', 'default': 'None'}), "('--generation', '-g', type=int, default=None)\n", (360, 406), False, 'import click\n'), ((657, 686), 'htsohm.load_config_file', 'load_config_file', (['config_path'], {}), '(config_path)\n', (673, 686), False, 'from htsohm import load_config_file, db\n'), ((819, 893), 'htsohm.db.VoidFraction.set_column_for_void_fraction', 'VoidFraction.set_column_for_void_fraction', (["config['void_fraction_subtype']"], {}), "(config['void_fraction_subtype'])\n", (860, 893), False, 'from htsohm.db import Material, VoidFraction\n'), ((917, 971), 'htsohm.db.init_database', 'db.init_database', (["config['database_connection_string']"], {}), "(config['database_connection_string'])\n", (933, 971), False, 'from htsohm import load_config_file, db\n'), ((1027, 1097), 'htsohm.htsohm_run.load_restart_db', 'load_restart_db', (['generation', 'num_bins', 'prop1range', 'prop2range', 'session'], {}), '(generation, num_bins, prop1range, prop2range, session)\n', (1042, 1097), False, 'from htsohm.htsohm_run import load_restart_db\n'), ((1108, 1141), 'click.open_file', 'click.open_file', (['output_path', '"""w"""'], {}), "(output_path, 'w')\n", (1123, 1141), False, 'import click\n'), ((1156, 1202), 'numpy.savetxt', 'np.savetxt', (['f', 'bin_counts', '"""%d"""'], {'delimiter': '""","""'}), "(f, bin_counts, '%d', delimiter=',')\n", (1166, 1202), True, 'import numpy as np\n'), ((281, 293), 'click.Path', 'click.Path', ([], {}), '()\n', (291, 293), False, 'import click\n'), ((333, 345), 'click.Path', 'click.Path', ([], {}), '()\n', (343, 345), False, 'import click\n'), ((449, 461), 'click.Path', 'click.Path', ([], {}), '()\n', (459, 461), False, 'import click\n')] |
#! /usr/bin/env python
import argparse, sys, os, errno
import logging
# configure root logging once at import time; the __main__ block later
# re-binds ``logger`` to a per-command child logger
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s [%(levelname)s] : %(message)s')
logger = logging.getLogger('report')
def summarize_cv(args):
    """Collect per-fold cross-validation metrics into one tab-separated table.

    Reads <input_dir>/cv_split (HDF5: one group per fold holding the
    train/test id arrays) and, for each fold, the HDF5 'metrics' group of
    <input_dir>/cv/<fold>/predictions.  Writes one row per fold to
    args.output_file.  For classification tasks, per-class set sizes and
    per-class accuracies are expanded into extra columns.
    """
    import pandas as pd
    columns = {}   # column name -> per-fold value array
    colnames = {}  # ordered column-name groups used to arrange the output
    # load the train/test split of every fold
    with h5py.File(os.path.join(args.input_dir, 'cv_split'), 'r') as f:
        n_folds = len(f.keys())
        cv_split = {}
        for cv_fold in f.keys():
            cv_split[int(cv_fold)] = {}
            g = f[cv_fold]
            for key in g.keys():
                cv_split[int(cv_fold)][key] = g[key][:]
    colnames['classification'] = []
    if args.task == 'classification':
        # class names and targets are needed to size the per-class columns
        with h5py.File(os.path.join(args.input_dir, 'targets'), 'r') as f:
            class_name = f['class_name'][:]
            y = f['y'][:]
            image_id_y = f['image_id'][:]
        colnames['classification'] = ['train_size', 'test_size']
        colnames['classification'] += ['class_size(%s)'%(class_name[i]) for i in range(len(class_name))]
    # cv_fold stays -1 only if the loop below were to skip the assignment
    columns['cv_fold'] = np.full(n_folds, -1, dtype='int32')
    colnames['metric'] = []
    for cv_fold in range(n_folds):
        cv_dir = os.path.join(args.input_dir, 'cv', str(cv_fold))
        columns['cv_fold'][cv_fold] = cv_fold
        # folds without a prediction directory keep NaN metric values
        if not os.path.isdir(cv_dir):
            continue
        pred_file = os.path.join(cv_dir, 'predictions')
        with h5py.File(pred_file, 'r') as f:
            g = f['metrics']
            # get column names
            # (discovered lazily from the first fold that has metrics)
            if len(colnames['metric']) == 0 :
                colnames['metric'] = []
                for metric in g.keys():
                    if len(g[metric].shape) == 0:
                        # scalar metric -> a single column
                        colnames['metric'].append(metric)
                    elif metric == 'accuracy_by_class':
                        # vector metric -> one column per class
                        colnames['metric'] += ['%s(%s)'%(metric, class_name[i]) for i in range(g[metric].shape[0])]
                for metric in colnames['metric']:
                    columns[metric] = np.full(n_folds, np.nan, dtype='float64')
                if args.task == 'classification':
                    for colname in ['train_size', 'test_size']:
                        columns[colname] = np.zeros(n_folds, dtype='int32')
                    for i in range(len(class_name)):
                        columns['class_size(%s)'%(class_name[i])] = np.zeros(n_folds, dtype='int32')
            # fill this fold's row
            for metric in g.keys():
                if len(g[metric].shape) == 0:
                    columns[metric][cv_fold] = g[metric][()]
                elif metric == 'accuracy_by_class':
                    # NOTE(review): class_name is defined only when
                    # args.task == 'classification'; other tasks with this
                    # metric would raise NameError -- confirm intended usage
                    metric_vals = g[metric][:]
                    for i in range(g[metric].shape[0]):
                        columns['%s(%s)'%(metric, class_name[i])][cv_fold] = metric_vals[i]
        if args.task == 'classification':
            columns['train_size'][cv_fold] = cv_split[cv_fold]['train'].shape[0]
            columns['test_size'][cv_fold] = cv_split[cv_fold]['test'].shape[0]
            for i in range(len(class_name)):
                y_test = y[array_lookup(image_id_y, cv_split[cv_fold]['test'])]
                # one-hot coding for multi-class
                if len(y_test.shape) > 1:
                    columns['class_size(%s)'%(class_name[i])][cv_fold] = np.sum(y_test[:, i])
                # two-class
                else:
                    columns['class_size(%s)' % (class_name[i])][cv_fold] = np.sum(y_test == i)
    summary = pd.DataFrame(columns)
    # optional constant key=value columns supplied via -a/--attribute
    attribute_keys = []
    if args.attribute is not None:
        for a in args.attribute:
            if '=' not in a:
                raise ValueError('missing = in attribute: ' + a)
            ind = a.index('=')
            key = a[:ind].strip()
            val = a[(ind + 1):].strip()
            summary[key] = val
            attribute_keys.append(key)
    # fix the column order: attributes, fold id, sizes, metrics
    summary = summary[attribute_keys + ['cv_fold'] + colnames['classification'] + colnames['metric']]
    logger.info('create output file: ' + args.output_file)
    prepare_output_file(args.output_file)
    summary.to_csv(args.output_file, sep='\t', quoting=False, index=False)
if __name__ == '__main__':
    # top-level parser with one sub-command per report type
    main_parser = argparse.ArgumentParser(description='Generate reports')
    subparsers = main_parser.add_subparsers(dest='command')
    # command: summarize_cv
    parser = subparsers.add_parser('summarize_cv',
        help='summarize parameters, metrics for cross-validation')
    parser.add_argument('-i', '--input-dir', type=str, required=True,
        help='cross-validation directory with directory structure like: cv/<cv_fold>/predictions')
    parser.add_argument('-a', '--attribute', type=str, action='append',
        help='key=value pairs to add to the columns')
    parser.add_argument('-t', '--task', type=str, default='classification')
    parser.add_argument('-o', '--output-file', type=str, required=True,
        help='an output text report file')
    args = main_parser.parse_args()
    # re-bind the module logger to a per-command child logger
    logger = logging.getLogger('report.' + args.command)
    command_handlers = {
        'summarize_cv': summarize_cv
    }
    # heavy imports deferred until after argument parsing (presumably to keep
    # --help fast) -- note that the handlers rely on these module-level names
    import numpy as np
    import h5py
    from utils import prepare_output_file, array_lookup
    command_handlers[args.command](args)
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser",
"utils.array_lookup",
"os.path.join",
"h5py.File",
"numpy.sum",
"utils.prepare_output_file",
"os.path.isdir",
"numpy.zeros",
"pandas.DataFrame",
"numpy.full"
] | [((70, 175), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(name)s [%(levelname)s] : %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(name)s [%(levelname)s] : %(message)s')\n", (89, 175), False, 'import logging\n'), ((180, 207), 'logging.getLogger', 'logging.getLogger', (['"""report"""'], {}), "('report')\n", (197, 207), False, 'import logging\n'), ((1063, 1098), 'numpy.full', 'np.full', (['n_folds', '(-1)'], {'dtype': '"""int32"""'}), "(n_folds, -1, dtype='int32')\n", (1070, 1098), True, 'import numpy as np\n'), ((3479, 3500), 'pandas.DataFrame', 'pd.DataFrame', (['columns'], {}), '(columns)\n', (3491, 3500), True, 'import pandas as pd\n'), ((4028, 4065), 'utils.prepare_output_file', 'prepare_output_file', (['args.output_file'], {}), '(args.output_file)\n', (4047, 4065), False, 'from utils import prepare_output_file, array_lookup\n'), ((4187, 4242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate reports"""'}), "(description='Generate reports')\n", (4210, 4242), False, 'import argparse, sys, os, errno\n'), ((5062, 5105), 'logging.getLogger', 'logging.getLogger', (["('report.' + args.command)"], {}), "('report.' 
+ args.command)\n", (5079, 5105), False, 'import logging\n'), ((1353, 1388), 'os.path.join', 'os.path.join', (['cv_dir', '"""predictions"""'], {}), "(cv_dir, 'predictions')\n", (1365, 1388), False, 'import argparse, sys, os, errno\n'), ((311, 351), 'os.path.join', 'os.path.join', (['args.input_dir', '"""cv_split"""'], {}), "(args.input_dir, 'cv_split')\n", (323, 351), False, 'import argparse, sys, os, errno\n'), ((1289, 1310), 'os.path.isdir', 'os.path.isdir', (['cv_dir'], {}), '(cv_dir)\n', (1302, 1310), False, 'import argparse, sys, os, errno\n'), ((1402, 1427), 'h5py.File', 'h5py.File', (['pred_file', '"""r"""'], {}), "(pred_file, 'r')\n", (1411, 1427), False, 'import h5py\n'), ((704, 743), 'os.path.join', 'os.path.join', (['args.input_dir', '"""targets"""'], {}), "(args.input_dir, 'targets')\n", (716, 743), False, 'import argparse, sys, os, errno\n'), ((1988, 2029), 'numpy.full', 'np.full', (['n_folds', 'np.nan'], {'dtype': '"""float64"""'}), "(n_folds, np.nan, dtype='float64')\n", (1995, 2029), True, 'import numpy as np\n'), ((2187, 2219), 'numpy.zeros', 'np.zeros', (['n_folds'], {'dtype': '"""int32"""'}), "(n_folds, dtype='int32')\n", (2195, 2219), True, 'import numpy as np\n'), ((2341, 2373), 'numpy.zeros', 'np.zeros', (['n_folds'], {'dtype': '"""int32"""'}), "(n_folds, dtype='int32')\n", (2349, 2373), True, 'import numpy as np\n'), ((3058, 3109), 'utils.array_lookup', 'array_lookup', (['image_id_y', "cv_split[cv_fold]['test']"], {}), "(image_id_y, cv_split[cv_fold]['test'])\n", (3070, 3109), False, 'from utils import prepare_output_file, array_lookup\n'), ((3287, 3307), 'numpy.sum', 'np.sum', (['y_test[:, i]'], {}), '(y_test[:, i])\n', (3293, 3307), True, 'import numpy as np\n'), ((3445, 3464), 'numpy.sum', 'np.sum', (['(y_test == i)'], {}), '(y_test == i)\n', (3451, 3464), True, 'import numpy as np\n')] |
import numpy as np
# Weighted Voting Game Class
class wvg:
    """Weighted voting game: a coalition wins iff its total weight meets the quota."""

    # Initializes WVG. Weights and Quota are optional and defaulted to [0],1 if not provided
    def __init__(self, weights = None, quota = None):
        self.weights = np.zeros((1), dtype=int) if weights is None else weights
        self.num_players = len(self.weights)
        self.quota = 1 if quota is None else quota

    # Getters
    def get_num_players(self):
        return self.num_players

    def get_weights(self):
        return self.weights

    def get_quota(self):
        return self.quota

    # Setters
    def set_num_players(self, num_players):
        """Resize the game, resetting every weight to zero."""
        self.num_players = num_players
        # keep the integer dtype used by __init__ for consistency
        self.weights = np.zeros((num_players), dtype=int)

    def set_weights(self, weights):
        if self.num_players != len(weights):
            raise Exception("Weights array must be same size as num_players")
        self.weights = weights

    def set_quota(self, quota):
        self.quota = quota

    # Takes in subset of players (array of indices) and returns whether that is a winning coalition (1) or not (0)
    def v(self, players):
        """Characteristic function over an iterable of player indices.

        Raises IndexError for any index outside [0, num_players).
        """
        total = 0
        for i in players:
            # was ``i > self.num_players``: an index equal to num_players is
            # also out of range and previously hit a raw IndexError from the
            # weights array instead of this explicit check
            if i >= self.num_players or i < 0:
                raise IndexError("Subsets include too many players. Check n in brute_force_sv")
            total += self.weights[i]
        if total >= self.quota:
            return 1
        else:
            return 0
| [
"numpy.zeros"
] | [((709, 730), 'numpy.zeros', 'np.zeros', (['num_players'], {}), '(num_players)\n', (717, 730), True, 'import numpy as np\n'), ((239, 261), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'int'}), '(1, dtype=int)\n', (247, 261), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from typing import List, Union
from sklearn.preprocessing import StandardScaler
from cytominer_eval.utils.transform_utils import set_pair_ids
from cytominer_eval.utils.availability_utils import (
check_compare_distribution_method,
check_replicate_summary_method,
)
def assign_replicates(
    similarity_melted_df: pd.DataFrame,
    replicate_groups: List[str],
) -> pd.DataFrame:
    """Determine which profiles should be considered replicates.
    Given an elongated pairwise correlation matrix with metadata annotations, determine
    how to assign replicate information.
    Parameters
    ----------
    similarity_melted_df : pandas.DataFrame
        Long pandas DataFrame of annotated pairwise correlations output from
        :py:func:`cytominer_eval.transform.transform.metric_melt`.
    replicate_groups : list
        a list of metadata column names in the original profile dataframe used to
        indicate replicate profiles.
    Returns
    -------
    pd.DataFrame
        A similarity_melted_df but with added columns indicating whether or not the
        pairwise similarity metric is comparing replicates or not. Used in most eval
        operations.
    """
    pair_ids = set_pair_ids()
    # one boolean output column per replicate grouping column
    replicate_col_names = {x: "{x}_replicate".format(x=x) for x in replicate_groups}
    compare_dfs = []
    for replicate_col in replicate_groups:
        # the same metadata column appears twice in the melted frame, once
        # per pair member (distinguished by the pair_ids suffixes)
        replicate_cols_with_suffix = [
            "{col}{suf}".format(col=replicate_col, suf=pair_ids[x]["suffix"])
            for x in pair_ids
        ]
        assert all(
            [x in similarity_melted_df.columns for x in replicate_cols_with_suffix]
        ), "replicate_group not found in melted dataframe columns"
        replicate_col_name = replicate_col_names[replicate_col]
        # NOTE(review): this slice is a copy of the input; the .loc
        # assignments below work but may emit SettingWithCopyWarning under
        # some pandas versions
        compare_df = similarity_melted_df.loc[:, replicate_cols_with_suffix]
        compare_df.loc[:, replicate_col_name] = False
        # a pair is a replicate for this column when both members share the
        # same metadata value
        compare_df.loc[
            np.where(compare_df.iloc[:, 0] == compare_df.iloc[:, 1])[0],
            replicate_col_name,
        ] = True
        compare_dfs.append(compare_df)
    # min over the boolean columns acts as a logical AND: group_replicate is
    # True only when every replicate_groups column matched
    compare_df = pd.concat(compare_dfs, axis="columns").reset_index(drop=True)
    compare_df = compare_df.assign(
        group_replicate=compare_df.loc[:, replicate_col_names.values()].min(
            axis="columns"
        )
    ).loc[:, list(replicate_col_names.values()) + ["group_replicate"]]
    # align by row index: compare_df rows were built in input order
    similarity_melted_df = similarity_melted_df.merge(
        compare_df, left_index=True, right_index=True
    )
    return similarity_melted_df
return similarity_melted_df
def compare_distributions(
    target_distrib: List[float],
    control_distrib: List[float],
    method: str = "zscore",
    replicate_summary_method: str = "mean",
) -> float:
    """Compare two distributions and output a single score indicating the difference.
    Given two different vectors of distributions and a comparison method, determine how
    the two distributions are different.
    Parameters
    ----------
    target_distrib : np.array
        A list-like (e.g. numpy.array) of floats representing the first distribution.
        Must be of shape (n_samples, 1).
    control_distrib : np.array
        A list-like (e.g. numpy.array) of floats representing the second distribution.
        Must be of shape (n_samples, 1).
    method : str, optional
        A string indicating how to compare the two distributions. Defaults to "zscore".
    replicate_summary_method : str, optional
        A string indicating how to summarize the resulting scores, if applicable. Only
        in use when method="zscore".
    Returns
    -------
    float
        A single value comparing the two distributions
    """
    # Reject unsupported method names up front
    check_compare_distribution_method(method)
    check_replicate_summary_method(replicate_summary_method)
    if method == "zscore":
        # Standardize the target values against the control distribution's
        # mean and standard deviation, then collapse to one summary number.
        zscorer = StandardScaler().fit(control_distrib)
        scores = zscorer.transform(target_distrib)
        if replicate_summary_method == "mean":
            scores = np.mean(scores)
        elif replicate_summary_method == "median":
            scores = np.median(scores)
    return scores
| [
"cytominer_eval.utils.transform_utils.set_pair_ids",
"numpy.mean",
"numpy.median",
"numpy.where",
"cytominer_eval.utils.availability_utils.check_replicate_summary_method",
"cytominer_eval.utils.availability_utils.check_compare_distribution_method",
"sklearn.preprocessing.StandardScaler",
"pandas.conca... | [((1246, 1260), 'cytominer_eval.utils.transform_utils.set_pair_ids', 'set_pair_ids', ([], {}), '()\n', (1258, 1260), False, 'from cytominer_eval.utils.transform_utils import set_pair_ids\n'), ((3752, 3793), 'cytominer_eval.utils.availability_utils.check_compare_distribution_method', 'check_compare_distribution_method', (['method'], {}), '(method)\n', (3785, 3793), False, 'from cytominer_eval.utils.availability_utils import check_compare_distribution_method, check_replicate_summary_method\n'), ((3798, 3854), 'cytominer_eval.utils.availability_utils.check_replicate_summary_method', 'check_replicate_summary_method', (['replicate_summary_method'], {}), '(replicate_summary_method)\n', (3828, 3854), False, 'from cytominer_eval.utils.availability_utils import check_compare_distribution_method, check_replicate_summary_method\n'), ((3900, 3916), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3914, 3916), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2141, 2179), 'pandas.concat', 'pd.concat', (['compare_dfs'], {'axis': '"""columns"""'}), "(compare_dfs, axis='columns')\n", (2150, 2179), True, 'import pandas as pd\n'), ((4072, 4087), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (4079, 4087), True, 'import numpy as np\n'), ((4160, 4177), 'numpy.median', 'np.median', (['scores'], {}), '(scores)\n', (4169, 4177), True, 'import numpy as np\n'), ((1974, 2030), 'numpy.where', 'np.where', (['(compare_df.iloc[:, 0] == compare_df.iloc[:, 1])'], {}), '(compare_df.iloc[:, 0] == compare_df.iloc[:, 1])\n', (1982, 2030), True, 'import numpy as np\n')] |
import numpy as np
import tvm
from tvm.contrib import graph_runtime
import topi.testing
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing.config import ctx_list
from nnvm.testing.check_computation import check_function
def test_check_function():
    """Exercise the check_function test helper itself.

    Covers: all accepted return styles for forward/backward callbacks, shape and
    dtype passing conventions, in_range/values overrides, multi-output symbols,
    additional_params, shape/dtype inference, and — via a must-fail wrapper —
    that check_function actually reports mismatches.
    """
    # test the testing function
    x = sym.Variable("x")
    y = sym.Variable("y")
    # different styles of returning gradients from the backward function
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [head_grads, 2*head_grads],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: (head_grads, 2*head_grads),
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'x': head_grads, 'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    # a partial gradient dict (only 'y') is accepted too
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [2*head_grads],
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float64')
    # test just numerical gradients
    # different styles of shape and dtype passing
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)},
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)}, dtype='float32',
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)}, dtype={x: 'float32', 'y': 'float32'},
                   numerical_grads=True)
    check_function(x + 2*y, shape=(1, 2), dtype='float32',
                   numerical_grads=True)
    # specifying variable attributes on variable creation
    # (in this case type codes must be used)
    x = sym.Variable("x", dtype=0, shape=(1, 2))
    check_function(x + 2*y, shape={y: (1, 2)}, dtype={'y': 'float32'}, numerical_grads=True)
    y = sym.Variable("y", dtype=0, shape=(1, 2))
    # shape overriding
    def _fwd1(x, y):
        assert x.shape == (1, 1)
        assert y.shape == (1, 2)
        return x + 2*y
    check_function(x + 2*y, _fwd1, shape={x: (1, 1)})
    # in_range
    def _fwd2(x, y):
        assert x.shape == (100,)
        assert (x <= 0.9).all()
        assert (x >= 0.8).all()
        return x + 2*y
    check_function(x + 2*y, _fwd2, shape=(100,), in_range=(0.8, 0.9), numerical_grads=False)
    check_function(x + 2*y, _fwd2, shape=(100,), in_range={'x': (0.8, 0.9)}, numerical_grads=False)
    # pinning head_grads to 1.0 makes the expected backward values constants
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   in_range={'head_grads_0': (1.0, 1.0)})
    # explicit passing of values
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   values={'head_grads_0': np.full((1, 2), 1.0)})
    # check that the function reports errors
    def _check_function_must_fail(*args, **kwargs):
        # Inverts check_function: passes only if it raises `error`
        # (AssertionError by default).
        error = AssertionError
        if 'error' in kwargs:
            error = kwargs['error']
            del kwargs['error']
        try:
            check_function(*args, quiet=True, **kwargs)
        except error:
            pass
        else:
            raise AssertionError("check_function didn't raise an exception")
    _check_function_must_fail(x + 2*y, error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: x + y)
    _check_function_must_fail(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0])
    _check_function_must_fail(sym.block_grad(x + 2*y), numerical_grads=True)
    # zero tolerances must fail numerical gradient comparison
    _check_function_must_fail(x*x, numerical_grads=True,
                              numerical_grads_params={'atol': 0.0, 'rtol': 0.0})
    _check_function_must_fail(sym.log(-x*x), numerical_grads=True, error=ValueError)
    # different styles of returning results from the forward function
    check_function(x + 2*y, lambda x, y: [x + 2*y], numerical_grads=False)
    _check_function_must_fail(x + 2*y, lambda x, y: [x + 2*y, x], numerical_grads=False,
                              error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: [], numerical_grads=False,
                              error=ValueError)
    # multiple outputs
    z = sym.Group([2*x + y, x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y])
    check_function(z, lambda x, y: (2*x + y, x + 2*y))
    check_function(z, backward=lambda x, y, head_grads: [2*head_grads[0] + head_grads[1],
                                                         head_grads[0] + 2*head_grads[1]])
    _check_function_must_fail(z, backward=lambda x, y, head_grads: [2*head_grads[0],
                                                                    2*head_grads[1]])
    check_function(z, backward=lambda x, y, head_grads: [head_grads[1], 2*head_grads[1]],
                   in_range={'head_grads_0': (0, 0)})
    check_function(z, numerical_grads=True)
    # a blocked-gradient member makes symbolic/numerical grads disagree
    z = sym.Group([sym.block_grad(2*x + y), x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y], numerical_grads=False)
    _check_function_must_fail(z, lambda x, y: [2*x + y, x + 2*y])
    _check_function_must_fail(z, numerical_grads=True)
    z = sym.Group([2*x + y, sym.block_grad(x + 2*y)])
    _check_function_must_fail(z, numerical_grads=True)
    z = sym.Group([2*x + y, x + 2*y, x, y, sym.sum(x)])
    check_function(z, lambda x, y: [2*x + y, x + 2*y, x, y, np.sum(x)])
    # passing additional parameters to forward and backward
    def _fwd3(x, p):
        assert p == 'v'
        return x + 1
    def _bwd3(x, p, head_grads):
        assert p == 'v'
        return head_grads
    check_function(x + 1, _fwd3, _bwd3, additional_params={'p': 'v'})
    # implicitly created variables and shape/dtype inference for inputs
    x = sym.Variable("x", shape=(2, 3), dtype=0)
    b = sym.Variable("b")
    y = sym.dense(data=x, bias=b, units=4)
    # Don't check gradients on cuda because is doesn't yet support ewise after reduce
    check_function(y, exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, shape={'x': (3, 4)}, exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, dtype={'x': 'float64'}, exclude_targets={'cuda'}, numerical_grads=True)
    x = sym.Variable("x")
    b = sym.Variable("b")
    w = sym.Variable("w")
    y = sym.dense(data=x, bias=b, weight=w, units=4)
    def _fwd_dense(x, w, b):
        return np.dot(x, w.T) + b
    check_function(y, _fwd_dense, shape={'x': (1,2)}, dtype={'x': 'float32'}, numerical_grads=False)
    check_function(y, _fwd_dense, shape={'x': (1,2)}, dtype={'w': 'float64'}, numerical_grads=False)
    # mixed float64/float32 inputs must be rejected by type inference
    _check_function_must_fail(y, _fwd_dense, shape={'x': (1,2)},
                              dtype={'w': 'float64', 'b': 'float32'},
                              numerical_grads=False,
                              error=nnvm._base.NNVMError)
    # fails because no shape
    _check_function_must_fail(y, _fwd_dense, numerical_grads=False, error=ValueError)
    # ok because type is float32 by default
    check_function(y, _fwd_dense, shape={'x': (1,2)}, numerical_grads=False)
def test_relu():
    """relu applied to a shifted leaky_relu: forward and gradient must match."""
    data = sym.Variable("x")
    out = sym.relu(sym.leaky_relu(data, alpha=0.3) - 0.2)
    def forward(x):
        # leaky_relu(x, alpha=0.3) shifted by -0.2, then relu
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x
    def backward(head_grads, x):
        sub = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        relu_mask = (sub > 0).astype("float")
        leaky_grad = (x > 0).astype("float") + 0.3 * (x < 0).astype("float")
        return [relu_mask * leaky_grad * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_prelu_nchw():
    """prelu with per-channel alpha in NCHW layout (default channel axis)."""
    data = sym.Variable("x")
    alpha = sym.Variable("a")
    out = sym.prelu(data=data, alpha=alpha)
    def forward(x, a):
        # negative side scaled by alpha broadcast over H and W
        return (x < 0) * (x * a.reshape(3, 1, 1)) + (x>=0) * x
    check_function(out, forward, shape={'x': (1, 3, 32, 32), 'a': (3,)})
def test_prelu_nhwc():
    """prelu with per-channel alpha in NHWC layout (axis=3)."""
    data = sym.Variable("x")
    alpha = sym.Variable("a")
    out = sym.prelu(data=data, alpha=alpha, axis=3)
    def forward(x, a):
        # channel axis is last, so alpha broadcasts as (1, 1, C)
        return (x < 0) * (x * a.reshape(1, 1, 3)) + (x>=0) * x
    check_function(out, forward, shape={'x': (1, 32, 32, 3), 'a': (3,)})
def test_sym_scalar_pow():
    """symbol ** python-scalar: forward and analytic gradient."""
    exponent = 3
    data = sym.Variable("x")
    out = data**exponent
    def forward(x):
        return x**exponent
    def backward(head_grads, x):
        # d/dx x**n = n * x**(n-1)
        return [exponent * x**(exponent - 1) * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_scalar_sym_pow():
    """python-scalar ** symbol: forward and analytic gradient."""
    base = 3
    data = sym.Variable("x")
    out = base**data
    def forward(x):
        return base**x
    def backward(head_grads, x):
        # d/dx a**x = ln(a) * a**x
        return [np.log(base) * base**x * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_exp():
    """Elementwise exp; gradient of exp(x) is exp(x) itself."""
    data = sym.Variable("x")
    out = sym.exp(data)
    def forward(x):
        return np.exp(x)
    def backward(head_grads, x):
        return [np.exp(x) * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_log():
    """Elementwise log; inputs restricted to positive values via in_range."""
    data = sym.Variable("x")
    out = sym.log(data)
    def forward(x):
        return np.log(x)
    def backward(head_grads, x):
        # d/dx log(x) = 1/x
        return [1. / x * head_grads]
    check_function(out, forward, backward, in_range=(0.002, 2.0),
                   shape={'x': (1, 3, 32, 32)})
def test_tanh():
    """Elementwise tanh, computed in the reference as sinh/cosh."""
    data = sym.Variable("x")
    out = sym.tanh(data)
    def forward(x):
        return np.sinh(x) / np.cosh(x)
    def backward(head_grads, x):
        # d/dx tanh(x) = 1 - tanh(x)**2
        tanh_x = forward(x)
        return [(1 - tanh_x**2) * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_sigmoid():
    """Elementwise logistic sigmoid and its gradient."""
    data = sym.Variable("x")
    out = sym.sigmoid(data)
    def forward(x):
        return 1.0 / (1.0 + np.exp(-x))
    def backward(head_grads, x):
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
        sig = forward(x)
        return [sig *(1 - sig) * head_grads]
    check_function(out, forward, backward, shape={'x': (1, 3, 32, 32)})
def test_softmax():
    """softmax against the topi reference, forward and Jacobian-vector product."""
    data = sym.Variable("x")
    out = sym.softmax(data)
    def forward(x):
        return topi.testing.softmax_python(x)
    def backward(head_grads, x):
        sm = topi.testing.softmax_python(x)
        # JVP of softmax: y * (g - sum(y * g)) along the class axis
        return [sm * (head_grads - np.sum(sm * head_grads, axis=1, keepdims=True))]
    # large class count: numerical grads are too slow/noisy, skip them
    check_function(out, forward, backward,
                   shape={'x': (10, 1000)}, numerical_grads=False)
    check_function(out, forward, backward,
                   shape={'x': (2, 10)})
def test_log_softmax():
    """log_softmax against the topi reference, forward and gradient."""
    data = sym.Variable("x")
    out = sym.log_softmax(data)
    def forward(x):
        return topi.testing.log_softmax_python(x)
    def backward(head_grads, x):
        ls = topi.testing.log_softmax_python(x)
        # grad: g - softmax(x) * sum(g) along the class axis
        return [head_grads - np.exp(ls) * np.sum(head_grads, axis=1, keepdims=True)]
    # large class count: numerical grads are too slow/noisy, skip them
    check_function(out, forward, backward,
                   shape={'x': (10, 1000)}, numerical_grads=False)
    check_function(out, forward, backward,
                   shape={'x': (2, 10)})
def test_dense():
    """Dense (fully connected) layer with bias, flattened, vs np.dot reference."""
    x = sym.Variable("x", shape=(10, 100))
    w = sym.Variable("dense_weight", shape=(3, 100))
    b = sym.Variable("dense_bias", shape=(3,))
    y = sym.dense(x, w, b, use_bias=True, units=3, name="dense")
    y = sym.flatten(y)
    # Parameter names must match the Variable names so check_function can
    # route the generated inputs to the right arguments.
    def forward(x, dense_weight, dense_bias):
        return np.dot(x, dense_weight.T) + dense_bias
    # NOTE(review): the 'w'/'b' keys below match no variable name
    # ("dense_weight"/"dense_bias"); shapes are already fixed on the Variables,
    # so these entries look inert — confirm check_function ignores unknown keys.
    shape = {
        'x': (10, 100),
        'w': (3, 100),
        'b': (3,)
    }
    # Don't check gradients on cuda because is doesn't yet support ewise after reduce
    check_function(y, forward, shape=shape,
                   exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, forward, shape=shape,
                   only_targets={'cuda'}, numerical_grads=False)
def test_batchnorm():
    """Inference-mode batch_norm using running statistics."""
    data = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    out = sym.batch_norm(
        data, gamma, beta, moving_mean, moving_var, epsilon=eps)
    # Parameter names must match the Variable names for input routing.
    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta
    shapes = {
        'x': (10, 20),
        'gamma': (20,),
        'beta': (20,),
        'moving_mean': (20,),
        'moving_var': (20,)
    }
    # keep moving_var strictly positive so the sqrt stays well-conditioned
    check_function(out, forward, in_range=(0.001, 1.0), shape=shapes)
def verify_concatenate(ishape, axis):
    """Concatenate one variable per input shape along `axis`, then add 1."""
    inputs = [sym.Variable("x%d" % i, shape=shp) for i, shp in enumerate(ishape)]
    out = sym.concatenate(*inputs, axis=axis) + 1
    def forward(**kwargs):
        # kwargs preserve insertion order (x0, x1, ...), matching the symbols
        return np.concatenate(list(kwargs.values()), axis=axis) + 1
    check_function(out, forward)
def test_concatenate():
    """Concatenation along the leading and the trailing axis."""
    for shapes, axis in [([(2, 3, 4), (1, 3, 4)], 0), ([(2, 4), (2, 7)], 1)]:
        verify_concatenate(shapes, axis=axis)
def verify_split(ishape, indices_or_sections, axis):
    """split vs np.split with the same indices_or_sections/axis."""
    data = sym.Variable("x", shape=ishape)
    out = sym.split(data, indices_or_sections=indices_or_sections, axis=axis)
    def forward(x):
        return np.split(x, indices_or_sections, axis=axis)
    check_function(out, forward)
def test_split():
    """Even section split and explicit index splits."""
    for shape, sections, axis in [((2, 3), 2, 0),
                                  ((5, 3), [3], 0),
                                  ((5, 9, 3), [3, 4], 1)]:
        verify_split(shape, sections, axis=axis)
def verify_strided_slice(ishape, begin, end, strideinp=None):
    """strided_slice (+1) vs direct numpy slicing.

    Parameters
    ----------
    ishape : tuple
        Input shape (3-D in all current call sites).
    begin, end : list of int
        Slice bounds; entries beyond the given length are padded (begin with 0,
        end with the corresponding input dimension) for the reference slice.
    strideinp : list of int, optional
        Strides; defaults to [1, 1, 1] when omitted.
    """
    # Copy the bound lists up front: the padding below used to mutate the
    # caller's lists in place, surprising callers that reuse them.
    begin = list(begin)
    end = list(end)
    stride = strideinp if strideinp else [1, 1, 1]
    x = sym.Variable("x", shape=ishape)
    if strideinp:
        y = sym.strided_slice(x, begin = begin, end = end, stride = stride) + 1
    else:
        y = sym.strided_slice(x, begin = begin, end = end) + 1
    # Pad bounds to 3 axes so the reference slice below is fully specified
    for i in range(len(begin), 3):
        begin.append(0)
    for i in range(len(end), 3):
        end.append(ishape[i])
    def test_forward(x):
        return x[begin[0]:end[0]:stride[0],
                 begin[1]:end[1]:stride[1], begin[2]:end[2]:stride[2]] + 1
    check_function(y, test_forward)
def test_strided_slice():
    """Positive/negative bounds and strides, plus implicit padding cases."""
    cases = [
        ((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2]),
        ((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1]),
        ((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1]),
        ((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2]),
        ((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1]),
        ((3, 4, 3), [1, 1, 0], [4, 4, 3], None),
        ((3, 4, 3), [1, 1, 0], [4, 1000, 3], None),
        ((3, 4, 3), [1, 1, 0], [4, 4], None),
        ((3, 4, 3), [1, 1], [4, 4, 3], None),
    ]
    for ishape, begin, end, stride in cases:
        # stride=None exercises the default-stride code path
        verify_strided_slice(ishape, begin, end, stride)
def verify_take(src_shape, indices_src, axis=None):
    """take vs np.take, on an arange-filled source of `src_shape`."""
    src_dtype = "float32"
    indices_dtype = "int32"
    idx_np = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a", shape=src_shape)
    indices = sym.Variable("indices", shape=idx_np.shape)
    out = sym.take(a, indices, axis=axis)
    def forward(a, indices):
        return np.take(a, indices=indices, axis=axis)
    # deterministic source data: 0..N-1 reshaped to src_shape
    a_np = np.arange(np.prod(src_shape), dtype=src_dtype).reshape(src_shape)
    check_function(out, forward,
                   dtype={'a': src_dtype, 'indices': indices_dtype},
                   values={'a': a_np, 'indices': idx_np})
def test_take():
    """Flat-mode takes (no axis) followed by axis-specific takes."""
    # default axis=None flattens the source first
    for shape, idx in [((4,), [1]),
                       ((4,), [[0, 1, 2, 3]]),
                       ((3, 3, 3), [[11, 25]]),
                       ((4,), [[0, 1], [2, 3]])]:
        verify_take(shape, idx)
    for shape, idx, axis in [((4,), [1], 0),
                             ((2, 2), [[[1, 0], [0, 1]]], 0),
                             ((2, 2), [[[1, 0], [0, 1]]], 1),
                             ((4, 3, 5, 6), [[2, 1, 0, 0]], -2)]:
        verify_take(shape, idx, axis)
def verify_squeeze(shape, axis):
    """squeeze (+1) vs np.squeeze; gradient is a plain reshape back."""
    data = sym.Variable("x")
    out = sym.squeeze(data, axis=axis) if axis is not None else sym.squeeze(data)
    out = out + 1
    def forward(x):
        return np.squeeze(x, axis=axis) + 1
    def backward(head_grads, x):
        # squeeze only drops size-1 axes, so the gradient is a reshape
        return [np.reshape(head_grads, x.shape)]
    check_function(out, forward, backward, shape=shape)
def test_squeeze():
    """Squeeze all size-1 axes, the first axis, and a negative axis."""
    for shape, axis in [((1, 3, 2, 5), None),
                        ((1, 3, 1), 0),
                        ((1, 3, 2, 5, 1), -1)]:
        verify_squeeze(shape, axis)
def test_pad():
    """Constant padding of the last two axes, fill value 1."""
    pad_width = ((0, 0), (0, 0), (0, 1), (2, 3))
    data = sym.Variable("x")
    out = sym.pad(data, pad_width=pad_width, pad_value=1.)
    def forward(x):
        return np.pad(x,
                      pad_width=pad_width,
                      mode='constant', constant_values=1.)
    check_function(out, forward, shape={'x': (1, 3, 28, 28)})
def verify_lrn(ishape, size, axis, bias, alpha, beta):
    """Local response normalization vs the topi reference."""
    data = sym.Variable("x", shape=ishape)
    out = sym.lrn(data, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    def forward1(x):
        return topi.testing.lrn_python(x, size, axis, bias, alpha, beta)
    check_function(out, forward1)
    def forward2(x):
        # relu applied on top of the LRN reference
        ref = forward1(x)
        return (ref > 0)*ref
    #Checking LRN op followed by elementwise op relu
    check_function(sym.relu(out), forward2, in_range={'x': (-10.0, 10.0)})
def verify_l2_normalize(ishape, eps, axis):
    """L2 normalization vs the topi reference."""
    data = sym.Variable("x", shape=ishape)
    out = sym.l2_normalize(data, eps=eps, axis=axis)
    def forward1(x):
        return topi.testing.l2_normalize_python(x, eps, axis)
    check_function(out, forward1)
    def forward2(x):
        # relu applied on top of the normalization reference
        ref = forward1(x)
        return (ref > 0)*ref
    #Checking L2 normalization op followed by elementwise op relu
    check_function(sym.relu(out), forward2, in_range={'x': (-10.0, 10.0)})
def test_lrn():
    """LRN with two bias/beta settings on an NCHW input."""
    for args in [((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5),
                 ((1, 3, 20, 20), 3, 1, 2.0, 1.0, 0.75)]:
        verify_lrn(*args)
def test_l2_normalize():
    """L2 normalization over one axis and over two axes."""
    for axes in [(1,), (1, 2)]:
        verify_l2_normalize((1, 3, 20, 20), 0.001, axes)
def verify_gather_nd(src_shape, indices_src):
    """gather_nd vs the topi reference, on an arange-filled source."""
    src_dtype = "float32"
    indices_dtype = "int32"
    idx_np = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a", shape=src_shape)
    indices = sym.Variable("indices", shape=idx_np.shape)
    out = sym.gather_nd(a, indices)
    def forward(a, indices):
        return topi.testing.gather_nd_python(a, indices)
    # deterministic source data: 0..N-1 reshaped to src_shape
    a_np = np.arange(np.prod(src_shape), dtype=src_dtype).reshape(src_shape)
    check_function(out, forward,
                   dtype={'a': src_dtype, 'indices': indices_dtype},
                   values={'a': a_np, 'indices': idx_np})
def test_gather_nd():
    """gather_nd over sources of rank 1 through 4 and varied index depths."""
    cases = [
        ((4,), [[1]]),
        ((4,), [[1, 3, 2]]),
        ((2, 3), [[1]]),
        ((2, 3), [[1], [0]]),
        ((2, 3), [[1, 0], [0, 2]]),
        ((2, 3, 4), [[1, 0], [0, 2]]),
        ((2, 3, 4), [[1, 0], [0, 2], [3, 1]]),
        ((2, 3, 4), [[[1, 0], [0, 1]], [[0, 2], [1, 2]],
                     [[3, 1], [0, 2]]]),
        ((2, 3, 4, 5), [[1, 0], [0, 2]]),
        ((2, 3, 4, 5), [[1, 0], [2, 1], [3, 2], [4, 2]]),
    ]
    for src_shape, indices in cases:
        verify_gather_nd(src_shape, indices)
if __name__ == "__main__":
    # Run every test in this module when executed as a script
    # (this file predates/avoids a pytest runner).
    test_check_function()
    test_split()
    test_concatenate()
    test_log_softmax()
    test_batchnorm()
    test_dense()
    test_relu()
    test_prelu_nchw()
    test_prelu_nhwc()
    test_sym_scalar_pow()
    test_scalar_sym_pow()
    test_exp()
    test_log()
    test_tanh()
    test_sigmoid()
    test_softmax()
    test_squeeze()
    test_pad()
    test_take()
    test_lrn()
    test_l2_normalize()
    test_strided_slice()
    test_gather_nd()
| [
"numpy.prod",
"nnvm.symbol.gather_nd",
"numpy.sqrt",
"nnvm.symbol.l2_normalize",
"nnvm.symbol.strided_slice",
"nnvm.symbol.batch_norm",
"numpy.log",
"nnvm.symbol.concatenate",
"numpy.sinh",
"numpy.array",
"nnvm.symbol.leaky_relu",
"nnvm.symbol.pad",
"nnvm.symbol.block_grad",
"numpy.reshape... | [((303, 320), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (315, 320), True, 'import nnvm.symbol as sym\n'), ((329, 346), 'nnvm.symbol.Variable', 'sym.Variable', (['"""y"""'], {}), "('y')\n", (341, 346), True, 'import nnvm.symbol as sym\n'), ((425, 587), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', '(lambda x, y, head_grads: [head_grads, 2 * head_grads])'], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads:\n [head_grads, 2 * head_grads], shape={'x': (1, 2), y: (1, 2)}, dtype=\n 'float32')\n", (439, 587), False, 'from nnvm.testing.check_computation import check_function\n'), ((615, 777), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', '(lambda x, y, head_grads: (head_grads, 2 * head_grads))'], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads:\n (head_grads, 2 * head_grads), shape={'x': (1, 2), y: (1, 2)}, dtype=\n 'float32')\n", (629, 777), False, 'from nnvm.testing.check_computation import check_function\n'), ((805, 976), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', "(lambda x, y, head_grads: {'x': head_grads, 'y': 2 * head_grads})"], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads:\n {'x': head_grads, 'y': 2 * head_grads}, shape={'x': (1, 2), y: (1, 2)},\n dtype='float32')\n", (819, 976), False, 'from nnvm.testing.check_computation import check_function\n'), ((1005, 1155), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', "(lambda x, y, head_grads: {'y': 2 * head_grads})"], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': 
'"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads:\n {'y': 2 * head_grads}, shape={'x': (1, 2), y: (1, 2)}, dtype='float32')\n", (1019, 1155), False, 'from nnvm.testing.check_computation import check_function\n'), ((1188, 1358), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', '(lambda x, y, head_grads: [2 * head_grads])'], {'grad_input_vars': '[y]', 'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads:\n [2 * head_grads], grad_input_vars=[y], shape={'x': (1, 2), y: (1, 2)},\n dtype='float32')\n", (1202, 1358), False, 'from nnvm.testing.check_computation import check_function\n'), ((1406, 1575), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', '(lambda x, y, head_grads: 2 * head_grads)'], {'grad_input_vars': '[y]', 'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads: \n 2 * head_grads, grad_input_vars=[y], shape={'x': (1, 2), y: (1, 2)},\n dtype='float32')\n", (1420, 1575), False, 'from nnvm.testing.check_computation import check_function\n'), ((1622, 1791), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: x + 2 * y)', '(lambda x, y, head_grads: 2 * head_grads)'], {'grad_input_vars': '[y]', 'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float64"""'}), "(x + 2 * y, lambda x, y: x + 2 * y, lambda x, y, head_grads: \n 2 * head_grads, grad_input_vars=[y], shape={'x': (1, 2), y: (1, 2)},\n dtype='float64')\n", (1636, 1791), False, 'from nnvm.testing.check_computation import check_function\n'), ((1925, 2004), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'shape': "{'x': (1, 2), y: (1, 2)}", 'numerical_grads': '(True)'}), "(x + 2 * y, shape={'x': (1, 2), y: (1, 2)}, 
numerical_grads=True)\n", (1939, 2004), False, 'from nnvm.testing.check_computation import check_function\n'), ((2026, 2126), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': '"""float32"""', 'numerical_grads': '(True)'}), "(x + 2 * y, shape={'x': (1, 2), y: (1, 2)}, dtype='float32',\n numerical_grads=True)\n", (2040, 2126), False, 'from nnvm.testing.check_computation import check_function\n'), ((2144, 2265), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'shape': "{'x': (1, 2), y: (1, 2)}", 'dtype': "{x: 'float32', 'y': 'float32'}", 'numerical_grads': '(True)'}), "(x + 2 * y, shape={'x': (1, 2), y: (1, 2)}, dtype={x:\n 'float32', 'y': 'float32'}, numerical_grads=True)\n", (2158, 2265), False, 'from nnvm.testing.check_computation import check_function\n'), ((2283, 2361), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'shape': '(1, 2)', 'dtype': '"""float32"""', 'numerical_grads': '(True)'}), "(x + 2 * y, shape=(1, 2), dtype='float32', numerical_grads=True)\n", (2297, 2361), False, 'from nnvm.testing.check_computation import check_function\n'), ((2491, 2531), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'dtype': '(0)', 'shape': '(1, 2)'}), "('x', dtype=0, shape=(1, 2))\n", (2503, 2531), True, 'import nnvm.symbol as sym\n'), ((2536, 2630), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'shape': '{y: (1, 2)}', 'dtype': "{'y': 'float32'}", 'numerical_grads': '(True)'}), "(x + 2 * y, shape={y: (1, 2)}, dtype={'y': 'float32'},\n numerical_grads=True)\n", (2550, 2630), False, 'from nnvm.testing.check_computation import check_function\n'), ((2633, 2673), 'nnvm.symbol.Variable', 'sym.Variable', (['"""y"""'], {'dtype': '(0)', 'shape': '(1, 2)'}), "('y', dtype=0, shape=(1, 2))\n", (2645, 2673), True, 'import nnvm.symbol as sym\n'), ((2812, 2863), 
'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '_fwd1'], {'shape': '{x: (1, 1)}'}), '(x + 2 * y, _fwd1, shape={x: (1, 1)})\n', (2826, 2863), False, 'from nnvm.testing.check_computation import check_function\n'), ((3023, 3117), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '_fwd2'], {'shape': '(100,)', 'in_range': '(0.8, 0.9)', 'numerical_grads': '(False)'}), '(x + 2 * y, _fwd2, shape=(100,), in_range=(0.8, 0.9),\n numerical_grads=False)\n', (3037, 3117), False, 'from nnvm.testing.check_computation import check_function\n'), ((3116, 3217), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '_fwd2'], {'shape': '(100,)', 'in_range': "{'x': (0.8, 0.9)}", 'numerical_grads': '(False)'}), "(x + 2 * y, _fwd2, shape=(100,), in_range={'x': (0.8, 0.9)},\n numerical_grads=False)\n", (3130, 3217), False, 'from nnvm.testing.check_computation import check_function\n'), ((3216, 3330), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)'], {'backward': '(lambda x, y, head_grads: [1.0, 2.0])', 'in_range': "{'head_grads_0': (1.0, 1.0)}"}), "(x + 2 * y, backward=lambda x, y, head_grads: [1.0, 2.0],\n in_range={'head_grads_0': (1.0, 1.0)})\n", (3230, 3330), False, 'from nnvm.testing.check_computation import check_function\n'), ((4520, 4594), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 2 * y)', '(lambda x, y: [x + 2 * y])'], {'numerical_grads': '(False)'}), '(x + 2 * y, lambda x, y: [x + 2 * y], numerical_grads=False)\n', (4534, 4594), False, 'from nnvm.testing.check_computation import check_function\n'), ((4887, 4920), 'nnvm.symbol.Group', 'sym.Group', (['[2 * x + y, x + 2 * y]'], {}), '([2 * x + y, x + 2 * y])\n', (4896, 4920), True, 'import nnvm.symbol as sym\n'), ((4921, 4975), 'nnvm.testing.check_computation.check_function', 'check_function', (['z', '(lambda x, y: [2 * x + y, x + 2 * y])'], {}), '(z, lambda x, y: [2 
* x + y, x + 2 * y])\n', (4935, 4975), False, 'from nnvm.testing.check_computation import check_function\n'), ((4976, 5030), 'nnvm.testing.check_computation.check_function', 'check_function', (['z', '(lambda x, y: (2 * x + y, x + 2 * y))'], {}), '(z, lambda x, y: (2 * x + y, x + 2 * y))\n', (4990, 5030), False, 'from nnvm.testing.check_computation import check_function\n'), ((5031, 5158), 'nnvm.testing.check_computation.check_function', 'check_function', (['z'], {'backward': '(lambda x, y, head_grads: [2 * head_grads[0] + head_grads[1], head_grads[0] +\n 2 * head_grads[1]])'}), '(z, backward=lambda x, y, head_grads: [2 * head_grads[0] +\n head_grads[1], head_grads[0] + 2 * head_grads[1]])\n', (5045, 5158), False, 'from nnvm.testing.check_computation import check_function\n'), ((5383, 5509), 'nnvm.testing.check_computation.check_function', 'check_function', (['z'], {'backward': '(lambda x, y, head_grads: [head_grads[1], 2 * head_grads[1]])', 'in_range': "{'head_grads_0': (0, 0)}"}), "(z, backward=lambda x, y, head_grads: [head_grads[1], 2 *\n head_grads[1]], in_range={'head_grads_0': (0, 0)})\n", (5397, 5509), False, 'from nnvm.testing.check_computation import check_function\n'), ((5527, 5566), 'nnvm.testing.check_computation.check_function', 'check_function', (['z'], {'numerical_grads': '(True)'}), '(z, numerical_grads=True)\n', (5541, 5566), False, 'from nnvm.testing.check_computation import check_function\n'), ((5626, 5703), 'nnvm.testing.check_computation.check_function', 'check_function', (['z', '(lambda x, y: [2 * x + y, x + 2 * y])'], {'numerical_grads': '(False)'}), '(z, lambda x, y: [2 * x + y, x + 2 * y], numerical_grads=False)\n', (5640, 5703), False, 'from nnvm.testing.check_computation import check_function\n'), ((6274, 6339), 'nnvm.testing.check_computation.check_function', 'check_function', (['(x + 1)', '_fwd3', '_bwd3'], {'additional_params': "{'p': 'v'}"}), "(x + 1, _fwd3, _bwd3, additional_params={'p': 'v'})\n", (6288, 6339), False, 'from 
nnvm.testing.check_computation import check_function\n'), ((6421, 6461), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': '(2, 3)', 'dtype': '(0)'}), "('x', shape=(2, 3), dtype=0)\n", (6433, 6461), True, 'import nnvm.symbol as sym\n'), ((6470, 6487), 'nnvm.symbol.Variable', 'sym.Variable', (['"""b"""'], {}), "('b')\n", (6482, 6487), True, 'import nnvm.symbol as sym\n'), ((6496, 6530), 'nnvm.symbol.dense', 'sym.dense', ([], {'data': 'x', 'bias': 'b', 'units': '(4)'}), '(data=x, bias=b, units=4)\n', (6505, 6530), True, 'import nnvm.symbol as sym\n'), ((6621, 6686), 'nnvm.testing.check_computation.check_function', 'check_function', (['y'], {'exclude_targets': "{'cuda'}", 'numerical_grads': '(True)'}), "(y, exclude_targets={'cuda'}, numerical_grads=True)\n", (6635, 6686), False, 'from nnvm.testing.check_computation import check_function\n'), ((6691, 6781), 'nnvm.testing.check_computation.check_function', 'check_function', (['y'], {'shape': "{'x': (3, 4)}", 'exclude_targets': "{'cuda'}", 'numerical_grads': '(True)'}), "(y, shape={'x': (3, 4)}, exclude_targets={'cuda'},\n numerical_grads=True)\n", (6705, 6781), False, 'from nnvm.testing.check_computation import check_function\n'), ((6782, 6875), 'nnvm.testing.check_computation.check_function', 'check_function', (['y'], {'dtype': "{'x': 'float64'}", 'exclude_targets': "{'cuda'}", 'numerical_grads': '(True)'}), "(y, dtype={'x': 'float64'}, exclude_targets={'cuda'},\n numerical_grads=True)\n", (6796, 6875), False, 'from nnvm.testing.check_computation import check_function\n'), ((6881, 6898), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (6893, 6898), True, 'import nnvm.symbol as sym\n'), ((6907, 6924), 'nnvm.symbol.Variable', 'sym.Variable', (['"""b"""'], {}), "('b')\n", (6919, 6924), True, 'import nnvm.symbol as sym\n'), ((6933, 6950), 'nnvm.symbol.Variable', 'sym.Variable', (['"""w"""'], {}), "('w')\n", (6945, 6950), True, 'import nnvm.symbol as sym\n'), ((6959, 7003), 
'nnvm.symbol.dense', 'sym.dense', ([], {'data': 'x', 'bias': 'b', 'weight': 'w', 'units': '(4)'}), '(data=x, bias=b, weight=w, units=4)\n', (6968, 7003), True, 'import nnvm.symbol as sym\n'), ((7071, 7172), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', '_fwd_dense'], {'shape': "{'x': (1, 2)}", 'dtype': "{'x': 'float32'}", 'numerical_grads': '(False)'}), "(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'x': 'float32'},\n numerical_grads=False)\n", (7085, 7172), False, 'from nnvm.testing.check_computation import check_function\n'), ((7172, 7273), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', '_fwd_dense'], {'shape': "{'x': (1, 2)}", 'dtype': "{'w': 'float64'}", 'numerical_grads': '(False)'}), "(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'w': 'float64'},\n numerical_grads=False)\n", (7186, 7273), False, 'from nnvm.testing.check_computation import check_function\n'), ((7678, 7751), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', '_fwd_dense'], {'shape': "{'x': (1, 2)}", 'numerical_grads': '(False)'}), "(y, _fwd_dense, shape={'x': (1, 2)}, numerical_grads=False)\n", (7692, 7751), False, 'from nnvm.testing.check_computation import check_function\n'), ((7777, 7794), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (7789, 7794), True, 'import nnvm.symbol as sym\n'), ((8205, 8254), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (8219, 8254), False, 'from nnvm.testing.check_computation import check_function\n'), ((8287, 8304), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (8299, 8304), True, 'import nnvm.symbol as sym\n'), ((8313, 8330), 'nnvm.symbol.Variable', 'sym.Variable', (['"""a"""'], {}), "('a')\n", (8325, 8330), True, 'import nnvm.symbol as sym\n'), ((8339, 8365), 'nnvm.symbol.prelu', 'sym.prelu', ([], {'data': 'x', 'alpha': 'a'}), 
'(data=x, alpha=a)\n', (8348, 8365), True, 'import nnvm.symbol as sym\n'), ((8503, 8542), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'shape': 'shape'}), '(y, forward, shape=shape)\n', (8517, 8542), False, 'from nnvm.testing.check_computation import check_function\n'), ((8575, 8592), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (8587, 8592), True, 'import nnvm.symbol as sym\n'), ((8601, 8618), 'nnvm.symbol.Variable', 'sym.Variable', (['"""a"""'], {}), "('a')\n", (8613, 8618), True, 'import nnvm.symbol as sym\n'), ((8627, 8661), 'nnvm.symbol.prelu', 'sym.prelu', ([], {'data': 'x', 'alpha': 'a', 'axis': '(3)'}), '(data=x, alpha=a, axis=3)\n', (8636, 8661), True, 'import nnvm.symbol as sym\n'), ((8799, 8838), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'shape': 'shape'}), '(y, forward, shape=shape)\n', (8813, 8838), False, 'from nnvm.testing.check_computation import check_function\n'), ((8890, 8907), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (8902, 8907), True, 'import nnvm.symbol as sym\n'), ((9101, 9150), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (9115, 9150), False, 'from nnvm.testing.check_computation import check_function\n'), ((9203, 9220), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (9215, 9220), True, 'import nnvm.symbol as sym\n'), ((9415, 9464), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (9429, 9464), False, 'from nnvm.testing.check_computation import check_function\n'), ((9491, 9508), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (9503, 9508), True, 'import nnvm.symbol as sym\n'), ((9517, 9527), 'nnvm.symbol.exp', 'sym.exp', (['x'], {}), 
'(x)\n', (9524, 9527), True, 'import nnvm.symbol as sym\n'), ((9687, 9736), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (9701, 9736), False, 'from nnvm.testing.check_computation import check_function\n'), ((9763, 9780), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (9775, 9780), True, 'import nnvm.symbol as sym\n'), ((9789, 9799), 'nnvm.symbol.log', 'sym.log', (['x'], {}), '(x)\n', (9796, 9799), True, 'import nnvm.symbol as sym\n'), ((9956, 10028), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'in_range': '(0.002, 2.0)', 'shape': 'shape'}), '(y, forward, backward, in_range=(0.002, 2.0), shape=shape)\n', (9970, 10028), False, 'from nnvm.testing.check_computation import check_function\n'), ((10056, 10073), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (10068, 10073), True, 'import nnvm.symbol as sym\n'), ((10082, 10093), 'nnvm.symbol.tanh', 'sym.tanh', (['x'], {}), '(x)\n', (10090, 10093), True, 'import nnvm.symbol as sym\n'), ((10297, 10346), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (10311, 10346), False, 'from nnvm.testing.check_computation import check_function\n'), ((10377, 10394), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (10389, 10394), True, 'import nnvm.symbol as sym\n'), ((10403, 10417), 'nnvm.symbol.sigmoid', 'sym.sigmoid', (['x'], {}), '(x)\n', (10414, 10417), True, 'import nnvm.symbol as sym\n'), ((10625, 10674), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (10639, 10674), False, 'from nnvm.testing.check_computation import check_function\n'), ((10705, 10722), 'nnvm.symbol.Variable', 
'sym.Variable', (['"""x"""'], {}), "('x')\n", (10717, 10722), True, 'import nnvm.symbol as sym\n'), ((10731, 10745), 'nnvm.symbol.softmax', 'sym.softmax', (['x'], {}), '(x)\n', (10742, 10745), True, 'import nnvm.symbol as sym\n'), ((10997, 11085), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': "{'x': (10, 1000)}", 'numerical_grads': '(False)'}), "(y, forward, backward, shape={'x': (10, 1000)},\n numerical_grads=False)\n", (11011, 11085), False, 'from nnvm.testing.check_computation import check_function\n'), ((11105, 11163), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': "{'x': (2, 10)}"}), "(y, forward, backward, shape={'x': (2, 10)})\n", (11119, 11163), False, 'from nnvm.testing.check_computation import check_function\n'), ((11217, 11234), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (11229, 11234), True, 'import nnvm.symbol as sym\n'), ((11243, 11261), 'nnvm.symbol.log_softmax', 'sym.log_softmax', (['x'], {}), '(x)\n', (11258, 11261), True, 'import nnvm.symbol as sym\n'), ((11523, 11611), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': "{'x': (10, 1000)}", 'numerical_grads': '(False)'}), "(y, forward, backward, shape={'x': (10, 1000)},\n numerical_grads=False)\n", (11537, 11611), False, 'from nnvm.testing.check_computation import check_function\n'), ((11631, 11689), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': "{'x': (2, 10)}"}), "(y, forward, backward, shape={'x': (2, 10)})\n", (11645, 11689), False, 'from nnvm.testing.check_computation import check_function\n'), ((11737, 11771), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': '(10, 100)'}), "('x', shape=(10, 100))\n", (11749, 11771), True, 'import nnvm.symbol as sym\n'), ((11780, 11824), 'nnvm.symbol.Variable', 'sym.Variable', 
(['"""dense_weight"""'], {'shape': '(3, 100)'}), "('dense_weight', shape=(3, 100))\n", (11792, 11824), True, 'import nnvm.symbol as sym\n'), ((11833, 11871), 'nnvm.symbol.Variable', 'sym.Variable', (['"""dense_bias"""'], {'shape': '(3,)'}), "('dense_bias', shape=(3,))\n", (11845, 11871), True, 'import nnvm.symbol as sym\n'), ((11880, 11936), 'nnvm.symbol.dense', 'sym.dense', (['x', 'w', 'b'], {'use_bias': '(True)', 'units': '(3)', 'name': '"""dense"""'}), "(x, w, b, use_bias=True, units=3, name='dense')\n", (11889, 11936), True, 'import nnvm.symbol as sym\n'), ((11945, 11959), 'nnvm.symbol.flatten', 'sym.flatten', (['y'], {}), '(y)\n', (11956, 11959), True, 'import nnvm.symbol as sym\n'), ((12236, 12327), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'shape': 'shape', 'exclude_targets': "{'cuda'}", 'numerical_grads': '(True)'}), "(y, forward, shape=shape, exclude_targets={'cuda'},\n numerical_grads=True)\n", (12250, 12327), False, 'from nnvm.testing.check_computation import check_function\n'), ((12347, 12436), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'shape': 'shape', 'only_targets': "{'cuda'}", 'numerical_grads': '(False)'}), "(y, forward, shape=shape, only_targets={'cuda'},\n numerical_grads=False)\n", (12361, 12436), False, 'from nnvm.testing.check_computation import check_function\n'), ((12484, 12501), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (12496, 12501), True, 'import nnvm.symbol as sym\n'), ((12513, 12533), 'nnvm.symbol.Variable', 'sym.Variable', (['"""beta"""'], {}), "('beta')\n", (12525, 12533), True, 'import nnvm.symbol as sym\n'), ((12546, 12567), 'nnvm.symbol.Variable', 'sym.Variable', (['"""gamma"""'], {}), "('gamma')\n", (12558, 12567), True, 'import nnvm.symbol as sym\n'), ((12585, 12611), 'nnvm.symbol.Variable', 'sym.Variable', (['"""moving_var"""'], {}), "('moving_var')\n", (12597, 12611), True, 'import nnvm.symbol as sym\n'), 
((12630, 12657), 'nnvm.symbol.Variable', 'sym.Variable', (['"""moving_mean"""'], {}), "('moving_mean')\n", (12642, 12657), True, 'import nnvm.symbol as sym\n'), ((12681, 12749), 'nnvm.symbol.batch_norm', 'sym.batch_norm', (['x', 'gamma', 'beta', 'moving_mean', 'moving_var'], {'epsilon': 'eps'}), '(x, gamma, beta, moving_mean, moving_var, epsilon=eps)\n', (12695, 12749), True, 'import nnvm.symbol as sym\n'), ((13048, 13110), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'in_range': '(0.001, 1.0)', 'shape': 'shape'}), '(y, forward, in_range=(0.001, 1.0), shape=shape)\n', (13062, 13110), False, 'from nnvm.testing.check_computation import check_function\n'), ((13374, 13400), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {}), '(y, forward)\n', (13388, 13400), False, 'from nnvm.testing.check_computation import check_function\n'), ((13594, 13625), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': 'ishape'}), "('x', shape=ishape)\n", (13606, 13625), True, 'import nnvm.symbol as sym\n'), ((13634, 13698), 'nnvm.symbol.split', 'sym.split', (['x'], {'indices_or_sections': 'indices_or_sections', 'axis': 'axis'}), '(x, indices_or_sections=indices_or_sections, axis=axis)\n', (13643, 13698), True, 'import nnvm.symbol as sym\n'), ((13784, 13810), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {}), '(y, forward)\n', (13798, 13810), False, 'from nnvm.testing.check_computation import check_function\n'), ((14071, 14102), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': 'ishape'}), "('x', shape=ishape)\n", (14083, 14102), True, 'import nnvm.symbol as sym\n'), ((14550, 14581), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'test_forward'], {}), '(y, test_forward)\n', (14564, 14581), False, 'from nnvm.testing.check_computation import check_function\n'), ((15316, 15358), 'numpy.array', 'np.array', (['indices_src'], 
{'dtype': 'indices_dtype'}), '(indices_src, dtype=indices_dtype)\n', (15324, 15358), True, 'import numpy as np\n'), ((15367, 15401), 'nnvm.symbol.Variable', 'sym.Variable', (['"""a"""'], {'shape': 'src_shape'}), "('a', shape=src_shape)\n", (15379, 15401), True, 'import nnvm.symbol as sym\n'), ((15416, 15464), 'nnvm.symbol.Variable', 'sym.Variable', (['"""indices"""'], {'shape': 'indices_src.shape'}), "('indices', shape=indices_src.shape)\n", (15428, 15464), True, 'import nnvm.symbol as sym\n'), ((15473, 15504), 'nnvm.symbol.take', 'sym.take', (['a', 'indices'], {'axis': 'axis'}), '(a, indices, axis=axis)\n', (15481, 15504), True, 'import nnvm.symbol as sym\n'), ((15673, 15798), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'dtype': "{'a': src_dtype, 'indices': indices_dtype}", 'values': "{'a': a_src, 'indices': indices_src}"}), "(y, forward, dtype={'a': src_dtype, 'indices': indices_dtype},\n values={'a': a_src, 'indices': indices_src})\n", (15687, 15798), False, 'from nnvm.testing.check_computation import check_function\n'), ((16189, 16206), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (16201, 16206), True, 'import nnvm.symbol as sym\n'), ((16474, 16523), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward', 'backward'], {'shape': 'shape'}), '(y, forward, backward, shape=shape)\n', (16488, 16523), False, 'from nnvm.testing.check_computation import check_function\n'), ((16694, 16711), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (16706, 16711), True, 'import nnvm.symbol as sym\n'), ((16720, 16789), 'nnvm.symbol.pad', 'sym.pad', (['x'], {'pad_width': '((0, 0), (0, 0), (0, 1), (2, 3))', 'pad_value': '(1.0)'}), '(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), pad_value=1.0)\n', (16727, 16789), True, 'import nnvm.symbol as sym\n'), ((16999, 17038), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'shape': 
'shape'}), '(y, forward, shape=shape)\n', (17013, 17038), False, 'from nnvm.testing.check_computation import check_function\n'), ((17103, 17134), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': 'ishape'}), "('x', shape=ishape)\n", (17115, 17134), True, 'import nnvm.symbol as sym\n'), ((17143, 17210), 'nnvm.symbol.lrn', 'sym.lrn', (['x'], {'size': 'size', 'axis': 'axis', 'bias': 'bias', 'alpha': 'alpha', 'beta': 'beta'}), '(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)\n', (17150, 17210), True, 'import nnvm.symbol as sym\n'), ((17311, 17338), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward1'], {}), '(y, forward1)\n', (17325, 17338), False, 'from nnvm.testing.check_computation import check_function\n'), ((17590, 17621), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {'shape': 'ishape'}), "('x', shape=ishape)\n", (17602, 17621), True, 'import nnvm.symbol as sym\n'), ((17630, 17669), 'nnvm.symbol.l2_normalize', 'sym.l2_normalize', (['x'], {'eps': 'eps', 'axis': 'axis'}), '(x, eps=eps, axis=axis)\n', (17646, 17669), True, 'import nnvm.symbol as sym\n'), ((17759, 17786), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward1'], {}), '(y, forward1)\n', (17773, 17786), False, 'from nnvm.testing.check_computation import check_function\n'), ((18373, 18415), 'numpy.array', 'np.array', (['indices_src'], {'dtype': 'indices_dtype'}), '(indices_src, dtype=indices_dtype)\n', (18381, 18415), True, 'import numpy as np\n'), ((18424, 18458), 'nnvm.symbol.Variable', 'sym.Variable', (['"""a"""'], {'shape': 'src_shape'}), "('a', shape=src_shape)\n", (18436, 18458), True, 'import nnvm.symbol as sym\n'), ((18473, 18521), 'nnvm.symbol.Variable', 'sym.Variable', (['"""indices"""'], {'shape': 'indices_src.shape'}), "('indices', shape=indices_src.shape)\n", (18485, 18521), True, 'import nnvm.symbol as sym\n'), ((18530, 18555), 'nnvm.symbol.gather_nd', 'sym.gather_nd', (['a', 'indices'], {}), 
'(a, indices)\n', (18543, 18555), True, 'import nnvm.symbol as sym\n'), ((18727, 18852), 'nnvm.testing.check_computation.check_function', 'check_function', (['y', 'forward'], {'dtype': "{'a': src_dtype, 'indices': indices_dtype}", 'values': "{'a': a_src, 'indices': indices_src}"}), "(y, forward, dtype={'a': src_dtype, 'indices': indices_dtype},\n values={'a': a_src, 'indices': indices_src})\n", (18741, 18852), False, 'from nnvm.testing.check_computation import check_function\n'), ((4175, 4200), 'nnvm.symbol.block_grad', 'sym.block_grad', (['(x + 2 * y)'], {}), '(x + 2 * y)\n', (4189, 4200), True, 'import nnvm.symbol as sym\n'), ((4390, 4405), 'nnvm.symbol.log', 'sym.log', (['(-x * x)'], {}), '(-x * x)\n', (4397, 4405), True, 'import nnvm.symbol as sym\n'), ((9564, 9573), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (9570, 9573), True, 'import numpy as np\n'), ((9836, 9845), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (9842, 9845), True, 'import numpy as np\n'), ((13160, 13200), 'nnvm.symbol.Variable', 'sym.Variable', (["('x%d' % i)"], {'shape': 'ishape[i]'}), "('x%d' % i, shape=ishape[i])\n", (13172, 13200), True, 'import nnvm.symbol as sym\n'), ((13238, 13268), 'nnvm.symbol.concatenate', 'sym.concatenate', (['*x'], {'axis': 'axis'}), '(*x, axis=axis)\n', (13253, 13268), True, 'import nnvm.symbol as sym\n'), ((13735, 13778), 'numpy.split', 'np.split', (['x', 'indices_or_sections'], {'axis': 'axis'}), '(x, indices_or_sections, axis=axis)\n', (13743, 13778), True, 'import numpy as np\n'), ((15550, 15588), 'numpy.take', 'np.take', (['a'], {'indices': 'indices', 'axis': 'axis'}), '(a, indices=indices, axis=axis)\n', (15557, 15588), True, 'import numpy as np\n'), ((16244, 16269), 'nnvm.symbol.squeeze', 'sym.squeeze', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (16255, 16269), True, 'import nnvm.symbol as sym\n'), ((16292, 16306), 'nnvm.symbol.squeeze', 'sym.squeeze', (['x'], {}), '(x)\n', (16303, 16306), True, 'import nnvm.symbol as sym\n'), ((16825, 16920), 
'numpy.pad', 'np.pad', (['x'], {'pad_width': '((0, 0), (0, 0), (0, 1), (2, 3))', 'mode': '"""constant"""', 'constant_values': '(1.0)'}), "(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), mode='constant',\n constant_values=1.0)\n", (16831, 16920), True, 'import numpy as np\n'), ((17483, 17494), 'nnvm.symbol.relu', 'sym.relu', (['y'], {}), '(y)\n', (17491, 17494), True, 'import nnvm.symbol as sym\n'), ((17944, 17955), 'nnvm.symbol.relu', 'sym.relu', (['y'], {}), '(y)\n', (17952, 17955), True, 'import nnvm.symbol as sym\n'), ((3769, 3812), 'nnvm.testing.check_computation.check_function', 'check_function', (['*args'], {'quiet': '(True)'}), '(*args, quiet=True, **kwargs)\n', (3783, 3812), False, 'from nnvm.testing.check_computation import check_function\n'), ((5587, 5612), 'nnvm.symbol.block_grad', 'sym.block_grad', (['(2 * x + y)'], {}), '(2 * x + y)\n', (5601, 5612), True, 'import nnvm.symbol as sym\n'), ((5850, 5875), 'nnvm.symbol.block_grad', 'sym.block_grad', (['(x + 2 * y)'], {}), '(x + 2 * y)\n', (5864, 5875), True, 'import nnvm.symbol as sym\n'), ((5975, 5985), 'nnvm.symbol.sum', 'sym.sum', (['x'], {}), '(x)\n', (5982, 5985), True, 'import nnvm.symbol as sym\n'), ((7048, 7062), 'numpy.dot', 'np.dot', (['x', 'w.T'], {}), '(x, w.T)\n', (7054, 7062), True, 'import numpy as np\n'), ((7812, 7840), 'nnvm.symbol.leaky_relu', 'sym.leaky_relu', (['x'], {'alpha': '(0.3)'}), '(x, alpha=0.3)\n', (7826, 7840), True, 'import nnvm.symbol as sym\n'), ((10130, 10140), 'numpy.sinh', 'np.sinh', (['x'], {}), '(x)\n', (10137, 10140), True, 'import numpy as np\n'), ((10143, 10153), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (10150, 10153), True, 'import numpy as np\n'), ((12022, 12047), 'numpy.dot', 'np.dot', (['x', 'dense_weight.T'], {}), '(x, dense_weight.T)\n', (12028, 12047), True, 'import numpy as np\n'), ((14133, 14190), 'nnvm.symbol.strided_slice', 'sym.strided_slice', (['x'], {'begin': 'begin', 'end': 'end', 'stride': 'stride'}), '(x, begin=begin, end=end, 
stride=stride)\n', (14150, 14190), True, 'import nnvm.symbol as sym\n'), ((14223, 14265), 'nnvm.symbol.strided_slice', 'sym.strided_slice', (['x'], {'begin': 'begin', 'end': 'end'}), '(x, begin=begin, end=end)\n', (14240, 14265), True, 'import nnvm.symbol as sym\n'), ((16357, 16381), 'numpy.squeeze', 'np.squeeze', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (16367, 16381), True, 'import numpy as np\n'), ((16436, 16467), 'numpy.reshape', 'np.reshape', (['head_grads', 'x.shape'], {}), '(head_grads, x.shape)\n', (16446, 16467), True, 'import numpy as np\n'), ((3494, 3514), 'numpy.full', 'np.full', (['(1, 2)', '(1.0)'], {}), '((1, 2), 1.0)\n', (3501, 3514), True, 'import numpy as np\n'), ((6048, 6057), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (6054, 6057), True, 'import numpy as np\n'), ((9624, 9633), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (9630, 9633), True, 'import numpy as np\n'), ((10467, 10477), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (10473, 10477), True, 'import numpy as np\n'), ((10923, 10968), 'numpy.sum', 'np.sum', (['(y * head_grads)'], {'axis': '(1)', 'keepdims': '(True)'}), '(y * head_grads, axis=1, keepdims=True)\n', (10929, 10968), True, 'import numpy as np\n'), ((11442, 11451), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (11448, 11451), True, 'import numpy as np\n'), ((11454, 11495), 'numpy.sum', 'np.sum', (['head_grads'], {'axis': '(1)', 'keepdims': '(True)'}), '(head_grads, axis=1, keepdims=True)\n', (11460, 11495), True, 'import numpy as np\n'), ((15612, 15630), 'numpy.prod', 'np.prod', (['src_shape'], {}), '(src_shape)\n', (15619, 15630), True, 'import numpy as np\n'), ((18666, 18684), 'numpy.prod', 'np.prod', (['src_shape'], {}), '(src_shape)\n', (18673, 18684), True, 'import numpy as np\n'), ((9335, 9349), 'numpy.log', 'np.log', (['scalar'], {}), '(scalar)\n', (9341, 9349), True, 'import numpy as np\n'), ((12853, 12878), 'numpy.sqrt', 'np.sqrt', (['(moving_var + eps)'], {}), '(moving_var + eps)\n', (12860, 12878), True, 
'import numpy as np\n')] |
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import random
# RUL-target cap: an engine is treated as fully healthy (constant RUL) until it
# is MAXLIFE cycles from failure, after which the target decays linearly
# (the piecewise-linear "kink" target built by kink_RUL below).
MAXLIFE = 120
# Multipliers applied to the normalized feature matrices before saving/use.
SCALE = 1
RESCALE = 1
# Module-level state filled in by the loader functions below:
true_rul = []  # per-test-engine ground-truth RUL, appended by compute_rul_of_one_file
test_engine_id = 0  # engine-id Series of the loaded test set (set by the loaders)
training_engine_id = 0  # engine-id Series of the loaded training set
def kink_RUL(cycle_list, max_cycle, max_life=None):
    '''
    Piecewise-linear RUL target: a flat "healthy" plateau at max_life followed
    by a unit downward slope that reaches 0 at the failure cycle.

        RUL
        max_life |----------.
                 |           .   (slope: -1 per cycle)
                 |            .
               0 |-------------.------> cycle index (failure at max_cycle)

    :param cycle_list: cycles observed for one engine (only its length is
        used; one RUL value is produced per observation)
    :param max_cycle: cycle at which the engine fails
    :param max_life: plateau height / knee offset; defaults to the module
        constant MAXLIFE (exposed as a parameter so the cap is configurable;
        existing two-argument callers are unaffected)
    :return: list of RUL values, max_life before the knee, then decreasing by
        exactly 1 per cycle down to 0 at the failure cycle
    '''
    if max_life is None:
        max_life = MAXLIFE
    knee_point = max_cycle - max_life
    rul_values = []  # renamed: the original local list shadowed the function name
    for i in range(0, len(cycle_list)):
        if i < knee_point:
            rul_values.append(max_life)
        else:
            # The original recurrence subtracted
            # max_life / (max_cycle - knee_point) == 1.0 per step, which has
            # the closed form max_cycle - i - 1.  The closed form also fixes
            # the IndexError the recurrence raised at i == 0 whenever
            # max_cycle <= max_life (engines failing before the plateau ends).
            rul_values.append(float(max_cycle - i - 1))
    return rul_values
def compute_rul_of_one_id(FD00X_of_one_id, max_cycle_rul=None):
    '''
    Build the piecewise-linear RUL target for a single engine.

    :param FD00X_of_one_id: rows of one engine_id taken from a train/test
        FD00x DataFrame (must contain a 'cycle' column)
    :param max_cycle_rul: for test data, the true remaining life after the
        last observed cycle; None means the engine runs to failure (training)
    :return: list of RUL values, one per row of FD00X_of_one_id
    '''
    observed_cycles = FD00X_of_one_id['cycle'].tolist()
    # Failure occurs at the last observed cycle (training data) or
    # max_cycle_rul cycles after the last observation (test data).
    failure_cycle = max(observed_cycles) if max_cycle_rul is None \
        else max(observed_cycles) + max_cycle_rul
    return kink_RUL(observed_cycles, failure_cycle)
def compute_rul_of_one_file(FD00X, id='engine_id', RUL_FD00X=None):
    '''
    Compute the RUL target for every row of a train/test FD00x DataFrame.

    :param FD00X: frame whose rows are grouped per engine, engine ids 1..N
    :param id: name of the engine-id column (name kept for interface
        compatibility although it shadows the builtin `id`)
    :param RUL_FD00X: for test data, a frame whose row (engine_id - 1) holds
        that engine's true remaining life after its last observed cycle;
        None for run-to-failure training data
    :return: flat list of RUL values aligned with the row order of FD00X
    Side effect: for test data, appends each engine's true RUL to the
    module-level list `true_rul`.
    '''
    rul = []
    # Iterate engine ids in ascending order.  The raw files list engines
    # 1..N consecutively, so sorting keeps the concatenated per-engine RUL
    # segments aligned with the DataFrame rows (and with RUL_FD00X.iloc);
    # the previous bare set() iteration gave no ordering guarantee.
    for _id in sorted(set(FD00X[id])):
        if RUL_FD00X is None:
            rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id]))
        else:
            engine_rul = int(RUL_FD00X.iloc[_id - 1])
            true_rul.append(engine_rul)
            rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id], engine_rul))
    return rul
def get_CMAPSSData(save=False, save_training_data=True, save_testing_data=True, files=[1, 2, 3, 4, 5],
                   min_max_norm=False):
    '''
    Load (or rebuild) the preprocessed C-MAPSS train/test matrices.

    :param save: False -> only reload the previously saved .npy/.csv artefacts;
        True -> preprocess the raw txt files and save new artefacts
    :param save_training_data: when save is True, whether to rebuild the
        training data (otherwise the saved csv is reloaded)
    :param save_testing_data: when save is True, whether to rebuild the
        testing data (otherwise the saved csv is reloaded)
    :param files: which FD00x sub-datasets to load, e.g. [1] or [1, 2, 3, 4, 5]
    :param min_max_norm: True -> min-max normalization, False -> per-column
        z-score normalization (each sub-dataset normalized independently)
    :return: (train_values, test_values, train, test) -- numpy arrays (scaled
        by SCALE) plus the corresponding DataFrames; also written to
        normalized_train/test_data.npy/.csv
    NOTE(review): `files=[1, 2, 3, 4, 5]` is a mutable default argument; it is
    never mutated here, but a None default would be the safer idiom.
    '''
    if save == False:
        # Fast path: nothing recomputed, just reload the artefacts of a previous run.
        return np.load("normalized_train_data.npy"), np.load("normalized_test_data.npy"), pd.read_csv(
            'normalized_train_data.csv', index_col=[0]), pd.read_csv('normalized_test_data.csv', index_col=[0])
    # Column layout of the raw C-MAPSS txt files: engine id, cycle counter,
    # 3 operational settings and 21 sensor channels.
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    if save_training_data:  ### Training ###
        # All four sub-datasets are read eagerly; `files` decides which of them
        # are actually normalized and concatenated below.  The loop resolves the
        # local DataFrame variables by name via eval(data_file).
        train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
        train_FD002 = pd.read_table("./CMAPSSData/train_FD002.txt", header=None, delim_whitespace=True)
        train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
        train_FD004 = pd.read_table("./CMAPSSData/train_FD004.txt", header=None, delim_whitespace=True)
        train_FD001.columns = column_name
        train_FD002.columns = column_name
        train_FD003.columns = column_name
        train_FD004.columns = column_name
        previous_len = 0
        frames = []
        for data_file in ['train_FD00' + str(i) for i in files]:  # load subdataset by subdataset
            #### standard normalization ####
            # Columns 0 and 1 (engine_id, cycle) are excluded from normalization.
            mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
            std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
            # Constant columns have std 0; replace with 1 to avoid division by zero.
            std.replace(0, 1, inplace=True)
            # print("std", std)
            ################################
            if min_max_norm:
                scaler = MinMaxScaler()
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
                    eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
            else:
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
                    list(eval(data_file)))] - mean) / std
            # Append the piecewise-linear RUL target column (run-to-failure data).
            eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file))
            current_len = len(eval(data_file))
            # print(eval(data_file).index)
            # Re-index each sub-dataset so the concatenated frame has a
            # continuous 0..N-1 index.
            eval(data_file).index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
            # print(eval(data_file).index)
            frames.append(eval(data_file))
            print(data_file)
        train = pd.concat(frames)
        global training_engine_id
        training_engine_id = train['engine_id']
        # NOTE(review): positional second argument to drop() means axis=1
        # (drop columns); newer pandas requires the keyword form.
        train = train.drop('engine_id', 1)
        train = train.drop('cycle', 1)
        # if files[0] == 1 or files[0] == 3:
        #     train = train.drop('setting3', 1)
        #     train = train.drop('s18', 1)
        #     train = train.drop('s19', 1)
        train_values = train.values * SCALE
        np.save('normalized_train_data.npy', train_values)
        train.to_csv('normalized_train_data.csv')
        ###########
    else:
        # Training data not rebuilt: reload the previously saved csv.
        train = pd.read_csv('normalized_train_data.csv', index_col=[0])
        train_values = train.values
    if save_testing_data:  ### testing ###
        test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
        test_FD002 = pd.read_table("./CMAPSSData/test_FD002.txt", header=None, delim_whitespace=True)
        test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
        test_FD004 = pd.read_table("./CMAPSSData/test_FD004.txt", header=None, delim_whitespace=True)
        test_FD001.columns = column_name
        test_FD002.columns = column_name
        test_FD003.columns = column_name
        test_FD004.columns = column_name
        # load RUL data (one true remaining-life value per test engine)
        RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
        RUL_FD002 = pd.read_table("./CMAPSSData/RUL_FD002.txt", header=None, delim_whitespace=True)
        RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
        RUL_FD004 = pd.read_table("./CMAPSSData/RUL_FD004.txt", header=None, delim_whitespace=True)
        RUL_FD001.columns = ['RUL']
        RUL_FD002.columns = ['RUL']
        RUL_FD003.columns = ['RUL']
        RUL_FD004.columns = ['RUL']
        previous_len = 0
        frames = []
        for (data_file, rul_file) in [('test_FD00' + str(i), 'RUL_FD00' + str(i)) for i in files]:
            # Same normalization as the training branch, applied per sub-dataset.
            mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
            std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
            std.replace(0, 1, inplace=True)
            if min_max_norm:
                scaler = MinMaxScaler()
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
                    eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
            else:
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
                    list(eval(data_file)))] - mean) / std
            # RUL target built from the true remaining-life file; this also
            # appends each engine's true RUL to the module-level `true_rul`.
            eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file), RUL_FD00X=eval(rul_file))
            current_len = len(eval(data_file))
            eval(data_file).index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
            frames.append(eval(data_file))
            print(data_file)
            if len(files) == 1:
                # Only meaningful for a single sub-dataset: remember which
                # engine each test row belongs to.
                global test_engine_id
                test_engine_id = eval(data_file)['engine_id']
        test = pd.concat(frames)
        test = test.drop('engine_id', 1)
        test = test.drop('cycle', 1)
        # if files[0] == 1 or files[0] == 3:
        #     test = test.drop('setting3', 1)
        #     test = test.drop('s18', 1)
        #     test = test.drop('s19', 1)
        test_values = test.values * SCALE
        np.save('normalized_test_data.npy', test_values)
        test.to_csv('normalized_test_data.csv')
        ###########
    else:
        # Testing data not rebuilt: reload the previously saved csv.
        test = pd.read_csv('normalized_test_data.csv', index_col=[0])
        test_values = test.values
    return train_values, test_values, train, test
def get_PHM08Data(save=False):
    """
    Load (or rebuild) the PHM 2008 challenge dataset.

    :param save: False -> reload the previously saved .npy artefacts;
        True -> preprocess ./PHM08/train.txt and ./PHM08/final_test.txt
    :return: (training_data, testing_data, phm_testing_data) where
        training_data / testing_data are a random ~80/20 engine-level split of
        the challenge *training* file, and phm_testing_data is the normalized
        challenge test file (its RUL column is set to 0 because the true
        labels are not available).
    Side effect: sets the module-level `test_engine_id` to the engine-id
    Series of the challenge test file, and writes the three arrays to
    ./PHM08/processed_data/.
    """
    if save == False:
        # Fast path: reload the artefacts of a previous preprocessing run.
        return np.load("./PHM08/processed_data/phm_training_data.npy"), np.load("./PHM08/processed_data/phm_testing_data.npy"), np.load(
            "./PHM08/processed_data/phm_original_testing_data.npy")
    # Same raw column layout as C-MAPSS: id, cycle, 3 settings, 21 sensors.
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    phm_training_data = pd.read_table("./PHM08/train.txt", header=None, delim_whitespace=True)
    phm_training_data.columns = column_name
    phm_testing_data = pd.read_table("./PHM08/final_test.txt", header=None, delim_whitespace=True)
    phm_testing_data.columns = column_name
    print("phm training")
    # z-score normalization over all columns except engine_id and cycle.
    mean = phm_training_data.iloc[:, 2:len(list(phm_training_data))].mean()
    std = phm_training_data.iloc[:, 2:len(list(phm_training_data))].std()
    phm_training_data.iloc[:, 2:len(list(phm_training_data))] = (phm_training_data.iloc[:, 2:len(
        list(phm_training_data))] - mean) / std
    phm_training_data['RUL'] = compute_rul_of_one_file(phm_training_data)
    print("phm testing")
    # NOTE: the test file is normalized with its own statistics, not the
    # training statistics.
    mean = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].mean()
    std = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].std()
    phm_testing_data.iloc[:, 2:len(list(phm_testing_data))] = (phm_testing_data.iloc[:, 2:len(
        list(phm_testing_data))] - mean) / std
    # True test labels are unknown for the challenge set, so RUL is zeroed.
    phm_testing_data['RUL'] = 0
    #phm_testing_data['RUL'] = compute_rul_of_one_file(phm_testing_data)
    train_engine_id = phm_training_data['engine_id']
    # print(phm_training_engine_id[phm_training_engine_id==1].index)
    # NOTE(review): positional second argument to drop() means axis=1 (drop
    # columns); newer pandas requires the keyword form.
    phm_training_data = phm_training_data.drop('engine_id', 1)
    phm_training_data = phm_training_data.drop('cycle', 1)
    global test_engine_id
    test_engine_id = phm_testing_data['engine_id']
    phm_testing_data = phm_testing_data.drop('engine_id', 1)
    phm_testing_data = phm_testing_data.drop('cycle', 1)
    # From here on both names hold plain numpy arrays, not DataFrames.
    phm_training_data = phm_training_data.values
    phm_testing_data = phm_testing_data.values
    # Random engine-level ~80/20 split of the training file into an internal
    # train/validation pair (row positions are looked up via the original
    # engine-id Series, which still carries the pre-.values index).
    engine_ids = train_engine_id.unique()
    train_test_split = np.random.rand(len(engine_ids)) < 0.80
    train_engine_ids = engine_ids[train_test_split]
    test_engine_ids = engine_ids[~train_test_split]
    # test_engine_id = pd.Series(test_engine_ids)
    training_data = phm_training_data[train_engine_id[train_engine_id == train_engine_ids[0]].index]
    for id in train_engine_ids[1:]:
        tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
        training_data = np.concatenate((training_data, tmp))
    # print(training_data.shape)
    testing_data = phm_training_data[train_engine_id[train_engine_id == test_engine_ids[0]].index]
    for id in test_engine_ids[1:]:
        tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
        testing_data = np.concatenate((testing_data, tmp))
    # print(testing_data.shape)
    print(phm_training_data.shape, phm_testing_data.shape, training_data.shape, testing_data.shape)
    # Persist all three arrays (binary .npy plus text copies for inspection).
    np.save("./PHM08/processed_data/phm_training_data.npy", training_data)
    np.savetxt("./PHM08/processed_data/phm_training_data.txt", training_data, delimiter=" ")
    np.save("./PHM08/processed_data/phm_testing_data.npy", testing_data)
    np.savetxt("./PHM08/processed_data/phm_testing_data.txt", testing_data, delimiter=" ")
    np.save("./PHM08/processed_data/phm_original_testing_data.npy", phm_testing_data)
    np.savetxt("./PHM08/processed_data/phm_original_testing_data.csv", phm_testing_data, delimiter=",")
    return training_data, testing_data, phm_testing_data
def data_augmentation(files=1, low=[10, 40, 90, 170], high=[35, 85, 160, 250], plot=False, combine=False):
    '''
    Augment the training data so it looks like testing data.
    Training trajectories always run to failure, but testing trajectories mostly
    stop before a failure. Each augmentation pass therefore truncates every
    engine trajectory at a random cycle, creating run-to-non-failure scenarios.
    :param files: select which sub CMAPSS dataset, or "phm" for the PHM08 data
    :param low: lower bounds for the random cutoff cycle, one per augmentation pass
    :param high: upper bounds for the random cutoff cycle, one per augmentation pass
    :param plot: switch to plot the augmented data
    :param combine: if True, use the combined FD001+FD003 training set
    :return: None; saves normalized_train_data.npy/.csv and sets the global training_engine_id
    '''
    # NOTE(review): low/high are mutable default arguments; they are only read
    # here, never mutated, so this is safe — tuples would be more defensive.
    DEBUG = False
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    ### Loading original data ###
    if files == "phm":
        train_FD00x = pd.read_table("./PHM08/processed_data/phm_training_data.txt", header=None, delim_whitespace=True)
        # Drop the trailing all-NaN column produced by the delimiter at end of line.
        train_FD00x.drop(train_FD00x.columns[len(train_FD00x.columns) - 1], axis=1, inplace=True)
        train_FD00x.columns = column_name
    else:
        if combine:
            train_FD00x, _, _ = combine_FD001_and_FD003()
        else:
            file_path = "./CMAPSSData/train_FD00" + str(files) + ".txt"
            train_FD00x = pd.read_table(file_path, header=None, delim_whitespace=True)
            train_FD00x.columns = column_name
            print(file_path.split("/")[-1])
    ### Standard-normal scaling of all columns except engine_id and cycle ###
    mean = train_FD00x.iloc[:, 2:len(list(train_FD00x))].mean()
    std = train_FD00x.iloc[:, 2:len(list(train_FD00x))].std()
    std.replace(0, 1, inplace=True)  # guard against division by zero on constant columns
    train_FD00x.iloc[:, 2:len(list(train_FD00x))] = (train_FD00x.iloc[:, 2:len(list(train_FD00x))] - mean) / std
    final_train_FD = train_FD00x.copy()
    previous_len = 0
    frames = []
    # One augmentation pass per (low, high) pair; pass i truncates every engine
    # trajectory at a random cycle drawn from [low[i], high[i]].
    for i in range(len(high)):
        train_FD = train_FD00x.copy()
        train_engine_id = train_FD['engine_id']
        engine_ids = train_engine_id.unique()
        total_ids = len(engine_ids)
        train_rul = []
        print("*************", final_train_FD.shape, total_ids, low[i], high[i], "*****************")
        for id in range(1, total_ids + 1):
            # Re-read engine_id each iteration: indices shift after each drop/reset.
            train_engine_id = train_FD['engine_id']
            indexes = train_engine_id[train_engine_id == id].index  ### filter indexes related to id
            traj_data = train_FD.loc[indexes]  ### filter trajectory data
            cutoff_cycle = random.randint(low[i], high[i])  ### randomly selected cutoff point of the engine cycle
            # Clamp the cutoff so it never exceeds the trajectory's last cycle.
            if cutoff_cycle > max(traj_data['cycle']):
                cutoff_cycle = max(traj_data['cycle'])
            train_rul.append(max(traj_data['cycle']) - cutoff_cycle)  ### remaining cycles = RUL at the cutoff
            cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle].index  ### cutoff cycle index
            if DEBUG:
                print("traj_shape: ", traj_data.shape, "current_engine_id:", id, "cutoff_cycle:", cutoff_cycle,
                      "cutoff_index", cutoff_cycle_index, "engine_fist_index", indexes[0], "engine_last_index",
                      indexes[-1])
            ### removing rows after cutoff cycle index ###
            if cutoff_cycle_index[0] != indexes[-1]:
                drop_range = list(range(cutoff_cycle_index[0] + 1, indexes[-1] + 1))
                train_FD.drop(train_FD.index[drop_range], inplace=True)
            train_FD.reset_index(drop=True, inplace=True)
        ### calculating the RUL for the augmented (truncated) data ###
        train_rul = pd.DataFrame.from_dict({'RUL': train_rul})
        train_FD['RUL'] = compute_rul_of_one_file(train_FD, RUL_FD00X=train_rul)
        ### changing the engine_id for augmented data so ids stay unique across passes ###
        train_engine_id = train_FD['engine_id']
        for id in range(1, total_ids + 1):
            indexes = train_engine_id[train_engine_id == id].index
            train_FD.loc[indexes, 'engine_id'] = id + total_ids * (i + 1)
        if i == 0:  # should only execute at the first iteration
            final_train_FD['RUL'] = compute_rul_of_one_file(final_train_FD)
            current_len = len(final_train_FD)
            final_train_FD.index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
        ### Re-indexing the augmented data so the combined frame is contiguous ###
        train_FD['RUL'].index = range(previous_len, previous_len + len(train_FD))
        previous_len = previous_len + len(train_FD)
        final_train_FD = pd.concat(
            [final_train_FD, train_FD])  # concatenate the newly augmented data with previous data
    # NOTE(review): original indentation was lost; frames is assumed to collect the
    # fully accumulated frame once, after all passes — confirm against upstream repo.
    frames.append(final_train_FD)
    train = pd.concat(frames)
    train.reset_index(drop=True, inplace=True)
    # Shuffle engines and rebuild a contiguously indexed training frame,
    # one engine trajectory at a time.
    train_engine_id = train['engine_id']
    engine_ids = train_engine_id.unique()
    np.random.shuffle(engine_ids)
    training_data = train.loc[train_engine_id[train_engine_id == engine_ids[0]].index]
    training_data.reset_index(drop=True, inplace=True)
    previous_len = len(training_data)
    for id in engine_ids[1:]:
        traj_data = train.loc[train_engine_id[train_engine_id == id].index]
        current_len = len(traj_data)
        traj_data.index = range(previous_len, previous_len + current_len)
        previous_len = previous_len + current_len
        training_data = pd.concat([training_data, traj_data])
    # Expose the shuffled engine ids to callers via a module-level global.
    global training_engine_id
    training_engine_id = training_data['engine_id']
    training_data = training_data.drop('engine_id', 1)
    training_data = training_data.drop('cycle', 1)
    training_data_values = training_data.values * SCALE
    np.save('normalized_train_data.npy', training_data_values)
    training_data.to_csv('normalized_train_data.csv')
    train = training_data_values
    # Last column is the target (RUL); everything before it is features.
    x_train = train[:, :train.shape[1] - 1]
    y_train = train[:, train.shape[1] - 1] * RESCALE
    print("training in augmentation", x_train.shape, y_train.shape)
    if plot:
        plt.plot(y_train, label="train")
        plt.figure()
        plt.plot(x_train)
        plt.title("train")
        plt.show()
def analyse_Data(dataset, files=None, plot=True, min_max=False):
    '''
    Generate pre-processed data according to the given dataset.
    :param dataset: choose between "phm" for PHM 2008 dataset or "cmapss" for CMAPSS data set with file number
    :param files: Only for CMAPSS dataset to select sub dataset
    :param plot: switch to plot the RUL targets of the loaded splits
    :param min_max: switch to allow min-max normalization
    :return: None; delegates the actual saving to get_PHM08Data / get_CMAPSSData
    '''
    if dataset == "phm":
        training_data, testing_data, phm_testing_data = get_PHM08Data(save=True)
        # Last column is the target (RUL); the rest are features.
        x_phmtrain = training_data[:, :training_data.shape[1] - 1]
        y_phmtrain = training_data[:, training_data.shape[1] - 1]
        x_phmtest = testing_data[:, :testing_data.shape[1] - 1]
        y_phmtest = testing_data[:, testing_data.shape[1] - 1]
        print("phmtrain", x_phmtrain.shape, y_phmtrain.shape)
        # NOTE(review): the line below prints the TRAIN shapes under a "phmtest"
        # label (x_phmtest/y_phmtest are unused here) — looks like a copy-paste slip.
        print("phmtest", x_phmtrain.shape, y_phmtrain.shape)
        print("phmtest", phm_testing_data.shape)
        if plot:
            plt.figure()
            plt.plot(y_phmtrain, label="phmtrain_y")
            plt.figure()
            plt.plot(y_phmtest, label="phmtest_y")
            plt.show()
    elif dataset == "cmapss":
        training_data, testing_data, training_pd, testing_pd = get_CMAPSSData(save=True, files=files,
                                                                              min_max_norm=min_max)
        # Last column is the target (RUL); the rest are features.
        x_train = training_data[:, :training_data.shape[1] - 1]
        y_train = training_data[:, training_data.shape[1] - 1]
        print("training", x_train.shape, y_train.shape)
        x_test = testing_data[:, :testing_data.shape[1] - 1]
        y_test = testing_data[:, testing_data.shape[1] - 1]
        print("testing", x_test.shape, y_test.shape)
        if plot:
            plt.plot(y_train, label="train")
            plt.figure()
            plt.plot(y_test, label="test")
            plt.figure()
            plt.plot(x_train)
            plt.title("train: FD00" + str(files[0]))
            plt.figure()
            plt.plot(y_train)
            plt.title("train: FD00" + str(files[0]))
            plt.show()
def combine_FD001_and_FD003():
    """Merge the CMAPSS FD001 and FD003 sub-datasets into single DataFrames.

    FD003 engine ids are shifted so they continue after the last FD001 engine,
    and FD003 rows are re-indexed so each combined frame has a contiguous index.

    :return: (train, test, rul) pandas DataFrames for the combined dataset.
    """
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    # --- training splits ---
    train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
    train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
    train_FD001.columns = column_name
    train_FD003.columns = column_name
    # Shift FD003 engine ids past the last FD001 engine so ids stay unique.
    FD001_max_engine_id = max(train_FD001['engine_id'])
    train_FD003['engine_id'] = train_FD003['engine_id'] + FD001_max_engine_id
    train_FD003.index = range(len(train_FD001), len(train_FD001) + len(train_FD003))
    train_FD001_FD002 = pd.concat([train_FD001, train_FD003])
    # --- test splits ---
    test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
    test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
    test_FD001.columns = column_name
    test_FD003.columns = column_name
    FD001_max_engine_id = max(test_FD001['engine_id'])
    test_FD003['engine_id'] = test_FD003['engine_id'] + FD001_max_engine_id
    test_FD003.index = range(len(test_FD001), len(test_FD001) + len(test_FD003))
    test_FD001_FD002 = pd.concat([test_FD001, test_FD003])
    # --- RUL targets for the test splits ---
    RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
    RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
    RUL_FD001.columns = ['RUL']
    RUL_FD003.columns = ['RUL']
    RUL_FD003.index = range(len(RUL_FD001), len(RUL_FD001) + len(RUL_FD003))
    # BUG FIX: this previously concatenated the TEST frames again
    # (pd.concat([test_FD001, test_FD003])), so the RUL frames read above were
    # never used and callers received duplicated test data as "RUL".
    RUL_FD001_FD002 = pd.concat([RUL_FD001, RUL_FD003])
    return train_FD001_FD002, test_FD001_FD002, RUL_FD001_FD002
| [
"pandas.read_csv",
"matplotlib.pyplot.show",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.plot",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.figure",
"numpy.savetxt",
"pandas.read_table",
"numpy.concatenate",
"matplotlib.pyplot.title",
"numpy.load",
"pandas.concat",
"random.... | [((9798, 9868), 'pandas.read_table', 'pd.read_table', (['"""./PHM08/train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./PHM08/train.txt', header=None, delim_whitespace=True)\n", (9811, 9868), True, 'import pandas as pd\n'), ((9938, 10013), 'pandas.read_table', 'pd.read_table', (['"""./PHM08/final_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./PHM08/final_test.txt', header=None, delim_whitespace=True)\n", (9951, 10013), True, 'import pandas as pd\n'), ((12445, 12515), 'numpy.save', 'np.save', (['"""./PHM08/processed_data/phm_training_data.npy"""', 'training_data'], {}), "('./PHM08/processed_data/phm_training_data.npy', training_data)\n", (12452, 12515), True, 'import numpy as np\n'), ((12521, 12613), 'numpy.savetxt', 'np.savetxt', (['"""./PHM08/processed_data/phm_training_data.txt"""', 'training_data'], {'delimiter': '""" """'}), "('./PHM08/processed_data/phm_training_data.txt', training_data,\n delimiter=' ')\n", (12531, 12613), True, 'import numpy as np\n'), ((12615, 12683), 'numpy.save', 'np.save', (['"""./PHM08/processed_data/phm_testing_data.npy"""', 'testing_data'], {}), "('./PHM08/processed_data/phm_testing_data.npy', testing_data)\n", (12622, 12683), True, 'import numpy as np\n'), ((12689, 12779), 'numpy.savetxt', 'np.savetxt', (['"""./PHM08/processed_data/phm_testing_data.txt"""', 'testing_data'], {'delimiter': '""" """'}), "('./PHM08/processed_data/phm_testing_data.txt', testing_data,\n delimiter=' ')\n", (12699, 12779), True, 'import numpy as np\n'), ((12781, 12866), 'numpy.save', 'np.save', (['"""./PHM08/processed_data/phm_original_testing_data.npy"""', 'phm_testing_data'], {}), "('./PHM08/processed_data/phm_original_testing_data.npy',\n phm_testing_data)\n", (12788, 12866), True, 'import numpy as np\n'), ((12868, 12971), 'numpy.savetxt', 'np.savetxt', (['"""./PHM08/processed_data/phm_original_testing_data.csv"""', 'phm_testing_data'], {'delimiter': '""","""'}), 
"('./PHM08/processed_data/phm_original_testing_data.csv',\n phm_testing_data, delimiter=',')\n", (12878, 12971), True, 'import numpy as np\n'), ((17943, 17960), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (17952, 17960), True, 'import pandas as pd\n'), ((18160, 18189), 'numpy.random.shuffle', 'np.random.shuffle', (['engine_ids'], {}), '(engine_ids)\n', (18177, 18189), True, 'import numpy as np\n'), ((19206, 19264), 'numpy.save', 'np.save', (['"""normalized_train_data.npy"""', 'training_data_values'], {}), "('normalized_train_data.npy', training_data_values)\n", (19213, 19264), True, 'import numpy as np\n'), ((22453, 22539), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD001.txt', header=None, delim_whitespace\n =True)\n", (22466, 22539), True, 'import pandas as pd\n'), ((22554, 22640), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD003.txt', header=None, delim_whitespace\n =True)\n", (22567, 22640), True, 'import pandas as pd\n'), ((22963, 23000), 'pandas.concat', 'pd.concat', (['[train_FD001, train_FD003]'], {}), '([train_FD001, train_FD003])\n', (22972, 23000), True, 'import pandas as pd\n'), ((23020, 23105), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD001.txt', header=None, delim_whitespace=True\n )\n", (23033, 23105), True, 'import pandas as pd\n'), ((23119, 23204), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD003.txt', header=None, delim_whitespace=True\n )\n", (23132, 23204), True, 'import pandas as pd\n'), ((23517, 23552), 'pandas.concat', 'pd.concat', (['[test_FD001, test_FD003]'], {}), '([test_FD001, test_FD003])\n', 
(23526, 23552), True, 'import pandas as pd\n'), ((23571, 23650), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/RUL_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD001.txt', header=None, delim_whitespace=True)\n", (23584, 23650), True, 'import pandas as pd\n'), ((23668, 23747), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/RUL_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD003.txt', header=None, delim_whitespace=True)\n", (23681, 23747), True, 'import pandas as pd\n'), ((23915, 23950), 'pandas.concat', 'pd.concat', (['[test_FD001, test_FD003]'], {}), '([test_FD001, test_FD003])\n', (23924, 23950), True, 'import pandas as pd\n'), ((3452, 3538), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD001.txt', header=None, delim_whitespace\n =True)\n", (3465, 3538), True, 'import pandas as pd\n'), ((3557, 3643), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD002.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD002.txt', header=None, delim_whitespace\n =True)\n", (3570, 3643), True, 'import pandas as pd\n'), ((3662, 3748), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD003.txt', header=None, delim_whitespace\n =True)\n", (3675, 3748), True, 'import pandas as pd\n'), ((3767, 3853), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/train_FD004.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/train_FD004.txt', header=None, delim_whitespace\n =True)\n", (3780, 3853), True, 'import pandas as pd\n'), ((5378, 5395), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (5387, 5395), True, 'import pandas as pd\n'), ((5803, 5853), 'numpy.save', 'np.save', 
(['"""normalized_train_data.npy"""', 'train_values'], {}), "('normalized_train_data.npy', train_values)\n", (5810, 5853), True, 'import numpy as np\n'), ((5954, 6009), 'pandas.read_csv', 'pd.read_csv', (['"""normalized_train_data.csv"""'], {'index_col': '[0]'}), "('normalized_train_data.csv', index_col=[0])\n", (5965, 6009), True, 'import pandas as pd\n'), ((6117, 6202), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD001.txt', header=None, delim_whitespace=True\n )\n", (6130, 6202), True, 'import pandas as pd\n'), ((6220, 6305), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD002.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD002.txt', header=None, delim_whitespace=True\n )\n", (6233, 6305), True, 'import pandas as pd\n'), ((6323, 6408), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD003.txt', header=None, delim_whitespace=True\n )\n", (6336, 6408), True, 'import pandas as pd\n'), ((6426, 6511), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/test_FD004.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/test_FD004.txt', header=None, delim_whitespace=True\n )\n", (6439, 6511), True, 'import pandas as pd\n'), ((6723, 6802), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/RUL_FD001.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD001.txt', header=None, delim_whitespace=True)\n", (6736, 6802), True, 'import pandas as pd\n'), ((6824, 6903), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/RUL_FD002.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD002.txt', header=None, delim_whitespace=True)\n", (6837, 6903), True, 'import pandas as pd\n'), ((6925, 7004), 'pandas.read_table', 'pd.read_table', 
(['"""./CMAPSSData/RUL_FD003.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD003.txt', header=None, delim_whitespace=True)\n", (6938, 7004), True, 'import pandas as pd\n'), ((7026, 7105), 'pandas.read_table', 'pd.read_table', (['"""./CMAPSSData/RUL_FD004.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./CMAPSSData/RUL_FD004.txt', header=None, delim_whitespace=True)\n", (7039, 7105), True, 'import pandas as pd\n'), ((8557, 8574), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (8566, 8574), True, 'import pandas as pd\n'), ((8886, 8934), 'numpy.save', 'np.save', (['"""normalized_test_data.npy"""', 'test_values'], {}), "('normalized_test_data.npy', test_values)\n", (8893, 8934), True, 'import numpy as np\n'), ((9032, 9086), 'pandas.read_csv', 'pd.read_csv', (['"""normalized_test_data.csv"""'], {'index_col': '[0]'}), "('normalized_test_data.csv', index_col=[0])\n", (9043, 9086), True, 'import pandas as pd\n'), ((11954, 11990), 'numpy.concatenate', 'np.concatenate', (['(training_data, tmp)'], {}), '((training_data, tmp))\n', (11968, 11990), True, 'import numpy as np\n'), ((12266, 12301), 'numpy.concatenate', 'np.concatenate', (['(testing_data, tmp)'], {}), '((testing_data, tmp))\n', (12280, 12301), True, 'import numpy as np\n'), ((14041, 14142), 'pandas.read_table', 'pd.read_table', (['"""./PHM08/processed_data/phm_training_data.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('./PHM08/processed_data/phm_training_data.txt', header=None,\n delim_whitespace=True)\n", (14054, 14142), True, 'import pandas as pd\n'), ((16823, 16865), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'RUL': train_rul}"], {}), "({'RUL': train_rul})\n", (16845, 16865), True, 'import pandas as pd\n'), ((17782, 17819), 'pandas.concat', 'pd.concat', (['[final_train_FD, train_FD]'], {}), '([final_train_FD, train_FD])\n', (17791, 17819), True, 'import pandas as pd\n'), ((18697, 18734), 'pandas.concat', 
'pd.concat', (['[training_data, traj_data]'], {}), '([training_data, traj_data])\n', (18706, 18734), True, 'import pandas as pd\n'), ((19551, 19583), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train'], {'label': '"""train"""'}), "(y_train, label='train')\n", (19559, 19583), True, 'from matplotlib import pyplot as plt\n'), ((19595, 19607), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19605, 19607), True, 'from matplotlib import pyplot as plt\n'), ((19617, 19634), 'matplotlib.pyplot.plot', 'plt.plot', (['x_train'], {}), '(x_train)\n', (19625, 19634), True, 'from matplotlib import pyplot as plt\n'), ((19644, 19662), 'matplotlib.pyplot.title', 'plt.title', (['"""train"""'], {}), "('train')\n", (19653, 19662), True, 'from matplotlib import pyplot as plt\n'), ((19756, 19766), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19764, 19766), True, 'from matplotlib import pyplot as plt\n'), ((2920, 2956), 'numpy.load', 'np.load', (['"""normalized_train_data.npy"""'], {}), "('normalized_train_data.npy')\n", (2927, 2956), True, 'import numpy as np\n'), ((2958, 2993), 'numpy.load', 'np.load', (['"""normalized_test_data.npy"""'], {}), "('normalized_test_data.npy')\n", (2965, 2993), True, 'import numpy as np\n'), ((2995, 3050), 'pandas.read_csv', 'pd.read_csv', (['"""normalized_train_data.csv"""'], {'index_col': '[0]'}), "('normalized_train_data.csv', index_col=[0])\n", (3006, 3050), True, 'import pandas as pd\n'), ((3066, 3120), 'pandas.read_csv', 'pd.read_csv', (['"""normalized_test_data.csv"""'], {'index_col': '[0]'}), "('normalized_test_data.csv', index_col=[0])\n", (3077, 3120), True, 'import pandas as pd\n'), ((9324, 9379), 'numpy.load', 'np.load', (['"""./PHM08/processed_data/phm_training_data.npy"""'], {}), "('./PHM08/processed_data/phm_training_data.npy')\n", (9331, 9379), True, 'import numpy as np\n'), ((9381, 9435), 'numpy.load', 'np.load', (['"""./PHM08/processed_data/phm_testing_data.npy"""'], {}), 
"('./PHM08/processed_data/phm_testing_data.npy')\n", (9388, 9435), True, 'import numpy as np\n'), ((9437, 9500), 'numpy.load', 'np.load', (['"""./PHM08/processed_data/phm_original_testing_data.npy"""'], {}), "('./PHM08/processed_data/phm_original_testing_data.npy')\n", (9444, 9500), True, 'import numpy as np\n'), ((14485, 14545), 'pandas.read_table', 'pd.read_table', (['file_path'], {'header': 'None', 'delim_whitespace': '(True)'}), '(file_path, header=None, delim_whitespace=True)\n', (14498, 14545), True, 'import pandas as pd\n'), ((15690, 15721), 'random.randint', 'random.randint', (['low[i]', 'high[i]'], {}), '(low[i], high[i])\n', (15704, 15721), False, 'import random\n'), ((20815, 20827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20825, 20827), True, 'from matplotlib import pyplot as plt\n'), ((20841, 20881), 'matplotlib.pyplot.plot', 'plt.plot', (['y_phmtrain'], {'label': '"""phmtrain_y"""'}), "(y_phmtrain, label='phmtrain_y')\n", (20849, 20881), True, 'from matplotlib import pyplot as plt\n'), ((20979, 20991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20989, 20991), True, 'from matplotlib import pyplot as plt\n'), ((21005, 21043), 'matplotlib.pyplot.plot', 'plt.plot', (['y_phmtest'], {'label': '"""phmtest_y"""'}), "(y_phmtest, label='phmtest_y')\n", (21013, 21043), True, 'from matplotlib import pyplot as plt\n'), ((21143, 21153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21151, 21153), True, 'from matplotlib import pyplot as plt\n'), ((4559, 4573), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4571, 4573), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((7666, 7680), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (7678, 7680), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((21789, 21821), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train'], {'label': '"""train"""'}), "(y_train, label='train')\n", (21797, 21821), True, 'from 
matplotlib import pyplot as plt\n'), ((21835, 21847), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21845, 21847), True, 'from matplotlib import pyplot as plt\n'), ((21861, 21891), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test'], {'label': '"""test"""'}), "(y_test, label='test')\n", (21869, 21891), True, 'from matplotlib import pyplot as plt\n'), ((21907, 21919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21917, 21919), True, 'from matplotlib import pyplot as plt\n'), ((21933, 21950), 'matplotlib.pyplot.plot', 'plt.plot', (['x_train'], {}), '(x_train)\n', (21941, 21950), True, 'from matplotlib import pyplot as plt\n'), ((22018, 22030), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22028, 22030), True, 'from matplotlib import pyplot as plt\n'), ((22044, 22061), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train'], {}), '(y_train)\n', (22052, 22061), True, 'from matplotlib import pyplot as plt\n'), ((22129, 22139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22137, 22139), True, 'from matplotlib import pyplot as plt\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loaders."""
from typing import Generator, Tuple
import jax
import numpy as onp
from jax import numpy as jnp
import tensorflow as tf
import tensorflow_datasets as tfds
from enum import Enum
import re
import os
import warnings
from enum import Enum
# Keras shorthands used by load_imdb_dataset below.
imdb = tf.keras.datasets.imdb
sequence = tf.keras.preprocessing.sequence
class Task(Enum):
    """Kind of supervised learning problem a dataset poses."""
    REGRESSION = "regression"
    CLASSIFICATION = "classification"
class ImgDatasets(Enum):
    """Supported image classification datasets.

    Values are the names accepted by load_image_dataset; the *_LIKE_* variants
    are remapped/resized versions of the base dataset (see load_image_dataset).
    """
    STL10 = "stl10"
    STL10_LIKE_CIFAR = "stl10_like_cifar"  # STL-10 resized to 32x32, relabeled to CIFAR-10 classes
    CIFAR10 = "cifar10"
    CIFAR100 = "cifar100"
    MNIST = "mnist"
    EMNIST_MNIST = "emnist/mnist"
    SVHN = "svhn"
    SVHN_LIKE_MNIST = "svhn_like_mnist"  # SVHN converted to grayscale and cropped to 28x28
class UCIRegressionDatasets(Enum):
    """UCI regression benchmarks loadable via load_uci_regression_dataset.

    Each member maps to an .npz filename in _UCI_REGRESSION_FILENAMES.
    """
    BOSTON = "boston"
    ENERGY = "energy"
    YACHT = "yacht"
    CONCRETE = "concrete"
    NAVAL = "naval"
    ELEVATORS = "elevators"
    KEGGU = "keggu"
    KEGGD = "keggd"
    PROTEIN = "protein"
    POL = "pol"
    SKILLCRAFT = "skillcraft"
# On-disk .npz filename for each UCI regression dataset (relative to data_dir).
_UCI_REGRESSION_FILENAMES = {
    UCIRegressionDatasets.BOSTON: "boston.npz",
    UCIRegressionDatasets.ENERGY: "energy.npz",
    UCIRegressionDatasets.YACHT: "yacht.npz",
    UCIRegressionDatasets.CONCRETE: "concrete.npz",
    UCIRegressionDatasets.NAVAL: "naval.npz",
    UCIRegressionDatasets.ELEVATORS: "wilson_elevators.npz",
    UCIRegressionDatasets.KEGGU: "wilson_keggundirected.npz",
    UCIRegressionDatasets.KEGGD: "wilson_keggdirected.npz",
    UCIRegressionDatasets.PROTEIN: "wilson_protein.npz",
    UCIRegressionDatasets.POL: "wilson_pol.npz",
    UCIRegressionDatasets.SKILLCRAFT: "wilson_skillcraft.npz"
}

# Per-channel normalization statistics used by load_image_dataset.
# Format: (img_mean, img_std), one entry per channel.
_ALL_IMG_DS_STATS = {
    ImgDatasets.STL10: ((0.44686386, 0.43994057, 0.40674996),
                        (0.26031575, 0.25652364, 0.27120718)),
    ImgDatasets.STL10_LIKE_CIFAR: ((0.44686386, 0.43994057, 0.40674996),
                                   (0.26031575, 0.25652364, 0.27120718)),
    ImgDatasets.CIFAR10: ((0.49, 0.48, 0.44), (0.2, 0.2, 0.2)),
    ImgDatasets.CIFAR100: ((0.49, 0.48, 0.44), (0.2, 0.2, 0.2)),
    ImgDatasets.MNIST: ((0.1307,), (0.3081,)),
    ImgDatasets.EMNIST_MNIST: ((0.1307,), (0.3081,)),
    ImgDatasets.SVHN: ((0.22365098, 0.22365098, 0.22365098),
                       (0.25819412, 0.264116, 0.2797217)),
    ImgDatasets.SVHN_LIKE_MNIST: ((0.4452148,), (0.19695179,))
}

# IMDB preprocessing settings used by load_imdb_dataset:
# vocabulary size, pad/truncate length, and size of the train split.
_IMDB_CONFIG = {
    "max_features": 20000,
    "max_len": 100,
    "num_train": 20000
}

# STL-10 class index -> CIFAR-10 class index, used by relabel_stl10_like_cifar
# (note classes 1/2 and 6/7 swap positions between the two label orders).
_STL10_CIFAR10_CLASS_MAPPING = {
    0: 0,
    1: 2,
    2: 1,
    3: 3,
    4: 4,
    5: 5,
    6: 7,
    7: 6,
    8: 8,
    9: 9
}
def load_imdb_dataset():
    """Load the IMDB reviews dataset as padded integer sequences.

    Returns ((x_train, y_train), (x_test, y_test), (x_val, y_val), data_info),
    where data_info contains the number of classes. The Keras train split is
    divided into train/validation at _IMDB_CONFIG["num_train"].

    Code adapted from the code for
    _How Good is the Bayes Posterior in Deep Neural Networks Really?_:
    https://github.com/google-research/google-research/blob/master/cold_posterior_bnn/imdb/imdb_data.py
    """
    (train_x, train_y), (test_x, test_y) = imdb.load_data(
        path="./datasets", num_words=_IMDB_CONFIG["max_features"])

    # Carve the validation split off the end of the Keras train split.
    split_at = _IMDB_CONFIG["num_train"]
    val_x, val_y = train_x[split_at:], train_y[split_at:]
    train_x, train_y = train_x[:split_at], train_y[:split_at]

    pad_to = _IMDB_CONFIG["max_len"]

    def _to_arrays(seqs, labels):
        # Pad/truncate every review to a fixed length, then densify.
        padded = sequence.pad_sequences(seqs, maxlen=pad_to)
        return onp.array(padded), onp.array(labels)

    train = _to_arrays(train_x, train_y)
    test = _to_arrays(test_x, test_y)
    val = _to_arrays(val_x, val_y)
    data_info = {
        "num_classes": 2
    }
    return train, test, val, data_info
def load_image_dataset(
    split, batch_size, name="cifar10", repeat=False, shuffle=False,
    shuffle_seed=None
):
    """Loads the dataset as a generator of batches.

    :param split: TFDS split name (e.g. "train" or "test").
    :param batch_size: batch size; -1 means one full-dataset batch.
    :param name: one of the ImgDatasets values.
    :param repeat: if True, repeat the dataset indefinitely.
    :param shuffle: if True, shuffle with a 10*batch_size buffer.
    :param shuffle_seed: optional seed for the shuffle buffer.
    :return: (numpy iterator over batches, num_classes, num_examples).
    """
    # Do no data augmentation.
    # The *_like_* variants load a different underlying TFDS dataset.
    if name == "stl10_like_cifar":
        tfds_name = "stl10"
    elif name in ["svhn", "svhn_like_mnist"]:
        tfds_name = "svhn_cropped"
    else:
        tfds_name = name
    ds, dataset_info = tfds.load(tfds_name, split=split, as_supervised=True,
                                 with_info=True)
    num_classes = dataset_info.features["label"].num_classes
    num_examples = dataset_info.splits[split].num_examples
    num_channels = dataset_info.features['image'].shape[-1]

    def img_to_float32(image, label):
        # Convert uint8 pixels to floats in [0, 1].
        return tf.image.convert_image_dtype(image, tf.float32), label

    ds = ds.map(img_to_float32).cache()
    ds_stats = _ALL_IMG_DS_STATS[ImgDatasets(name)]

    def img_normalize(image, label):
        """Normalize the image to zero mean and unit variance."""
        mean, std = ds_stats
        # num_channels is read lazily from the closure, so the rebinding in the
        # svhn_like_mnist branch below takes effect here.
        image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)
        image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)
        return image, label

    if name == "svhn_like_mnist":
        # Grayscale before normalization; rebinding num_channels also changes
        # the broadcast shape used inside img_normalize (closure capture).
        num_channels = 1
        ds = ds.map(lambda x, y: (tf.image.rgb_to_grayscale(x)[:, :, :1], y))
    ds = ds.map(img_normalize)
    if name == "stl10_like_cifar":
        # Downscale STL-10 (96x96) to CIFAR's 32x32.
        ds = ds.map(lambda x, y: (tf.image.resize(x, [32, 32]), y))
    if name == "svhn_like_mnist":
        # Center-crop 32x32 down to MNIST's 28x28.
        ds = ds.map(lambda x, y: (x[2:30, 2:30], y))
    if batch_size == -1:
        batch_size = num_examples
    if repeat:
        ds = ds.repeat()
    if shuffle:
        ds = ds.shuffle(buffer_size=10 * batch_size, seed=shuffle_seed)
    ds = ds.batch(batch_size)
    return tfds.as_numpy(ds), num_classes, num_examples
def relabel_stl10_like_cifar(ds):
    """Map STL-10 labels onto CIFAR-10 class indices.

    Examples whose remapped label is 6 are dropped (the two datasets do not
    share that class), so the returned arrays may be shorter than the input.

    :param ds: (images, labels) numpy pair with STL-10 label indices.
    :return: (images, labels) with CIFAR-10 label indices, class 6 removed.
    """
    images, labels = ds
    relabeled = labels.copy()
    for stl_class, cifar_class in _STL10_CIFAR10_CLASS_MAPPING.items():
        relabeled[labels == stl_class] = cifar_class
    keep = relabeled != 6
    return images[keep], relabeled[keep]
def get_image_dataset(name):
    """Return the full train/test splits of an image dataset as numpy arrays.

    :param name: one of the ImgDatasets values.
    :return: (train_set, test_set, data_info) where each set is a single
        (images, labels) batch covering the whole split.
    """
    # batch_size=-1 makes load_image_dataset yield one full-dataset batch.
    train_set, num_classes, _ = load_image_dataset("train", -1, name)
    train_set = next(iter(train_set))
    test_set, _, _ = load_image_dataset("test", -1, name)
    test_set = next(iter(test_set))
    if name == "stl10_like_cifar":
        train_set = relabel_stl10_like_cifar(train_set)
        test_set = relabel_stl10_like_cifar(test_set)
    data_info = {
        "num_classes": num_classes
    }
    return train_set, test_set, data_info
def load_uci_regression_dataset(
    name, split_seed, train_fraction=0.9, data_dir="uci_datasets"
):
    """Load a UCI dataset from an npz file.

    The data is shuffled deterministically from split_seed, split into
    train/test, and standardized with statistics computed on the train split.

    Ported from https://github.com/wjmaddox/drbayes/blob/master/experiments/uci_exps/bayesian_benchmarks/data.py.

    :param name: UCIRegressionDatasets value naming the dataset.
    :param split_seed: seed for the deterministic shuffle.
    :param train_fraction: fraction of examples assigned to the train split.
    :param data_dir: directory containing the .npz archives.
    :return: ((x_train, y_train), (x_test, y_test), data_info) where data_info
        carries "y_scale", the train-split std of the (pre-normalized) targets.
    """
    filename = _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)]
    raw = onp.load(os.path.join(data_dir, filename))
    features, targets = raw["x"], raw["y"]

    # Deterministic shuffle driven entirely by split_seed.
    perm = onp.asarray(
        jax.random.permutation(jax.random.PRNGKey(split_seed), len(features)))
    features, targets = features[perm], targets[perm]

    n_train = int(train_fraction * len(features))
    x_train, x_test = features[:n_train], features[n_train:]
    y_train, y_test = targets[:n_train], targets[n_train:]

    eps = 1e-6  # keeps the std strictly positive for constant columns

    def _train_stats(arr):
        # Per-column mean/std over the train split only (no test leakage).
        return (arr.mean(axis=0, keepdims=True),
                arr.std(axis=0, keepdims=True) + eps)

    x_mean, x_std = _train_stats(x_train)
    y_mean, y_std = _train_stats(y_train)
    x_train = (x_train - x_mean) / x_std
    x_test = (x_test - x_mean) / x_std
    y_train = (y_train - y_mean) / y_std
    y_test = (y_test - y_mean) / y_std

    data_info = {
        "y_scale": float(y_std)
    }
    return (x_train, y_train), (x_test, y_test), data_info
def _parse_uci_regression_dataset(name_str):
    """Parse name and seed for uci regression data.

    E.g. yacht_2 is the yacht dataset with seed 2.

    :param name_str: string of the form "<name>_<seed>".
    :return: (name, seed) as strings, or (None, None) if the format differs.
    """
    matched = re.match(r"(?P<name>[a-z]+)_(?P<seed>[0-9]+)", name_str)
    if matched is None:
        return None, None
    return matched.group("name"), matched.group("seed")
def load_npz_array(filename):
    """Read a dataset stored as an npz archive.

    The archive must contain "x_train", "y_train", "x_test", "y_test" and a
    pickled "data_info" object (hence allow_pickle=True).

    Returns:
        ((x_train, y_train), (x_test, y_test), data_info).
    """
    archive = onp.load(filename, allow_pickle=True)
    train = (archive["x_train"], archive["y_train"])
    test = (archive["x_test"], archive["y_test"])
    return train, test, archive["data_info"].item()
def batch_split_axis(batch, n_split):
    """Reshapes batch to have first axes size equal n_split.

    Args:
        batch: an (x, y) pair of arrays sharing the same leading dimension.
        n_split: number of chunks to split the leading dimension into.

    Returns:
        A tuple (x', y') where each array has shape [n_split, n/n_split, ...].

    Raises:
        AssertionError: when the batch dimension is not divisible by n_split.
    """
    x, y = batch
    per_split, remainder = divmod(x.shape[0], n_split)
    assert remainder == 0, (
        "First axis cannot be split: batch dimension was {} when "
        "n_split was {}.".format(x.shape[0], n_split))
    return tuple(
        arr.reshape([n_split, per_split, *arr.shape[1:]]) for arr in (x, y))
def pmap_dataset(ds, n_devices):
    """Shard the dataset to devices.

    If the number of data points is not divisible by n_devices, the dataset
    is truncated (with a warning) to the largest divisible length before
    being split and placed on devices via an identity pmap.
    """
    n_data = len(ds[0])
    remainder = n_data % n_devices
    if remainder:
        new_len = n_data - remainder
        warnings.warn(
            "Dataset of length {} can not be split onto {} devices."
            "Truncating to {} data points.".format(n_data, n_devices, new_len),
            UserWarning)
        ds = (arr[:new_len] for arr in ds)
    return jax.pmap(lambda x: x)(batch_split_axis(ds, n_devices))
def make_ds_pmap_fullbatch(name, dtype, n_devices=None, truncate_to=None):
    """Make train and test sets sharded over batch dim.

    Dispatches on the form of `name`: a known image-dataset name, "imdb",
    a path ending in ".npz", or a UCI regression spec like "yacht_2".

    Args:
        name: dataset specifier (case-insensitive; lowered on entry).
        dtype: dtype the inputs (element 0 of each split) are cast to;
            overridden to jnp.int32 for "imdb".
        n_devices: number of devices to shard over; defaults to all local
            JAX devices.
        truncate_to: optional cap on the training-set size; must be
            divisible by n_devices.

    Returns:
        (train_set, test_set, task, data_info), with each split sharded
        over a leading device axis by `pmap_dataset`.

    Raises:
        ValueError: if `name` matches none of the supported forms.
    """
    name = name.lower()
    n_devices = n_devices or len(jax.local_devices())
    # `loaded` records whether any branch below produced data.
    if name in ImgDatasets._value2member_map_:
        train_set, test_set, data_info = get_image_dataset(name)
        loaded = True
        task = Task.CLASSIFICATION
    elif name == "imdb":
        train_set, test_set, _, data_info = load_imdb_dataset()
        # IMDB inputs are token ids, so force an integer dtype.
        dtype = jnp.int32
        loaded = True
        task = Task.CLASSIFICATION
    elif name[-4:] == ".npz":
        train_set, test_set, data_info = load_npz_array(name)
        loaded = True
        task = Task.CLASSIFICATION
    else:
        # Last resort: interpret name as "<uci_name>_<seed>".
        name, seed = _parse_uci_regression_dataset(name)
        loaded = name is not None
        if name is not None:
            train_set, test_set, data_info = load_uci_regression_dataset(
                name, int(seed))
            loaded = True
            task = Task.REGRESSION
    if not loaded:
        raise ValueError("Unknown dataset name: {}".format(name))
    if truncate_to:
        assert truncate_to % n_devices == 0, (
            "truncate_to should be devisible by n_devices, but got values "
            "truncate_to={}, n_devices={}".format(truncate_to, n_devices)
        )
        train_set = tuple(arr[:truncate_to] for arr in train_set)
    train_set, test_set = tuple(pmap_dataset(ds, n_devices)
                                for ds in (train_set, test_set))
    # Cast inputs only; labels/targets keep their original dtype.
    train_set, test_set = map(
        lambda ds: (ds[0].astype(dtype), ds[1]), (train_set, test_set))
    return train_set, test_set, task, data_info
| [
"tensorflow.image.rgb_to_grayscale",
"jax.random.PRNGKey",
"tensorflow.image.convert_image_dtype",
"re.compile",
"jax.local_devices",
"jax.pmap",
"tensorflow_datasets.load",
"tensorflow.image.resize",
"numpy.asarray",
"numpy.array",
"tensorflow.constant",
"warnings.warn",
"numpy.load",
"te... | [((4519, 4588), 'tensorflow_datasets.load', 'tfds.load', (['tfds_name'], {'split': 'split', 'as_supervised': '(True)', 'with_info': '(True)'}), '(tfds_name, split=split, as_supervised=True, with_info=True)\n', (4528, 4588), True, 'import tensorflow_datasets as tfds\n'), ((6921, 6935), 'numpy.load', 'onp.load', (['path'], {}), '(path)\n', (6929, 6935), True, 'import numpy as onp\n'), ((7064, 7084), 'numpy.asarray', 'onp.asarray', (['indices'], {}), '(indices)\n', (7075, 7084), True, 'import numpy as onp\n'), ((8099, 8125), 're.compile', 're.compile', (['pattern_string'], {}), '(pattern_string)\n', (8109, 8125), False, 'import re\n'), ((8324, 8361), 'numpy.load', 'onp.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (8332, 8361), True, 'import numpy as onp\n'), ((3725, 3737), 'numpy.array', 'onp.array', (['y'], {}), '(y)\n', (3734, 3737), True, 'import numpy as onp\n'), ((3746, 3758), 'numpy.array', 'onp.array', (['x'], {}), '(x)\n', (3755, 3758), True, 'import numpy as onp\n'), ((5123, 5187), 'tensorflow.constant', 'tf.constant', (['mean'], {'shape': '[1, 1, num_channels]', 'dtype': 'image.dtype'}), '(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n', (5134, 5187), True, 'import tensorflow as tf\n'), ((5201, 5264), 'tensorflow.constant', 'tf.constant', (['std'], {'shape': '[1, 1, num_channels]', 'dtype': 'image.dtype'}), '(std, shape=[1, 1, num_channels], dtype=image.dtype)\n', (5212, 5264), True, 'import tensorflow as tf\n'), ((5833, 5850), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (5846, 5850), True, 'import tensorflow_datasets as tfds\n'), ((7012, 7042), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['split_seed'], {}), '(split_seed)\n', (7030, 7042), False, 'import jax\n'), ((9219, 9258), 'warnings.warn', 'warnings.warn', (['warning_str', 'UserWarning'], {}), '(warning_str, UserWarning)\n', (9232, 9258), False, 'import warnings\n'), ((9307, 9328), 'jax.pmap', 'jax.pmap', (['(lambda 
x: x)'], {}), '(lambda x: x)\n', (9315, 9328), False, 'import jax\n'), ((4844, 4891), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4872, 4891), True, 'import tensorflow as tf\n'), ((9551, 9570), 'jax.local_devices', 'jax.local_devices', ([], {}), '()\n', (9568, 9570), False, 'import jax\n'), ((5511, 5539), 'tensorflow.image.resize', 'tf.image.resize', (['x', '[32, 32]'], {}), '(x, [32, 32])\n', (5526, 5539), True, 'import tensorflow as tf\n'), ((5373, 5401), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['x'], {}), '(x)\n', (5398, 5401), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.