code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Tests for reverberation time related things. """
from pytest import raises
import numpy as np
import numpy.testing as npt
import roomacoustics as ra
def test_rt_from_edc():
    """An ideal -60 dB/s exponential decay must yield RT = 1 s for every
    supported evaluation interval."""
    times = np.linspace(0, 1.5, 2**9)
    slope_db_per_s = -60
    edc_db = times * slope_db_per_s
    energy_decay = 10**(edc_db/10)
    expected_rt = 1.
    for interval in ('T20', 'T30', 'T40', 'T50', 'T60', 'LDT', 'EDT'):
        estimate = ra.reverberation_time_energy_decay_curve(
            energy_decay, times, T=interval)
        npt.assert_allclose(estimate, expected_rt)
def test_rt_from_edc_error():
    """An unknown interval identifier must raise a ValueError."""
    times = np.linspace(0, 1.5, 2**9)
    energy_decay = 10**((times * -60)/10)
    with raises(ValueError, match='is not a valid interval.'):
        ra.reverberation_time_energy_decay_curve(energy_decay, times, T='Bla')
| [
"numpy.testing.assert_allclose",
"numpy.linspace",
"pytest.raises",
"roomacoustics.reverberation_time_energy_decay_curve"
] | [((240, 267), 'numpy.linspace', 'np.linspace', (['(0)', '(1.5)', '(2 ** 9)'], {}), '(0, 1.5, 2 ** 9)\n', (251, 267), True, 'import numpy as np\n'), ((576, 603), 'numpy.linspace', 'np.linspace', (['(0)', '(1.5)', '(2 ** 9)'], {}), '(0, 1.5, 2 ** 9)\n', (587, 603), True, 'import numpy as np\n'), ((430, 491), 'roomacoustics.reverberation_time_energy_decay_curve', 'ra.reverberation_time_energy_decay_curve', (['edc_exp', 'times'], {'T': 'T'}), '(edc_exp, times, T=T)\n', (470, 491), True, 'import roomacoustics as ra\n'), ((500, 531), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['RT_est', 'RT'], {}), '(RT_est, RT)\n', (519, 531), True, 'import numpy.testing as npt\n'), ((685, 737), 'pytest.raises', 'raises', (['ValueError'], {'match': '"""is not a valid interval."""'}), "(ValueError, match='is not a valid interval.')\n", (691, 737), False, 'from pytest import raises\n'), ((747, 808), 'roomacoustics.reverberation_time_energy_decay_curve', 'ra.reverberation_time_energy_decay_curve', (['edc_exp', 'times'], {'T': 'T'}), '(edc_exp, times, T=T)\n', (787, 808), True, 'import roomacoustics as ra\n')] |
# -*- coding: utf-8 -*-
import unittest
import logging
import numpy as np
from votesim.votemethods import irv
import votesim
logger = logging.getLogger(__name__)
class TestIRV(unittest.TestCase):
    """Unit tests for the IRV / STV vote-counting routines in
    ``votesim.votemethods.irv``.

    Ballots are rank matrices: one row per voter, one column per candidate,
    with 1 = first choice and 0 = unranked (inferred from the fixtures below).
    """

    def test_tie(self):
        """Two voters with opposite rankings: both irv_stv and irv must
        report no winner and a two-way tie between candidates 0 and 1."""
        print('TEST TIE #1')
        d = [[1, 2,],
             [2, 1,]]
        winners1, ties1, h = irv.irv_stv(d, 1)
        winners2, ties2, output = irv.irv(d, 1)
        print('winners1', winners1)
        print('winners2', winners2)
        print('ties1', ties1)
        print('ties2', ties2)
        self.assertTrue(len(winners1) == 0)
        self.assertTrue(len(winners2) == 0)
        # Both methods must report the same set of tied candidates.
        self.assertTrue(
            np.all(np.in1d(ties1, ties2))
        )
        self.assertEqual(len(winners1), 0)
        self.assertEqual(len(ties1), 2)
        self.assertTrue(0 in ties1)
        self.assertTrue(1 in ties1)
        winners2, ties2, o = irv.irv(d, 1)
        return

    def test_tie2(self):
        """Candidate 0 is every voter's first choice and must be seated in a
        two-winner STV count."""
        print('TEST TIE #2')
        d = [[1,2,3],
             [1,3,2]]
        # winners, ties, h = irv.IRV_STV(d, 2)
        winners, ties, h = irv.irv_stv(d, 2)
        print('winners', winners)
        print('ties', ties)
        self.assertTrue(0 in winners)
        return

    def test_eliminate(self):
        """irv_eliminate must zero the eliminated candidate's column each
        round and compact the remaining ranks; candidate 2 wins overall."""
        d = [[1, 2, 3, 4],
             [1, 3, 2, 4],
             [3, 2, 1, 4],
             [2, 3, 1, 4],
             [3, 0, 2, 1]]
        d = np.array(d)
        # Expected ballot state after each successive elimination round.
        first_round = [
            [1, 0, 2, 3],
            [1, 0, 2, 3],
            [2, 0, 1, 3],
            [2, 0, 1, 3],
            [3, 0, 2, 1],
        ]
        second_round = [
            [1, 0, 2, 0],
            [1, 0, 2, 0],
            [2, 0, 1, 0],
            [2, 0, 1, 0],
            [2, 0, 1, 0]]
        third_round = [
            [0, 0, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 1, 0]]
        first_round = np.array(first_round)
        second_round = np.array(second_round)
        logger.info('start votes\n%s', d)
        logger.info(d)
        d1, loser, ties, h = irv.irv_eliminate(d)
        logger.info('1st round results\n%s', d1)
        self.assertTrue(np.all(first_round == d1))
        d2, loser, ties, h = irv.irv_eliminate(d1)
        logger.info('2nd round results\n%s', d2)
        self.assertTrue(np.all(second_round == d2))
        d3, loser, ties, h = irv.irv_eliminate(d2)
        logger.info('3rd round results\n%s', d3)
        self.assertTrue(np.all(third_round == d3))
        # Full STV run on the same ballots must seat candidate 2.
        w, t, h = irv.irv_stv(d, numwin=1)
        self.assertIn(2, w)
        return

    def test_stv(self):
        """Two-winner STV on the test_eliminate ballots must seat
        candidates 0 and 2."""
        print('TEST STV')
        d = [[1, 2, 3, 4],
             [1, 3, 2, 4],
             [3, 2, 1, 4],
             [2, 3, 1, 4],
             [3, 0, 2, 1]]
        d = np.array(d)
        winners, ties, h = irv.irv_stv(d, 2)
        self.assertTrue(0 in winners)
        self.assertTrue(2 in winners)
        return

    def test_RCVReorder(self):
        """rcv_reorder must compress arbitrary rank values into consecutive
        ranks starting at 1, leaving zeros (unranked) untouched."""
        print('\nTEST RCV ReOrder')
        a = [[1, 5, 2, 0, 4, 10],
             [2, 3, 4, 5, 6, 7],
             [0, 0, 0, 5, 6, 7]]
        a = np.array(a)
        b = irv.rcv_reorder(a)
        correct = [
            [1, 4, 2, 0, 3, 5],
            [1, 2, 3, 4, 5, 6],
            [0, 0, 0, 1, 2, 3]
        ]
        correct = np.array(correct)
        compare = np.all(correct == b)
        self.assertTrue(compare)
        return

    def test_wiki(self):
        """
        Test example from wikipedia, retrieved Dec 19, 2019.
        Correct results taken from wikipedia (winner knoxville K)
        https://en.wikipedia.org/wiki/Instant-runoff_voting
        """
        #     M  N  C  K
        d = [[1, 2, 3, 4]]*42 + \
            [[4, 1, 2, 3]]*26 + \
            [[4, 3, 1, 2]]*15 + \
            [[4, 3, 2, 1]]*17
        d = np.array(d)
        winners, ties, output = irv.irv_stv(d, 1)
        history = output['round_history']
        # First-choice vote totals per candidate after each elimination round.
        correct_history = [[42, 26, 15, 17],
                           [42, 26, 0, 32],
                           [42, 0, 0, 58]]
        correct_history = np.array(correct_history)
        self.assertTrue(np.all(correct_history == history))
        self.assertEqual(winners[0], 3)

    def test_irv2(self):
        """irv_stv and irv must agree (same winners and ties) on 60
        randomly generated 100-voter, 5-candidate elections."""
        success_count = 0
        fail_count = 0
        rstate = np.random.RandomState()
        for seed in range(60):
            rstate.seed(seed)
            ratings = rstate.rand(100, 5)
            ranks = votesim.votemethods.tools.score2rank(ratings)
            w1, t1, o1 = irv.irv_stv(ranks)
            w2, t2, o2 = irv.irv(ranks)
            # Sort so the comparison is order-independent.
            w1 = np.sort(w1)
            w2 = np.sort(w2)
            t1 = np.sort(t1)
            t2 = np.sort(t2)
            success = np.all(w1 == w2) & np.all(t1 == t2)
            if success:
                success_count += 1
            else:
                fail_count += 1
        self.assertTrue(fail_count == 0)
        return

    def test_irv_tie3(self):
        """Smoke test: irv must run without raising on a tie-heavy
        10-voter, 5-candidate profile (no result assertions)."""
        d = [[5,2,1,4,3],
             [3,5,2,1,4],
             [2,3,1,5,4],
             [2,3,5,1,4],
             [5,4,1,3,2],
             [3,2,5,4,1],
             [1,4,3,5,2],
             [5,2,3,1,4],
             [3,5,4,2,1],
             [1,4,3,2,5],
             ]
        d = np.array(d)
        w2, t2, o2 = irv.irv(d)

    def test_stv_tie3(self):
        """Smoke test: irv_stv on the same tie-heavy profile as
        test_irv_tie3 (no result assertions)."""
        d = [[5,2,1,4,3],
             [3,5,2,1,4],
             [2,3,1,5,4],
             [2,3,5,1,4],
             [5,4,1,3,2],
             [3,2,5,4,1],
             [1,4,3,5,2],
             [5,2,3,1,4],
             [3,5,4,2,1],
             [1,4,3,2,5],
             ]
        d = np.array(d)
        w2, t2, o2 = irv.irv_stv(d)

    def test_stv_tie4(self):
        """Smoke test: irv_stv on another tie-prone random profile
        (no result assertions)."""
        d = [[2,4,3,1,5]
             ,[5,2,1,4,3]
             ,[1,3,4,5,2]
             ,[1,3,5,2,4]
             ,[3,1,2,4,5]
             ,[2,5,3,1,4]
             ,[2,1,4,3,5]
             ,[3,1,5,2,4]
             ,[1,2,3,5,4]
             ,[3,2,5,4,1]]
        d = np.array(d)
        w2, t2, o2 = irv.irv_stv(d)
if __name__ == '__main__':
    # Ad-hoc single-test runner used during development: instantiates the
    # test case directly and calls one method instead of running unittest.
    pass
    # logging.basicConfig()
    # logger = logging.getLogger('votesim.votemethods.irv')
    # logger.setLevel(logging.DEBUG)
    t = TestIRV()
    # t.test_tie()
    # unittest.main(exit=False)
    # a = TestIRV()
    # a.test_eliminate()
    # a.test_irv2()
    t.test_wiki()
    # a.test_stv_tie4()
| [
"logging.getLogger",
"votesim.votemethods.tools.score2rank",
"votesim.votemethods.irv.irv_stv",
"votesim.votemethods.irv.irv_eliminate",
"votesim.votemethods.irv.rcv_reorder",
"numpy.sort",
"numpy.in1d",
"numpy.array",
"numpy.all",
"votesim.votemethods.irv.irv",
"numpy.random.RandomState"
] | [((137, 164), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'import logging\n'), ((341, 358), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d', '(1)'], {}), '(d, 1)\n', (352, 358), False, 'from votesim.votemethods import irv\n'), ((393, 406), 'votesim.votemethods.irv.irv', 'irv.irv', (['d', '(1)'], {}), '(d, 1)\n', (400, 406), False, 'from votesim.votemethods import irv\n'), ((921, 934), 'votesim.votemethods.irv.irv', 'irv.irv', (['d', '(1)'], {}), '(d, 1)\n', (928, 934), False, 'from votesim.votemethods import irv\n'), ((1141, 1158), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d', '(2)'], {}), '(d, 2)\n', (1152, 1158), False, 'from votesim.votemethods import irv\n'), ((1486, 1497), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (1494, 1497), True, 'import numpy as np\n'), ((2088, 2109), 'numpy.array', 'np.array', (['first_round'], {}), '(first_round)\n', (2096, 2109), True, 'import numpy as np\n'), ((2133, 2155), 'numpy.array', 'np.array', (['second_round'], {}), '(second_round)\n', (2141, 2155), True, 'import numpy as np\n'), ((2268, 2288), 'votesim.votemethods.irv.irv_eliminate', 'irv.irv_eliminate', (['d'], {}), '(d)\n', (2285, 2288), False, 'from votesim.votemethods import irv\n'), ((2436, 2457), 'votesim.votemethods.irv.irv_eliminate', 'irv.irv_eliminate', (['d1'], {}), '(d1)\n', (2453, 2457), False, 'from votesim.votemethods import irv\n'), ((2597, 2618), 'votesim.votemethods.irv.irv_eliminate', 'irv.irv_eliminate', (['d2'], {}), '(d2)\n', (2614, 2618), False, 'from votesim.votemethods import irv\n'), ((2746, 2770), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d'], {'numwin': '(1)'}), '(d, numwin=1)\n', (2757, 2770), False, 'from votesim.votemethods import irv\n'), ((3021, 3032), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (3029, 3032), True, 'import numpy as np\n'), ((3060, 3077), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d', '(2)'], {}), '(d, 2)\n', (3071, 
3077), False, 'from votesim.votemethods import irv\n'), ((3367, 3378), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (3375, 3378), True, 'import numpy as np\n'), ((3391, 3409), 'votesim.votemethods.irv.rcv_reorder', 'irv.rcv_reorder', (['a'], {}), '(a)\n', (3406, 3409), False, 'from votesim.votemethods import irv\n'), ((3591, 3608), 'numpy.array', 'np.array', (['correct'], {}), '(correct)\n', (3599, 3608), True, 'import numpy as np\n'), ((3636, 3656), 'numpy.all', 'np.all', (['(correct == b)'], {}), '(correct == b)\n', (3642, 3656), True, 'import numpy as np\n'), ((4130, 4141), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (4138, 4141), True, 'import numpy as np\n'), ((4174, 4191), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d', '(1)'], {}), '(d, 1)\n', (4185, 4191), False, 'from votesim.votemethods import irv\n'), ((4518, 4543), 'numpy.array', 'np.array', (['correct_history'], {}), '(correct_history)\n', (4526, 4543), True, 'import numpy as np\n'), ((4835, 4858), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (4856, 4858), True, 'import numpy as np\n'), ((6544, 6555), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (6552, 6555), True, 'import numpy as np\n'), ((6577, 6587), 'votesim.votemethods.irv.irv', 'irv.irv', (['d'], {}), '(d)\n', (6584, 6587), False, 'from votesim.votemethods import irv\n'), ((6913, 6924), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (6921, 6924), True, 'import numpy as np\n'), ((6946, 6960), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d'], {}), '(d)\n', (6957, 6960), False, 'from votesim.votemethods import irv\n'), ((7274, 7285), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (7282, 7285), True, 'import numpy as np\n'), ((7307, 7321), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['d'], {}), '(d)\n', (7318, 7321), False, 'from votesim.votemethods import irv\n'), ((2362, 2387), 'numpy.all', 'np.all', (['(first_round == d1)'], {}), '(first_round == d1)\n', (2368, 2387), True, 
'import numpy as np\n'), ((2531, 2557), 'numpy.all', 'np.all', (['(second_round == d2)'], {}), '(second_round == d2)\n', (2537, 2557), True, 'import numpy as np\n'), ((2692, 2717), 'numpy.all', 'np.all', (['(third_round == d3)'], {}), '(third_round == d3)\n', (2698, 2717), True, 'import numpy as np\n'), ((4581, 4615), 'numpy.all', 'np.all', (['(correct_history == history)'], {}), '(correct_history == history)\n', (4587, 4615), True, 'import numpy as np\n'), ((4982, 5027), 'votesim.votemethods.tools.score2rank', 'votesim.votemethods.tools.score2rank', (['ratings'], {}), '(ratings)\n', (5018, 5027), False, 'import votesim\n'), ((5078, 5096), 'votesim.votemethods.irv.irv_stv', 'irv.irv_stv', (['ranks'], {}), '(ranks)\n', (5089, 5096), False, 'from votesim.votemethods import irv\n'), ((5134, 5148), 'votesim.votemethods.irv.irv', 'irv.irv', (['ranks'], {}), '(ranks)\n', (5141, 5148), False, 'from votesim.votemethods import irv\n'), ((5172, 5183), 'numpy.sort', 'np.sort', (['w1'], {}), '(w1)\n', (5179, 5183), True, 'import numpy as np\n'), ((5201, 5212), 'numpy.sort', 'np.sort', (['w2'], {}), '(w2)\n', (5208, 5212), True, 'import numpy as np\n'), ((5230, 5241), 'numpy.sort', 'np.sort', (['t1'], {}), '(t1)\n', (5237, 5241), True, 'import numpy as np\n'), ((5259, 5270), 'numpy.sort', 'np.sort', (['t2'], {}), '(t2)\n', (5266, 5270), True, 'import numpy as np\n'), ((684, 705), 'numpy.in1d', 'np.in1d', (['ties1', 'ties2'], {}), '(ties1, ties2)\n', (691, 705), True, 'import numpy as np\n'), ((5364, 5380), 'numpy.all', 'np.all', (['(w1 == w2)'], {}), '(w1 == w2)\n', (5370, 5380), True, 'import numpy as np\n'), ((5383, 5399), 'numpy.all', 'np.all', (['(t1 == t2)'], {}), '(t1 == t2)\n', (5389, 5399), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 16/1/2019 10:26 AM
# @Description :
# @Author : <NAME>
# @Email : <EMAIL>
# @File : loss_utils.py
import tensorflow as tf
import os
import sys
sys.path.append(os.path.dirname(os.getcwd()))
from Common import pc_util
sys.path.append(os.path.join(os.getcwd(),"tf_ops/sampling"))
sys.path.append(os.path.join(os.getcwd(),"tf_ops/nn_distance"))
sys.path.append(os.path.join(os.getcwd(),"tf_ops/approxmatch"))
sys.path.append(os.path.join(os.getcwd(),"tf_ops/grouping"))
sys.path.append(os.path.join(os.getcwd(),"tf_ops/interpolation"))
import tf_nndistance
import tf_approxmatch
from tf_sampling import gather_point, farthest_point_sample
from tf_grouping import query_ball_point, group_point, knn_point,knn_point_2
#from tf_ops.nn_distance import tf_nndistance
#from tf_ops.approxmatch import tf_approxmatch
# from tf_ops.grouping.tf_grouping import query_ball_point, group_point, knn_point,knn_point_2
# from tf_ops.sampling.tf_sampling import gather_point, farthest_point_sample
import numpy as np
import math
def pc_distance(pcd1, pcd2, dis_type='EMD', radius=1):
    """Dispatch to a point-cloud distance.

    pcd1, pcd2: point-cloud tensors passed straight through to the metric.
    dis_type: 'CD' for Chamfer distance; anything else uses Earth Mover's
        Distance (the default).
    radius: normalisation factor forwarded to the chosen metric.
    """
    if dis_type != 'CD':
        return earth_mover(pcd1, pcd2, radius=radius)
    return chamfer(pcd1, pcd2, radius=radius)
def classify_loss(pre_label, label):
    """Mean sparse softmax cross-entropy between logits and integer labels.

    pre_label: unnormalised class logits.
    label: integer class indices.
    Returns a scalar tensor (batch mean of the per-sample losses).
    """
    per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pre_label, labels=label)
    return tf.reduce_mean(per_sample)
def chamfer(pred, gt, radius=1.0, forward_weight=1.0, threshold=None, return_hd=False):
    """
    Chamfer distance between predicted and ground-truth clouds, normalised
    by `radius` and averaged over points and batch.

    pred: BxNxC predicted points
    gt: ground-truth points (same batch/channel layout)
    forward_weight: relative weight for the forward (gt -> pred) term
    threshold: if given, a *relative* per-batch cutoff — distances larger
        than `threshold * mean(distance)` in each direction are zeroed
        (outlier rejection)
    return_hd: NOTE(review): never read inside this function — dead parameter.
    """
    # nn_distance returns, for each point of gt the distance to its nearest
    # neighbour in pred (forward) and vice versa (backward).
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    if threshold is not None:
        forward_threshold = tf.reduce_mean(dists_forward, keepdims=True, axis=1) * threshold
        backward_threshold = tf.reduce_mean(dists_backward, keepdims=True, axis=1) * threshold
        # only care about distance within threshold (ignore strong outliers)
        dists_forward = tf.where(dists_forward < forward_threshold, dists_forward, tf.zeros_like(dists_forward))
        dists_backward = tf.where(dists_backward < backward_threshold, dists_backward, tf.zeros_like(dists_backward))
    # dists_forward is for each element in gt, the closest distance to this element
    dists_forward = tf.reduce_mean(dists_forward, axis=1)
    dists_backward = tf.reduce_mean(dists_backward, axis=1)
    CD_dist = forward_weight * dists_forward + dists_backward
    CD_dist_norm = CD_dist/radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss
def hausdorff_loss(pred, gt, radius=1.0, forward_weight=1.0, threshold=None):
    """
    Hausdorff-style loss: the worst-case nearest-neighbour distance in each
    direction between the two clouds, weighted, summed and normalised by
    `radius`.

    pred: BxNxC predicted points
    gt: ground-truth points (same batch/channel layout)
    forward_weight: relative weight for the forward (gt -> pred) term
    threshold: absolute cutoff; distances at or above it are zeroed
        (outlier rejection)
    """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # only care about distance within threshold (ignore strong outliers)
    if threshold is not None:
        dists_forward = tf.where(dists_forward < threshold, dists_forward, tf.zeros_like(dists_forward))
        dists_backward = tf.where(dists_backward < threshold, dists_backward, tf.zeros_like(dists_backward))
    # Largest nearest-neighbour distance per cloud, in each direction.
    dists_forward = tf.reduce_max(dists_forward, axis=1)
    dists_backward = tf.reduce_max(dists_backward, axis=1)
    CD_dist = forward_weight * dists_forward + dists_backward
    CD_dist_norm = CD_dist/radius
    # NOTE(review): final reduction is reduce_max over the batch (worst
    # element), unlike chamfer() which uses reduce_mean — confirm intended.
    cd_loss = tf.reduce_max(CD_dist_norm)
    return cd_loss
def get_Geometric_Loss(predictedPts, targetpoints, return_all=False):
    """
    Geometric losses between a predicted and a target cloud.

    Returns (shapeLoss, densityLoss, direction_diff):
      shapeLoss    - symmetric nearest-neighbour (Chamfer-like) distance
      densityLoss  - mean absolute difference of the 8-NN distance profiles
      direction_diff - sum of normalised kNN-offset dot products
    return_all is currently unused (dead parameter).
    """
    # calculate shape loss: min over rows/cols of the pairwise distance
    # matrix gives each point's nearest neighbour in the other cloud.
    square_dist = pairwise_l2_norm2_batch(targetpoints, predictedPts)
    dist = tf.sqrt(square_dist)
    minRow = tf.reduce_min(dist, axis=2)
    minCol = tf.reduce_min(dist, axis=1)
    shapeLoss = tf.reduce_mean(minRow) + tf.reduce_mean(minCol)
    densityWeight = 1.0  # NOTE(review): assigned but never used.
    # calculate density loss: compare the 8 nearest-neighbour distances of
    # the target cloud against those of the prediction. top_k of the
    # negated distances selects the k *smallest* distances.
    square_dist2 = pairwise_l2_norm2_batch(targetpoints, targetpoints)
    dist2 = tf.sqrt(square_dist2)
    nnk = 8
    knndis = tf.nn.top_k(tf.negative(dist), k=nnk)
    knndis2 = tf.nn.top_k(tf.negative(dist2), k=nnk)
    densityLoss = tf.reduce_mean(tf.abs(knndis.values - knndis2.values))
    # Direction term: dot product of the normalised kNN offset vectors.
    gt_offsets = knndis2.values
    gt_offsets_norm = tf.reduce_sum(gt_offsets**2,keep_dims=True)
    gt_offsets_ = gt_offsets / (gt_offsets_norm + 1e-8)
    pt_offsets = knndis.values
    pt_offsets_norm = tf.reduce_sum(pt_offsets**2,keep_dims=True)
    pt_offsets_ = pt_offsets / (pt_offsets_norm + 1e-8)
    direction_diff = tf.reduce_sum(gt_offsets_ * pt_offsets_) # (N)
    # NOTE(review): the torch reference this was ported from uses
    # direction_diff = -(gt_offsets_ * pt_offsets_).sum(-1); the negation
    # is missing here — confirm the intended sign with the caller.
    # gt_offsets_norm = torch.norm(gt_offsets, p=2, dim=1) # (N), float
    # gt_offsets_ = gt_offsets / (gt_offsets_norm.unsqueeze(-1) + 1e-8)
    # pt_offsets_norm = torch.norm(pt_offsets, p=2, dim=1)
    # pt_offsets_ = pt_offsets / (pt_offsets_norm.unsqueeze(-1) + 1e-8)
    # direction_diff = - (gt_offsets_ * pt_offsets_).sum(-1) # (N)
    return shapeLoss, densityLoss,direction_diff
def pairwise_distance(x, y, scope=None):
    """Compute pairwise squared L2 distance between two point clouds.

    Uses the expansion |x - y|^2 = |x|^2 - 2 x.y + |y|^2.
    Args:
      x: tensor (batch_size, num_points, num_dims)
      y: tensor (batch_size, num_points, num_dims)
    Returns:
      pairwise squared distance: (batch_size, num_points, num_points)
    """
    # NOTE(review): tf.op_scope and keep_dims are deprecated TF1 APIs
    # (tf.name_scope / keepdims in later versions).
    with tf.op_scope([x, y], scope, 'pairwise_l2_norm2_batch'):
        y_T = tf.transpose(y, perm=[0, 2, 1])
        x_y = -2 * tf.matmul(x, y_T)
        x_square = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
        y_square = tf.reduce_sum(tf.square(y), axis=-1, keep_dims=True)
        y_square_T = tf.transpose(y_square, perm=[0, 2, 1])
        return x_square + x_y + y_square_T
def pairwise_l2_norm2_batch(x, y, scope=None, num=2048):
    """Pairwise squared L2 distance via explicit tiling.

    Expands both clouds to (B, Nx, C, Ny) / (B, Ny, C, Nx), transposes and
    subtracts, so memory grows as O(Nx*Ny*C); pairwise_distance() above
    computes the same result with matmul.
    Args:
      x: tensor (batch_size, nump_x, num_dims)
      y: tensor (batch_size, nump_y, num_dims)
      num: NOTE(review): never read — dead parameter.
    Returns:
      (batch_size, nump_x, nump_y) squared distances.
    """
    with tf.op_scope([x, y], scope, 'pairwise_l2_norm2_batch'):
        nump_x = tf.shape(x)[1]
        nump_y = tf.shape(y)[1]
        xx = tf.expand_dims(x, -1)
        xx = tf.tile(xx, tf.stack([1, 1, 1, nump_y]))
        yy = tf.expand_dims(y, -1)
        yy = tf.tile(yy, tf.stack([1, 1, 1, nump_x]))
        yy = tf.transpose(yy, perm=[0, 3, 2, 1])
        diff = tf.subtract(xx, yy)
        square_diff = tf.square(diff)
        square_dist = tf.reduce_sum(square_diff, 2)
        return square_dist
def earth_mover(pcd1, pcd2, radius=1.0):
    """Approximate Earth Mover's Distance between two equal-sized clouds,
    normalised by `radius` and by the number of points, averaged over the
    batch. Both clouds must have the same point count."""
    assert pcd1.shape[1] == pcd2.shape[1]
    n_pts = tf.cast(pcd1.shape[1], tf.float32)
    matching = tf_approxmatch.approx_match(pcd1, pcd2)
    transport_cost = tf_approxmatch.match_cost(pcd1, pcd2, matching) / radius
    return tf.reduce_mean(transport_cost / n_pts)
def py_uniform_loss(points, idx, pts_cn, radius):
    """NumPy implementation of the disk-uniformity penalty (run via
    tf.py_func from get_uniform_loss2).

    points: (B, N, C) point clouds
    idx:    (B, npoint, nsample) ball-query neighbour indices
    pts_cn: (B, npoint) number of valid neighbours per disk
    radius: query-ball radius used to derive the expected point spacing
    Returns a float32 scalar: mean over all disks of
    coverage * mean((nearest-neighbour spacing - expected spacing)^2 / expected).
    """
    B,N,C = points.shape
    _,npoint,nsample = idx.shape
    uniform_vals = []
    for i in range(B):
        point = points[i]
        for j in range(npoint):
            number = pts_cn[i,j]
            # Coverage term: squared deviation of the disk's point count
            # from the nominal nsample.
            coverage = np.square(number - nsample) / nsample
            if number<5:
                # Too few points to measure spacing — coverage only.
                uniform_vals.append(coverage)
                continue
            _idx = idx[i, j, :number]
            disk_point = point[_idx]
            # NOTE(review): shape[0] < 0 is never true, so this branch is
            # dead and the kNN path below always runs — the condition was
            # probably meant to be a size threshold; confirm intent.
            if disk_point.shape[0]<0:
                pair_dis = pc_util.get_pairwise_distance(disk_point)#(batch_size, num_points, num_points)
                nan_valid = np.where(pair_dis<1e-7)
                pair_dis[nan_valid]=0
                pair_dis = np.squeeze(pair_dis, axis=0)
                pair_dis = np.sort(pair_dis, axis=1)
                shortest_dis = np.sqrt(pair_dis[:, 1])
            else:
                # Column 0 is each point's distance to itself; keep col 1.
                shortest_dis = pc_util.get_knn_dis(disk_point,disk_point,2)
                shortest_dis = shortest_dis[:,1]
            disk_area = math.pi * (radius ** 2) / disk_point.shape[0]
            #expect_d = math.sqrt(disk_area)
            expect_d = np.sqrt(2 * disk_area / 1.732) # using hexagon
            dis = np.square(shortest_dis - expect_d) / expect_d
            uniform_val = coverage * np.mean(dis)
            uniform_vals.append(uniform_val)
    uniform_dis = np.array(uniform_vals).astype(np.float32)
    uniform_dis = np.mean(uniform_dis)
    return uniform_dis
# Whole version of the uniformity loss: delegates the per-disk statistics to
# the NumPy routine py_uniform_loss via tf.py_func. Slower than
# get_uniform_loss below but measures spacing exactly per valid neighbour.
def get_uniform_loss2(pcd, percentages=[0.002,0.004,0.006,0.008,0.010,0.012,0.015], radius=1.0):
    """Multi-scale uniformity loss over the cloud `pcd` (B, N, C).

    For each percentage p: ball-query disks of radius sqrt(p*radius) around
    5% farthest-point samples, score each disk with py_uniform_loss, and
    weight the scale by sqrt(p*100). Returns the average over scales.
    NOTE(review): mutable default list for `percentages` — harmless here
    (never mutated) but a tuple would be safer.
    """
    B,N,C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss=[]
    for p in percentages:
        nsample = int(N*p)
        r = math.sqrt(p*radius)
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(npoint, pcd)) # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(r, nsample, pcd, new_xyz)#(batch_size, npoint, nsample)
        uniform_val = tf.py_func(py_uniform_loss, [pcd, idx, pts_cnt, r], tf.float32)
        loss.append(uniform_val*math.sqrt(p*100))
    return tf.add_n(loss)/len(percentages)
# Alternative percentage sets tried during development:
#[0.004,0.006,0.008,0.010,0.012]
#[0.006,0.008,0.010,0.012,0.015]
#[0.010,0.012,0.015,0.02,0.025]
# Simplified version, faster: approximates the per-disk spacing with an
# in-graph 2-NN query instead of the exact NumPy computation above.
def get_uniform_loss(pcd, percentages=[0.004,0.006,0.008,0.010,0.012], radius=1.0):
    """Multi-scale in-graph uniformity loss over the cloud `pcd` (B, N, C).

    For each percentage p: ball-query disks around 5% farthest-point
    samples, compare each point's nearest-neighbour distance inside its
    disk against the expected spacing for a uniform square packing, and
    weight the scale by (p*100)^2. Returns the average over scales.
    NOTE(review): mutable default list for `percentages` — never mutated.
    """
    B,N,C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss=[]
    for p in percentages:
        nsample = int(N*p)
        r = math.sqrt(p*radius)
        disk_area = math.pi *(radius ** 2) * p/nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(npoint, pcd)) # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(r, nsample, pcd, new_xyz)#(batch_size, npoint, nsample)
        #expect_len = tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area) # using square
        grouped_pcd = group_point(pcd, idx)
        # Fold the npoint axis into the batch so knn_point runs per disk.
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)
        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        # knn_point's first column is the self-distance; keep the second.
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis+1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis,axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])
        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean*math.pow(p*100,2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss)/len(percentages)
def get_repulsion_loss(pred, nsample=20, radius=0.07, knn=False, use_l1=False, h=0.001):
    """Repulsion loss: penalises points that sit closer than `h` to their
    4 nearest neighbours, pushing the cloud to spread out.

    pred: predicted point cloud
    nsample: neighbours per ball/kNN query
    radius: ball-query radius (ignored when knn=True)
    knn: use kNN grouping instead of ball query
    use_l1: L1 instead of squared-L2 neighbour distances (h is rescaled)
    h: repulsion bandwidth
    """
    if knn:
        _, idx = knn_point_2(nsample, pred, pred)
        # NOTE(review): hard-coded (batch=30, npoint=1024) shape — breaks
        # for any other input size; confirm before using the knn path.
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
        tf.summary.histogram('smooth/unque_index', pts_cnt)
    grouped_pred = group_point(pred, idx) # (batch_size, npoint, nsample, 3)
    # Offsets of each neighbour relative to its centre point.
    grouped_pred -= tf.expand_dims(pred, 2)
    # get the uniform loss
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    # top_k of negated distances = 5 smallest; drop the self-distance.
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:] # remove the first one
    if use_l1:
        h = np.sqrt(h)*2
    print(("h is ", h))
    # val is negative distance, so h + val > 0 only within bandwidth h.
    val = tf.maximum(0.0, h + val) # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
##################################################################################
# Loss function
##################################################################################
def discriminator_loss(d_real, d_fake, gan_type='lsgan'):
    """LSGAN discriminator loss: push real scores towards 1 and fake
    scores towards 0. `gan_type` is accepted for interface compatibility
    but only the least-squares formulation is implemented."""
    real_term = tf.reduce_mean(tf.square(d_real - 1.0))
    fake_term = tf.reduce_mean(tf.square(d_fake))
    return 0.5 * (real_term + fake_term)
def generator_loss(d_fake):
    """LSGAN generator loss: push fake scores towards 1."""
    return tf.reduce_mean(tf.square(d_fake - 1.0))
def discriminator_loss_(D, input_real, input_fake, Ra=False, gan_type='lsgan'):
    """LSGAN discriminator loss computed by applying the discriminator `D`
    to both real and generated inputs. `Ra` and `gan_type` are unused
    placeholders kept for interface compatibility."""
    score_real = D(input_real)
    score_fake = D(input_fake)
    real_term = tf.reduce_mean(tf.square(score_real - 1.0))
    fake_term = tf.reduce_mean(tf.square(score_fake))
    return 0.5 * (real_term + fake_term)
def generator_loss_(D, input_fake):
    """LSGAN generator loss computed by applying the discriminator `D`
    to generated inputs: push their scores towards 1."""
    score_fake = D(input_fake)
    return tf.reduce_mean(tf.square(score_fake - 1.0))
def L1_loss(x, y):
loss = tf.reduce_mean(tf.abs(x - y))
return loss
| [
"tensorflow.unstack",
"tensorflow.shape",
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"math.sqrt",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.negative",
"numpy.array",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tf_appr... | [((1310, 1388), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'pre_label', 'labels': 'label'}), '(logits=pre_label, labels=label)\n', (1356, 1388), True, 'import tensorflow as tf\n'), ((1409, 1429), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (1423, 1429), True, 'import tensorflow as tf\n'), ((1692, 1727), 'tf_nndistance.nn_distance', 'tf_nndistance.nn_distance', (['gt', 'pred'], {}), '(gt, pred)\n', (1717, 1727), False, 'import tf_nndistance\n'), ((2358, 2395), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dists_forward'], {'axis': '(1)'}), '(dists_forward, axis=1)\n', (2372, 2395), True, 'import tensorflow as tf\n'), ((2417, 2455), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dists_backward'], {'axis': '(1)'}), '(dists_backward, axis=1)\n', (2431, 2455), True, 'import tensorflow as tf\n'), ((2566, 2594), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['CD_dist_norm'], {}), '(CD_dist_norm)\n', (2580, 2594), True, 'import tensorflow as tf\n'), ((2842, 2877), 'tf_nndistance.nn_distance', 'tf_nndistance.nn_distance', (['gt', 'pred'], {}), '(gt, pred)\n', (2867, 2877), False, 'import tf_nndistance\n'), ((3299, 3335), 'tensorflow.reduce_max', 'tf.reduce_max', (['dists_forward'], {'axis': '(1)'}), '(dists_forward, axis=1)\n', (3312, 3335), True, 'import tensorflow as tf\n'), ((3357, 3394), 'tensorflow.reduce_max', 'tf.reduce_max', (['dists_backward'], {'axis': '(1)'}), '(dists_backward, axis=1)\n', (3370, 3394), True, 'import tensorflow as tf\n'), ((3505, 3532), 'tensorflow.reduce_max', 'tf.reduce_max', (['CD_dist_norm'], {}), '(CD_dist_norm)\n', (3518, 3532), True, 'import tensorflow as tf\n'), ((3731, 3751), 'tensorflow.sqrt', 'tf.sqrt', (['square_dist'], {}), '(square_dist)\n', (3738, 3751), True, 'import tensorflow as tf\n'), ((3765, 3792), 'tensorflow.reduce_min', 'tf.reduce_min', (['dist'], {'axis': '(2)'}), '(dist, axis=2)\n', (3778, 3792), 
True, 'import tensorflow as tf\n'), ((3806, 3833), 'tensorflow.reduce_min', 'tf.reduce_min', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (3819, 3833), True, 'import tensorflow as tf\n'), ((4034, 4055), 'tensorflow.sqrt', 'tf.sqrt', (['square_dist2'], {}), '(square_dist2)\n', (4041, 4055), True, 'import tensorflow as tf\n'), ((4301, 4347), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gt_offsets ** 2)'], {'keep_dims': '(True)'}), '(gt_offsets ** 2, keep_dims=True)\n', (4314, 4347), True, 'import tensorflow as tf\n'), ((4455, 4501), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(pt_offsets ** 2)'], {'keep_dims': '(True)'}), '(pt_offsets ** 2, keep_dims=True)\n', (4468, 4501), True, 'import tensorflow as tf\n'), ((4576, 4616), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gt_offsets_ * pt_offsets_)'], {}), '(gt_offsets_ * pt_offsets_)\n', (4589, 4616), True, 'import tensorflow as tf\n'), ((6868, 6902), 'tensorflow.cast', 'tf.cast', (['pcd1.shape[1]', 'tf.float32'], {}), '(pcd1.shape[1], tf.float32)\n', (6875, 6902), True, 'import tensorflow as tf\n'), ((6915, 6954), 'tf_approxmatch.approx_match', 'tf_approxmatch.approx_match', (['pcd1', 'pcd2'], {}), '(pcd1, pcd2)\n', (6942, 6954), False, 'import tf_approxmatch\n'), ((6966, 7010), 'tf_approxmatch.match_cost', 'tf_approxmatch.match_cost', (['pcd1', 'pcd2', 'match'], {}), '(pcd1, pcd2, match)\n', (6991, 7010), False, 'import tf_approxmatch\n'), ((7045, 7078), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(cost / num_points)'], {}), '(cost / num_points)\n', (7059, 7078), True, 'import tensorflow as tf\n'), ((8542, 8562), 'numpy.mean', 'np.mean', (['uniform_dis'], {}), '(uniform_dis)\n', (8549, 8562), True, 'import numpy as np\n'), ((10993, 11044), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""smooth/unque_index"""', 'pts_cnt'], {}), "('smooth/unque_index', pts_cnt)\n", (11013, 11044), True, 'import tensorflow as tf\n'), ((11065, 11087), 'tf_grouping.group_point', 'group_point', (['pred', 'idx'], {}), 
'(pred, idx)\n', (11076, 11087), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((11144, 11167), 'tensorflow.expand_dims', 'tf.expand_dims', (['pred', '(2)'], {}), '(pred, 2)\n', (11158, 11167), True, 'import tensorflow as tf\n'), ((11356, 11378), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(-dists)', '(5)'], {}), '(-dists, 5)\n', (11367, 11378), True, 'import tensorflow as tf\n'), ((11503, 11527), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(h + val)'], {}), '(0.0, h + val)\n', (11513, 11527), True, 'import tensorflow as tf\n'), ((11566, 11585), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['val'], {}), '(val)\n', (11580, 11585), True, 'import tensorflow as tf\n'), ((235, 246), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (244, 246), False, 'import os\n'), ((307, 318), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (316, 318), False, 'import os\n'), ((368, 379), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (377, 379), False, 'import os\n'), ((432, 443), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (441, 443), False, 'import os\n'), ((496, 507), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (505, 507), False, 'import os\n'), ((557, 568), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (566, 568), False, 'import os\n'), ((3850, 3872), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['minRow'], {}), '(minRow)\n', (3864, 3872), True, 'import tensorflow as tf\n'), ((3875, 3897), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['minCol'], {}), '(minCol)\n', (3889, 3897), True, 'import tensorflow as tf\n'), ((4094, 4111), 'tensorflow.negative', 'tf.negative', (['dist'], {}), '(dist)\n', (4105, 4111), True, 'import tensorflow as tf\n'), ((4146, 4164), 'tensorflow.negative', 'tf.negative', (['dist2'], {}), '(dist2)\n', (4157, 4164), True, 'import tensorflow as tf\n'), ((4206, 4244), 'tensorflow.abs', 'tf.abs', (['(knndis.values - knndis2.values)'], {}), '(knndis.values - knndis2.values)\n', (4212, 4244), True, 'import tensorflow as tf\n'), 
((5812, 5865), 'tensorflow.op_scope', 'tf.op_scope', (['[x, y]', 'scope', '"""pairwise_l2_norm2_batch"""'], {}), "([x, y], scope, 'pairwise_l2_norm2_batch')\n", (5823, 5865), True, 'import tensorflow as tf\n'), ((5881, 5912), 'tensorflow.transpose', 'tf.transpose', (['y'], {'perm': '[0, 2, 1]'}), '(y, perm=[0, 2, 1])\n', (5893, 5912), True, 'import tensorflow as tf\n'), ((6116, 6154), 'tensorflow.transpose', 'tf.transpose', (['y_square'], {'perm': '[0, 2, 1]'}), '(y_square, perm=[0, 2, 1])\n', (6128, 6154), True, 'import tensorflow as tf\n'), ((6265, 6318), 'tensorflow.op_scope', 'tf.op_scope', (['[x, y]', 'scope', '"""pairwise_l2_norm2_batch"""'], {}), "([x, y], scope, 'pairwise_l2_norm2_batch')\n", (6276, 6318), True, 'import tensorflow as tf\n'), ((6398, 6419), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(-1)'], {}), '(x, -1)\n', (6412, 6419), True, 'import tensorflow as tf\n'), ((6488, 6509), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(-1)'], {}), '(y, -1)\n', (6502, 6509), True, 'import tensorflow as tf\n'), ((6577, 6612), 'tensorflow.transpose', 'tf.transpose', (['yy'], {'perm': '[0, 3, 2, 1]'}), '(yy, perm=[0, 3, 2, 1])\n', (6589, 6612), True, 'import tensorflow as tf\n'), ((6629, 6648), 'tensorflow.subtract', 'tf.subtract', (['xx', 'yy'], {}), '(xx, yy)\n', (6640, 6648), True, 'import tensorflow as tf\n'), ((6671, 6686), 'tensorflow.square', 'tf.square', (['diff'], {}), '(diff)\n', (6680, 6686), True, 'import tensorflow as tf\n'), ((6710, 6739), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['square_diff', '(2)'], {}), '(square_diff, 2)\n', (6723, 6739), True, 'import tensorflow as tf\n'), ((8849, 8870), 'math.sqrt', 'math.sqrt', (['(p * radius)'], {}), '(p * radius)\n', (8858, 8870), False, 'import math\n'), ((9022, 9064), 'tf_grouping.query_ball_point', 'query_ball_point', (['r', 'nsample', 'pcd', 'new_xyz'], {}), '(r, nsample, pcd, new_xyz)\n', (9038, 9064), False, 'from tf_grouping import query_ball_point, group_point, knn_point, 
knn_point_2\n'), ((9118, 9181), 'tensorflow.py_func', 'tf.py_func', (['py_uniform_loss', '[pcd, idx, pts_cnt, r]', 'tf.float32'], {}), '(py_uniform_loss, [pcd, idx, pts_cnt, r], tf.float32)\n', (9128, 9181), True, 'import tensorflow as tf\n'), ((9244, 9258), 'tensorflow.add_n', 'tf.add_n', (['loss'], {}), '(loss)\n', (9252, 9258), True, 'import tensorflow as tf\n'), ((9629, 9650), 'math.sqrt', 'math.sqrt', (['(p * radius)'], {}), '(p * radius)\n', (9638, 9650), False, 'import math\n'), ((9857, 9899), 'tf_grouping.query_ball_point', 'query_ball_point', (['r', 'nsample', 'pcd', 'new_xyz'], {}), '(r, nsample, pcd, new_xyz)\n', (9873, 9899), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((10016, 10034), 'tensorflow.sqrt', 'tf.sqrt', (['disk_area'], {}), '(disk_area)\n', (10023, 10034), True, 'import tensorflow as tf\n'), ((10074, 10095), 'tf_grouping.group_point', 'group_point', (['pcd', 'idx'], {}), '(pcd, idx)\n', (10085, 10095), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((10187, 10225), 'tf_grouping.knn_point', 'knn_point', (['(2)', 'grouped_pcd', 'grouped_pcd'], {}), '(2, grouped_pcd, grouped_pcd)\n', (10196, 10225), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((10341, 10379), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['uniform_dis'], {'axis': '[-1]'}), '(uniform_dis, axis=[-1])\n', (10355, 10379), True, 'import tensorflow as tf\n'), ((10481, 10510), 'tensorflow.reshape', 'tf.reshape', (['uniform_dis', '[-1]'], {}), '(uniform_dis, [-1])\n', (10491, 10510), True, 'import tensorflow as tf\n'), ((10537, 10571), 'tensorflow.nn.moments', 'tf.nn.moments', (['uniform_dis'], {'axes': '(0)'}), '(uniform_dis, axes=0)\n', (10550, 10571), True, 'import tensorflow as tf\n'), ((10666, 10680), 'tensorflow.add_n', 'tf.add_n', (['loss'], {}), '(loss)\n', (10674, 10680), True, 'import tensorflow as tf\n'), ((10820, 10852), 
'tf_grouping.knn_point_2', 'knn_point_2', (['nsample', 'pred', 'pred'], {}), '(nsample, pred, pred)\n', (10831, 10852), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((10871, 10909), 'tensorflow.constant', 'tf.constant', (['nsample'], {'shape': '(30, 1024)'}), '(nsample, shape=(30, 1024))\n', (10882, 10909), True, 'import tensorflow as tf\n'), ((10943, 10988), 'tf_grouping.query_ball_point', 'query_ball_point', (['radius', 'nsample', 'pred', 'pred'], {}), '(radius, nsample, pred, pred)\n', (10959, 10988), False, 'from tf_grouping import query_ball_point, group_point, knn_point, knn_point_2\n'), ((11298, 11339), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(grouped_pred ** 2)'], {'axis': '(-1)'}), '(grouped_pred ** 2, axis=-1)\n', (11311, 11339), True, 'import tensorflow as tf\n'), ((11885, 11908), 'tensorflow.square', 'tf.square', (['(d_real - 1.0)'], {}), '(d_real - 1.0)\n', (11894, 11908), True, 'import tensorflow as tf\n'), ((11941, 11958), 'tensorflow.square', 'tf.square', (['d_fake'], {}), '(d_fake)\n', (11950, 11958), True, 'import tensorflow as tf\n'), ((12077, 12100), 'tensorflow.square', 'tf.square', (['(d_fake - 1.0)'], {}), '(d_fake - 1.0)\n', (12086, 12100), True, 'import tensorflow as tf\n'), ((12286, 12307), 'tensorflow.square', 'tf.square', (['(real - 1.0)'], {}), '(real - 1.0)\n', (12295, 12307), True, 'import tensorflow as tf\n'), ((12340, 12355), 'tensorflow.square', 'tf.square', (['fake'], {}), '(fake)\n', (12349, 12355), True, 'import tensorflow as tf\n'), ((12506, 12527), 'tensorflow.square', 'tf.square', (['(fake - 1.0)'], {}), '(fake - 1.0)\n', (12515, 12527), True, 'import tensorflow as tf\n'), ((12597, 12610), 'tensorflow.abs', 'tf.abs', (['(x - y)'], {}), '(x - y)\n', (12603, 12610), True, 'import tensorflow as tf\n'), ((1786, 1838), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dists_forward'], {'keepdims': '(True)', 'axis': '(1)'}), '(dists_forward, keepdims=True, axis=1)\n', (1800, 1838), 
True, 'import tensorflow as tf\n'), ((1880, 1933), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dists_backward'], {'keepdims': '(True)', 'axis': '(1)'}), '(dists_backward, keepdims=True, axis=1)\n', (1894, 1933), True, 'import tensorflow as tf\n'), ((2106, 2134), 'tensorflow.zeros_like', 'tf.zeros_like', (['dists_forward'], {}), '(dists_forward)\n', (2119, 2134), True, 'import tensorflow as tf\n'), ((2223, 2252), 'tensorflow.zeros_like', 'tf.zeros_like', (['dists_backward'], {}), '(dists_backward)\n', (2236, 2252), True, 'import tensorflow as tf\n'), ((3056, 3084), 'tensorflow.zeros_like', 'tf.zeros_like', (['dists_forward'], {}), '(dists_forward)\n', (3069, 3084), True, 'import tensorflow as tf\n'), ((3164, 3193), 'tensorflow.zeros_like', 'tf.zeros_like', (['dists_backward'], {}), '(dists_backward)\n', (3177, 3193), True, 'import tensorflow as tf\n'), ((5932, 5949), 'tensorflow.matmul', 'tf.matmul', (['x', 'y_T'], {}), '(x, y_T)\n', (5941, 5949), True, 'import tensorflow as tf\n'), ((5984, 5996), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (5993, 5996), True, 'import tensorflow as tf\n'), ((6056, 6068), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (6065, 6068), True, 'import tensorflow as tf\n'), ((6337, 6348), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (6345, 6348), True, 'import tensorflow as tf\n'), ((6369, 6380), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (6377, 6380), True, 'import tensorflow as tf\n'), ((6445, 6472), 'tensorflow.stack', 'tf.stack', (['[1, 1, 1, nump_y]'], {}), '([1, 1, 1, nump_y])\n', (6453, 6472), True, 'import tensorflow as tf\n'), ((6535, 6562), 'tensorflow.stack', 'tf.stack', (['[1, 1, 1, nump_x]'], {}), '([1, 1, 1, nump_x])\n', (6543, 6562), True, 'import tensorflow as tf\n'), ((8254, 8284), 'numpy.sqrt', 'np.sqrt', (['(2 * disk_area / 1.732)'], {}), '(2 * disk_area / 1.732)\n', (8261, 8284), True, 'import numpy as np\n'), ((8481, 8503), 'numpy.array', 'np.array', (['uniform_vals'], {}), 
'(uniform_vals)\n', (8489, 8503), True, 'import numpy as np\n'), ((8936, 8970), 'tf_sampling.farthest_point_sample', 'farthest_point_sample', (['npoint', 'pcd'], {}), '(npoint, pcd)\n', (8957, 8970), False, 'from tf_sampling import gather_point, farthest_point_sample\n'), ((9771, 9805), 'tf_sampling.farthest_point_sample', 'farthest_point_sample', (['npoint', 'pcd'], {}), '(npoint, pcd)\n', (9792, 9805), False, 'from tf_sampling import gather_point, farthest_point_sample\n'), ((10128, 10159), 'tensorflow.unstack', 'tf.unstack', (['grouped_pcd'], {'axis': '(1)'}), '(grouped_pcd, axis=1)\n', (10138, 10159), True, 'import tensorflow as tf\n'), ((10293, 10320), 'tensorflow.abs', 'tf.abs', (['(uniform_dis + 1e-08)'], {}), '(uniform_dis + 1e-08)\n', (10299, 10320), True, 'import tensorflow as tf\n'), ((10401, 10436), 'tensorflow.square', 'tf.square', (['(uniform_dis - expect_len)'], {}), '(uniform_dis - expect_len)\n', (10410, 10436), True, 'import tensorflow as tf\n'), ((10592, 10612), 'math.pow', 'math.pow', (['(p * 100)', '(2)'], {}), '(p * 100, 2)\n', (10600, 10612), False, 'import math\n'), ((11241, 11261), 'tensorflow.abs', 'tf.abs', (['grouped_pred'], {}), '(grouped_pred)\n', (11247, 11261), True, 'import tensorflow as tf\n'), ((11455, 11465), 'numpy.sqrt', 'np.sqrt', (['h'], {}), '(h)\n', (11462, 11465), True, 'import numpy as np\n'), ((7366, 7393), 'numpy.square', 'np.square', (['(number - nsample)'], {}), '(number - nsample)\n', (7375, 7393), True, 'import numpy as np\n'), ((7640, 7681), 'Common.pc_util.get_pairwise_distance', 'pc_util.get_pairwise_distance', (['disk_point'], {}), '(disk_point)\n', (7669, 7681), False, 'from Common import pc_util\n'), ((7747, 7773), 'numpy.where', 'np.where', (['(pair_dis < 1e-07)'], {}), '(pair_dis < 1e-07)\n', (7755, 7773), True, 'import numpy as np\n'), ((7836, 7864), 'numpy.squeeze', 'np.squeeze', (['pair_dis'], {'axis': '(0)'}), '(pair_dis, axis=0)\n', (7846, 7864), True, 'import numpy as np\n'), ((7892, 7917), 
'numpy.sort', 'np.sort', (['pair_dis'], {'axis': '(1)'}), '(pair_dis, axis=1)\n', (7899, 7917), True, 'import numpy as np\n'), ((7949, 7972), 'numpy.sqrt', 'np.sqrt', (['pair_dis[:, 1]'], {}), '(pair_dis[:, 1])\n', (7956, 7972), True, 'import numpy as np\n'), ((8022, 8068), 'Common.pc_util.get_knn_dis', 'pc_util.get_knn_dis', (['disk_point', 'disk_point', '(2)'], {}), '(disk_point, disk_point, 2)\n', (8041, 8068), False, 'from Common import pc_util\n'), ((8320, 8354), 'numpy.square', 'np.square', (['(shortest_dis - expect_d)'], {}), '(shortest_dis - expect_d)\n', (8329, 8354), True, 'import numpy as np\n'), ((8403, 8415), 'numpy.mean', 'np.mean', (['dis'], {}), '(dis)\n', (8410, 8415), True, 'import numpy as np\n'), ((9215, 9233), 'math.sqrt', 'math.sqrt', (['(p * 100)'], {}), '(p * 100)\n', (9224, 9233), False, 'import math\n')] |
# tests.dataset
# Helper functions for tests that utilize downloadable datasets.
#
# Author: <NAME> <<EMAIL>>
# Created: Thu Oct 13 19:55:53 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dataset.py [8f4de77] <EMAIL> $
"""
Helper functions for tests that utilize downloadable datasets.
"""
##########################################################################
## Imports
##########################################################################
import os
import sys
import shutil
import hashlib
import zipfile
import numpy as np
from sklearn.datasets.base import Bunch
try:
import requests
except ImportError:
requests = None
##########################################################################
## Fixtures
##########################################################################
DATASETS = {
'concrete': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/concrete.zip',
'signature': 'b9ea5f26a7bb272a040e2f1a993b26babbf8dc4a04ab8198bb315ca66d71f10d',
'type': 'numpy',
},
'energy': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/energy.zip',
'signature': '19fb86f3bcdde208eed46944172cb643ef6a7d58da103fb568fae43205ed89d3',
'type': 'numpy',
},
'credit': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/credit.zip',
'signature': '4a91339c69f55e18f3f48004328fbcb7868070b618208fed099920427b084e5e',
'type': 'numpy',
},
'occupancy': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/occupancy.zip',
'signature': '429cfe376dc9929a1fa528da89f0e1626e34e19695f3f555d8954025bbc522b8',
'type': 'numpy',
},
'mushroom': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/mushroom.zip',
'signature': '884c43cb70db35d211c67b1cf6a3683b2b4569393d2789d5c07840da4dc85ba8',
'type': 'numpy',
},
'hobbies': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/hobbies.zip',
'signature': '415c8f68df1486d5d84a1d1757a5aa3035aef5ad63ede5013c261d622fbd29d8',
'type': 'corpus',
},
'game': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/game.zip',
'signature': 'b1bd85789a014a898daa34cb5f89ceab6d2cd6488a2e572187e34aa4ec21a43b',
'type': 'numpy',
},
'bikeshare': {
'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/bikeshare.zip',
'signature': 'a9b440f65549746dff680c92ff8bdca3c7265f09db1cf09e708e6e26fc8aba44',
'type': 'numpy',
},
}
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
##########################################################################
## Test Cases that Require Download
##########################################################################
class DatasetMixin(object):
"""
Mixin for unittest.TestCase class to download datasets from S3 for
testing real world machine learning visual diagnostics.
"""
@staticmethod
def sha256sum(path, blocksize=65536):
"""
Computes the SHA256 signature of a file to verify that the file has not
been modified in transit and that it is the correct version of the data.
"""
sig = hashlib.sha256()
with open(path, 'rb') as f:
buf = f.read(blocksize)
while len(buf) > 0:
sig.update(buf)
buf = f.read(blocksize)
return sig.hexdigest()
@staticmethod
def download_data(url, path=FIXTURES, signature=None, extract=True):
"""
Downloads the zipped data set specified at the given URL, saving it to
the output path specified. This function verifies the download with the
given signature (if supplied) and extracts the zip file if requested.
"""
if requests is None:
raise ImportError(
"The requests module is required to download data --\n"
"please install it with pip install requests."
)
# Create the output directory if it does not exist
if not os.path.exists(path):
os.mkdir(path)
# Get the name of the file from the URL
name = os.path.basename(url)
dlpath = os.path.join(path, name)
# Fetch the response in a streaming fashion and write it to disk.
response = requests.get(url, stream=True)
with open(dlpath, 'wb') as f:
for chunk in response.iter_content(65536):
f.write(chunk)
# If verify, compare the signature
if signature is not None:
dlsignature = DatasetMixin.sha256sum(dlpath)
if signature != dlsignature:
raise ValueError(
"Download signature does not match hardcoded signature!"
)
# If extract, extract the zipfile.
if extract:
zf = zipfile.ZipFile(dlpath)
zf.extractall(path)
@staticmethod
def download_all(path=FIXTURES, verify=True, extract=True):
"""
Downloads all the example datasets. If verify is True then compare the
download signature with the hardcoded signature. If extract is True then
extract the contents of the zipfile to the given path.
"""
for name, meta in DATASETS.items():
url = meta['url']
signature = meta['signature'] if verify else None
DatasetMixin.download_data(
url, path=path, signature=signature, extract=extract
)
@staticmethod
def remove_all(fixtures=FIXTURES):
"""
Removes all the downloaded datasets as clean up
"""
shutil.rmtree(fixtures)
@staticmethod
def load_data(name, fixtures=FIXTURES):
"""
Loads the numpy matrix from the specified data set, downloads it if
it hasn't already been downloaded.
"""
# Just in case this is a corpus data set, then do that instead.
if DATASETS[name]['type'] == 'corpus':
return DatasetMixin.load_corpus(name, fixtures)
path = os.path.join(fixtures, name, "{}.csv".format(name))
if not os.path.exists(path):
DatasetMixin.download_all(path=fixtures)
return np.genfromtxt(path, dtype=float, delimiter=',', names=True)
@staticmethod
def load_corpus(name, fixtures=FIXTURES):
"""
Loads a sklearn Bunch with the corpus and downloads it if it hasn't
already been downloaded. Used to test text visualizers.
"""
path = os.path.join(fixtures, name)
if not os.path.exists(path):
DatasetMixin.download_all(path=fixtures)
# Read the directories in the directory as the categories.
categories = [
cat for cat in os.listdir(path)
if os.path.isdir(os.path.join(path, cat))
]
files = [] # holds the file names relative to the root
data = [] # holds the text read from the file
target = [] # holds the string of the category
# Load the data from the files in the corpus
for cat in categories:
for name in os.listdir(os.path.join(path, cat)):
files.append(os.path.join(path, cat, name))
target.append(cat)
with open(os.path.join(path, cat, name), 'r') as f:
data.append(f.read())
# Return the data bunch for use similar to the newsgroups example
return Bunch(
categories=categories,
files=files,
data=data,
target=target,
)
| [
"os.path.exists",
"hashlib.sha256",
"os.listdir",
"zipfile.ZipFile",
"os.path.join",
"requests.get",
"os.path.dirname",
"os.path.basename",
"os.mkdir",
"shutil.rmtree",
"sklearn.datasets.base.Bunch",
"numpy.genfromtxt"
] | [((2665, 2690), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2680, 2690), False, 'import os\n'), ((3328, 3344), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (3342, 3344), False, 'import hashlib\n'), ((4303, 4324), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (4319, 4324), False, 'import os\n'), ((4342, 4366), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (4354, 4366), False, 'import os\n'), ((4461, 4491), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (4473, 4491), False, 'import requests\n'), ((5795, 5818), 'shutil.rmtree', 'shutil.rmtree', (['fixtures'], {}), '(fixtures)\n', (5808, 5818), False, 'import shutil\n'), ((6378, 6437), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(path, dtype=float, delimiter=',', names=True)\n", (6391, 6437), True, 'import numpy as np\n'), ((6682, 6710), 'os.path.join', 'os.path.join', (['fixtures', 'name'], {}), '(fixtures, name)\n', (6694, 6710), False, 'import os\n'), ((7618, 7685), 'sklearn.datasets.base.Bunch', 'Bunch', ([], {'categories': 'categories', 'files': 'files', 'data': 'data', 'target': 'target'}), '(categories=categories, files=files, data=data, target=target)\n', (7623, 7685), False, 'from sklearn.datasets.base import Bunch\n'), ((4190, 4210), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4204, 4210), False, 'import os\n'), ((4224, 4238), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (4232, 4238), False, 'import os\n'), ((5002, 5025), 'zipfile.ZipFile', 'zipfile.ZipFile', (['dlpath'], {}), '(dlpath)\n', (5017, 5025), False, 'import zipfile\n'), ((6287, 6307), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6301, 6307), False, 'import os\n'), ((6726, 6746), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6740, 6746), False, 'import os\n'), ((6919, 6935), 
'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (6929, 6935), False, 'import os\n'), ((7296, 7319), 'os.path.join', 'os.path.join', (['path', 'cat'], {}), '(path, cat)\n', (7308, 7319), False, 'import os\n'), ((6965, 6988), 'os.path.join', 'os.path.join', (['path', 'cat'], {}), '(path, cat)\n', (6977, 6988), False, 'import os\n'), ((7351, 7380), 'os.path.join', 'os.path.join', (['path', 'cat', 'name'], {}), '(path, cat, name)\n', (7363, 7380), False, 'import os\n'), ((7444, 7473), 'os.path.join', 'os.path.join', (['path', 'cat', 'name'], {}), '(path, cat, name)\n', (7456, 7473), False, 'import os\n')] |
from __future__ import division
from nose.tools import *
import numpy as np
import causalinference.causal as c
from utils import random_data
def test_est_propensity():
	"""est_propensity with both covariates entered linearly reproduces the
	known coefficients, log-likelihood, fitted values, and standard errors."""
	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)

	model = c.CausalModel(Y, D, X)
	model.est_propensity()
	prop = model.propensity

	expected_coef = np.array([6.8066090, -0.0244874, -0.7524939])
	expected_fitted = np.array([0.6491366, 0.3117840, 0.2911631,
	                            0.8086407, 0.3013733, 0.6379023])
	expected_se = np.array([8.5373779, 0.4595191, 0.8106499])

	assert_equal(prop['lin'], [0, 1])
	assert_equal(prop['qua'], [])
	assert np.allclose(prop['coef'], expected_coef)
	assert np.allclose(prop['loglike'], -3.626517)
	assert np.allclose(prop['fitted'], expected_fitted)
	assert np.allclose(prop['se'], expected_se)
	assert_equal(set(prop.keys()),
	             {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'})
	# The fitted propensity scores should also be stored on the raw data.
	assert np.allclose(model.raw_data['pscore'], expected_fitted)
def test_est_propensity_s():
	"""est_propensity_s: first with the default basic covariate set (the
	selection keeps only covariate 1), then forcing both covariates in,
	which should reproduce the plain est_propensity results."""
	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	model = c.CausalModel(Y, D, X)

	expected_keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}

	# Default specification search.
	model.est_propensity_s()
	fitted1 = np.array([0.6522105, 0.2995088, 0.2995088,
	                    0.7970526, 0.2995088, 0.6522105])
	assert_equal(model.propensity['lin'], [1])
	assert_equal(model.propensity['qua'], [])
	assert np.allclose(model.propensity['coef'],
	                   np.array([6.5424027, -0.7392041]))
	assert np.allclose(model.propensity['loglike'], -3.627939)
	assert np.allclose(model.propensity['fitted'], fitted1)
	assert np.allclose(model.propensity['se'],
	                   np.array([6.8455179, 0.7641445]))
	assert_equal(set(model.propensity.keys()), expected_keys)
	assert np.allclose(model.raw_data['pscore'], fitted1)

	# Forcing both covariates into the basic linear set.
	model.est_propensity_s([0, 1])
	fitted2 = np.array([0.6491366, 0.3117840, 0.2911631,
	                    0.8086407, 0.3013733, 0.6379023])
	assert_equal(model.propensity['lin'], [0, 1])
	assert_equal(model.propensity['qua'], [])
	assert np.allclose(model.propensity['coef'],
	                   np.array([6.8066090, -0.0244874, -0.7524939]))
	assert np.allclose(model.propensity['loglike'], -3.626517)
	assert np.allclose(model.propensity['fitted'], fitted2)
	assert np.allclose(model.propensity['se'],
	                   np.array([8.5373779, 0.4595191, 0.8106499]))
	assert np.allclose(model.raw_data['pscore'], fitted2)
def test_est_via_ols():
	"""Exercise est_via_ols under all three adjustment settings.

	The original test repeated the same construct/estimate/assert pattern
	three times; here the expected estimates are tabulated and checked in
	a single loop.  The key-set check also verifies that higher adjustment
	levels expose the additional atc/att estimates.
	"""
	Y = np.array([52, 30, 5, 29, 12, 10, 44, 87])
	D = np.array([0, 0, 0, 0, 1, 1, 1, 1])
	X = np.array([[1, 42], [3, 32], [9, 7], [12, 86],
	              [5, 94], [4, 36], [2, 13], [6, 61]])
	causal = c.CausalModel(Y, D, X)

	# Adjustment level -> expected estimates (values from a reference run).
	cases = [
		(0, {'ate': 9.25, 'ate_se': 17.68253}),
		(1, {'ate': 3.654552, 'ate_se': 17.749993}),
		(2, {'ate': 30.59444, 'atc': 63.2095, 'att': -2.020611,
		     'ate_se': 19.91887865, 'atc_se': 29.92152,
		     'att_se': 11.8586}),
	]
	for adj, expected in cases:
		causal.est_via_ols(adj)
		ols = causal.estimates['ols']
		assert_equal(set(ols.keys()), set(expected.keys()))
		for key, value in expected.items():
			assert np.allclose(ols[key], value)
def test_parse_lin_terms():
	"""parse_lin_terms: None and [] give an empty list, 'all' expands to
	every column index, and explicit index lists pass through unchanged."""
	cases = [
		(4, None, []),
		(2, 'all', [0, 1]),
		(2, [1], [1]),
		(2, [], []),
	]
	for K, lin, expected in cases:
		assert_equal(c.parse_lin_terms(K, lin), expected)
def test_parse_qua_terms():
	"""parse_qua_terms: None and [] give an empty list, 'all' expands to
	every unordered index pair, and explicit pair lists pass through."""
	cases = [
		(3, None, []),
		(2, 'all', [(0, 0), (0, 1), (1, 1)]),
		(2, [(0, 1)], [(0, 1)]),
		(2, [], []),
	]
	for K, qua, expected in cases:
		assert_equal(c.parse_qua_terms(K, qua), expected)
def test_split_equal_bins():
	"""Splitting an evenly spread propensity-score sample into five
	equal-sized blocks yields evenly spaced bin edges."""
	pscore = np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.5,
	                    0.6, 0.7, 0.8, 0.9, 0.95])
	expected_edges = [0, 0.2, 0.4, 0.6, 0.8, 1]
	assert_equal(c.split_equal_bins(pscore, 5), expected_edges)
def test_sumlessthan():
	"""sumlessthan(g, sorted_g, cumulative) — when given a cumulative count
	it returns, per element of g, how many sorted entries are <= it; when
	given a cumulative sum, the sum of those entries instead."""
	cases = [
		(np.array([3, 1, 2, 4, 3, 3]),
		 np.array([5, 1, 2, 6, 5, 5]),
		 np.array([12, 1, 3, 16, 12, 12])),
		(np.array([22, 4, 6, 4, 25, 5]),
		 np.array([5, 2, 4, 2, 6, 3]),
		 np.array([41, 8, 19, 8, 66, 13])),
	]
	for g, expected_counts, expected_sums in cases:
		# Derive the auxiliary arrays instead of hard-coding them.
		sorted_g = np.sort(g)
		cum_count = np.arange(1, g.shape[0] + 1)
		cum_sum = np.cumsum(sorted_g)
		assert np.array_equal(c.sumlessthan(g, sorted_g, cum_count),
		                      expected_counts)
		assert np.array_equal(c.sumlessthan(g, sorted_g, cum_sum),
		                      expected_sums)
def test_select_cutoff():
    """Check select_cutoff against two known input vectors."""
    no_trim = np.array([3, 1, 2, 4, 3, 3])
    assert_equal(c.select_cutoff(no_trim), 0)
    with_trim = np.array([22, 4, 6, 4, 25, 5])
    assert np.allclose(c.select_cutoff(with_trim), 0.2113248654)
def test_calc_tstat():
    """Check calc_tstat against a hand-computed t statistic."""
    # same samples as before, built with np.repeat instead of literals
    grp1 = np.repeat([1, 2, 3, 4, 5], [2, 2, 12, 3, 1])
    grp2 = np.repeat([5, 4, 3, 2], [7, 10, 3, 2])
    assert np.allclose(c.calc_tstat(grp1, grp2), 3.632233)
def test_calc_sample_sizes():
D1 = np.array([0, 1, 0, 1, 0, 1])
ans1 = (2, 1, 1, 2)
assert_equal(c.calc_sample_sizes(D1), ans1)
D2 = np.array([0, 1, 0, 1, 0])
ans2 = (1, 1, 2, 1)
assert_equal(c.calc_sample_sizes(D2), ans2)
D3 = np.array([1, 1, 1, 1, 1, 1])
ans3 = (0, 3, 0, 3)
assert_equal(c.calc_sample_sizes(D3), ans3)
D4 = np.array([0, 0, 0])
ans4 = (1, 0, 2, 0)
assert_equal(c.calc_sample_sizes(D4), ans4)
def test_select_blocks():
pscore1 = np.array([0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95])
D1 = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1])
logodds1 = np.log(pscore1 / (1-pscore1))
K1 = 1
ans1 = np.array([0.05, 0.5, 0.5, 0.95])
test1 = np.array(c.select_blocks(pscore1, logodds1, D1, K1, 0, 1))
assert np.allclose(test1, ans1)
pscore2 = np.array([0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95])
D2 = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1])
logodds2 = np.log(pscore1 / (1-pscore1))
K2 = 2
ans2 = np.array([0, 1])
test2 = np.array(c.select_blocks(pscore2, logodds2, D2, K2, 0, 1))
assert np.allclose(test2, ans2)
| [
"causalinference.causal.sumlessthan",
"causalinference.causal.select_cutoff",
"numpy.allclose",
"causalinference.causal.parse_qua_terms",
"numpy.log",
"numpy.array",
"utils.random_data",
"causalinference.causal.CausalModel",
"causalinference.causal.parse_lin_terms",
"causalinference.causal.split_e... | [((177, 205), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (185, 205), True, 'import numpy as np\n'), ((211, 272), 'numpy.array', 'np.array', (['[[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]]'], {}), '([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])\n', (219, 272), True, 'import numpy as np\n'), ((278, 307), 'utils.random_data', 'random_data', ([], {'D_cur': 'D', 'X_cur': 'X'}), '(D_cur=D, X_cur=X)\n', (289, 307), False, 'from utils import random_data\n'), ((318, 340), 'causalinference.causal.CausalModel', 'c.CausalModel', (['Y', 'D', 'X'], {}), '(Y, D, X)\n', (331, 340), True, 'import causalinference.causal as c\n'), ((399, 443), 'numpy.array', 'np.array', (['[6.806609, -0.0244874, -0.7524939]'], {}), '([6.806609, -0.0244874, -0.7524939])\n', (407, 443), True, 'import numpy as np\n'), ((476, 551), 'numpy.array', 'np.array', (['[0.6491366, 0.311784, 0.2911631, 0.8086407, 0.3013733, 0.6379023]'], {}), '([0.6491366, 0.311784, 0.2911631, 0.8086407, 0.3013733, 0.6379023])\n', (484, 551), True, 'import numpy as np\n'), ((579, 622), 'numpy.array', 'np.array', (['[8.5373779, 0.4595191, 0.8106499]'], {}), '([8.5373779, 0.4595191, 0.8106499])\n', (587, 622), True, 'import numpy as np\n'), ((781, 825), 'numpy.allclose', 'np.allclose', (["causal.propensity['coef']", 'coef'], {}), "(causal.propensity['coef'], coef)\n", (792, 825), True, 'import numpy as np\n'), ((834, 884), 'numpy.allclose', 'np.allclose', (["causal.propensity['loglike']", 'loglike'], {}), "(causal.propensity['loglike'], loglike)\n", (845, 884), True, 'import numpy as np\n'), ((893, 941), 'numpy.allclose', 'np.allclose', (["causal.propensity['fitted']", 'fitted'], {}), "(causal.propensity['fitted'], fitted)\n", (904, 941), True, 'import numpy as np\n'), ((950, 990), 'numpy.allclose', 'np.allclose', (["causal.propensity['se']", 'se'], {}), "(causal.propensity['se'], se)\n", (961, 990), True, 'import numpy as np\n'), ((1050, 
1096), 'numpy.allclose', 'np.allclose', (["causal.raw_data['pscore']", 'fitted'], {}), "(causal.raw_data['pscore'], fitted)\n", (1061, 1096), True, 'import numpy as np\n'), ((1135, 1163), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (1143, 1163), True, 'import numpy as np\n'), ((1169, 1230), 'numpy.array', 'np.array', (['[[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]]'], {}), '([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])\n', (1177, 1230), True, 'import numpy as np\n'), ((1236, 1265), 'utils.random_data', 'random_data', ([], {'D_cur': 'D', 'X_cur': 'X'}), '(D_cur=D, X_cur=X)\n', (1247, 1265), False, 'from utils import random_data\n'), ((1276, 1298), 'causalinference.causal.CausalModel', 'c.CausalModel', (['Y', 'D', 'X'], {}), '(Y, D, X)\n', (1289, 1298), True, 'import causalinference.causal as c\n'), ((1359, 1392), 'numpy.array', 'np.array', (['[6.5424027, -0.7392041]'], {}), '([6.5424027, -0.7392041])\n', (1367, 1392), True, 'import numpy as np\n'), ((1426, 1502), 'numpy.array', 'np.array', (['[0.6522105, 0.2995088, 0.2995088, 0.7970526, 0.2995088, 0.6522105]'], {}), '([0.6522105, 0.2995088, 0.2995088, 0.7970526, 0.2995088, 0.6522105])\n', (1434, 1502), True, 'import numpy as np\n'), ((1530, 1562), 'numpy.array', 'np.array', (['[6.8455179, 0.7641445]'], {}), '([6.8455179, 0.7641445])\n', (1538, 1562), True, 'import numpy as np\n'), ((1723, 1768), 'numpy.allclose', 'np.allclose', (["causal.propensity['coef']", 'coef1'], {}), "(causal.propensity['coef'], coef1)\n", (1734, 1768), True, 'import numpy as np\n'), ((1777, 1828), 'numpy.allclose', 'np.allclose', (["causal.propensity['loglike']", 'loglike1'], {}), "(causal.propensity['loglike'], loglike1)\n", (1788, 1828), True, 'import numpy as np\n'), ((1837, 1886), 'numpy.allclose', 'np.allclose', (["causal.propensity['fitted']", 'fitted1'], {}), "(causal.propensity['fitted'], fitted1)\n", (1848, 1886), True, 'import numpy as np\n'), ((1895, 1936), 'numpy.allclose', 
'np.allclose', (["causal.propensity['se']", 'se1'], {}), "(causal.propensity['se'], se1)\n", (1906, 1936), True, 'import numpy as np\n'), ((1996, 2043), 'numpy.allclose', 'np.allclose', (["causal.raw_data['pscore']", 'fitted1'], {}), "(causal.raw_data['pscore'], fitted1)\n", (2007, 2043), True, 'import numpy as np\n'), ((2112, 2156), 'numpy.array', 'np.array', (['[6.806609, -0.0244874, -0.7524939]'], {}), '([6.806609, -0.0244874, -0.7524939])\n', (2120, 2156), True, 'import numpy as np\n'), ((2191, 2266), 'numpy.array', 'np.array', (['[0.6491366, 0.311784, 0.2911631, 0.8086407, 0.3013733, 0.6379023]'], {}), '([0.6491366, 0.311784, 0.2911631, 0.8086407, 0.3013733, 0.6379023])\n', (2199, 2266), True, 'import numpy as np\n'), ((2296, 2339), 'numpy.array', 'np.array', (['[8.5373779, 0.4595191, 0.8106499]'], {}), '([8.5373779, 0.4595191, 0.8106499])\n', (2304, 2339), True, 'import numpy as np\n'), ((2441, 2486), 'numpy.allclose', 'np.allclose', (["causal.propensity['coef']", 'coef2'], {}), "(causal.propensity['coef'], coef2)\n", (2452, 2486), True, 'import numpy as np\n'), ((2495, 2546), 'numpy.allclose', 'np.allclose', (["causal.propensity['loglike']", 'loglike2'], {}), "(causal.propensity['loglike'], loglike2)\n", (2506, 2546), True, 'import numpy as np\n'), ((2555, 2604), 'numpy.allclose', 'np.allclose', (["causal.propensity['fitted']", 'fitted2'], {}), "(causal.propensity['fitted'], fitted2)\n", (2566, 2604), True, 'import numpy as np\n'), ((2613, 2654), 'numpy.allclose', 'np.allclose', (["causal.propensity['se']", 'se2'], {}), "(causal.propensity['se'], se2)\n", (2624, 2654), True, 'import numpy as np\n'), ((2663, 2710), 'numpy.allclose', 'np.allclose', (["causal.raw_data['pscore']", 'fitted2'], {}), "(causal.raw_data['pscore'], fitted2)\n", (2674, 2710), True, 'import numpy as np\n'), ((2743, 2784), 'numpy.array', 'np.array', (['[52, 30, 5, 29, 12, 10, 44, 87]'], {}), '([52, 30, 5, 29, 12, 10, 44, 87])\n', (2751, 2784), True, 'import numpy as np\n'), ((2790, 
2824), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (2798, 2824), True, 'import numpy as np\n'), ((2830, 2916), 'numpy.array', 'np.array', (['[[1, 42], [3, 32], [9, 7], [12, 86], [5, 94], [4, 36], [2, 13], [6, 61]]'], {}), '([[1, 42], [3, 32], [9, 7], [12, 86], [5, 94], [4, 36], [2, 13], [6,\n 61]])\n', (2838, 2916), True, 'import numpy as np\n'), ((2938, 2960), 'causalinference.causal.CausalModel', 'c.CausalModel', (['Y', 'D', 'X'], {}), '(Y, D, X)\n', (2951, 2960), True, 'import causalinference.causal as c\n'), ((3066, 3115), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate']", 'ate1'], {}), "(causal.estimates['ols']['ate'], ate1)\n", (3077, 3115), True, 'import numpy as np\n'), ((3124, 3179), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate_se']", 'ate_se1'], {}), "(causal.estimates['ols']['ate_se'], ate_se1)\n", (3135, 3179), True, 'import numpy as np\n'), ((3348, 3397), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate']", 'ate2'], {}), "(causal.estimates['ols']['ate'], ate2)\n", (3359, 3397), True, 'import numpy as np\n'), ((3406, 3461), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate_se']", 'ate_se2'], {}), "(causal.estimates['ols']['ate_se'], ate_se2)\n", (3417, 3461), True, 'import numpy as np\n'), ((3739, 3788), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate']", 'ate3'], {}), "(causal.estimates['ols']['ate'], ate3)\n", (3750, 3788), True, 'import numpy as np\n'), ((3797, 3846), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['atc']", 'atc3'], {}), "(causal.estimates['ols']['atc'], atc3)\n", (3808, 3846), True, 'import numpy as np\n'), ((3855, 3904), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['att']", 'att3'], {}), "(causal.estimates['ols']['att'], att3)\n", (3866, 3904), True, 'import numpy as np\n'), ((3913, 3968), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['ate_se']", 
'ate_se3'], {}), "(causal.estimates['ols']['ate_se'], ate_se3)\n", (3924, 3968), True, 'import numpy as np\n'), ((3977, 4032), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['atc_se']", 'atc_se3'], {}), "(causal.estimates['ols']['atc_se'], atc_se3)\n", (3988, 4032), True, 'import numpy as np\n'), ((4041, 4096), 'numpy.allclose', 'np.allclose', (["causal.estimates['ols']['att_se']", 'att_se3'], {}), "(causal.estimates['ols']['att_se'], att_se3)\n", (4052, 4096), True, 'import numpy as np\n'), ((4947, 5014), 'numpy.array', 'np.array', (['[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95]'], {}), '([0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95])\n', (4955, 5014), True, 'import numpy as np\n'), ((5170, 5198), 'numpy.array', 'np.array', (['[3, 1, 2, 4, 3, 3]'], {}), '([3, 1, 2, 4, 3, 3])\n', (5178, 5198), True, 'import numpy as np\n'), ((5206, 5234), 'numpy.array', 'np.array', (['[1, 2, 3, 3, 3, 4]'], {}), '([1, 2, 3, 3, 3, 4])\n', (5214, 5234), True, 'import numpy as np\n'), ((5243, 5271), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (5251, 5271), True, 'import numpy as np\n'), ((5280, 5310), 'numpy.array', 'np.array', (['[1, 3, 6, 9, 12, 16]'], {}), '([1, 3, 6, 9, 12, 16])\n', (5288, 5310), True, 'import numpy as np\n'), ((5320, 5348), 'numpy.array', 'np.array', (['[5, 1, 2, 6, 5, 5]'], {}), '([5, 1, 2, 6, 5, 5])\n', (5328, 5348), True, 'import numpy as np\n'), ((5357, 5389), 'numpy.array', 'np.array', (['[12, 1, 3, 16, 12, 12]'], {}), '([12, 1, 3, 16, 12, 12])\n', (5365, 5389), True, 'import numpy as np\n'), ((5515, 5545), 'numpy.array', 'np.array', (['[22, 4, 6, 4, 25, 5]'], {}), '([22, 4, 6, 4, 25, 5])\n', (5523, 5545), True, 'import numpy as np\n'), ((5553, 5583), 'numpy.array', 'np.array', (['[4, 4, 5, 6, 22, 25]'], {}), '([4, 4, 5, 6, 22, 25])\n', (5561, 5583), True, 'import numpy as np\n'), ((5592, 5620), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (5600, 
5620), True, 'import numpy as np\n'), ((5629, 5661), 'numpy.array', 'np.array', (['[4, 8, 13, 19, 41, 66]'], {}), '([4, 8, 13, 19, 41, 66])\n', (5637, 5661), True, 'import numpy as np\n'), ((5671, 5699), 'numpy.array', 'np.array', (['[5, 2, 4, 2, 6, 3]'], {}), '([5, 2, 4, 2, 6, 3])\n', (5679, 5699), True, 'import numpy as np\n'), ((5708, 5740), 'numpy.array', 'np.array', (['[41, 8, 19, 8, 66, 13]'], {}), '([41, 8, 19, 8, 66, 13])\n', (5716, 5740), True, 'import numpy as np\n'), ((5894, 5922), 'numpy.array', 'np.array', (['[3, 1, 2, 4, 3, 3]'], {}), '([3, 1, 2, 4, 3, 3])\n', (5902, 5922), True, 'import numpy as np\n'), ((5981, 6011), 'numpy.array', 'np.array', (['[22, 4, 6, 4, 25, 5]'], {}), '([22, 4, 6, 4, 25, 5])\n', (5989, 6011), True, 'import numpy as np\n'), ((6117, 6187), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5]'], {}), '([1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5])\n', (6125, 6187), True, 'import numpy as np\n'), ((6220, 6296), 'numpy.array', 'np.array', (['[5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2]'], {}), '([5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2])\n', (6228, 6296), True, 'import numpy as np\n'), ((6431, 6459), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 0, 1]'], {}), '([0, 1, 0, 1, 0, 1])\n', (6439, 6459), True, 'import numpy as np\n'), ((6533, 6558), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 0]'], {}), '([0, 1, 0, 1, 0])\n', (6541, 6558), True, 'import numpy as np\n'), ((6632, 6660), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (6640, 6660), True, 'import numpy as np\n'), ((6734, 6753), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6742, 6753), True, 'import numpy as np\n'), ((6860, 6919), 'numpy.array', 'np.array', (['[0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95]'], {}), '([0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95])\n', (6868, 6919), True, 'import numpy as np\n'), ((6926, 
6963), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 0, 0, 1, 1, 1])\n', (6934, 6963), True, 'import numpy as np\n'), ((6976, 7007), 'numpy.log', 'np.log', (['(pscore1 / (1 - pscore1))'], {}), '(pscore1 / (1 - pscore1))\n', (6982, 7007), True, 'import numpy as np\n'), ((7022, 7054), 'numpy.array', 'np.array', (['[0.05, 0.5, 0.5, 0.95]'], {}), '([0.05, 0.5, 0.5, 0.95])\n', (7030, 7054), True, 'import numpy as np\n'), ((7131, 7155), 'numpy.allclose', 'np.allclose', (['test1', 'ans1'], {}), '(test1, ans1)\n', (7142, 7155), True, 'import numpy as np\n'), ((7168, 7227), 'numpy.array', 'np.array', (['[0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95]'], {}), '([0.05, 0.06, 0.3, 0.4, 0.5, 0.6, 0.7, 0.95, 0.95])\n', (7176, 7227), True, 'import numpy as np\n'), ((7234, 7271), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 0, 0, 1, 1, 1])\n', (7242, 7271), True, 'import numpy as np\n'), ((7284, 7315), 'numpy.log', 'np.log', (['(pscore1 / (1 - pscore1))'], {}), '(pscore1 / (1 - pscore1))\n', (7290, 7315), True, 'import numpy as np\n'), ((7330, 7346), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (7338, 7346), True, 'import numpy as np\n'), ((7423, 7447), 'numpy.allclose', 'np.allclose', (['test2', 'ans2'], {}), '(test2, ans2)\n', (7434, 7447), True, 'import numpy as np\n'), ((4232, 4259), 'causalinference.causal.parse_lin_terms', 'c.parse_lin_terms', (['K1', 'lin1'], {}), '(K1, lin1)\n', (4249, 4259), True, 'import causalinference.causal as c\n'), ((4319, 4346), 'causalinference.causal.parse_lin_terms', 'c.parse_lin_terms', (['K2', 'lin2'], {}), '(K2, lin2)\n', (4336, 4346), True, 'import causalinference.causal as c\n'), ((4401, 4428), 'causalinference.causal.parse_lin_terms', 'c.parse_lin_terms', (['K3', 'lin3'], {}), '(K3, lin3)\n', (4418, 4428), True, 'import causalinference.causal as c\n'), ((4481, 4508), 'causalinference.causal.parse_lin_terms', 'c.parse_lin_terms', (['K4', 'lin4'], {}), 
'(K4, lin4)\n', (4498, 4508), True, 'import causalinference.causal as c\n'), ((4593, 4620), 'causalinference.causal.parse_qua_terms', 'c.parse_qua_terms', (['K1', 'qua1'], {}), '(K1, qua1)\n', (4610, 4620), True, 'import causalinference.causal as c\n'), ((4698, 4725), 'causalinference.causal.parse_qua_terms', 'c.parse_qua_terms', (['K2', 'qua2'], {}), '(K2, qua2)\n', (4715, 4725), True, 'import causalinference.causal as c\n'), ((4790, 4817), 'causalinference.causal.parse_qua_terms', 'c.parse_qua_terms', (['K3', 'qua3'], {}), '(K3, qua3)\n', (4807, 4817), True, 'import causalinference.causal as c\n'), ((4870, 4897), 'causalinference.causal.parse_qua_terms', 'c.parse_qua_terms', (['K4', 'qua4'], {}), '(K4, qua4)\n', (4887, 4897), True, 'import causalinference.causal as c\n'), ((5096, 5130), 'causalinference.causal.split_equal_bins', 'c.split_equal_bins', (['pscore', 'blocks'], {}), '(pscore, blocks)\n', (5114, 5130), True, 'import causalinference.causal as c\n'), ((5413, 5441), 'causalinference.causal.sumlessthan', 'c.sumlessthan', (['g1', 'sg1', 'cs11'], {}), '(g1, sg1, cs11)\n', (5426, 5441), True, 'import causalinference.causal as c\n'), ((5472, 5500), 'causalinference.causal.sumlessthan', 'c.sumlessthan', (['g1', 'sg1', 'csg1'], {}), '(g1, sg1, csg1)\n', (5485, 5500), True, 'import causalinference.causal as c\n'), ((5764, 5792), 'causalinference.causal.sumlessthan', 'c.sumlessthan', (['g2', 'sg2', 'cs12'], {}), '(g2, sg2, cs12)\n', (5777, 5792), True, 'import causalinference.causal as c\n'), ((5823, 5851), 'causalinference.causal.sumlessthan', 'c.sumlessthan', (['g2', 'sg2', 'csg2'], {}), '(g2, sg2, csg2)\n', (5836, 5851), True, 'import causalinference.causal as c\n'), ((5947, 5966), 'causalinference.causal.select_cutoff', 'c.select_cutoff', (['g1'], {}), '(g1)\n', (5962, 5966), True, 'import causalinference.causal as c\n'), ((6053, 6072), 'causalinference.causal.select_cutoff', 'c.select_cutoff', (['g2'], {}), '(g2)\n', (6068, 6072), True, 'import 
causalinference.causal as c\n'), ((6355, 6385), 'causalinference.causal.calc_tstat', 'c.calc_tstat', (['sample1', 'sample2'], {}), '(sample1, sample2)\n', (6367, 6385), True, 'import causalinference.causal as c\n'), ((6495, 6518), 'causalinference.causal.calc_sample_sizes', 'c.calc_sample_sizes', (['D1'], {}), '(D1)\n', (6514, 6518), True, 'import causalinference.causal as c\n'), ((6594, 6617), 'causalinference.causal.calc_sample_sizes', 'c.calc_sample_sizes', (['D2'], {}), '(D2)\n', (6613, 6617), True, 'import causalinference.causal as c\n'), ((6696, 6719), 'causalinference.causal.calc_sample_sizes', 'c.calc_sample_sizes', (['D3'], {}), '(D3)\n', (6715, 6719), True, 'import causalinference.causal as c\n'), ((6789, 6812), 'causalinference.causal.calc_sample_sizes', 'c.calc_sample_sizes', (['D4'], {}), '(D4)\n', (6808, 6812), True, 'import causalinference.causal as c\n'), ((7073, 7121), 'causalinference.causal.select_blocks', 'c.select_blocks', (['pscore1', 'logodds1', 'D1', 'K1', '(0)', '(1)'], {}), '(pscore1, logodds1, D1, K1, 0, 1)\n', (7088, 7121), True, 'import causalinference.causal as c\n'), ((7365, 7413), 'causalinference.causal.select_blocks', 'c.select_blocks', (['pscore2', 'logodds2', 'D2', 'K2', '(0)', '(1)'], {}), '(pscore2, logodds2, D2, K2, 0, 1)\n', (7380, 7413), True, 'import causalinference.causal as c\n')] |
"""
utility functions for narps analysis
"""
import os
import glob
import nilearn.input_data
import numpy
import pandas
import nibabel
from scipy.stats import norm, t
import scipy.stats
from datetime import datetime
from sklearn.metrics import cohen_kappa_score
def stringify_dict(d):
    """Create a pretty, printable description of an argument dict.

    Parameters
    ----------
    d : dict
        Typically ``locals()`` captured inside a function; any
        ``'self'`` entry is omitted from the output.

    Returns
    -------
    str
        ``'Arguments:'`` followed by one ``key: value`` line per entry,
        each line terminated by a newline.

    Notes
    -----
    Unlike the previous implementation, the input dict is not modified
    (no entries are deleted and no values are coerced to str in place).
    """
    lines = ['Arguments:']
    for key, value in d.items():
        if key == 'self':
            continue
        lines.append('%s: %s' % (key, str(value)))
    return '\n'.join(lines) + '\n'
def log_to_file(fname, s, flush=False,
                add_timestamp=True,
                also_print=True,
                headspace=0):
    """Append a message to a log file.

    Parameters
    ----------
    fname : str
        Path of the log file (created on first write).
    s : str or object
        Message; non-strings are converted with ``str()``.
    flush : bool
        If True, remove any existing file first, and (when
        ``add_timestamp`` is also True) append a timestamp line
        after the message.
    add_timestamp : bool
        Only consulted when ``flush`` is True.
    also_print : bool
        Echo the message to stdout as well.
    headspace : int
        Number of blank lines to prepend before the message.
    """
    if flush and os.path.exists(fname):
        os.remove(fname)
    msg = s if isinstance(s, str) else str(s)
    if headspace > 0:
        msg = os.linesep * headspace + msg
    with open(fname, 'a+') as f:
        if also_print:
            print(msg)
        f.write(msg + os.linesep)
        if flush and add_timestamp:
            f.write(datetime.isoformat(datetime.now()) + 2 * os.linesep)
def get_masked_data(hyp, mask_img, output_dir,
                    imgtype='unthresh', dataset='zstat'):
    """Load per-team maps for one hypothesis, masked to ``mask_img``.

    Globs ``<output_dir>/<dataset>/*/hypo<hyp>_<imgtype>.nii.gz``,
    extracts in-mask voxel values for each map, and returns the data
    matrix together with the team label parsed from each directory name.

    Raises
    ------
    Exception
        If ``imgtype`` is neither 'unthresh' nor 'thresh'.
    """
    if imgtype == 'unthresh':
        pattern = '%s/*/hypo%d_unthresh.nii.gz' % (dataset, hyp)
    elif imgtype == 'thresh':
        pattern = '%s/*/hypo%d_thresh.nii.gz' % (dataset, hyp)
    else:
        raise Exception('bad imgtype argument')
    hmaps = sorted(glob.glob(os.path.join(output_dir, pattern)))
    masker = nilearn.input_data.NiftiMasker(mask_img=mask_img)
    maskdata = numpy.nan_to_num(masker.fit_transform(hmaps))
    if imgtype == 'thresh':
        # binarize: anything meaningfully above zero counts as active
        maskdata = (maskdata > 1e-4).astype('float')
    # team label is the second '_'-separated token of the directory name
    labels = [os.path.basename(os.path.dirname(h)).split('_')[1]
              for h in hmaps]
    return(maskdata, labels)
# load concatenated data - this is meant to replace
# get_masked_data()
def get_concat_data(hyp, mask_img, output_dir,
                    imgtype='unthresh', dataset='zstat',
                    vox_mask_thresh=None,
                    logfile=None):
    """Load pre-concatenated maps for one hypothesis within a mask.

    Reads ``<imgtype>_concat_<dataset>/hypo<hyp>.nii.gz`` plus its
    ``.labels`` sidecar.  If ``vox_mask_thresh`` is given, the matching
    ``_voxelmap`` image is loaded and only voxels where at least that
    proportion of teams contributed data are kept.
    """
    concat_file = os.path.join(
        output_dir,
        '%s_concat_%s' % (imgtype, dataset),
        'hypo%d.nii.gz' % hyp)
    assert os.path.exists(concat_file)
    labelfile = concat_file.replace('.nii.gz', '.labels')
    assert os.path.exists(labelfile)
    # first whitespace-separated token of each line is the team label
    with open(labelfile, 'r') as f:
        labels = [line.strip().split()[0] for line in f.readlines()]
    masker = nilearn.input_data.NiftiMasker(mask_img=mask_img)
    maskdata = numpy.nan_to_num(masker.fit_transform(concat_file))
    if imgtype == 'thresh':
        # binarize: anything meaningfully above zero counts as active
        maskdata = (maskdata > 1e-4).astype('float')
    if vox_mask_thresh is not None:
        assert 0 <= vox_mask_thresh <= 1
        voxmap_file = concat_file.replace(
            '.nii.gz', '_voxelmap.nii.gz'
        )
        voxmaskdata = masker.fit_transform(voxmap_file)
        keep = voxmaskdata[0, :] >= vox_mask_thresh
        maskdata = maskdata[:, keep]
        if logfile is not None:
            log_to_file(
                logfile,
                'hyp %d: number of nonzero voxels (%s %s): %d' %
                (hyp, imgtype, dataset, numpy.sum(keep)))
    return(maskdata, labels)
def get_metadata(metadata_file,
                 index_var='teamID'):
    """Load and clean the team metadata spreadsheet.

    Parameters
    ----------
    metadata_file : str
        Path to the Excel metadata file (header on the second row).
    index_var : str
        Column whose values become the frame's index.
    """
    metadata = pandas.read_excel(metadata_file, header=1)
    metadata.teamID = [team.strip() for team in metadata.teamID]
    metadata.index = metadata[index_var].values
    # keep only the first comma-separated token of the free-text response
    metadata['used_fmriprep_data'] = [
        resp.strip().split(',')[0]
        for resp in metadata['used_fmriprep_data']]
    # manual fix for a typo in one response
    metadata['used_fmriprep_data'] = metadata[
        'used_fmriprep_data'].replace({'Yas': 'Yes'})
    # manually corrected sample sizes for two teams
    metadata.loc['E6R3', 'n_participants'] = 108
    metadata.loc['J7F9', 'n_participants'] = 107
    # some entries are multi-line strings; keep the first line as an int
    metadata['n_participants'] = [
        int(entry.split('\n')[0]) if isinstance(entry, str)
        else entry for entry in metadata['n_participants']]
    # last path component of the NeuroVault link identifies the collection
    metadata['NV_collection_string'] = [
        os.path.basename(link.strip('/')) for link in
        metadata['NV_collection_link']]
    return(metadata)
def get_map_metadata(map_metadata_file):
    """Load per-team map metadata and normalize the unthresholded map type.

    Reads the survey CSV (first row skipped), uppercases team IDs,
    de-duplicates keeping the latest response, reduces the free-text
    unthresholded-map description to its statistic letter, applies a few
    manual corrections, and maps anything other than 't'/'z' to 'NA'.
    """
    colnames = ['timestamp', 'teamID', 'software', 'unthresh_type',
                'thresh_type', 'MNItemplate', 'hyp5_direction',
                'hyp6_direction', 'hyp9_direction', 'comments']
    map_info = pandas.read_csv(
        map_metadata_file, names=colnames, skiprows=1)
    # normalize and index by team ID, keeping the latest duplicate
    map_info.teamID = [tid.upper() for tid in map_info.teamID]
    del map_info['timestamp']
    map_info.index = map_info.teamID
    map_info = map_info.drop_duplicates(
        subset='teamID', keep='last')
    # 'z values' / 't values' etc. -> 'z' / 't' / other
    map_info.loc[:, 'unthresh_type'] = [
        entry.split('values')[0].strip()
        for entry in map_info.unthresh_type]
    # manual fixes for teams with known map types
    for team in ('E3B6', '5G9K', 'DC61'):
        map_info.loc[team, 'unthresh_type'] = 't'
    # anything that is not a recognized statistic becomes 'NA'
    map_info.loc[:, 'unthresh_type'] = [
        entry if entry in ['t', 'z'] else 'NA'
        for entry in map_info.unthresh_type]
    return(map_info)
def get_decisions(decisions_file, tidy=False):
    """Load the team decision spreadsheet.

    Parameters
    ----------
    decisions_file : str
        Path to the Excel decisions file (first row is skipped).
    tidy : bool
        If True, return a long-format frame with one row per
        team/hypothesis and columns
        ['teamID', 'Decision', 'varnum', 'Similar', 'Confidence'],
        where Decision is recoded to 0/1 and Similar/Confidence to int.
        Otherwise return the raw wide-format frame.
    """
    colnames = ['teamID']
    for hyp in range(1, 10):
        colnames += ['Decision%d' % hyp,
                     'Confidence%d' % hyp,
                     'Similar%d' % hyp]
    colnames += ['collection']
    # NOTE: the previous encoding='utf-8' argument was removed -
    # pandas.read_excel never used it and pandas >= 1.0 raises
    # TypeError when it is passed.
    decisions = pandas.read_excel(decisions_file, skiprows=1)
    decisions.columns = colnames
    if not tidy:
        return(decisions)
    # melt the 27 Decision*/Confidence*/Similar* columns into long format
    decisions_long = pandas.melt(
        decisions,
        id_vars=['teamID'],
        value_vars=decisions.columns.values[1:28])
    # variable names look like 'Decision3': split into type and number
    decisions_long['vartype'] = [
        v[:-1] for v in decisions_long['variable']]
    decisions_long['varnum'] = [
        v[-1] for v in decisions_long['variable']]
    del decisions_long['variable']
    Decision_df = decisions_long.query('vartype =="Decision"')
    Similar_df = decisions_long.query('vartype =="Similar"')
    Confidence_df = decisions_long.query('vartype =="Confidence"')
    # merge the three variable types back into one row per team/hypothesis
    decision_df = Decision_df.merge(
        Similar_df,
        'left',
        on=['teamID', 'varnum'],
        suffixes=['_decision', '_similar']).merge(
            Confidence_df, 'left', on=['teamID', 'varnum'])
    del decision_df['vartype_decision']
    del decision_df['vartype_similar']
    del decision_df['vartype']
    decision_df.columns = ['teamID', 'Decision',
                           'varnum', 'Similar', 'Confidence']
    # recode to numeric values
    decision_df['Decision'] = (
        decision_df['Decision'] == 'Yes').astype('int')
    decision_df['Similar'] = decision_df['Similar'].astype('int')
    decision_df['Confidence'] = decision_df['Confidence'].astype('int')
    return(decision_df)
def get_merged_metadata_decisions(metadata_file, decisions_file,):
    """Return tidy per-hypothesis decisions left-joined with team metadata."""
    tidy_decisions = get_decisions(decisions_file, tidy=True)
    team_metadata = get_metadata(metadata_file)
    return(tidy_decisions.merge(team_metadata, on='teamID', how='left'))
def get_teamID_to_collectionID_dict(metadata):
    """Map each team ID to its NeuroVault collection identifier.

    Keys are the first whitespace-separated token of each metadata
    index entry; values are '<NV_collection_string>_<teamID>'.
    """
    mapping = {}
    for full_id in metadata.index:
        team = full_id.split()[0]
        collection = metadata.loc[full_id, 'NV_collection_string'].strip()
        mapping[team] = '%s_%s' % (collection, team)
    return(mapping)
def TtoZ(tmapfile, outfile, df):
    """Convert a t-statistic map to a z-statistic map.

    Uses Hughett's transform: each tail is converted through the
    t CDF and the inverse normal CDF separately, which avoids
    saturation of the CDF for large |t| values.

    Parameters
    ----------
    tmapfile : str
        Path to the input t map (NIfTI file).
    outfile : str
        Path for the output z map.
    df : int
        Degrees of freedom of the t statistics.

    adapted from:
    https://github.com/vsoch/TtoZ/blob/master/TtoZ/scripts.py

    Notes
    -----
    The deprecated/removed nibabel accessors ``get_data()``,
    ``get_affine()`` and ``get_header()`` were replaced with
    ``dataobj``, ``affine`` and ``header``.
    """
    mr = nibabel.load(tmapfile)
    data = numpy.asanyarray(mr.dataobj)
    # only transform the nonzero voxels; zeros stay zero in the output
    nonzero = data[data != 0]
    Z = numpy.zeros(len(nonzero))
    # split into the two tails
    k1 = (nonzero <= 0)
    k2 = (nonzero > 0)
    t1 = nonzero[k1]
    t2 = nonzero[k2]
    # lower tail: P(T <= t), then invert through the normal CDF
    p_values_t1 = t.cdf(t1, df=df)
    Z[k1] = norm.ppf(p_values_t1)
    # upper tail: use the mirrored CDF to stay accurate for large t
    p_values_t2 = t.cdf(-t2, df=df)
    Z[k2] = -norm.ppf(p_values_t2)
    # write the z values back into the original voxel locations
    empty_nii = numpy.zeros(mr.shape)
    empty_nii[data != 0] = Z
    Z_nii_fixed = nibabel.nifti1.Nifti1Image(
        empty_nii,
        affine=mr.affine,
        header=mr.header)
    nibabel.save(Z_nii_fixed, outfile)
def t_corr(y, res_mean=None, res_var=None, Q=None):
    """
    Perform a one-sample t-test on correlated data.

    y : data (n observations X n vars)
    res_mean : common mean over voxels and results (defaults to 0)
    res_var : common variance over voxels and results (defaults to 1)
    Q : "known" n x n correlation across observations
        (defaults to the identity; use the empirical correlation
        based on the maps)

    Returns (T, p): the test statistic and the upper-tail normal
    probability, assuming the variance is estimated on the whole
    image with effectively infinite degrees of freedom.
    """
    npts = y.shape[0]
    if res_mean is None:
        res_mean = 0
    if res_var is None:
        res_var = 1
    if Q is None:
        Q = numpy.eye(npts)
    # variance of the mean under the assumed correlation structure
    design = numpy.ones((npts, 1))
    VarMean = res_var * design.T.dot(Q).dot(design) / npts**2
    centered = numpy.mean(y, 0) - res_mean
    T = centered / numpy.sqrt(VarMean) * numpy.sqrt(res_var) + res_mean
    p = 1 - scipy.stats.norm.cdf(T)
    return(T, p)
def randn_from_shape(shape):
    """
    Given a 4-tuple describing a 4d matrix shape, return a
    standard-normal random matrix of that shape.
    """
    assert len(shape) == 4
    return(numpy.random.randn(*shape))
def matrix_kappa_score(d):
    """
    Return the symmetric matrix of pairwise Cohen's kappa scores
    between the rows of d, with the diagonal fixed at 1.
    """
    nrows = d.shape[0]
    score = numpy.eye(nrows)
    for row_i in range(nrows):
        for row_j in range(row_i + 1, nrows):
            kappa = cohen_kappa_score(d[row_i, :], d[row_j, :])
            score[row_i, row_j] = kappa
            score[row_j, row_i] = kappa
    return(score)
def matrix_pct_agreement(d):
    """
    Return the symmetric matrix of pairwise percent agreement between
    the rows of d: entry (i, j) is the fraction of columns where rows
    i and j are equal.  The diagonal is fixed at 1.
    """
    # (previous docstring incorrectly described this as Cohen's kappa)
    nrows = d.shape[0]
    score = numpy.eye(nrows)
    for i in range(nrows):
        for j in range(i + 1, nrows):
            agreement = numpy.mean(d[i, :] == d[j, :])
            score[i, j] = agreement
            score[j, i] = agreement
    return(score)
| [
"numpy.sqrt",
"pandas.read_csv",
"nibabel.load",
"pandas.read_excel",
"os.remove",
"os.path.exists",
"numpy.mean",
"pandas.melt",
"scipy.stats.t.cdf",
"numpy.eye",
"nibabel.save",
"numpy.ones",
"scipy.stats.norm.ppf",
"os.path.dirname",
"numpy.random.randn",
"os.path.join",
"sklearn.... | [((1838, 1864), 'numpy.nan_to_num', 'numpy.nan_to_num', (['maskdata'], {}), '(maskdata)\n', (1854, 1864), False, 'import numpy\n'), ((2549, 2638), 'os.path.join', 'os.path.join', (['output_dir', "('%s_concat_%s' % (imgtype, dataset))", "('hypo%d.nii.gz' % hyp)"], {}), "(output_dir, '%s_concat_%s' % (imgtype, dataset), \n 'hypo%d.nii.gz' % hyp)\n", (2561, 2638), False, 'import os\n'), ((2670, 2697), 'os.path.exists', 'os.path.exists', (['concat_file'], {}), '(concat_file)\n', (2684, 2697), False, 'import os\n'), ((2768, 2793), 'os.path.exists', 'os.path.exists', (['labelfile'], {}), '(labelfile)\n', (2782, 2793), False, 'import os\n'), ((3092, 3118), 'numpy.nan_to_num', 'numpy.nan_to_num', (['maskdata'], {}), '(maskdata)\n', (3108, 3118), False, 'import numpy\n'), ((3912, 3954), 'pandas.read_excel', 'pandas.read_excel', (['metadata_file'], {'header': '(1)'}), '(metadata_file, header=1)\n', (3929, 3954), False, 'import pandas\n'), ((4865, 5075), 'pandas.read_csv', 'pandas.read_csv', (['map_metadata_file'], {'names': "['timestamp', 'teamID', 'software', 'unthresh_type', 'thresh_type',\n 'MNItemplate', 'hyp5_direction', 'hyp6_direction', 'hyp9_direction',\n 'comments']", 'skiprows': '(1)'}), "(map_metadata_file, names=['timestamp', 'teamID', 'software',\n 'unthresh_type', 'thresh_type', 'MNItemplate', 'hyp5_direction',\n 'hyp6_direction', 'hyp9_direction', 'comments'], skiprows=1)\n", (4880, 5075), False, 'import pandas\n'), ((6081, 6144), 'pandas.read_excel', 'pandas.read_excel', (['decisions_file'], {'skiprows': '(1)', 'encoding': '"""utf-8"""'}), "(decisions_file, skiprows=1, encoding='utf-8')\n", (6098, 6144), False, 'import pandas\n'), ((8550, 8572), 'nibabel.load', 'nibabel.load', (['tmapfile'], {}), '(tmapfile)\n', (8562, 8572), False, 'import nibabel\n'), ((9012, 9028), 'scipy.stats.t.cdf', 't.cdf', (['t1'], {'df': 'df'}), '(t1, df=df)\n', (9017, 9028), False, 'from scipy.stats import norm, t\n'), ((9047, 9068), 'scipy.stats.norm.ppf', 
'norm.ppf', (['p_values_t1'], {}), '(p_values_t1)\n', (9055, 9068), False, 'from scipy.stats import norm, t\n'), ((9121, 9138), 'scipy.stats.t.cdf', 't.cdf', (['(-t2)'], {'df': 'df'}), '(-t2, df=df)\n', (9126, 9138), False, 'from scipy.stats import norm, t\n'), ((9275, 9296), 'numpy.zeros', 'numpy.zeros', (['mr.shape'], {}), '(mr.shape)\n', (9286, 9296), False, 'import numpy\n'), ((9468, 9502), 'nibabel.save', 'nibabel.save', (['Z_nii_fixed', 'outfile'], {}), '(Z_nii_fixed, outfile)\n', (9480, 9502), False, 'import nibabel\n'), ((9896, 9917), 'numpy.ones', 'numpy.ones', (['(npts, 1)'], {}), '((npts, 1))\n', (9906, 9917), False, 'import numpy\n'), ((10610, 10668), 'numpy.random.randn', 'numpy.random.randn', (['shape[0]', 'shape[1]', 'shape[2]', 'shape[3]'], {}), '(shape[0], shape[1], shape[2], shape[3])\n', (10628, 10668), False, 'import numpy\n'), ((10835, 10856), 'numpy.eye', 'numpy.eye', (['d.shape[0]'], {}), '(d.shape[0])\n', (10844, 10856), False, 'import numpy\n'), ((11168, 11189), 'numpy.eye', 'numpy.eye', (['d.shape[0]'], {}), '(d.shape[0])\n', (11177, 11189), False, 'import numpy\n'), ((741, 762), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (755, 762), False, 'import os\n'), ((772, 788), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (781, 788), False, 'import os\n'), ((6277, 6367), 'pandas.melt', 'pandas.melt', (['decisions'], {'id_vars': "['teamID']", 'value_vars': 'decisions.columns.values[1:28]'}), "(decisions, id_vars=['teamID'], value_vars=decisions.columns.\n values[1:28])\n", (6288, 6367), False, 'import pandas\n'), ((9158, 9179), 'scipy.stats.norm.ppf', 'norm.ppf', (['p_values_t2'], {}), '(p_values_t2)\n', (9166, 9179), False, 'from scipy.stats import norm, t\n'), ((10041, 10056), 'numpy.eye', 'numpy.eye', (['npts'], {}), '(npts)\n', (10050, 10056), False, 'import numpy\n'), ((1373, 1445), 'os.path.join', 'os.path.join', (['output_dir', "('%s/*/hypo%d_unthresh.nii.gz' % (dataset, hyp))"], {}), "(output_dir, 
'%s/*/hypo%d_unthresh.nii.gz' % (dataset, hyp))\n", (1385, 1445), False, 'import os\n'), ((10254, 10273), 'numpy.sqrt', 'numpy.sqrt', (['res_var'], {}), '(res_var)\n', (10264, 10273), False, 'import numpy\n'), ((10958, 10993), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['d[i, :]', 'd[j, :]'], {}), '(d[i, :], d[j, :])\n', (10975, 10993), False, 'from sklearn.metrics import cohen_kappa_score\n'), ((11291, 11321), 'numpy.mean', 'numpy.mean', (['(d[i, :] == d[j, :])'], {}), '(d[i, :] == d[j, :])\n', (11301, 11321), False, 'import numpy\n'), ((1528, 1598), 'os.path.join', 'os.path.join', (['output_dir', "('%s/*/hypo%d_thresh.nii.gz' % (dataset, hyp))"], {}), "(output_dir, '%s/*/hypo%d_thresh.nii.gz' % (dataset, hyp))\n", (1540, 1598), False, 'import os\n'), ((10234, 10253), 'numpy.sqrt', 'numpy.sqrt', (['VarMean'], {}), '(VarMean)\n', (10244, 10253), False, 'import numpy\n'), ((10197, 10213), 'numpy.mean', 'numpy.mean', (['y', '(0)'], {}), '(y, 0)\n', (10207, 10213), False, 'import numpy\n'), ((1129, 1143), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1141, 1143), False, 'from datetime import datetime\n'), ((1977, 1995), 'os.path.dirname', 'os.path.dirname', (['i'], {}), '(i)\n', (1992, 1995), False, 'import os\n'), ((3717, 3764), 'numpy.sum', 'numpy.sum', (['(voxmaskdata[0, :] >= vox_mask_thresh)'], {}), '(voxmaskdata[0, :] >= vox_mask_thresh)\n', (3726, 3764), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""Diagnostic log-log plots for the tail of a Pareto sample.

Draws ``nobs`` samples from a Pareto(1) distribution and produces three
log-log plots:

1. a binned histogram of the (clipped) sample,
2. the empirical survival counts (observations above each bin edge),
3. the rank plot (sorted values vs. number of observations at least that
   large), whose log-log regression slope estimates the tail index.

Created on Sun Aug 01 19:20:16 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

nobs = 1000
r = stats.pareto.rvs(1, size=nobs)

# Clip extreme draws so the 50 histogram bins cover a bounded range.
rhisto, e = np.histogram(np.clip(r, 0, 1000), bins=50)

# Histogram counts plotted at the bin midpoints.
plt.figure()
plt.loglog(e[:-1] + np.diff(e) / 2, rhisto, '-o')

# Empirical survival counts: number of observations above each bin's
# upper edge.
plt.figure()
plt.loglog(e[:-1] + np.diff(e) / 2, nobs - rhisto.cumsum(), '-o')

# Rank plot: sorted sample values against the count of observations that
# are at least as large. For a Pareto tail this is close to a straight
# line in log-log space; the regression slope estimates (minus) the tail
# index.
rsind = np.argsort(r)
rs = r[rsind]
plt.figure()
plt.loglog(rs, nobs - np.arange(nobs), '-o')
print(stats.linregress(np.log(rs), np.log(nobs - np.arange(nobs))))
plt.show()
| [
"numpy.clip",
"scipy.stats.pareto.rvs",
"numpy.log",
"numpy.diff",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((182, 212), 'scipy.stats.pareto.rvs', 'stats.pareto.rvs', (['(1)'], {'size': 'nobs'}), '(1, size=nobs)\n', (198, 212), False, 'from scipy import stats\n'), ((305, 317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (315, 317), True, 'import matplotlib.pyplot as plt\n'), ((364, 376), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (374, 376), True, 'import matplotlib.pyplot as plt\n'), ((595, 608), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (605, 608), True, 'import numpy as np\n'), ((650, 662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (660, 662), True, 'import matplotlib.pyplot as plt\n'), ((773, 783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (781, 783), True, 'import matplotlib.pyplot as plt\n'), ((274, 293), 'numpy.clip', 'np.clip', (['r', '(0)', '(1000)'], {}), '(r, 0, 1000)\n', (281, 293), True, 'import numpy as np\n'), ((683, 698), 'numpy.arange', 'np.arange', (['nobs'], {}), '(nobs)\n', (692, 698), True, 'import numpy as np\n'), ((729, 739), 'numpy.log', 'np.log', (['rs'], {}), '(rs)\n', (735, 739), True, 'import numpy as np\n'), ((336, 346), 'numpy.diff', 'np.diff', (['e'], {}), '(e)\n', (343, 346), True, 'import numpy as np\n'), ((395, 405), 'numpy.diff', 'np.diff', (['e'], {}), '(e)\n', (402, 405), True, 'import numpy as np\n'), ((753, 768), 'numpy.arange', 'np.arange', (['nobs'], {}), '(nobs)\n', (762, 768), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Created on Mon Oct 19.
@author: dejh
"""
import numpy as np
from landlab import Component
class SteepnessFinder(Component):
    """This component calculates steepness indices, sensu Wobus et al. 2006,
    for a Landlab landscape. Follows broadly the approach used in
    GeomorphTools, geomorphtools.org.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import FlowAccumulator, FastscapeEroder
    >>> from landlab.components import SteepnessFinder
    >>> mg = RasterModelGrid((3, 10), xy_spacing=100.)
    >>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
    ...               mg.nodes_at_top_edge):
    ...     mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
    >>> _ = mg.add_zeros("topographic__elevation", at="node")
    >>> mg.at_node['topographic__elevation'][mg.core_nodes] = mg.node_x[
    ...     mg.core_nodes]/1000.
    >>> fr = FlowAccumulator(mg, flow_director='D8')
    >>> sp = FastscapeEroder(mg, K_sp=0.01)
    >>> sf = SteepnessFinder(mg, min_drainage_area=10000.)
    >>> for i in range(10):
    ...     mg.at_node['topographic__elevation'][mg.core_nodes] += 10.
    ...     _ = fr.run_one_step()
    ...     sp.run_one_step(1000.)
    >>> sf.calculate_steepnesses()
    >>> mg.at_node['channel__steepness_index'].reshape((3, 10))[1, :]
    array([  0.        ,  29.28427125,   1.        ,   1.        ,
             1.        ,   1.        ,   1.        ,   1.        ,
             0.99999997,   0.        ])
    >>> sf.hillslope_mask
    array([ True,  True,  True,  True,  True,  True,  True,  True,  True,
            True, False, False, False, False, False, False, False, False,
           False,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True], dtype=bool)

    >>> sf = SteepnessFinder(mg, min_drainage_area=10000., discretization_length=350.)
    >>> sf.calculate_steepnesses()
    >>> mg.at_node['channel__steepness_index'].reshape((3, 10))[1, :]
    array([ 0.        ,  3.08232295,  3.08232295,  3.08232295,  1.        ,
            1.        ,  1.        ,  1.        ,  0.        ,  0.        ])

    >>> sf = SteepnessFinder(mg, min_drainage_area=10000., elev_step=1.5)
    >>> sf.calculate_steepnesses()
    >>> mg.at_node['channel__steepness_index'].reshape((3, 10))[1, :]
    array([ 0.        ,  1.22673541,  1.2593727 ,  1.27781936,  1.25659369,
            1.12393156,  0.97335328,  0.79473963,  0.56196578,  0.        ])

    References
    ----------
    **Required Software Citation(s) Specific to this Component**

    None Listed

    **Additional References**

    <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
    <NAME>., <NAME>., and <NAME>.: Tectonics from topography:
    Procedures, promise, and pitfalls, in: Tectonics, Climate, and Landscape
    Evolution, edited by: <NAME>., <NAME>., <NAME>., and
    <NAME>., Geological Society of America Special Paper 398, Geological
    Society of America, Boulder, CO, USA, 55–74, 2006.
    """

    _name = "SteepnessFinder"

    _unit_agnostic = True

    # Field metadata: which grid fields this component reads ("in") and
    # creates ("out"), with units and grid-element mapping.
    _info = {
        "channel__steepness_index": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "variable",
            "mapping": "node",
            "doc": "the local steepness index",
        },
        "drainage_area": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "m**2",
            "mapping": "node",
            "doc": "Upstream accumulated surface area contributing to the node's discharge",
        },
        "flow__link_to_receiver_node": {
            "dtype": int,
            "intent": "in",
            "optional": False,
            "units": "-",
            "mapping": "node",
            "doc": "ID of link downstream of each node, which carries the discharge",
        },
        "flow__receiver_node": {
            "dtype": int,
            "intent": "in",
            "optional": False,
            "units": "-",
            "mapping": "node",
            "doc": "Node array of receivers (node that receives flow from current node)",
        },
        "flow__upstream_node_order": {
            "dtype": int,
            "intent": "in",
            "optional": False,
            "units": "-",
            "mapping": "node",
            "doc": "Node array containing downstream-to-upstream ordered list of node IDs",
        },
        "topographic__elevation": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Land surface topographic elevation",
        },
        "topographic__steepest_slope": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "-",
            "mapping": "node",
            "doc": "The steepest *downhill* slope",
        },
    }

    def __init__(
        self,
        grid,
        reference_concavity=0.5,
        min_drainage_area=1.0e6,
        elev_step=0.0,
        discretization_length=0.0,
    ):
        """
        Parameters
        ----------
        grid : RasterModelGrid
            A landlab RasterModelGrid.
        reference_concavity : float
            The reference concavity to use in the calculation.
        min_drainage_area : float (m**2; default 1.e6)
            The minimum drainage area above which steepness indices are
            calculated.
            Defaults to 1.e6 m**2, per Wobus et al. 2006.
        elev_step : float (m; default 0.)
            If >0., becomes a vertical elevation change step to use to
            discretize the data (per Wobus). If 0., all nodes are used and
            no discretization happens.
        discretization_length : float (m; default 0.)
            If >0., becomes the lengthscale over which to segment the profiles -
            i.e., one different steepness index value is calculated every
            discretization_length. If only one (or no) points are present in a
            segment, it will be lumped together with the next segment.
            If zero, one value is assigned to each channel node.
        """
        super().__init__(grid)

        # This component only supports route-to-one flow directors.
        if grid.at_node["flow__receiver_node"].size != grid.size("node"):
            msg = (
                "A route-to-multiple flow director has been "
                "run on this grid. The landlab development team has not "
                "verified that SteepnessFinder is compatible with "
                "route-to-multiple methods. Please open a GitHub Issue "
                "to start this process."
            )
            raise NotImplementedError(msg)

        self._reftheta = reference_concavity
        self._min_drainage = min_drainage_area
        assert elev_step >= 0.0, "elev_step must be >= 0!"
        self._elev_step = elev_step
        self._discretization = discretization_length
        # Output field: one ksn value per node; stays 0 off-channel.
        self._ksn = self._grid.add_zeros(
            "channel__steepness_index", at="node", clobber=True
        )
        # Boolean mask, True where no steepness index is defined.
        self._mask = self._grid.ones("node", dtype=bool)
        # this one needs modifying if smooth_elev
        self._elev = self._grid.at_node["topographic__elevation"]

    def calculate_steepnesses(self):
        """This is the main method. Call it to calculate local steepness
        indices at all points with drainage areas greater than
        *min_drainage_area*.

        This "run" method can optionally take the same parameter set as
        provided at instantiation. If they are provided, they will override
        the existing values from instantiation.

        Normalized steepness of any node without a defined value is reported
        as 0. These nodes are also identified in the mask retrieved with
        :func:`hillslope_mask`.
        """
        # Reset outputs from any previous run.
        self._mask.fill(True)
        self._ksn.fill(0.0)

        reftheta = self._reftheta
        min_drainage = self._min_drainage
        elev_step = self._elev_step
        discretization_length = self._discretization

        upstr_order = self._grid.at_node["flow__upstream_node_order"]
        # get an array of only nodes with A above threshold:
        valid_dstr_order = (
            upstr_order[
                self._grid.at_node["drainage_area"][upstr_order] >= min_drainage
            ]
        )[::-1]
        # note elevs are guaranteed to be in order, UNLESS a fill
        # algorithm has been used.
        nodes_incorporated = self._grid.zeros("node", dtype=bool)
        # now do each poss channel in turn
        # get the head of the first (longest!) channel:
        for dstr_order_index in range(valid_dstr_order.size):
            this_ch_top_node = valid_dstr_order[dstr_order_index]  # top node
            if not nodes_incorporated[this_ch_top_node]:
                nodes_incorporated[this_ch_top_node] = True
                nodes_in_channel = [this_ch_top_node]
                penultimate_node = this_ch_top_node
                current_node_incorporated = False
                # Walk downstream until we reach a node already claimed by a
                # previously traced channel, or the end of the flow path.
                while not current_node_incorporated:
                    next_node = self._grid.at_node["flow__receiver_node"][
                        penultimate_node
                    ]
                    if next_node == penultimate_node:  # end of flow path
                        break
                    nodes_in_channel.append(next_node)
                    current_node_incorporated = nodes_incorporated[next_node]
                    # ^ this is a COPY op, so we're free to update the array
                    nodes_incorporated[next_node] = True
                    penultimate_node = next_node
                # by here, we have a full, unique reach in nodes_in_channel
                # it incorporates a single, duplicate node at the lower end
                # Now, if this segment long enough?
                if elev_step:
                    top_elev = self._elev[nodes_in_channel[0]]
                    base_elev = self._elev[nodes_in_channel[-1]]
                    # work up the channel from the base to make new interp pts
                    interp_pt_elevs = np.arange(base_elev, top_elev, elev_step)
                    if interp_pt_elevs.size <= 1:
                        # <1 step; bail on this whole segment
                        # NOTE(review): this `break` abandons *all* remaining
                        # channels, not just the current one; a `continue`
                        # looks like it may have been intended -- confirm
                        # before changing.
                        break
                    # now we can fairly closely follow the Geomorphtools
                    # algorithm:
                    ch_nodes = np.array(nodes_in_channel)
                    # ^ this is top-to-bottom
                    ch_A = self._grid.at_node["drainage_area"][ch_nodes]
                    ch_dists = self.channel_distances_downstream(ch_nodes)
                    ch_S = self.interpolate_slopes_with_step(
                        ch_nodes, ch_dists, interp_pt_elevs
                    )
                else:
                    # all the nodes; much easier as links work
                    ch_nodes = np.array(nodes_in_channel)
                    ch_dists = self.channel_distances_downstream(ch_nodes)
                    ch_A = self._grid.at_node["drainage_area"][ch_nodes]
                    ch_S = self._grid.at_node["topographic__steepest_slope"][ch_nodes]
                    assert np.all(ch_S >= 0.0)
                # if we're doing spatial discretization, do it here:
                if discretization_length:
                    ch_ksn = self.calc_ksn_discretized(
                        ch_dists, ch_A, ch_S, reftheta, discretization_length
                    )
                else:  # not discretized
                    # also chopping off the final node, as above
                    log_A = np.log10(ch_A[:-1])
                    log_S = np.log10(ch_S[:-1])
                    # we're potentially propagating nans here if S<=0
                    log_ksn = log_S + reftheta * log_A
                    ch_ksn = 10.0 ** log_ksn
                # save the answers into the main arrays:
                assert np.all(self._mask[ch_nodes[:-1]])
                # Final node gets trimmed off...
                self._ksn[ch_nodes[:-1]] = ch_ksn
                self._mask[ch_nodes] = False
        # now a final sweep to remove any undefined ksn values:
        self._mask[self._ksn == -1.0] = True
        self._ksn[self._ksn == -1.0] = 0.0

    def channel_distances_downstream(self, ch_nodes):
        """Calculates distances downstream from top node of a defined flowpath.

        Parameters
        ----------
        ch_nodes : array of ints
            The nodes along a single defined flow path, starting upstream.

        Returns
        -------
        ch_dists : array of floats
            Distances downstream from top node of ch_nodes.

        Examples
        --------
        >>> import numpy as np
        >>> from landlab import RasterModelGrid
        >>> from landlab.components import FlowAccumulator
        >>> mg = RasterModelGrid((4,5), xy_spacing=(10., 5.))
        >>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
        ...               mg.nodes_at_top_edge):
        ...     mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
        >>> mg.status_at_node[[6, 12, 13, 14]] = mg.BC_NODE_IS_CLOSED
        >>> _ = mg.add_field("topographic__elevation", mg.node_x, at="node")
        >>> fr = FlowAccumulator(mg, flow_director='D8')
        >>> sf = SteepnessFinder(mg)
        >>> _ = fr.run_one_step()
        >>> ch_nodes = np.array([8, 7, 11, 10])
        >>> sf.channel_distances_downstream(ch_nodes)
        array([  0.        ,  10.        ,  21.18033989,  31.18033989])
        """
        ch_links = self._grid.at_node["flow__link_to_receiver_node"][ch_nodes]
        ch_dists = np.empty_like(ch_nodes, dtype=float)
        # dists from ch head, NOT drainage divide
        ch_dists[0] = 0.0
        # Cumulative sum of the D8 link lengths along the path gives the
        # downstream distance to every subsequent node.
        np.cumsum(self._grid.length_of_d8[ch_links[:-1]], out=ch_dists[1:])
        return ch_dists

    def interpolate_slopes_with_step(self, ch_nodes, ch_dists, interp_pt_elevs):
        """Maps slopes to nodes, interpolating within defined vertical
        intervals.

        This follows Geomorphtools' discretization methods. It is essentially a
        downwind map of the slopes.

        Parameters
        ----------
        ch_nodes : array of ints
            The nodes along a single defined flow path, starting upstream.
        ch_dists : array of floats
            Distances downstream from top node of ch_nodes.
        interp_pt_elevs : array of floats
            Elevations at the discretizing points along the profile, in order
            of increasing elevation.

        Returns
        -------
        ch_S : array of floats
            Interpolated slopes at each node in the flowpath (always positive).

        Examples
        --------
        >>> import numpy as np
        >>> from landlab import RasterModelGrid
        >>> from landlab.components import FlowAccumulator
        >>> mg = RasterModelGrid((3,10), xy_spacing=(10., 5.))
        >>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
        ...               mg.nodes_at_top_edge):
        ...     mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
        >>> _ = mg.add_field("topographic__elevation", mg.node_x**1.1, at="node")
        >>> fr = FlowAccumulator(mg, flow_director='D8')
        >>> sf = SteepnessFinder(mg)
        >>> _ = fr.run_one_step()
        >>> ch_nodes = np.arange(18, 9, -1)
        >>> ch_dists = sf.channel_distances_downstream(ch_nodes)
        >>> interp_pt_elevs = np.array([0., 30., 60., 90., 120.])
        >>> sf.interpolate_slopes_with_step(ch_nodes, ch_dists,
        ...                                 interp_pt_elevs)
        array([ 1.67970205,  1.67970205,  1.67970205,  1.65129294,  1.62115336,
                1.5811951 ,  1.53157521,  1.44240187,  1.36442227])
        >>> mg.at_node['topographic__steepest_slope'][ch_nodes]
        array([ 1.69383001,  1.66972677,  1.64200694,  1.60928598,  1.56915472,
                1.51678178,  1.43964028,  1.25892541,  0.        ])
        >>> mg.at_node['topographic__elevation'][:] = mg.node_x
        >>> interp_pt_elevs = np.array([0., 25., 50., 75., 80.])
        >>> sf.interpolate_slopes_with_step(ch_nodes, ch_dists,
        ...                                 interp_pt_elevs)
        array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.])
        """
        ch_z = self._grid.at_node["topographic__elevation"][ch_nodes]
        assert (
            ch_z[0] >= interp_pt_elevs[-1]
        ), "Highest interp_pt_elev must be below top channel node"
        # Distance along the channel of each interpolation elevation; the
        # profile arrays run top-to-bottom, hence the reversals for np.interp.
        interp_pt_x = np.interp(interp_pt_elevs, ch_z[::-1], ch_dists[::-1])
        interp_pt_S = np.empty_like(interp_pt_elevs)
        # now a downwind map of the slopes onto the nodes
        # slopes are defined positive
        z_diff = interp_pt_elevs[:-1] - interp_pt_elevs[1:]
        x_diff = interp_pt_x[1:] - interp_pt_x[:-1]
        np.divide(z_diff, x_diff, out=interp_pt_S[:-1])
        interp_pt_S[-1] = interp_pt_S[-2]
        # Map S back onto nodes
        ch_S = np.interp(ch_z, interp_pt_elevs, interp_pt_S)
        return ch_S

    def calc_ksn_discretized(
        self, ch_dists, ch_A, ch_S, ref_theta, discretization_length
    ):
        """Calculate normalized steepness index on defined channel segments.

        Every segment must have at least 2 nodes along it. If not, segments
        will be automatically merged to achieve this. The channel will be
        segmented starting at the *downstream* end.

        NB: The final node in the channel does not receive an index, as it
        either belongs to a longer, existing flow path, or it is a boundary
        node with S = 0. Neither works.

        Parameters
        ----------
        ch_dists : array of floats
            Distances downstream from top node of a single stream path.
        ch_A : array of floats
            Drainage areas at each node in the flowpath.
        ch_S : array of floats
            Slope at each node in the flowpath (defined as positive).
        ref_theta : float
            The reference concavity; must be positive.
        discretization_length : float (m)
            The streamwise length of each segment.

        Returns
        -------
        ch_ksn : array of floats
            The normalized steepness index at each node in the flowpath,
            EXCEPT THE LAST. (i.e., length is (ch_dists.size - 1)). Values
            will be the same within each defined segment.

        Examples
        --------
        >>> import numpy as np
        >>> from landlab import RasterModelGrid
        >>> from landlab.components import FlowAccumulator
        >>> from landlab.components import SteepnessFinder
        >>> mg = RasterModelGrid((3,10), xy_spacing=(10., 5.))
        >>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
        ...               mg.nodes_at_top_edge):
        ...     mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
        >>> _ = mg.add_field("topographic__elevation", mg.node_x, at="node")
        >>> fr = FlowAccumulator(mg, flow_director='D8')
        >>> sf = SteepnessFinder(mg)
        >>> _ = fr.run_one_step()
        >>> ch_nodes = np.arange(18, 9, -1)
        >>> ch_dists = sf.channel_distances_downstream(ch_nodes)
        >>> ch_A = mg.at_node['drainage_area'][ch_nodes]
        >>> ch_S = mg.at_node['topographic__steepest_slope'][ch_nodes]
        >>> ksn_25 = sf.calc_ksn_discretized(ch_dists, ch_A, ch_S, 0.5, 25.)
        >>> ksn_25.size == ch_dists.size - 1
        True
        >>> ksn_25
        array([ -1.        ,  11.0668192 ,  11.0668192 ,  15.70417802,
                15.70417802,  15.70417802,  19.3433642 ,  19.3433642 ])
        >>> ksn_10 = sf.calc_ksn_discretized(ch_dists, ch_A, ch_S, 0.5, 10.)
        >>> ksn_10
        array([  8.40896415,   8.40896415,  13.16074013,  13.16074013,
                16.5487546 ,  16.5487546 ,  19.3433642 ,  19.3433642 ])
        >>> ch_ksn_overdiscretized = sf.calc_ksn_discretized(
        ...     ch_dists, ch_A, ch_S, 0.5, 10.)
        >>> np.allclose(ch_ksn_overdiscretized, ksn_10)
        True
        """
        ch_ksn = np.empty_like(ch_A)
        # need to remove the influence of the final node in the seg,
        # as it reflects either the edge of the grid (S=0) or a point
        # after a confluence - hence the 0.000001
        seg_ends = np.arange(ch_dists[-1] - 0.000001, 0.0, -discretization_length)[::-1]
        # ^ counts up from 0, but terminates at the far end cleanly
        pts_in_each_seg = np.searchsorted(seg_ends, ch_dists)
        num_segs = pts_in_each_seg[-1]
        i = num_segs - 1  # the final pt is no longer included
        while i >= 0:
            old_i = i
            pts_in_seg = pts_in_each_seg == i
            num_pts_in_seg = int(pts_in_seg.sum())
            # if i == num_segs:
            #     true_pts_in_seg = pts_in_each_seg.copy()
            #     pts_in_each_seg[-1] = False
            # else:
            #     true_pts_in_seg = pts_in_each_seg
            # make sure there's always 2 pts in the seg...
            # Merge in upstream segments (decreasing i) until this segment
            # holds at least two points, so the log-mean is well defined.
            while num_pts_in_seg < 2:
                i -= 1
                pts_in_seg = np.logical_and(
                    pts_in_each_seg <= old_i, pts_in_each_seg >= i
                )
                num_pts_in_seg = int(pts_in_seg.sum())
                if i < 0:
                    break
            if num_pts_in_seg < 2:
                # must be at the end of the seg...
                # nodes in invalid segs at the end get ksn = -1.
                ch_ksn[pts_in_seg] = -1.0
                break
            seg_A = ch_A[pts_in_seg]
            seg_S = ch_S[pts_in_seg]
            logseg_A = np.log10(seg_A)
            logseg_S = np.log10(seg_S)
            meanlogseg_A = np.mean(logseg_A)
            meanlogseg_S = np.mean(logseg_S)
            # ksn = S * A**theta evaluated at the segment's log-mean A and S.
            logseg_ksn = meanlogseg_S + ref_theta * meanlogseg_A
            ch_ksn[pts_in_seg] = 10.0 ** logseg_ksn
            i -= 1

        return ch_ksn[:-1]

    @property
    def steepness_indices(self):
        """Return the array of channel steepness indices.

        Nodes not in the channel receive zeros.
        """
        return self._ksn

    @property
    def hillslope_mask(self):
        """Return a boolean array, False where steepness indices exist."""
        return self._mask

    @property
    def masked_steepness_indices(self):
        """Returns a masked array version of the 'channel__steepness_index'
        field. This enables easier plotting of the values with.

        :func:`landlab.imshow_grid_at_node` or similar.

        Examples
        --------
        Make a topographic map with an overlay of steepness values:

        >>> from landlab import imshow_grid_at_node
        >>> from landlab import RasterModelGrid
        >>> from landlab.components import FlowAccumulator, FastscapeEroder
        >>> from landlab.components import SteepnessFinder
        >>> mg = RasterModelGrid((5, 5), xy_spacing=100.)
        >>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
        ...               mg.nodes_at_top_edge):
        ...     mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
        >>> _ = mg.add_zeros("topographic__elevation", at="node")
        >>> mg.at_node['topographic__elevation'][mg.core_nodes] = mg.node_x[
        ...     mg.core_nodes]/1000.
        >>> np.random.seed(0)
        >>> mg.at_node['topographic__elevation'][
        ...     mg.core_nodes] += np.random.rand(mg.number_of_core_nodes)
        >>> fr = FlowAccumulator(mg, flow_director='D8')
        >>> sp = FastscapeEroder(mg, K_sp=0.01)
        >>> cf = SteepnessFinder(mg, min_drainage_area=20000.)
        >>> for i in range(10):
        ...     mg.at_node['topographic__elevation'][mg.core_nodes] += 10.
        ...     _ = fr.run_one_step()
        ...     sp.run_one_step(1000.)
        >>> _ = fr.run_one_step()
        >>> cf.calculate_steepnesses()
        >>> imshow_grid_at_node(mg, 'topographic__elevation',
        ...                     allow_colorbar=False)
        >>> imshow_grid_at_node(mg, cf.masked_steepness_indices,
        ...                     color_for_closed=None, cmap='winter')
        """
        return np.ma.array(self.steepness_indices, mask=self.hillslope_mask)
| [
"numpy.mean",
"numpy.log10",
"numpy.logical_and",
"numpy.ma.array",
"numpy.searchsorted",
"numpy.arange",
"numpy.array",
"numpy.empty_like",
"numpy.interp",
"numpy.cumsum",
"numpy.all",
"numpy.divide"
] | [((13743, 13779), 'numpy.empty_like', 'np.empty_like', (['ch_nodes'], {'dtype': 'float'}), '(ch_nodes, dtype=float)\n', (13756, 13779), True, 'import numpy as np\n'), ((13864, 13931), 'numpy.cumsum', 'np.cumsum', (['self._grid.length_of_d8[ch_links[:-1]]'], {'out': 'ch_dists[1:]'}), '(self._grid.length_of_d8[ch_links[:-1]], out=ch_dists[1:])\n', (13873, 13931), True, 'import numpy as np\n'), ((16613, 16667), 'numpy.interp', 'np.interp', (['interp_pt_elevs', 'ch_z[::-1]', 'ch_dists[::-1]'], {}), '(interp_pt_elevs, ch_z[::-1], ch_dists[::-1])\n', (16622, 16667), True, 'import numpy as np\n'), ((16690, 16720), 'numpy.empty_like', 'np.empty_like', (['interp_pt_elevs'], {}), '(interp_pt_elevs)\n', (16703, 16720), True, 'import numpy as np\n'), ((16937, 16984), 'numpy.divide', 'np.divide', (['z_diff', 'x_diff'], {'out': 'interp_pt_S[:-1]'}), '(z_diff, x_diff, out=interp_pt_S[:-1])\n', (16946, 16984), True, 'import numpy as np\n'), ((17074, 17119), 'numpy.interp', 'np.interp', (['ch_z', 'interp_pt_elevs', 'interp_pt_S'], {}), '(ch_z, interp_pt_elevs, interp_pt_S)\n', (17083, 17119), True, 'import numpy as np\n'), ((20173, 20192), 'numpy.empty_like', 'np.empty_like', (['ch_A'], {}), '(ch_A)\n', (20186, 20192), True, 'import numpy as np\n'), ((20565, 20600), 'numpy.searchsorted', 'np.searchsorted', (['seg_ends', 'ch_dists'], {}), '(seg_ends, ch_dists)\n', (20580, 20600), True, 'import numpy as np\n'), ((24244, 24305), 'numpy.ma.array', 'np.ma.array', (['self.steepness_indices'], {'mask': 'self.hillslope_mask'}), '(self.steepness_indices, mask=self.hillslope_mask)\n', (24255, 24305), True, 'import numpy as np\n'), ((20401, 20461), 'numpy.arange', 'np.arange', (['(ch_dists[-1] - 1e-06)', '(0.0)', '(-discretization_length)'], {}), '(ch_dists[-1] - 1e-06, 0.0, -discretization_length)\n', (20410, 20461), True, 'import numpy as np\n'), ((21722, 21737), 'numpy.log10', 'np.log10', (['seg_A'], {}), '(seg_A)\n', (21730, 21737), True, 'import numpy as np\n'), ((21761, 21776), 
'numpy.log10', 'np.log10', (['seg_S'], {}), '(seg_S)\n', (21769, 21776), True, 'import numpy as np\n'), ((21804, 21821), 'numpy.mean', 'np.mean', (['logseg_A'], {}), '(logseg_A)\n', (21811, 21821), True, 'import numpy as np\n'), ((21849, 21866), 'numpy.mean', 'np.mean', (['logseg_S'], {}), '(logseg_S)\n', (21856, 21866), True, 'import numpy as np\n'), ((12021, 12054), 'numpy.all', 'np.all', (['self._mask[ch_nodes[:-1]]'], {}), '(self._mask[ch_nodes[:-1]])\n', (12027, 12054), True, 'import numpy as np\n'), ((21202, 21264), 'numpy.logical_and', 'np.logical_and', (['(pts_in_each_seg <= old_i)', '(pts_in_each_seg >= i)'], {}), '(pts_in_each_seg <= old_i, pts_in_each_seg >= i)\n', (21216, 21264), True, 'import numpy as np\n'), ((10191, 10232), 'numpy.arange', 'np.arange', (['base_elev', 'top_elev', 'elev_step'], {}), '(base_elev, top_elev, elev_step)\n', (10200, 10232), True, 'import numpy as np\n'), ((10512, 10538), 'numpy.array', 'np.array', (['nodes_in_channel'], {}), '(nodes_in_channel)\n', (10520, 10538), True, 'import numpy as np\n'), ((10993, 11019), 'numpy.array', 'np.array', (['nodes_in_channel'], {}), '(nodes_in_channel)\n', (11001, 11019), True, 'import numpy as np\n'), ((11282, 11301), 'numpy.all', 'np.all', (['(ch_S >= 0.0)'], {}), '(ch_S >= 0.0)\n', (11288, 11301), True, 'import numpy as np\n'), ((11703, 11722), 'numpy.log10', 'np.log10', (['ch_A[:-1]'], {}), '(ch_A[:-1])\n', (11711, 11722), True, 'import numpy as np\n'), ((11751, 11770), 'numpy.log10', 'np.log10', (['ch_S[:-1]'], {}), '(ch_S[:-1])\n', (11759, 11770), True, 'import numpy as np\n')] |
import os
from typing import List, NamedTuple
import numpy as np
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from mcp.config.evaluation import BestWeightsMetric
from mcp.data.dataloader.dataloader import DataLoader, FewShotDataLoader
from mcp.model.base import Model
from mcp.result.logger import ResultLogger
from mcp.task.base import Task
from mcp.training.loop import TrainingLoop
from mcp.utils.logging import create_logger
# Module-level logger used for progress and checkpoint messages below.
logger = create_logger(__name__)


class TrainerLoggers(NamedTuple):
    """Bundle of the result loggers used by each phase of training.

    Holds one logger each for the main training loop, the few-shot
    support fitting, and the query-set evaluation, respectively.
    """

    train: ResultLogger
    support: ResultLogger
    evaluation: ResultLogger
class Trainer(object):
    """Orchestrates the training schedule.

    Each epoch runs one training phase on the training dataloader, then,
    for every few-shot validation dataloader, a support-fitting phase and
    an evaluation phase. After each epoch the model/tasks/optimizer state
    is checkpointed to ``save_path``, and only the ``num_checkpoints``
    best checkpoints (per ``checkpoint_metric``) are kept on disk.
    """

    def __init__(
        self,
        model: Model,
        optimizer_train: Optimizer,
        optimizer_support: Optimizer,
        scheduler_train: _LRScheduler,
        scheduler_support: _LRScheduler,
        dataloader_train: DataLoader,
        dataloader_valids: List[FewShotDataLoader],
        tasks_train: List[Task],
        tasks_valid: List[Task],
        epochs: int,
        training_loop: TrainingLoop,
        trainer_loggers: TrainerLoggers,
        device: torch.device,
        save_path: str,
        checkpoint_metric: BestWeightsMetric,
        num_checkpoints: int,
    ):
        self.model = model
        self.optimizer_train = optimizer_train
        self.optimizer_support = optimizer_support
        self.scheduler_train = scheduler_train
        self.scheduler_support = scheduler_support
        self.dataloader_train = dataloader_train
        self.dataloader_valids = dataloader_valids
        self.tasks_train = tasks_train
        self.tasks_valid = tasks_valid
        self.epochs = epochs
        self.training_loop = training_loop
        self.logger = trainer_loggers
        self.device = device
        self.save_path = save_path
        self.checkpoint_metric = checkpoint_metric
        self.num_checkpoints = num_checkpoints

        # Parallel lists recording, for every checkpoint currently kept on
        # disk, its validation metric and its epoch number (same order).
        self.valid_metrics: List[float] = []
        self.checkpoints: List[int] = []

    def fit(self, starting_epoch=0):
        """Run training from ``starting_epoch + 1`` through ``self.epochs``
        inclusive, checkpointing after every epoch."""
        # Move the model and all tasks onto the target device first.
        self.model.to(self.device)
        for task in self.tasks_train + self.tasks_valid:
            task.to(self.device)

        logger.info(
            f"Fitting the model | {self.model.num_trainable_parameters()} parameters | "
            + f"{len(self.dataloader_train)} train batches "
        )

        for epoch in range(starting_epoch + 1, self.epochs + 1):
            self._training_phase(epoch)

            metric = 0.0
            logger_support = self.logger.support.epoch(epoch, self.epochs)
            logger_eval = self.logger.evaluation.epoch(epoch, self.epochs)
            # Fit the support set, then evaluate the query set, for each
            # validation dataloader; the checkpoint metric is the average
            # across dataloaders.
            for dataloader_valid in self.dataloader_valids:
                self._training_support_phase(epoch, dataloader_valid, logger_support)
                metric += self._evaluation_phase(epoch, dataloader_valid, logger_eval)

            self._save_checkpoint(epoch, metric / len(self.dataloader_valids))

    def _training_phase(self, epoch):
        # One pass over the training dataloader, updating model weights.
        self.training_loop.fit_one(
            self.model,
            self.tasks_train,
            self.dataloader_train,
            self.optimizer_train,
            self.scheduler_train,
            self.logger.train.epoch(epoch, self.epochs),
            train_model=True,
        )

    def _training_support_phase(self, epoch, dataloader_valid, logger_support):
        # Fit the validation tasks on the few-shot support set (the model's
        # own training optimizer is not used here).
        self.training_loop.fit_support(
            self.model,
            self.tasks_valid,
            dataloader_valid.support,
            self.optimizer_support,
            self.scheduler_support,
            logger_support,
        )

    def _evaluation_phase(self, epoch, dataloader_valid, logger_eval) -> float:
        # Evaluate on the query set and return the scalar metric value.
        return self.training_loop.evaluate(
            self.model, self.tasks_valid, dataloader_valid.query, logger_eval,
        )

    def _save_checkpoint(self, epoch: int, metric: float):
        """Save a new checkpoint and evict the worst one if over budget."""
        logger.info(f"Saving checkpoint | epoch {epoch} - metric {metric}")
        self.valid_metrics.append(metric)
        self.checkpoints.append(epoch)
        self.save(epoch)

        # Indices of kept checkpoints, sorted by metric in ascending order.
        idxs = np.argsort(np.asarray(self.valid_metrics))

        if len(idxs) > self.num_checkpoints:
            # Worse metric ids
            # For a loss, higher is worse (last of the ascending sort); for
            # a score metric, lower is worse (first of the sort).
            if self.checkpoint_metric == BestWeightsMetric.LOSS:
                worse_metric_id = -1
            elif self.checkpoint_metric == BestWeightsMetric.METRIC:
                worse_metric_id = 0
            elif self.checkpoint_metric == BestWeightsMetric.TIME:
                # NOTE(review): for TIME the smallest value is evicted --
                # confirm this is the intended ordering for that metric.
                worse_metric_id = 0
            else:
                raise ValueError(f"Unsupported metric {self.checkpoint_metric}")

            # Remove the worst checkpoint's files and bookkeeping entries.
            idx = idxs[worse_metric_id]
            epoch = self.checkpoints[idx]
            metric = self.valid_metrics[idx]
            logger.info(f"Remove checkpoint | epoch {epoch} - metric {metric}")

            os.remove(self._trainer_path(epoch))
            os.remove(self._model_path(epoch))

            for task in self.tasks_train:
                os.remove(self._task_path(task.name, epoch))

            del self.valid_metrics[idx]
            del self.checkpoints[idx]

    def save(self, epoch: int):
        """Persist model, task, and trainer state for ``epoch`` to disk."""
        self.model.save(self._model_path(epoch))

        for task in self.tasks_train:
            task.save(self._task_path(task.name, epoch))

        torch.save(
            {
                "optimizer_state_dict": self.optimizer_train.state_dict(),
                "scheduler_state_dict": self.scheduler_train.state_dict(),
                "checkpoints": self.checkpoints,
                "valid_metrics": self.valid_metrics,
            },
            self._trainer_path(epoch),
        )

    def load(self, epoch: int):
        """Restore model, task, and trainer state saved for ``epoch``."""
        self.model.load(self._model_path(epoch), self.device)

        for task in self.tasks_train:
            task.load(self._task_path(task.name, epoch), self.device)

        trainer_checkpoint = torch.load(
            self._trainer_path(epoch), map_location=self.device
        )
        self.optimizer_train.load_state_dict(trainer_checkpoint["optimizer_state_dict"])
        self.scheduler_train.load_state_dict(trainer_checkpoint["scheduler_state_dict"])
        self.checkpoints = trainer_checkpoint["checkpoints"]
        self.valid_metrics = trainer_checkpoint["valid_metrics"]

    def _model_path(self, epoch: int) -> str:
        # Path of the saved model weights for a given epoch.
        return os.path.join(self.save_path, f"model-{epoch}.pth")

    def _task_path(self, name: str, epoch: int) -> str:
        # Path of a saved task's state for a given epoch.
        return os.path.join(self.save_path, f"task-{name}-{epoch}.pth")

    def _trainer_path(self, epoch: int) -> str:
        # Path of the saved optimizer/scheduler/bookkeeping state.
        return os.path.join(self.save_path, f"trainer-{epoch}.pth")
| [
"numpy.asarray",
"os.path.join",
"mcp.utils.logging.create_logger"
] | [((493, 516), 'mcp.utils.logging.create_logger', 'create_logger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'from mcp.utils.logging import create_logger\n'), ((6313, 6363), 'os.path.join', 'os.path.join', (['self.save_path', 'f"""model-{epoch}.pth"""'], {}), "(self.save_path, f'model-{epoch}.pth')\n", (6325, 6363), False, 'import os\n'), ((6436, 6492), 'os.path.join', 'os.path.join', (['self.save_path', 'f"""task-{name}-{epoch}.pth"""'], {}), "(self.save_path, f'task-{name}-{epoch}.pth')\n", (6448, 6492), False, 'import os\n'), ((6557, 6609), 'os.path.join', 'os.path.join', (['self.save_path', 'f"""trainer-{epoch}.pth"""'], {}), "(self.save_path, f'trainer-{epoch}.pth')\n", (6569, 6609), False, 'import os\n'), ((4093, 4123), 'numpy.asarray', 'np.asarray', (['self.valid_metrics'], {}), '(self.valid_metrics)\n', (4103, 4123), True, 'import numpy as np\n')] |
import numpy
from shapely.geometry import Point
import geopandas
def simulated_geo_points(in_data, needed, seed) -> geopandas.GeoDataFrame:
"""
Simulate points using a geopandas dataframse with geometry as reference.
Parameters
----------
in_data: geopandas.GeoDataFrame
the geodataframe containing the geometries
needed: int
how many points to generate
seed: int
number to initialize the random number generation
Returns
-------
geopandas.GeoDataFrame
Examples
--------
>>> import spaghetti
>>> from spopt.locate.util import simulated_geo_points
>>> lattice = spaghetti.regular_lattice((0, 0, 10, 10), 9, exterior=True)
>>> ntw = spaghetti.Network(in_data=lattice)
>>> street = spaghetti.element_as_gdf(ntw, arcs=True)
>>> street_buffered = geopandas.GeoDataFrame(
... geopandas.GeoSeries(street["geometry"].buffer(0.3).unary_union),
... crs=street.crs,
... columns=["geometry"])
>>> points_simulated = simulated_geo_points(street_buffered, needed=10, seed=1)
>>> type(points_simulated)
<class 'geopandas.geodataframe.GeoDataFrame'>
"""
geoms = in_data.geometry
area = tuple(
in_data.total_bounds
) # create a polygon with bounds to represent an area
simulated_points_list = []
simulated_points_all = False
numpy.random.seed(seed)
while simulated_points_all == False:
x = numpy.random.uniform(
area[0], area[2], 1
) # get coordinates x of area variable
y = numpy.random.uniform(
area[1], area[3], 1
) # get coordinates y of area variable
point = Point(x, y) # transform coordinates x, y into `shapely.geometry.Point`
if geoms.intersects(point)[0]: # check if the point belong to the network
simulated_points_list.append(point)
if (
len(simulated_points_list) == needed
): # check if the length of array of points simulated
# contains the number of points needed
simulated_points_all = True
sim_pts = geopandas.GeoDataFrame(
simulated_points_list, columns=["geometry"], crs=in_data.crs
) # transform the points array into geodataframe
return sim_pts
| [
"shapely.geometry.Point",
"geopandas.GeoDataFrame",
"numpy.random.seed",
"numpy.random.uniform"
] | [((1456, 1479), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (1473, 1479), False, 'import numpy\n'), ((2198, 2287), 'geopandas.GeoDataFrame', 'geopandas.GeoDataFrame', (['simulated_points_list'], {'columns': "['geometry']", 'crs': 'in_data.crs'}), "(simulated_points_list, columns=['geometry'], crs=\n in_data.crs)\n", (2220, 2287), False, 'import geopandas\n'), ((1533, 1574), 'numpy.random.uniform', 'numpy.random.uniform', (['area[0]', 'area[2]', '(1)'], {}), '(area[0], area[2], 1)\n', (1553, 1574), False, 'import numpy\n'), ((1647, 1688), 'numpy.random.uniform', 'numpy.random.uniform', (['area[1]', 'area[3]', '(1)'], {}), '(area[1], area[3], 1)\n', (1667, 1688), False, 'import numpy\n'), ((1765, 1776), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (1770, 1776), False, 'from shapely.geometry import Point\n')] |
import numpy as np
from math import ceil
def deriveSizeFromScale(img_shape, scale):
output_shape = []
for k in range(2):
output_shape.append(int(ceil(scale[k] * img_shape[k])))
return output_shape
def deriveScaleFromSize(img_shape_in, img_shape_out):
scale = []
for k in range(2):
scale.append(1.0 * img_shape_out[k] / img_shape_in[k])
return scale
def cubic(x):
x = np.array(x).astype(np.float64)
absx = np.absolute(x)
absx2 = np.multiply(absx, absx)
absx3 = np.multiply(absx2, absx)
f = np.multiply(1.5*absx3 - 2.5*absx2 + 1, absx <= 1) + np.multiply(-0.5*absx3 + 2.5*absx2 - 4*absx + 2, (1 < absx) & (absx <= 2))
return f
def contributions(in_length, out_length, scale, kernel, k_width):
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length+1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
def imresizemex(inimg, weights, indices, dim):
in_shape = inimg.shape
w_shape = weights.shape
out_shape = list(in_shape)
out_shape[dim] = w_shape[0]
outimg = np.zeros(out_shape)
if dim == 0:
for i_img in range(in_shape[1]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[ind, i_img].astype(np.float64)
outimg[i_w, i_img] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
elif dim == 1:
for i_img in range(in_shape[0]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[i_img, ind].astype(np.float64)
outimg[i_img, i_w] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def imresizevec(inimg, weights, indices, dim):
wshape = weights.shape
if dim == 0:
weights = weights.reshape((wshape[0], wshape[2], 1, 1))
outimg = np.sum(weights*((inimg[indices].squeeze(axis=1)).astype(np.float64)), axis=1)
elif dim == 1:
weights = weights.reshape((1, wshape[0], wshape[2], 1))
outimg = np.sum(weights*((inimg[:, indices].squeeze(axis=2)).astype(np.float64)), axis=2)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def resizeAlongDim(A, dim, weights, indices, mode="vec"):
if mode == "org":
out = imresizemex(A, weights, indices, dim)
else:
out = imresizevec(A, weights, indices, dim)
return out
def imresize(I, scalar_scale=None, output_shape=None, mode="vec"):
kernel = cubic
kernel_width = 4.0
# Fill scale and output_size
if scalar_scale is not None:
scalar_scale = float(scalar_scale)
scale = [scalar_scale, scalar_scale]
output_size = deriveSizeFromScale(I.shape, scale)
elif output_shape is not None:
scale = deriveScaleFromSize(I.shape, output_shape)
output_size = list(output_shape)
else:
print('Error: scalar_scale OR output_shape should be defined!')
return
scale_np = np.array(scale)
order = np.argsort(scale_np)
weights = []
indices = []
for k in range(2):
w, ind = contributions(I.shape[k], output_size[k], scale[k], kernel, kernel_width)
weights.append(w)
indices.append(ind)
B = np.copy(I)
flag2D = False
if B.ndim == 2:
B = np.expand_dims(B, axis=2)
flag2D = True
for k in range(2):
dim = order[k]
B = resizeAlongDim(B, dim, weights[dim], indices[dim], mode)
if flag2D:
B = np.squeeze(B, axis=2)
return B
def convertDouble2Byte(I):
B = np.clip(I, 0.0, 1.0)
B = 255*B
return np.around(B).astype(np.uint8)
if __name__ == '__main__':
import matplotlib.pyplot as plt
x = np.linspace(-2.5,2.5,100)
plt.figure(figsize=(10,10))
plt.plot(x,cubic(x))
plt.show()
x = np.linspace(-2,2,6)[1:-1]
w = 0.25*cubic(0.25*x)
w /= w.sum()
im = np.random.random((32,32))
im_small = imresize(im, 0.25)
| [
"numpy.clip",
"numpy.argsort",
"numpy.array",
"numpy.mod",
"numpy.arange",
"numpy.multiply",
"numpy.random.random",
"numpy.linspace",
"numpy.floor",
"numpy.any",
"numpy.squeeze",
"numpy.around",
"matplotlib.pyplot.show",
"numpy.copy",
"math.ceil",
"numpy.absolute",
"numpy.sum",
"nu... | [((456, 470), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (467, 470), True, 'import numpy as np\n'), ((483, 506), 'numpy.multiply', 'np.multiply', (['absx', 'absx'], {}), '(absx, absx)\n', (494, 506), True, 'import numpy as np\n'), ((519, 543), 'numpy.multiply', 'np.multiply', (['absx2', 'absx'], {}), '(absx2, absx)\n', (530, 543), True, 'import numpy as np\n'), ((1037, 1067), 'numpy.floor', 'np.floor', (['(u - kernel_width / 2)'], {}), '(u - kernel_width / 2)\n', (1045, 1067), True, 'import numpy as np\n'), ((1875, 1894), 'numpy.zeros', 'np.zeros', (['out_shape'], {}), '(out_shape)\n', (1883, 1894), True, 'import numpy as np\n'), ((4104, 4119), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (4112, 4119), True, 'import numpy as np\n'), ((4132, 4152), 'numpy.argsort', 'np.argsort', (['scale_np'], {}), '(scale_np)\n', (4142, 4152), True, 'import numpy as np\n'), ((4363, 4373), 'numpy.copy', 'np.copy', (['I'], {}), '(I)\n', (4370, 4373), True, 'import numpy as np\n'), ((4687, 4707), 'numpy.clip', 'np.clip', (['I', '(0.0)', '(1.0)'], {}), '(I, 0.0, 1.0)\n', (4694, 4707), True, 'import numpy as np\n'), ((4838, 4865), 'numpy.linspace', 'np.linspace', (['(-2.5)', '(2.5)', '(100)'], {}), '(-2.5, 2.5, 100)\n', (4849, 4865), True, 'import numpy as np\n'), ((4868, 4896), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4878, 4896), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5064), 'numpy.random.random', 'np.random.random', (['(32, 32)'], {}), '((32, 32))\n', (5054, 5064), True, 'import numpy as np\n'), ((552, 605), 'numpy.multiply', 'np.multiply', (['(1.5 * absx3 - 2.5 * absx2 + 1)', '(absx <= 1)'], {}), '(1.5 * absx3 - 2.5 * absx2 + 1, absx <= 1)\n', (563, 605), True, 'import numpy as np\n'), ((604, 689), 'numpy.multiply', 'np.multiply', (['(-0.5 * absx3 + 2.5 * 
absx2 - 4 * absx + 2)', '((1 < absx) & (absx <= 2))'], {}), '(-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2, (1 < absx) & (absx <= 2)\n )\n', (615, 689), True, 'import numpy as np\n'), ((1517, 1542), 'numpy.mod', 'np.mod', (['indices', 'aux.size'], {}), '(indices, aux.size)\n', (1523, 1542), True, 'import numpy as np\n'), ((1571, 1594), 'numpy.any', 'np.any', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (1577, 1594), True, 'import numpy as np\n'), ((2630, 2653), 'numpy.clip', 'np.clip', (['outimg', '(0)', '(255)'], {}), '(outimg, 0, 255)\n', (2637, 2653), True, 'import numpy as np\n'), ((3219, 3242), 'numpy.clip', 'np.clip', (['outimg', '(0)', '(255)'], {}), '(outimg, 0, 255)\n', (3226, 3242), True, 'import numpy as np\n'), ((4426, 4451), 'numpy.expand_dims', 'np.expand_dims', (['B'], {'axis': '(2)'}), '(B, axis=2)\n', (4440, 4451), True, 'import numpy as np\n'), ((4616, 4637), 'numpy.squeeze', 'np.squeeze', (['B'], {'axis': '(2)'}), '(B, axis=2)\n', (4626, 4637), True, 'import numpy as np\n'), ((4949, 4970), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(6)'], {}), '(-2, 2, 6)\n', (4960, 4970), True, 'import numpy as np\n'), ((414, 425), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (422, 425), True, 'import numpy as np\n'), ((938, 966), 'numpy.arange', 'np.arange', (['(1)', '(out_length + 1)'], {}), '(1, out_length + 1)\n', (947, 966), True, 'import numpy as np\n'), ((1080, 1098), 'math.ceil', 'ceil', (['kernel_width'], {}), '(kernel_width)\n', (1084, 1098), False, 'from math import ceil\n'), ((1114, 1142), 'numpy.expand_dims', 'np.expand_dims', (['left'], {'axis': '(1)'}), '(left, axis=1)\n', (1128, 1142), True, 'import numpy as np\n'), ((1145, 1157), 'numpy.arange', 'np.arange', (['P'], {}), '(P)\n', (1154, 1157), True, 'import numpy as np\n'), ((1360, 1383), 'numpy.sum', 'np.sum', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (1366, 1383), True, 'import numpy as np\n'), ((4733, 4745), 'numpy.around', 'np.around', (['B'], {}), '(B)\n', 
(4742, 4745), True, 'import numpy as np\n'), ((162, 191), 'math.ceil', 'ceil', (['(scale[k] * img_shape[k])'], {}), '(scale[k] * img_shape[k])\n', (166, 191), False, 'from math import ceil\n'), ((1242, 1267), 'numpy.expand_dims', 'np.expand_dims', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (1256, 1267), True, 'import numpy as np\n'), ((2669, 2686), 'numpy.around', 'np.around', (['outimg'], {}), '(outimg)\n', (2678, 2686), True, 'import numpy as np\n'), ((3258, 3275), 'numpy.around', 'np.around', (['outimg'], {}), '(outimg)\n', (3267, 3275), True, 'import numpy as np\n'), ((1420, 1440), 'numpy.arange', 'np.arange', (['in_length'], {}), '(in_length)\n', (1429, 1440), True, 'import numpy as np\n'), ((1442, 1479), 'numpy.arange', 'np.arange', (['(in_length - 1)', '(-1)'], {'step': '(-1)'}), '(in_length - 1, -1, step=-1)\n', (1451, 1479), True, 'import numpy as np\n'), ((2189, 2217), 'numpy.squeeze', 'np.squeeze', (['im_slice'], {'axis': '(0)'}), '(im_slice, axis=0)\n', (2199, 2217), True, 'import numpy as np\n'), ((2529, 2557), 'numpy.squeeze', 'np.squeeze', (['im_slice'], {'axis': '(0)'}), '(im_slice, axis=0)\n', (2539, 2557), True, 'import numpy as np\n')] |
import gym
import gym.spaces
import numpy as np
class NormalizeActionSpace(gym.ActionWrapper):
"""Normalize a Box action space to [-1, 1]^n."""
def __init__(self, env):
super().__init__(env)
assert isinstance(env.action_space, gym.spaces.Box)
self.action_space = gym.spaces.Box(
low=-np.ones_like(env.action_space.low),
high=np.ones_like(env.action_space.low),
)
def action(self, action):
# action is in [-1, 1]
action = action.copy()
# -> [0, 2]
action += 1
# -> [0, orig_high - orig_low]
action *= (self.env.action_space.high - self.env.action_space.low) / 2
# -> [orig_low, orig_high]
return action + self.env.action_space.low
| [
"numpy.ones_like"
] | [((384, 418), 'numpy.ones_like', 'np.ones_like', (['env.action_space.low'], {}), '(env.action_space.low)\n', (396, 418), True, 'import numpy as np\n'), ((331, 365), 'numpy.ones_like', 'np.ones_like', (['env.action_space.low'], {}), '(env.action_space.low)\n', (343, 365), True, 'import numpy as np\n')] |
import tensorflow as tf
import algos_tf14.models
from common import tr_helpers, experience, env_configurations
import numpy as np
import collections
import time
from collections import deque
from tensorboardX import SummaryWriter
from datetime import datetime
from algos_tf14.tensorflow_utils import TensorFlowVariables
from common.categorical import CategoricalQ
class DQNAgent:
def __init__(self, sess, base_name, observation_space, action_space, config, logger, central_state_space=None):
observation_shape = observation_space.shape
actions_num = action_space.n
self.config = config
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.games_to_track = tr_helpers.get_or_default(config, 'games_to_track', 100)
self.max_epochs = tr_helpers.get_or_default(self.config, 'max_epochs', 1e6)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
if self.is_adaptive_lr:
self.lr_threshold = config['lr_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, end_learning_rate=0.001, power=tr_helpers.get_or_default(config, 'decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, decay_rate = config['decay_rate'])
self.env_name = config['env_name']
self.network = config['network']
self.state_shape = observation_shape
self.actions_num = actions_num
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("%d, %H:%M:%S"))
self.epsilon = self.config['epsilon']
self.rewards_shaper = self.config['reward_shaper']
self.epsilon_processor = tr_helpers.LinearValueProcessor(self.config['epsilon'], self.config['min_epsilon'], self.config['epsilon_decay_frames'])
self.beta_processor = tr_helpers.LinearValueProcessor(self.config['priority_beta'], self.config['max_beta'], self.config['beta_decay_frames'])
if self.env_name:
self.env = env_configurations.configurations[self.env_name]['env_creator'](name=config['name'])
self.sess = sess
self.steps_num = self.config['steps_num']
self.states = deque([], maxlen=self.steps_num)
self.is_prioritized = config['replay_buffer_type'] != 'normal'
self.atoms_num = self.config['atoms_num']
self.is_categorical = self.atoms_num > 1
if self.is_categorical:
self.v_min = self.config['v_min']
self.v_max = self.config['v_max']
self.delta_z = (self.v_max - self.v_min) / (self.atoms_num - 1)
self.all_z = tf.range(self.v_min, self.v_max + self.delta_z, self.delta_z)
self.categorical = CategoricalQ(self.atoms_num, self.v_min, self.v_max)
self.n_agents = self.env.env_info['n_agents']
if not self.is_prioritized:
self.exp_buffer = experience.ReplayBuffer(config['replay_buffer_size'], observation_space, self.n_agents)
else:
self.exp_buffer = experience.PrioritizedReplayBuffer(config['replay_buffer_size'], config['priority_alpha'], observation_space, self.n_agents)
self.sample_weights_ph = tf.placeholder(tf.float32, shape= [None,] , name='sample_weights')
self.obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'obs_ph')
self.actions_ph = tf.placeholder(tf.int32, shape=[None,], name = 'actions_ph')
self.rewards_ph = tf.placeholder(tf.float32, shape=[None,], name = 'rewards_ph')
self.next_obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'next_obs_ph')
self.is_done_ph = tf.placeholder(tf.float32, shape=[None,], name = 'is_done_ph')
self.is_not_done = 1 - self.is_done_ph
self.name = base_name
self.gamma = self.config['gamma']
self.gamma_step = self.gamma**self.steps_num
self.input_obs = self.obs_ph
self.input_next_obs = self.next_obs_ph
if observation_space.dtype == np.uint8:
print('scaling obs')
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_next_obs = tf.to_float(self.input_next_obs) / 255.0
if self.atoms_num == 1:
self.setup_qvalues(actions_num)
else:
self.setup_cat_qvalues(actions_num)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
self.saver = tf.train.Saver()
self.assigns_op = [tf.assign(w_target, w_self, validate_shape=True) for w_self, w_target in zip(self.weights, self.target_weights)]
self.variables = TensorFlowVariables(self.qvalues, self.sess)
if self.env_name:
sess.run(tf.global_variables_initializer())
self._reset()
def _get_q(self, probs):
res = probs * self.all_z
return tf.reduce_sum(res, axis=2)
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def setup_cat_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.logits = self.network(config, reuse=False)
self.qvalues_c = tf.nn.softmax(self.logits, axis = 2)
self.qvalues = self._get_q(self.qvalues_c)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_logits = self.network(config, reuse=False)
self.target_qvalues_c = tf.nn.softmax(self.target_logits, axis = 2)
self.target_qvalues = self._get_q(self.target_qvalues_c)
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_logits = tf.stop_gradient(self.network(config, reuse=True))
self.next_qvalues_c = tf.nn.softmax(self.next_logits, axis = 2)
self.next_qvalues = self._get_q(self.next_qvalues_c)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_values = tf.reduce_sum(tf.expand_dims(tf.one_hot(self.actions_ph, actions_num), -1) * self.logits, reduction_indices = (1,))
if self.config['is_double'] == True:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
else:
self.next_selected_actions = tf.argmax(self.target_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
self.proj_dir_ph = tf.placeholder(tf.float32, shape=[None, self.atoms_num], name = 'best_proj_dir')
log_probs = tf.nn.log_softmax( self.current_action_values, axis=1)
if self.is_prioritized:
# we need to return loss to update priority buffer
self.abs_errors = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1) + 1e-5
self.td_loss = self.abs_errors * self.sample_weights_ph
else:
self.td_loss = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1)
self.td_loss_mean = tf.reduce_mean(self.td_loss)
def setup_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.qvalues = self.network(config, reuse=False)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_qvalues = tf.stop_gradient(self.network(config, reuse=False))
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_qvalues = tf.stop_gradient(self.network(config, reuse=True))
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_qvalues = tf.reduce_sum(tf.one_hot(self.actions_ph, actions_num) * self.qvalues, reduction_indices = 1)
if self.config['is_double'] == True:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( self.target_qvalues * self.next_selected_actions_onehot , reduction_indices=[1,] ))
else:
self.next_state_values_target = tf.stop_gradient(tf.reduce_max(self.target_qvalues, reduction_indices=1))
self.reference_qvalues = self.rewards_ph + self.gamma_step *self.is_not_done * self.next_state_values_target
if self.is_prioritized:
# we need to return l1 loss to update priority buffer
self.abs_errors = tf.abs(self.current_action_qvalues - self.reference_qvalues) + 1e-5
# the same as multiply gradients later (other way is used in different examples over internet)
self.td_loss = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.NONE) * self.sample_weights_ph
self.td_loss_mean = tf.reduce_mean(self.td_loss)
else:
self.td_loss_mean = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.MEAN)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
if self.env_name:
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def _reset(self):
self.states.clear()
if self.env_name:
self.state = self.env.reset()
self.total_reward = 0.0
self.total_shaped_reward = 0.0
self.step_count = 0
def get_qvalues(self, state):
return self.sess.run(self.qvalues, {self.obs_ph: state})
def get_action(self, state, epsilon=0.0):
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
qvals = self.get_qvalues([state])
action = np.argmax(qvals)
return action
def play_steps(self, steps, epsilon=0.0):
done_reward = None
done_shaped_reward = None
done_steps = None
steps_rewards = 0
cur_gamma = 1
cur_states_len = len(self.states)
# always break after one
while True:
if cur_states_len > 0:
state = self.states[-1][0]
else:
state = self.state
action = self.get_action(state, epsilon)
new_state, reward, is_done, _ = self.env.step(action)
#reward = reward * (1 - is_done)
self.step_count += 1
self.total_reward += reward
shaped_reward = self.rewards_shaper(reward)
self.total_shaped_reward += shaped_reward
self.states.append([new_state, action, shaped_reward])
if len(self.states) < steps:
break
for i in range(steps):
sreward = self.states[i][2]
steps_rewards += sreward * cur_gamma
cur_gamma = cur_gamma * self.gamma
next_state, current_action, _ = self.states[0]
self.exp_buffer.add(self.state, current_action, steps_rewards, new_state, is_done)
self.state = next_state
break
if is_done:
done_reward = self.total_reward
done_steps = self.step_count
done_shaped_reward = self.total_shaped_reward
self._reset()
return done_reward, done_shaped_reward, done_steps
def load_weigths_into_target_network(self):
self.sess.run(self.assigns_op)
def sample_batch(self, exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch
}
def sample_prioritized_batch(self, exp_replay, batch_size, beta):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, sample_weights, sample_idxes = exp_replay.sample(batch_size, beta)
batch = { self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch, self.sample_weights_ph: sample_weights }
return [batch , sample_idxes]
def train(self):
mem_free_steps = 0
last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
self.load_weigths_into_target_network()
for _ in range(0, self.config['num_steps_fill_buffer']):
self.play_steps(self.steps_num, self.epsilon)
steps_per_epoch = self.config['steps_per_epoch']
num_epochs_to_copy = self.config['num_epochs_to_copy']
batch_size = self.config['batch_size']
lives_reward = self.config['lives_reward']
episodes_to_log = self.config['episodes_to_log']
frame = 0
play_time = 0
update_time = 0
rewards = []
shaped_rewards = []
steps = []
losses = deque([], maxlen=100)
while True:
epoch_num = self.update_epoch()
t_play_start = time.time()
self.epsilon = self.epsilon_processor(frame)
self.beta = self.beta_processor(frame)
for _ in range(0, steps_per_epoch):
reward, shaped_reward, step = self.play_steps(self.steps_num, self.epsilon)
if reward != None:
self.game_lengths.append(step)
self.game_rewards.append(reward)
#shaped_rewards.append(shaped_reward)
t_play_end = time.time()
play_time += t_play_end - t_play_start
# train
frame = frame + steps_per_epoch
t_start = time.time()
if self.is_categorical:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.steps_num)
batch[self.proj_dir_ph] = projected
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.steps_num)
batch[self.proj_dir_ph] = projected
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
else:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
losses.append(loss_t)
t_end = time.time()
update_time += t_end - t_start
total_time += update_time
if frame % 1000 == 0:
mem_free_steps += 1
if mem_free_steps == 10:
mem_free_steps = 0
tr_helpers.free_mem()
sum_time = update_time + play_time
print('frames per seconds: ', 1000 / (sum_time))
self.writer.add_scalar('performance/fps', 1000 / sum_time, frame)
self.writer.add_scalar('performance/upd_time', update_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('losses/td_loss', np.mean(losses), frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/lr', self.learning_rate*lr_mul, frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
self.writer.add_scalar('info/epsilon', self.epsilon, frame)
if self.is_prioritized:
self.writer.add_scalar('beta', self.beta, frame)
self.logger.log_stat("whirl/performance/fps", 1000 / sum_time, self.num_env_steps_train)
self.logger.log_stat("whirl/performance/upd_time", update_time, self.num_env_steps_train)
self.logger.log_stat("whirl/performance/play_time", play_time, self.num_env_steps_train)
self.logger.log_stat("losses/td_loss", np.mean(losses), self.num_env_steps_train)
self.logger.log_stat("whirl/info/last_lr", self.learning_rate*lr_mul, self.num_env_steps_train)
self.logger.log_stat("whirl/info/lr_mul", lr_mul, self.num_env_steps_train)
self.logger.log_stat("whirl/epochs", epoch_num, self.num_env_steps_train)
self.logger.log_stat("whirl/epsilon", self.epsilon, self.num_env_steps_train)
update_time = 0
play_time = 0
num_games = len(self.game_rewards)
if num_games > 10:
d = num_games / lives_reward
mean_rewards = np.sum(self.game_rewards) / d
mean_lengths = np.sum(self.game_lengths) / d
self.writer.add_scalar('rewards/mean', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
self.logger.log_stat("whirl/rewards/mean", np.asscalar(mean_rewards), self.num_env_steps_train)
self.logger.log_stat("whirl/rewards/time", mean_rewards, total_time)
self.logger.log_stat("whirl/episode_lengths/mean", np.asscalar(mean_lengths), self.num_env_steps_train)
self.logger.log_stat("whirl/episode_lengths/time", mean_lengths, total_time)
if mean_rewards > last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
last_mean_rewards = mean_rewards
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
if last_mean_rewards > self.config['score_to_win']:
print('network won!')
return last_mean_rewards, epoch_num
#clear_output(True)
# adjust agent parameters
if frame % num_epochs_to_copy == 0:
self.load_weigths_into_target_network()
if epoch_num >= self.max_epochs:
print('Max epochs reached')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(np.sum(self.game_rewards) * lives_reward / len(self.game_rewards)))
return last_mean_rewards, epoch_num
| [
"common.tr_helpers.free_mem",
"tensorflow.reduce_sum",
"tensorflow.nn.softmax",
"common.tr_helpers.LinearValueProcessor",
"tensorflow.reduce_mean",
"algos_tf14.tensorflow_utils.TensorFlowVariables",
"numpy.mean",
"collections.deque",
"common.experience.ReplayBuffer",
"numpy.random.random",
"comm... | [((854, 896), 'tensorflow.constant', 'tf.constant', (['(1)'], {'shape': '()', 'dtype': 'tf.float32'}), '(1, shape=(), dtype=tf.float32)\n', (865, 896), True, 'import tensorflow as tf\n'), ((929, 972), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '()'], {'name': '"""lr_ph"""'}), "('float32', (), name='lr_ph')\n", (943, 972), True, 'import tensorflow as tf\n'), ((1005, 1061), 'common.tr_helpers.get_or_default', 'tr_helpers.get_or_default', (['config', '"""games_to_track"""', '(100)'], {}), "(config, 'games_to_track', 100)\n", (1030, 1061), False, 'from common import tr_helpers, experience, env_configurations\n'), ((1088, 1151), 'common.tr_helpers.get_or_default', 'tr_helpers.get_or_default', (['self.config', '"""max_epochs"""', '(1000000.0)'], {}), "(self.config, 'max_epochs', 1000000.0)\n", (1113, 1151), False, 'from common import tr_helpers, experience, env_configurations\n'), ((1175, 1212), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.games_to_track'}), '([], maxlen=self.games_to_track)\n', (1180, 1212), False, 'from collections import deque\n'), ((1241, 1278), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.games_to_track'}), '([], maxlen=self.games_to_track)\n', (1246, 1278), False, 'from collections import deque\n'), ((2434, 2559), 'common.tr_helpers.LinearValueProcessor', 'tr_helpers.LinearValueProcessor', (["self.config['epsilon']", "self.config['min_epsilon']", "self.config['epsilon_decay_frames']"], {}), "(self.config['epsilon'], self.config[\n 'min_epsilon'], self.config['epsilon_decay_frames'])\n", (2465, 2559), False, 'from common import tr_helpers, experience, env_configurations\n'), ((2585, 2710), 'common.tr_helpers.LinearValueProcessor', 'tr_helpers.LinearValueProcessor', (["self.config['priority_beta']", "self.config['max_beta']", "self.config['beta_decay_frames']"], {}), "(self.config['priority_beta'], self.config[\n 'max_beta'], self.config['beta_decay_frames'])\n", (2616, 2710), False, 'from common 
import tr_helpers, experience, env_configurations\n'), ((2937, 2969), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.steps_num'}), '([], maxlen=self.steps_num)\n', (2942, 2969), False, 'from collections import deque\n'), ((4031, 4123), 'tensorflow.placeholder', 'tf.placeholder', (['observation_space.dtype'], {'shape': '((None,) + self.state_shape)', 'name': '"""obs_ph"""'}), "(observation_space.dtype, shape=(None,) + self.state_shape,\n name='obs_ph')\n", (4045, 4123), True, 'import tensorflow as tf\n'), ((4149, 4206), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""actions_ph"""'}), "(tf.int32, shape=[None], name='actions_ph')\n", (4163, 4206), True, 'import tensorflow as tf\n'), ((4236, 4295), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""rewards_ph"""'}), "(tf.float32, shape=[None], name='rewards_ph')\n", (4250, 4295), True, 'import tensorflow as tf\n'), ((4326, 4423), 'tensorflow.placeholder', 'tf.placeholder', (['observation_space.dtype'], {'shape': '((None,) + self.state_shape)', 'name': '"""next_obs_ph"""'}), "(observation_space.dtype, shape=(None,) + self.state_shape,\n name='next_obs_ph')\n", (4340, 4423), True, 'import tensorflow as tf\n'), ((4449, 4508), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""is_done_ph"""'}), "(tf.float32, shape=[None], name='is_done_ph')\n", (4463, 4508), True, 'import tensorflow as tf\n'), ((5165, 5200), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (5198, 5200), True, 'import tensorflow as tf\n'), ((5473, 5489), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5487, 5489), True, 'import tensorflow as tf\n'), ((5655, 5699), 'algos_tf14.tensorflow_utils.TensorFlowVariables', 'TensorFlowVariables', (['self.qvalues', 'self.sess'], {}), '(self.qvalues, self.sess)\n', (5674, 5699), False, 'from algos_tf14.tensorflow_utils 
import TensorFlowVariables\n'), ((5882, 5908), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res'], {'axis': '(2)'}), '(res, axis=2)\n', (5895, 5908), True, 'import tensorflow as tf\n'), ((6419, 6453), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {'axis': '(2)'}), '(self.logits, axis=2)\n', (6432, 6453), True, 'import tensorflow as tf\n'), ((6748, 6789), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.target_logits'], {'axis': '(2)'}), '(self.target_logits, axis=2)\n', (6761, 6789), True, 'import tensorflow as tf\n'), ((7314, 7380), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""agent"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')\n", (7331, 7380), True, 'import tensorflow as tf\n'), ((7411, 7478), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""target"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')\n", (7428, 7478), True, 'import tensorflow as tf\n'), ((8474, 8552), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.atoms_num]', 'name': '"""best_proj_dir"""'}), "(tf.float32, shape=[None, self.atoms_num], name='best_proj_dir')\n", (8488, 8552), True, 'import tensorflow as tf\n'), ((8575, 8628), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['self.current_action_values'], {'axis': '(1)'}), '(self.current_action_values, axis=1)\n', (8592, 8628), True, 'import tensorflow as tf\n'), ((9011, 9039), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.td_loss'], {}), '(self.td_loss)\n', (9025, 9039), True, 'import tensorflow as tf\n'), ((9824, 9890), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""agent"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')\n", (9841, 9890), True, 'import tensorflow as tf\n'), ((9921, 9988), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 
'"""target"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')\n", (9938, 9988), True, 'import tensorflow as tf\n'), ((11479, 11514), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (11512, 11514), True, 'import tensorflow as tf\n'), ((15129, 15140), 'time.time', 'time.time', ([], {}), '()\n', (15138, 15140), False, 'import time\n'), ((15759, 15780), 'collections.deque', 'deque', (['[]'], {'maxlen': '(100)'}), '([], maxlen=100)\n', (15764, 15780), False, 'from collections import deque\n'), ((1318, 1360), 'tensorflow.constant', 'tf.constant', (['(0)'], {'shape': '()', 'dtype': 'tf.float32'}), '(0, shape=(), dtype=tf.float32)\n', (1329, 1360), True, 'import tensorflow as tf\n'), ((1898, 2024), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(1.0)'], {'global_step': 'self.epoch_num', 'decay_steps': 'self.max_epochs', 'decay_rate': "config['decay_rate']"}), "(1.0, global_step=self.epoch_num, decay_steps=\n self.max_epochs, decay_rate=config['decay_rate'])\n", (1924, 2024), True, 'import tensorflow as tf\n'), ((3370, 3431), 'tensorflow.range', 'tf.range', (['self.v_min', '(self.v_max + self.delta_z)', 'self.delta_z'], {}), '(self.v_min, self.v_max + self.delta_z, self.delta_z)\n', (3378, 3431), True, 'import tensorflow as tf\n'), ((3463, 3515), 'common.categorical.CategoricalQ', 'CategoricalQ', (['self.atoms_num', 'self.v_min', 'self.v_max'], {}), '(self.atoms_num, self.v_min, self.v_max)\n', (3475, 3515), False, 'from common.categorical import CategoricalQ\n'), ((3638, 3729), 'common.experience.ReplayBuffer', 'experience.ReplayBuffer', (["config['replay_buffer_size']", 'observation_space', 'self.n_agents'], {}), "(config['replay_buffer_size'], observation_space,\n self.n_agents)\n", (3661, 3729), False, 'from common import tr_helpers, experience, env_configurations\n'), ((3771, 3900), 'common.experience.PrioritizedReplayBuffer', 'experience.PrioritizedReplayBuffer', 
(["config['replay_buffer_size']", "config['priority_alpha']", 'observation_space', 'self.n_agents'], {}), "(config['replay_buffer_size'], config[\n 'priority_alpha'], observation_space, self.n_agents)\n", (3805, 3900), False, 'from common import tr_helpers, experience, env_configurations\n'), ((3933, 3996), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""sample_weights"""'}), "(tf.float32, shape=[None], name='sample_weights')\n", (3947, 3996), True, 'import tensorflow as tf\n'), ((5517, 5565), 'tensorflow.assign', 'tf.assign', (['w_target', 'w_self'], {'validate_shape': '(True)'}), '(w_target, w_self, validate_shape=True)\n', (5526, 5565), True, 'import tensorflow as tf\n'), ((7183, 7222), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.next_logits'], {'axis': '(2)'}), '(self.next_logits, axis=2)\n', (7196, 7222), True, 'import tensorflow as tf\n'), ((7728, 7764), 'tensorflow.argmax', 'tf.argmax', (['self.next_qvalues'], {'axis': '(1)'}), '(self.next_qvalues, axis=1)\n', (7737, 7764), True, 'import tensorflow as tf\n'), ((7815, 7866), 'tensorflow.one_hot', 'tf.one_hot', (['self.next_selected_actions', 'actions_num'], {}), '(self.next_selected_actions, actions_num)\n', (7825, 7866), True, 'import tensorflow as tf\n'), ((8107, 8145), 'tensorflow.argmax', 'tf.argmax', (['self.target_qvalues'], {'axis': '(1)'}), '(self.target_qvalues, axis=1)\n', (8116, 8145), True, 'import tensorflow as tf\n'), ((8196, 8247), 'tensorflow.one_hot', 'tf.one_hot', (['self.next_selected_actions', 'actions_num'], {}), '(self.next_selected_actions, actions_num)\n', (8206, 8247), True, 'import tensorflow as tf\n'), ((8927, 8979), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(-log_probs * self.proj_dir_ph)'], {'axis': '(1)'}), '(-log_probs * self.proj_dir_ph, axis=1)\n', (8940, 8979), True, 'import tensorflow as tf\n'), ((10210, 10246), 'tensorflow.argmax', 'tf.argmax', (['self.next_qvalues'], {'axis': '(1)'}), '(self.next_qvalues, axis=1)\n', 
(10219, 10246), True, 'import tensorflow as tf\n'), ((10297, 10348), 'tensorflow.one_hot', 'tf.one_hot', (['self.next_selected_actions', 'actions_num'], {}), '(self.next_selected_actions, actions_num)\n', (10307, 10348), True, 'import tensorflow as tf\n'), ((11268, 11296), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.td_loss'], {}), '(self.td_loss)\n', (11282, 11296), True, 'import tensorflow as tf\n'), ((11344, 11457), 'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', (['self.current_action_qvalues', 'self.reference_qvalues'], {'reduction': 'tf.losses.Reduction.MEAN'}), '(self.current_action_qvalues, self.reference_qvalues,\n reduction=tf.losses.Reduction.MEAN)\n', (11364, 11457), True, 'import tensorflow as tf\n'), ((12298, 12316), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (12314, 12316), True, 'import numpy as np\n'), ((12461, 12477), 'numpy.argmax', 'np.argmax', (['qvals'], {}), '(qvals)\n', (12470, 12477), True, 'import numpy as np\n'), ((15872, 15883), 'time.time', 'time.time', ([], {}), '()\n', (15881, 15883), False, 'import time\n'), ((16356, 16367), 'time.time', 'time.time', ([], {}), '()\n', (16365, 16367), False, 'import time\n'), ((16531, 16542), 'time.time', 'time.time', ([], {}), '()\n', (16540, 16542), False, 'import time\n'), ((18547, 18558), 'time.time', 'time.time', ([], {}), '()\n', (18556, 18558), False, 'import time\n'), ((4889, 4916), 'tensorflow.to_float', 'tf.to_float', (['self.input_obs'], {}), '(self.input_obs)\n', (4900, 4916), True, 'import tensorflow as tf\n'), ((4959, 4991), 'tensorflow.to_float', 'tf.to_float', (['self.input_next_obs'], {}), '(self.input_next_obs)\n', (4970, 4991), True, 'import tensorflow as tf\n'), ((5328, 5391), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(self.learning_rate * self.lr_multiplier)'], {}), '(self.learning_rate * self.lr_multiplier)\n', (5350, 5391), True, 'import tensorflow as tf\n'), ((5747, 5780), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (5778, 5780), True, 'import tensorflow as tf\n'), ((8756, 8808), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(-log_probs * self.proj_dir_ph)'], {'axis': '(1)'}), '(-log_probs * self.proj_dir_ph, axis=1)\n', (8769, 8808), True, 'import tensorflow as tf\n'), ((10043, 10083), 'tensorflow.one_hot', 'tf.one_hot', (['self.actions_ph', 'actions_num'], {}), '(self.actions_ph, actions_num)\n', (10053, 10083), True, 'import tensorflow as tf\n'), ((10411, 10508), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.target_qvalues * self.next_selected_actions_onehot)'], {'reduction_indices': '[1]'}), '(self.target_qvalues * self.next_selected_actions_onehot,\n reduction_indices=[1])\n', (10424, 10508), True, 'import tensorflow as tf\n'), ((10585, 10640), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.target_qvalues'], {'reduction_indices': '(1)'}), '(self.target_qvalues, reduction_indices=1)\n', (10598, 10640), True, 'import tensorflow as tf\n'), ((10898, 10958), 'tensorflow.abs', 'tf.abs', (['(self.current_action_qvalues - self.reference_qvalues)'], {}), '(self.current_action_qvalues - self.reference_qvalues)\n', (10904, 10958), True, 'import tensorflow as tf\n'), ((11101, 11214), 'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', (['self.current_action_qvalues', 'self.reference_qvalues'], {'reduction': 'tf.losses.Reduction.NONE'}), '(self.current_action_qvalues, self.reference_qvalues,\n reduction=tf.losses.Reduction.NONE)\n', (11121, 11214), True, 'import tensorflow as tf\n'), ((1777, 1830), 'common.tr_helpers.get_or_default', 'tr_helpers.get_or_default', (['config', '"""decay_power"""', '(1.0)'], {}), "(config, 'decay_power', 1.0)\n", (1802, 1830), False, 'from common import tr_helpers, experience, env_configurations\n'), ((7547, 7587), 'tensorflow.one_hot', 'tf.one_hot', (['self.actions_ph', 'actions_num'], {}), '(self.actions_ph, actions_num)\n', (7557, 7587), True, 'import tensorflow as tf\n'), ((11672, 11735), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(self.learning_rate * self.lr_multiplier)'], {}), '(self.learning_rate * self.lr_multiplier)\n', (11694, 11735), True, 'import tensorflow as tf\n'), ((18812, 18833), 'common.tr_helpers.free_mem', 'tr_helpers.free_mem', ([], {}), '()\n', (18831, 18833), False, 'from common import tr_helpers, experience, env_configurations\n'), ((19254, 19269), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (19261, 19269), True, 'import numpy as np\n'), ((20060, 20075), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (20067, 20075), True, 'import numpy as np\n'), ((2255, 2269), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2267, 2269), False, 'from datetime import datetime\n'), ((7944, 7997), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.next_selected_actions_onehot', '(-1)'], {}), '(self.next_selected_actions_onehot, -1)\n', (7958, 7997), True, 'import tensorflow as tf\n'), ((8325, 8378), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.next_selected_actions_onehot', '(-1)'], {}), '(self.next_selected_actions_onehot, -1)\n', (8339, 8378), True, 'import tensorflow as tf\n'), ((20724, 20749), 'numpy.sum', 'np.sum', (['self.game_rewards'], {}), '(self.game_rewards)\n', (20730, 20749), True, 'import numpy as np\n'), ((20790, 20815), 'numpy.sum', 'np.sum', (['self.game_lengths'], {}), '(self.game_lengths)\n', (20796, 20815), True, 'import numpy as np\n'), ((21230, 21255), 'numpy.asscalar', 'np.asscalar', (['mean_rewards'], {}), '(mean_rewards)\n', (21241, 21255), True, 'import numpy as np\n'), ((21443, 21468), 'numpy.asscalar', 'np.asscalar', (['mean_lengths'], {}), '(mean_lengths)\n', (21454, 21468), True, 'import numpy as np\n'), ((22511, 22536), 'numpy.sum', 'np.sum', (['self.game_rewards'], {}), '(self.game_rewards)\n', (22517, 22536), True, 'import numpy as np\n')] |
import itertools
import numpy as np
from scipy import ndimage
class ObjectiveFunction:
def __init__(self, msg=True):
self.flist = [
"totalWeight",
"solSize",
"crossCount",
"fillCount",
"maxConnectedEmpties"
]
self.registeredFuncs = []
if msg is True:
print("ObjectiveFunction object has made.")
def __len__(self):
return len(self.registeredFuncs)
def getFuncs(self):
return self.registeredFuncs
def solSize(self, puzzle):
"""
This method returns the number of words used in the solution
"""
return puzzle.solSize
def crossCount(self, puzzle):
"""
This method returns the number of crosses of a word
"""
return np.sum(puzzle.cover == 2)
def fillCount(self, puzzle):
"""
This method returns the number of character cells in the puzzle
"""
return np.sum(puzzle.cover >= 1)
def totalWeight(self, puzzle):
"""
This method returns the sum of the word weights used for the solution
"""
return puzzle.totalWeight
def maxConnectedEmpties(self, puzzle):
"""
This method returns the maximum number of concatenations for unfilled squares
"""
reverse_cover = puzzle.cover < 1
zero_label, nlbl = ndimage.label(reverse_cover)
mask = zero_label > 0
sizes = ndimage.sum(mask, zero_label, range(nlbl+1))
score = puzzle.width*puzzle.height - sizes.max()
return score
def register(self, funcNames, msg=True):
"""
This method registers an objective function in an instance
"""
for funcName in funcNames:
if funcName not in self.flist:
raise RuntimeError(f"ObjectiveFunction class does not have '{funcName}' function")
if msg is True:
print(f" - '{funcName}' function has registered.")
self.registeredFuncs = funcNames
return
def getScore(self, puzzle, i=0, func=None, all=False):
"""
This method returns any objective function value
"""
if all is True:
scores=np.zeros(len(self.registeredFuncs), dtype="int")
for n in range(scores.size):
scores[n] = eval(f"self.{self.registeredFuncs[n]}(puzzle)")
return scores
if func is None:
func = self.registeredFuncs[i]
return eval(f"self.{func}(puzzle)")
| [
"numpy.sum",
"scipy.ndimage.label"
] | [((820, 845), 'numpy.sum', 'np.sum', (['(puzzle.cover == 2)'], {}), '(puzzle.cover == 2)\n', (826, 845), True, 'import numpy as np\n'), ((991, 1016), 'numpy.sum', 'np.sum', (['(puzzle.cover >= 1)'], {}), '(puzzle.cover >= 1)\n', (997, 1016), True, 'import numpy as np\n'), ((1411, 1439), 'scipy.ndimage.label', 'ndimage.label', (['reverse_cover'], {}), '(reverse_cover)\n', (1424, 1439), False, 'from scipy import ndimage\n')] |
import argparse
import numpy as np
import time
from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds
def initialize_board(name='SYNTHETIC',port = None):
if name == 'SYNTHETIC':
BoardShim.enable_dev_board_logger()
# use synthetic board for demo
params = BrainFlowInputParams()
board_id = BoardIds.SYNTHETIC_BOARD.value
board = BoardShim(board_id, params)
board.rate = BoardShim.get_sampling_rate(board_id)
board.channels = BoardShim.get_eeg_channels(board_id)
board.time_channel = BoardShim.get_timestamp_channel(board_id)
board.eeg_channels = BoardShim.get_eeg_channels(board_id)
board.accel_channels = BoardShim.get_accel_channels(board_id)
elif name == 'OPENBCI':
board_id = BoardIds.CYTON_DAISY_BOARD.value
params = BrainFlowInputParams()
params.serial_port = port
board_id = BoardIds.CYTON_DAISY_BOARD.value
board = BoardShim(board_id, params)
board.rate = BoardShim.get_sampling_rate(board_id)
board.channels = BoardShim.get_eeg_channels(board_id)
board.time_channel = BoardShim.get_timestamp_channel(board_id)
board.eeg_channels = BoardShim.get_eeg_channels(board_id)
board.accel_channels = BoardShim.get_accel_channels(board_id)
print('Must have OpenBCI GUI open to work... (as port is not opened by Brainflow)')
board.prepare_session()
return board
def start_stream(board=None):
board.start_stream(num_samples=450000)
start_time = time.time()
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
return start_time
def pull_data(board=None,num_samples=450000):
data = board.get_current_board_data(num_samples=num_samples)
return data
def stop_stream(board=None,start_stream=None):
board.stop_stream()
stream_time = time.time() - start_stream
board.release_session()
return stream_time
def map_events_to_features(event_times=None,data_times = None,events=None):
curr_timestamp = 1
prev_timestamp = 0
closest_data = np.zeros((event_times.size),dtype=int)
for ii in range(event_times.size):
while True:
curr_diff = abs(event_times[ii] - data_times[curr_timestamp])
prev_diff = abs(event_times[ii] - data_times[prev_timestamp-1])
if curr_diff < prev_diff:
curr_timestamp += 1
prev_timestamp += 1
if curr_diff > prev_diff:
closest_data[ii] = int(curr_timestamp)
curr_timestamp += 1
prev_timestamp += 1
break
new_events = np.empty((data_times.size),dtype=str)
for ii in range(len(closest_data)-1):
this_event = closest_data[ii]
next_event = closest_data[ii+1]
stop = int(np.floor(this_event + (next_event-this_event)/2))
if ii == 0:
start = int(np.floor(this_event))
else:
prev_event = closest_data[ii-1]
start = int(np.floor(this_event + (prev_event-this_event)/2))
for jj in range(stop-start):
new_events[start+jj] = events[ii]
return new_events | [
"brainflow.board_shim.BoardShim.enable_dev_board_logger",
"brainflow.board_shim.BoardShim",
"brainflow.board_shim.BoardShim.get_accel_channels",
"brainflow.board_shim.BrainFlowInputParams",
"brainflow.board_shim.BoardShim.get_sampling_rate",
"numpy.floor",
"brainflow.board_shim.BoardShim.log_message",
... | [((1565, 1576), 'time.time', 'time.time', ([], {}), '()\n', (1574, 1576), False, 'import time\n'), ((1581, 1671), 'brainflow.board_shim.BoardShim.log_message', 'BoardShim.log_message', (['LogLevels.LEVEL_INFO.value', '"""start sleeping in the main thread"""'], {}), "(LogLevels.LEVEL_INFO.value,\n 'start sleeping in the main thread')\n", (1602, 1671), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((2130, 2167), 'numpy.zeros', 'np.zeros', (['event_times.size'], {'dtype': 'int'}), '(event_times.size, dtype=int)\n', (2138, 2167), True, 'import numpy as np\n'), ((2697, 2733), 'numpy.empty', 'np.empty', (['data_times.size'], {'dtype': 'str'}), '(data_times.size, dtype=str)\n', (2705, 2733), True, 'import numpy as np\n'), ((222, 257), 'brainflow.board_shim.BoardShim.enable_dev_board_logger', 'BoardShim.enable_dev_board_logger', ([], {}), '()\n', (255, 257), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((315, 337), 'brainflow.board_shim.BrainFlowInputParams', 'BrainFlowInputParams', ([], {}), '()\n', (335, 337), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((404, 431), 'brainflow.board_shim.BoardShim', 'BoardShim', (['board_id', 'params'], {}), '(board_id, params)\n', (413, 431), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((453, 490), 'brainflow.board_shim.BoardShim.get_sampling_rate', 'BoardShim.get_sampling_rate', (['board_id'], {}), '(board_id)\n', (480, 490), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((516, 552), 'brainflow.board_shim.BoardShim.get_eeg_channels', 'BoardShim.get_eeg_channels', (['board_id'], {}), '(board_id)\n', (542, 552), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((582, 623), 
'brainflow.board_shim.BoardShim.get_timestamp_channel', 'BoardShim.get_timestamp_channel', (['board_id'], {}), '(board_id)\n', (613, 623), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((653, 689), 'brainflow.board_shim.BoardShim.get_eeg_channels', 'BoardShim.get_eeg_channels', (['board_id'], {}), '(board_id)\n', (679, 689), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((721, 759), 'brainflow.board_shim.BoardShim.get_accel_channels', 'BoardShim.get_accel_channels', (['board_id'], {}), '(board_id)\n', (749, 759), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((1909, 1920), 'time.time', 'time.time', ([], {}), '()\n', (1918, 1920), False, 'import time\n'), ((859, 881), 'brainflow.board_shim.BrainFlowInputParams', 'BrainFlowInputParams', ([], {}), '()\n', (879, 881), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((984, 1011), 'brainflow.board_shim.BoardShim', 'BoardShim', (['board_id', 'params'], {}), '(board_id, params)\n', (993, 1011), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((1033, 1070), 'brainflow.board_shim.BoardShim.get_sampling_rate', 'BoardShim.get_sampling_rate', (['board_id'], {}), '(board_id)\n', (1060, 1070), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((1096, 1132), 'brainflow.board_shim.BoardShim.get_eeg_channels', 'BoardShim.get_eeg_channels', (['board_id'], {}), '(board_id)\n', (1122, 1132), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((1162, 1203), 'brainflow.board_shim.BoardShim.get_timestamp_channel', 'BoardShim.get_timestamp_channel', (['board_id'], {}), '(board_id)\n', (1193, 1203), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, 
BoardIds\n'), ((1233, 1269), 'brainflow.board_shim.BoardShim.get_eeg_channels', 'BoardShim.get_eeg_channels', (['board_id'], {}), '(board_id)\n', (1259, 1269), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((1301, 1339), 'brainflow.board_shim.BoardShim.get_accel_channels', 'BoardShim.get_accel_channels', (['board_id'], {}), '(board_id)\n', (1329, 1339), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\n'), ((2880, 2932), 'numpy.floor', 'np.floor', (['(this_event + (next_event - this_event) / 2)'], {}), '(this_event + (next_event - this_event) / 2)\n', (2888, 2932), True, 'import numpy as np\n'), ((2974, 2994), 'numpy.floor', 'np.floor', (['this_event'], {}), '(this_event)\n', (2982, 2994), True, 'import numpy as np\n'), ((3078, 3130), 'numpy.floor', 'np.floor', (['(this_event + (prev_event - this_event) / 2)'], {}), '(this_event + (prev_event - this_event) / 2)\n', (3086, 3130), True, 'import numpy as np\n')] |
"""
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
from __future__ import division
from __future__ import print_function
import torch
import argparse
import csv
import datetime
import os
import pickle
import time
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler
from modules_logsigma import *
from utils_logsigma import *
def _str2bool(value):
    """Parse a command-line boolean so that '--flag False' really yields False.

    argparse's ``type=bool`` applies ``bool()`` to the raw string, so any
    non-empty token — including the literal string 'False' — used to parse
    as True. This parser keeps '--fixed-var True' working while fixing the
    'False' case.
    """
    return str(value).lower() in ('true', 't', 'yes', 'y', '1')


parser = argparse.ArgumentParser()
## arguments related to training ##
parser.add_argument('--epochs', type=int, default=500,
                    help='Number of epochs to train.')
parser.add_argument('--batch-size', type=int, default=128,
                    help='Number of samples per batch.')
parser.add_argument('--lr', type=float, default=0.0005,
                    help='Initial learning rate.')
parser.add_argument('--prediction-steps', type=int, default=10, metavar='N',
                    help='Num steps to predict before re-using teacher forcing.')
parser.add_argument('--lr-decay', type=int, default=200,
                    help='After how epochs to decay LR by a factor of gamma.')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='LR decay factor.')
parser.add_argument('--patience', type=int, default=500,
                    help='Early stopping patience')
parser.add_argument('--encoder-dropout', type=float, default=0.0,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--decoder-dropout', type=float, default=0.0,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dont-split-data', action='store_true', default=False,
                    help='Whether to not split training and validation data into two parts')
parser.add_argument('--split-enc-only', action='store_true', default=False,
                    help='Whether to give the encoder the first half of trajectories \
                          and the decoder the whole of the trajectories')
# was type=bool: any non-empty value (even 'False') parsed as True
parser.add_argument('--fixed-var', type=_str2bool, default=False,
                    help='If true will use a fixed small variance. If false will solve with variable variance.')
parser.add_argument('--anisotropic', type=_str2bool, default=False,
                    help='If true use anisotropic sigma. If false will use isotropic sigma.')
## arguments related to loss function ##
parser.add_argument('--var', type=float, default=5e-5,
                    help='Output variance.')
parser.add_argument('--beta', type=float, default=1.0,
                    help='KL-divergence beta factor')
parser.add_argument('--mse-loss', action='store_true', default=False,
                    help='Use the MSE as the loss')
## arguments related to weight and bias initialisation ##
parser.add_argument('--seed', type=int, default=1,
                    help='Random seed.')
parser.add_argument('--encoder-init-type', type=str, default='xavier_normal',
                    help='The type of weight initialization to use in the encoder')
parser.add_argument('--decoder-init-type', type=str, default='default',
                    help='The type of weight initialization to use in the decoder')
# NOTE(review): help text below looks copy-pasted from --encoder-init-type;
# the flag actually scales the encoder bias initialisation — verify wording.
parser.add_argument('--encoder-bias-scale', type=float, default=0.1,
                    help='The type of weight initialization to use in the encoder')
## arguments related to changing the model ##
parser.add_argument('--NRI', action='store_true', default=False,
                    help='Use the NRI model, rather than the fNRI model')
parser.add_argument('--edge-types-list', nargs='+', default=[2, 2],
                    help='The number of edge types to infer.') # takes arguments from cmd line as: --edge-types-list 2 2
parser.add_argument('--split-point', type=int, default=0,
                    help='The point at which factor graphs are split up in the encoder')
parser.add_argument('--encoder', type=str, default='mlp',
                    help='Type of path encoder model (mlp or cnn).')
parser.add_argument('--decoder', type=str, default='mlp',
                    help='Type of decoder model (mlp, rnn, or sim).')
parser.add_argument('--encoder-hidden', type=int, default=32,
                    help='Number of hidden units.')
parser.add_argument('--decoder-hidden', type=int, default=32,
                    help='Number of hidden units.')
parser.add_argument('--temp', type=float, default=0.5,
                    help='Temperature for Gumbel softmax.')
parser.add_argument('--temp_softplus', type=float, default= 5.0,
                    help='Temperature for softplus.')
parser.add_argument('--skip-first', action='store_true', default=False,
                    help='Skip the first edge type in each block in the decoder, i.e. it represents no-edge.')
parser.add_argument('--hard', action='store_true', default=False,
                    help='Uses discrete samples in training forward pass.')
parser.add_argument('--soft-valid', action='store_true', default=False,
                    help='Dont use hard in validation')
parser.add_argument('--prior', action='store_true', default=False,
                    help='Whether to use sparsity prior.')
## arguments related to the simulation data ##
parser.add_argument('--sim-folder', type=str, default='springcharge_5',
                    help='Name of the folder in the data folder to load simulation data from')
parser.add_argument('--data-folder', type=str, default='data',
                    help='Name of the data folder to load data from')
parser.add_argument('--num-atoms', type=int, default=5,
                    help='Number of atoms in simulation.')
parser.add_argument('--encoder_dims', type=int, default=4,
                    help='The number of input dimensions for the encoder (position + velocity).')
parser.add_argument('--decoder_dims', type=int, default=5,
                    help='The number of input dimensions for the decoder (position + velocity + sigma).')
parser.add_argument('--timesteps', type=int, default=49,
                    help='The number of time steps per sample.')
## Saving, loading etc. ##
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--save-folder', type=str, default='logs',
                    help='Where to save the trained model, leave empty to not save anything.')
parser.add_argument('--load-folder', type=str, default='',
                    help='Where to load the trained model if finetunning. ' +
                         'Leave empty to train from scratch')
parser.add_argument('--test', action='store_true', default=False,
                    help='Skip training and validation')
parser.add_argument('--plot', action='store_true', default=False,
                    help='Skip training and plot trajectories against actual')
parser.add_argument('--no-edge-acc', action='store_true', default=False,
                    help='Skip training and plot accuracy distributions')
args = parser.parse_args()
# Derive run-time options from the parsed command line.
args.cuda = (not args.no_cuda) and torch.cuda.is_available()
# Normalise the edge-type factor sizes: integers, largest first.
args.edge_types_list = sorted((int(k) for k in args.edge_types_list), reverse=True)
if not all(isinstance(k, int) and k >= 1 for k in args.edge_types_list):
    raise ValueError('Could not compute the edge-types-list')
# NRI models a single joint factor whose size is the product of the
# per-factor edge types; fNRI keeps the factors separate (sum of sizes).
edge_types = np.prod(args.edge_types_list) if args.NRI else sum(args.edge_types_list)
if args.NRI:
    print('Using NRI model')
    if args.split_point != 0:
        args.split_point = 0
print(args)
if args.prior:
    prior = [[0.9, 0.1], [0.9, 0.1]]  # TODO: hard coded for now
    # Each factor graph needs exactly one prior probability per edge type.
    # (was `prior[i].size`, which raises AttributeError on a plain list)
    if not all(len(prior[i]) == args.edge_types_list[i] for i in range(len(args.edge_types_list))):
        raise ValueError('Prior is incompatable with the edge types list')
    print("Using prior: " + str(prior))
    log_prior = []
    for i in range(len(args.edge_types_list)):
        prior_i = np.array(prior[i])
        # Log-prior of factor i only (was np.log(prior), the whole table).
        log_prior_i = torch.FloatTensor(np.log(prior_i))
        # Shape (1, 1, edge_types_i) — presumably to broadcast over
        # (batch, edges) in the KL term; confirm against the loss code.
        log_prior_i = torch.unsqueeze(log_prior_i, 0)
        log_prior_i = torch.unsqueeze(log_prior_i, 0)
        log_prior_i = Variable(log_prior_i)
        log_prior.append(log_prior_i)
    if args.cuda:
        # log_prior is a Python list; move each tensor (a list has no .cuda()).
        log_prior = [lp.cuda() for lp in log_prior]
# Seed every RNG the script uses so runs are reproducible.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# Save model and meta-data. Always saves in a new sub-folder.
if args.save_folder:
    exp_counter = 0
    # Timestamped run directory, e.g. logs/exp2020-01-01T12-00-00.
    now = datetime.datetime.now()
    timestamp = now.isoformat().replace(':', '-')[:-7]
    save_folder = os.path.join(args.save_folder, 'exp' + timestamp)
    os.makedirs(save_folder)
    meta_file = os.path.join(save_folder, 'metadata.pkl')
    encoder_file = os.path.join(save_folder, 'encoder.pt')
    decoder_file = os.path.join(save_folder, 'decoder.pt')
    log_file = os.path.join(save_folder, 'log.txt')
    log_csv_file = os.path.join(save_folder, 'log_csv.csv')
    # These handles stay open for the whole run; later code writes to them.
    log = open(log_file, 'w')
    log_csv = open(log_csv_file, 'w')
    csv_writer = csv.writer(log_csv, delimiter=',')
    # Close the metadata/args files immediately. The original leaked the
    # pickle handle and wrote `par_file.flush` without parentheses (a no-op
    # attribute access); context managers make both flush-and-close explicit.
    with open(meta_file, "wb") as meta_fh:
        pickle.dump({'args': args}, meta_fh)
    with open(os.path.join(save_folder, 'args.txt'), 'w') as par_file:
        print(args, file=par_file)
    perm_csv_file = os.path.join(save_folder, 'perm_csv.csv')
    perm_csv = open(perm_csv_file, 'w')
    perm_writer = csv.writer(perm_csv, delimiter=',')
else:
    print("WARNING: No save_folder provided!" +
          "Testing (within this script) will throw an error.")
# Load train/validation/test dataloaders plus loc/vel extrema
# (presumably the min/max used to normalise the trajectories —
# confirm in the load_data_* helpers).
if args.NRI:
    train_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = load_data_NRI(
        args.batch_size, args.sim_folder, shuffle=True,
        data_folder=args.data_folder)
else:
    train_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = load_data_fNRI(
        args.batch_size, args.sim_folder, shuffle=True,
        data_folder=args.data_folder)
# Build the fully connected interaction graph without self-loops, as
# one-hot receiver/sender matrices with one row per directed edge.
off_diag = np.ones([args.num_atoms, args.num_atoms]) - np.eye(args.num_atoms)
senders, receivers = np.where(off_diag)
rel_rec = torch.FloatTensor(np.array(encode_onehot(receivers), dtype=np.float32))
rel_send = torch.FloatTensor(np.array(encode_onehot(senders), dtype=np.float32))

# NRI collapses all factors into one joint edge-type block.
edge_types_list = [edge_types] if args.NRI else args.edge_types_list
if args.encoder == 'mlp':
    # MLP encoder over the flattened trajectory: input size is
    # timesteps * per-step input dims (position + velocity).
    encoder = MLPEncoder_multi(args.timesteps * args.encoder_dims, args.encoder_hidden,
                               edge_types_list, args.encoder_dropout,
                               split_point=args.split_point,
                               init_type=args.encoder_init_type,
                               bias_init=args.encoder_bias_scale)

# Alternative encoders kept for reference (disabled):
# elif args.encoder == 'cnn':
#     encoder = CNNEncoder_multi(args.dims, args.encoder_hidden,
#                                edge_types_list,
#                                args.encoder_dropout,
#                                split_point=args.split_point,
#                                init_type=args.encoder_init_type)
#
# elif args.encoder == 'random':
#     encoder = RandomEncoder(args.edge_types_list, args.cuda)
#
# elif args.encoder == 'ones':
#     encoder = OnesEncoder(args.edge_types_list, args.cuda)
if args.decoder == 'mlp':
    # MLP decoder; input per node is position + velocity + sigma
    # (decoder_dims, default 5).
    decoder = MLPDecoder_multi(n_in_node=args.decoder_dims,
                               edge_types=edge_types,
                               edge_types_list=edge_types_list,
                               msg_hid=args.decoder_hidden,
                               msg_out=args.decoder_hidden,
                               n_hid=args.decoder_hidden,
                               do_prob=args.decoder_dropout,
                               skip_first=args.skip_first,
                               init_type=args.decoder_init_type)

# Alternative decoders kept for reference (disabled):
# elif args.decoder == 'stationary':
#     decoder = StationaryDecoder()
#
# elif args.decoder == 'velocity':
#     decoder = VelocityStepDecoder()
if args.load_folder:
    # Resume from a previous run: restore both networks' weights and
    # disable checkpoint saving for this process.
    print('Loading model from: ' + args.load_folder)
    encoder_file = os.path.join(args.load_folder, 'encoder.pt')
    decoder_file = os.path.join(args.load_folder, 'decoder.pt')
    # On CPU-only machines remap GPU-saved tensors to the CPU.
    map_loc = None if args.cuda else 'cpu'
    for network, checkpoint in ((encoder, encoder_file), (decoder, decoder_file)):
        network.load_state_dict(torch.load(checkpoint, map_location=map_loc))
    args.save_folder = False
# Jointly optimise encoder and decoder with Adam; StepLR decays the
# learning rate by `gamma` every `lr_decay` scheduler steps.
trainable_params = [*encoder.parameters(), *decoder.parameters()]
optimizer = optim.Adam(trainable_params, lr=args.lr)
scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_decay, gamma=args.gamma)
if args.cuda:
    # Move the networks and the fixed relation matrices to the GPU.
    encoder.cuda()
    decoder.cuda()
    rel_rec = rel_rec.cuda()
    rel_send = rel_send.cuda()
# NOTE(review): Variable is the legacy (pre-0.4) autograd wrapper,
# presumably star-imported from the modules/utils files — confirm.
rel_rec = Variable(rel_rec)
rel_send = Variable(rel_send)
def train(epoch, best_val_loss):
    """Run one training epoch over ``train_loader`` and evaluate on ``valid_loader``.

    Parameters
    ----------
    epoch : int
        Index of the current epoch; when 0, CSV header rows are written.
    best_val_loss : float
        Best validation NLL seen so far; checkpoints are saved when this
        epoch's multi-step validation NLL improves on it.

    Returns
    -------
    float
        Mean multi-step validation NLL (``nll_M_val``) for this epoch.

    Side effects: updates encoder/decoder parameters, steps the LR
    scheduler, appends rows to ``csv_writer``/``perm_writer``, flushes
    ``log`` and saves checkpoints to ``encoder_file``/``decoder_file`` on
    improvement.  When ``args.plot`` is set the training loop is skipped
    entirely and diagnostic plots are produced from the validation pass.

    Review fixes applied (see inline FIX comments):
      * ``plt.legend`` was called with positional label strings although
        labels are already attached via ``label=``; now called bare.
      * The divide-by-zero guard for the z-score clamped ``output_plot``
        instead of ``sigma_plot`` (the divisor) — both branches fixed.
      * ``zscorelist`` tensors are moved to CPU before ``.numpy()`` so the
        z-score histogram also works with ``args.cuda``.
      * Removed a stray ``plt.text(60, .025, ...)`` copied from a
        matplotlib example; it drew outside ``xlim(-4, 4)``.
    """
    t = time.time()
    # Per-batch training statistics accumulated over the epoch.
    nll_train = []
    nll_var_train = []
    mse_train = []
    kl_train = []
    kl_list_train = []
    kl_var_list_train = []
    acc_train = []
    acc_var_train = []
    perm_train = []
    acc_var_blocks_train = []
    acc_blocks_train = []
    KLb_train = []
    KLb_blocks_train = []
    # array of loss components (reconstruction vs. sigma terms)
    loss_1_array = []
    loss_2_array = []
    # gets an array of the sigma tensor per run through of the batch
    sigmadecoderoutput = []
    encoder.train()
    decoder.train()
    # NOTE(review): PyTorch >= 1.1 recommends stepping the scheduler after
    # the epoch's optimizer updates; left unchanged to preserve the exact
    # learning-rate schedule of previous runs.
    scheduler.step()
    if not args.plot:
        for batch_idx, (data, relations) in enumerate(train_loader):  # relations are the ground truth interactions graphs
            if args.cuda:
                data, relations = data.cuda(), relations.cuda()
            data, relations = Variable(data), Variable(relations)
            if args.dont_split_data:
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data[:, :, :args.timesteps, :].contiguous()
            elif args.split_enc_only:
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data
            else:
                # assert (data.size(2) - args.timesteps) >= args.timesteps
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data[:, :, -args.timesteps:, :].contiguous()

            # stores the values of the uncertainty (log(sigma^2)). This will be an array of size
            # [batchsize, no. of particles, time, no. of axes (isotropic = 1, anisotropic = 4)]
            # initialise sigma to an array of large negative numbers; under the softplus
            # function these become small positive numbers
            logsigma = initlogsigma(len(data_decoder), len(data_decoder[0][0]), args.anisotropic, args.num_atoms,
                                    inversesoftplus(pow(args.var, 1 / 2), args.temp_softplus))
            if args.cuda:
                logsigma = logsigma.cuda()
            logsigma = Variable(logsigma)

            optimizer.zero_grad()
            logits = encoder(data_encoder, rel_rec, rel_send)

            if args.NRI:
                # dim of logits, edges and prob are [batchsize, N^2-N, edgetypes] where N = no. of particles
                edges = gumbel_softmax(logits, tau=args.temp, hard=args.hard)
                prob = my_softmax(logits, -1)
                loss_kl = kl_categorical_uniform(prob, args.num_atoms, edge_types)
                loss_kl_split = [loss_kl]
                loss_kl_var_split = [kl_categorical_uniform_var(prob, args.num_atoms, edge_types)]
                KLb_train.append(0)
                KLb_blocks_train.append([0])
                if args.no_edge_acc:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = 0, np.array([0]), np.zeros(len(args.edge_types_list)), 0, np.zeros(len(args.edge_types_list))
                else:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_NRI(logits, relations, args.edge_types_list)
            else:
                # dim of logits, edges and prob are [batchsize, N^2-N, sum(edge_types_list)] where N = no. of particles
                logits_split = torch.split(logits, args.edge_types_list, dim=-1)
                edges_split = tuple([gumbel_softmax(logits_i, tau=args.temp, hard=args.hard)
                                     for logits_i in logits_split])
                edges = torch.cat(edges_split, dim=-1)
                prob_split = [my_softmax(logits_i, -1) for logits_i in logits_split]
                if args.prior:
                    loss_kl_split = [kl_categorical(prob_split[type_idx], log_prior[type_idx], args.num_atoms)
                                     for type_idx in range(len(args.edge_types_list))]
                    loss_kl = sum(loss_kl_split)
                else:
                    loss_kl_split = [kl_categorical_uniform(prob_split[type_idx], args.num_atoms,
                                                            args.edge_types_list[type_idx])
                                     for type_idx in range(len(args.edge_types_list))]
                    loss_kl = sum(loss_kl_split)
                loss_kl_var_split = [kl_categorical_uniform_var(prob_split[type_idx], args.num_atoms,
                                                                args.edge_types_list[type_idx])
                                     for type_idx in range(len(args.edge_types_list))]
                if args.no_edge_acc:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = 0, np.array([0]), np.zeros(len(args.edge_types_list)), 0, np.zeros(len(args.edge_types_list))
                else:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_fNRI(logits_split, relations,
                                                                                                  args.edge_types_list, args.skip_first)
                KLb_blocks = KL_between_blocks(prob_split, args.num_atoms)
                KLb_train.append(sum(KLb_blocks).data.item())
                KLb_blocks_train.append([KL.data.item() for KL in KLb_blocks])

            # fixed variance
            if args.fixed_var:
                target = data_decoder[:, :, 1:, :]  # dimensions are [batch, particle, time, state]
                output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, args.prediction_steps)
                loss_nll = nll_gaussian(output, target, args.var)
                loss_nll_var = nll_gaussian_var(output, target, args.var)
            # variable variance
            else:
                target = data_decoder[:, :, 1:, :]  # dimensions are [batch, particle, time, state]
                if args.anisotropic:
                    # anisotropic: sigma has components along/perpendicular to v and a
                    output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, args.prediction_steps)
                    sigmadecoderoutput.append(logsigma)
                    loss_nll, loss_1, loss_2 = nll_gaussian_multivariatesigma_efficient(output, target, logsigma, accel)
                    loss_nll_var = nll_gaussian_var_multivariatesigma_efficient(output, target, logsigma, accel)
                    loss_1_array.append(loss_1)
                    loss_2_array.append(loss_2)
                else:
                    output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, args.prediction_steps)
                    sigmadecoderoutput.append(logsigma)
                    # isotropic: broadcast sigma to the same shape as output for the gaussian loss
                    logsigma = tile(logsigma, 3, list(output.size())[3])
                    loss_nll, loss_1, loss_2 = nll_gaussian_variablesigma(output, target, logsigma)
                    loss_nll_var = nll_gaussian_var__variablesigma(output, target, logsigma)
                    loss_1_array.append(loss_1)
                    loss_2_array.append(loss_2)

            if args.mse_loss:
                loss = F.mse_loss(output, target)
            else:
                loss = loss_nll
            # only add the KL term when beta is meaningfully non-zero
            if not math.isclose(args.beta, 0, rel_tol=1e-6):
                loss += args.beta * loss_kl

            perm_train.append(perm)
            acc_train.append(acc_perm)
            acc_blocks_train.append(acc_blocks)
            acc_var_train.append(acc_var)
            acc_var_blocks_train.append(acc_var_blocks)

            loss.backward()
            optimizer.step()

            mse_train.append(F.mse_loss(output, target).data.item())
            nll_train.append(loss_nll.data.item())
            kl_train.append(loss_kl.data.item())
            kl_list_train.append([kl.data.item() for kl in loss_kl_split])
            nll_var_train.append(loss_nll_var.data.item())
            kl_var_list_train.append([kl_var.data.item() for kl_var in loss_kl_var_split])

    if (args.plot):
        # NOTE(review): with args.plot the training loop above is skipped,
        # so loss_1_array/loss_2_array are empty and this produces an empty
        # plot; kept for parity with previous behaviour.
        if not (args.fixed_var):
            import matplotlib.pyplot as plt
            # iteration axis [1, 2, ..., final]
            iteration = np.linspace(1, len(loss_1_array), len(loss_1_array));
            # plot loss_1 and loss_2 vs iteration, normalised by total loss
            for i in range(len(loss_1_array)):
                loss = loss_1_array[i] + loss_2_array[i]
                loss_1_array[i] = loss_1_array[i] / loss
                loss_2_array[i] = loss_2_array[i] / loss
            fig = plt.figure()
            plt.plot(iteration, loss_1_array, label='loss 1')
            plt.plot(iteration, loss_2_array, label='loss 2')
            plt.xlabel('iteration')
            plt.ylabel('Loss Component/Total Loss')
            # FIX: labels are already attached via label= above; passing the
            # strings positionally mislabels the handles.
            plt.legend()
            plt.show()

    # Per-batch validation statistics.
    nll_val = []
    nll_var_val = []
    mse_val = []
    kl_val = []
    kl_list_val = []
    kl_var_list_val = []
    acc_val = []
    acc_var_val = []
    acc_blocks_val = []
    acc_var_blocks_val = []
    perm_val = []
    KLb_val = []
    KLb_blocks_val = []  # KL between blocks list
    nll_M_val = []
    nll_M_var_val = []
    # for z-score analysis
    zscorelist = []
    encoder.eval()
    decoder.eval()
    for batch_idx, (data, relations) in enumerate(valid_loader):
        with torch.no_grad():
            if args.cuda:
                data, relations = data.cuda(), relations.cuda()
            if args.dont_split_data:
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data[:, :, :args.timesteps, :].contiguous()
            elif args.split_enc_only:
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data
            else:
                assert (data.size(2) - args.timesteps) >= args.timesteps
                data_encoder = data[:, :, :args.timesteps, :].contiguous()
                data_decoder = data[:, :, -args.timesteps:, :].contiguous()

            # initialise log(sigma^2) as in the training loop
            logsigma = initlogsigma(len(data_decoder), len(data_decoder[0][0]), args.anisotropic, args.num_atoms,
                                    inversesoftplus(pow(args.var, 1 / 2), args.temp_softplus))
            if args.cuda:
                logsigma = logsigma.cuda()

            # dim of logits, edges and prob are [batchsize, N^2-N, sum(edge_types_list)] where N = no. of particles
            logits = encoder(data_encoder, rel_rec, rel_send)

            if args.NRI:
                edges = gumbel_softmax(logits, tau=args.temp, hard=args.hard)  # concrete-distribution sample of edge types
                prob = my_softmax(logits, -1)  # softmax over the edge-type dim
                loss_kl = kl_categorical_uniform(prob, args.num_atoms, edge_types)
                loss_kl_split = [loss_kl]
                loss_kl_var_split = [kl_categorical_uniform_var(prob, args.num_atoms, edge_types)]
                KLb_val.append(0)
                KLb_blocks_val.append([0])
                if args.no_edge_acc:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = 0, np.array([0]), np.zeros(len(args.edge_types_list)), 0, np.zeros(len(args.edge_types_list))
                else:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_NRI(logits, relations, args.edge_types_list)
            else:
                logits_split = torch.split(logits, args.edge_types_list, dim=-1)
                edges_split = tuple([gumbel_softmax(logits_i, tau=args.temp, hard=args.hard)
                                     for logits_i in logits_split])
                edges = torch.cat(edges_split, dim=-1)
                prob_split = [my_softmax(logits_i, -1) for logits_i in logits_split]
                if args.prior:
                    loss_kl_split = [kl_categorical(prob_split[type_idx], log_prior[type_idx], args.num_atoms)
                                     for type_idx in range(len(args.edge_types_list))]
                    loss_kl = sum(loss_kl_split)
                else:
                    loss_kl_split = [kl_categorical_uniform(prob_split[type_idx], args.num_atoms,
                                                            args.edge_types_list[type_idx])
                                     for type_idx in range(len(args.edge_types_list))]
                    loss_kl = sum(loss_kl_split)
                loss_kl_var_split = [kl_categorical_uniform_var(prob_split[type_idx], args.num_atoms,
                                                                args.edge_types_list[type_idx])
                                     for type_idx in range(len(args.edge_types_list))]
                if args.no_edge_acc:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = 0, np.array([0]), np.zeros(len(args.edge_types_list)), 0, np.zeros(len(args.edge_types_list))
                else:
                    acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_fNRI(logits_split, relations,
                                                                                                  args.edge_types_list, args.skip_first)
                KLb_blocks = KL_between_blocks(prob_split, args.num_atoms)
                KLb_val.append(sum(KLb_blocks).data.item())
                KLb_blocks_val.append([KL.data.item() for KL in KLb_blocks])

            if args.fixed_var:
                target = data_decoder[:, :, 1:, :]  # dimensions are [batch, particle, time, state]
                # one prediction step
                output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 1)
                if args.plot:
                    import matplotlib.pyplot as plt
                    output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 49)
                    if args.NRI:
                        acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
                                                                                         args.edge_types_list)
                    else:
                        acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
                                                                                          args.edge_types_list)
                    from trajectory_plot import draw_lines
                    for i in range(args.batch_size):
                        fig = plt.figure(figsize=(7, 7))
                        ax = fig.add_axes([0, 0, 1, 1])
                        xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
                        xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i, linestyle='-')
                        ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
                        ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
                        ax.set_xticks([])
                        ax.set_yticks([])
                        block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
                        # block_names = [ 'springs', 'charges' ]
                        acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
                                    for j in range(acc_blocks_batch.shape[1])]
                        acc_text = ', '.join(acc_text)
                        plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
                        # plt.savefig(os.path.join(args.load_folder,str(i)+'_pred_and_true.png'), dpi=300)
                        plt.show()
                loss_nll = nll_gaussian(output, target, args.var)
                loss_nll_var = nll_gaussian_var(output, target, args.var)
                # all prediction steps needed
                output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, args.prediction_steps)
                loss_nll_M = nll_gaussian(output_M, target, args.var)
                loss_nll_M_var = nll_gaussian_var(output_M, target, args.var)
                perm_val.append(perm)
                acc_val.append(acc_perm)
                acc_blocks_val.append(acc_blocks)
                acc_var_val.append(acc_var)
                acc_var_blocks_val.append(acc_var_blocks)
                mse_val.append(F.mse_loss(output_M, target).data.item())
                nll_val.append(loss_nll.data.item())
                nll_var_val.append(loss_nll_var.data.item())
                kl_val.append(loss_kl.data.item())
                kl_list_val.append([kl_loss.data.item() for kl_loss in loss_kl_split])
                kl_var_list_val.append([kl_var.data.item() for kl_var in loss_kl_var_split])
                nll_M_val.append(loss_nll_M.data.item())
                nll_M_var_val.append(loss_nll_M_var.data.item())
            else:
                if not (args.anisotropic):
                    target = data_decoder[:, :, 1:, :]  # dimensions are [batch, particle, time, state]
                    output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 1)
                    if args.plot:
                        import matplotlib.pyplot as plt
                        output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 49)
                        logsigma_plot = tile(logsigma_plot, 3, list(output_plot.size())[3])
                        if args.NRI:
                            acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
                                                                                             args.edge_types_list)
                        else:
                            acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
                                                                                              args.edge_types_list)
                        sigma_plot = torch.exp(logsigma_plot / 2)
                        from trajectory_plot import draw_lines_sigma
                        from matplotlib.patches import Ellipse, Rectangle
                        for i in range(args.batch_size):
                            fig = plt.figure(figsize=(7, 7))
                            ax = fig.add_subplot(111)
                            ax.xaxis.set_visible(True)
                            ax.yaxis.set_visible(True)
                            xmin_t, ymin_t, xmax_t, ymax_t = -1, -1, 1, 1
                            xmin_o, ymin_o, xmax_o, ymax_o = -0.5, -0.5, 0.5, 0.5
                            xmin_t, ymin_t, xmax_t, ymax_t = draw_lines_sigma(target, i, sigma_plot.detach().cpu().numpy(), ax, linestyle=':', alpha=0.6)
                            xmin_o, ymin_o, xmax_o, ymax_o = draw_lines_sigma(output_plot.detach().cpu().numpy(), i, sigma_plot.detach().cpu().numpy(), ax, linestyle='-', plot_ellipses=True)
                            rect = Rectangle((-1, -1), 2, 2, edgecolor='r', facecolor='none')
                            ax.add_patch(rect)
                            ax.set_xlim([-1, 1])
                            ax.set_ylim([-1, 1])
                            block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
                            # block_names = [ 'springs', 'charges' ]
                            acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
                                        for j in range(acc_blocks_batch.shape[1])]
                            acc_text = ', '.join(acc_text)
                            plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
                            plt.savefig(os.path.join(args.load_folder, str(i) + '_pred_and_true.png'), dpi=300)
                            ax.xaxis.set_visible(True)
                            ax.yaxis.set_visible(True)
                            plt.xlabel('x')
                            plt.ylabel('y')
                            plt.show()
                        # for z score: guard against division by zero.
                        # FIX: clamp sigma_plot (the divisor), not output_plot.
                        if (torch.min(sigma_plot) < pow(10, -7)):
                            accuracy = np.full((sigma_plot.size(0), sigma_plot.size(1), sigma_plot.size(2), sigma_plot.size(3)), pow(10, -7), dtype=np.float32)
                            accuracy = torch.from_numpy(accuracy)
                            if args.cuda:
                                accuracy = accuracy.cuda()
                            sigma_plot = torch.max(sigma_plot, accuracy)
                        zscore = (output_plot - target) / sigma_plot
                        zscorelist.append(zscore)
                    # isotropic: broadcast sigma to the same shape as output for the gaussian loss
                    logsigmaone = tile(logsigmaone, 3, list(output.size())[3])
                    loss_nll, loss_1, loss_2 = nll_gaussian_variablesigma(output, target, logsigmaone)
                    loss_nll_var = nll_gaussian_var__variablesigma(output, target, logsigmaone)
                    output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, args.prediction_steps)
                    loss_nll_M, loss_1_M, loss_2_M = nll_gaussian_variablesigma(output_M, target, sigma_M)
                    loss_nll_M_var = nll_gaussian_var__variablesigma(output_M, target, sigma_M)
                    logsigma = logsigmaone
                    perm_val.append(perm)
                    acc_val.append(acc_perm)
                    acc_blocks_val.append(acc_blocks)
                    acc_var_val.append(acc_var)
                    acc_var_blocks_val.append(acc_var_blocks)
                    mse_val.append(F.mse_loss(output_M, target).data.item())
                    nll_val.append(loss_nll.data.item())
                    nll_var_val.append(loss_nll_var.data.item())
                    kl_val.append(loss_kl.data.item())
                    kl_list_val.append([kl_loss.data.item() for kl_loss in loss_kl_split])
                    kl_var_list_val.append([kl_var.data.item() for kl_var in loss_kl_var_split])
                    nll_M_val.append(loss_nll_M.data.item())
                    nll_M_var_val.append(loss_nll_M_var.data.item())
                else:
                    target = data_decoder[:, :, 1:, :]  # dimensions are [batch, particle, time, state]
                    output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 1)
                    if args.plot:
                        import matplotlib.pyplot as plt
                        output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 49)
                        if args.NRI:
                            acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
                                                                                             args.edge_types_list)
                        else:
                            acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
                                                                                              args.edge_types_list)
                        sigma_plot = torch.exp(logsigma_plot / 2)
                        from trajectory_plot import draw_lines
                        from matplotlib.patches import Ellipse
                        for i in range(args.batch_size):
                            fig = plt.figure(figsize=(7, 7))
                            ax = fig.add_axes([0, 0, 1, 1])
                            xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
                            xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i, linestyle='-')
                            indices_1 = torch.LongTensor([0, 1])
                            indices_2 = torch.LongTensor([2, 3])
                            indices_3 = torch.LongTensor([0])
                            if args.cuda:
                                indices_1, indices_2, indices_3 = indices_1.cuda(), indices_2.cuda(), indices_3.cuda()
                            positions = torch.index_select(output_plot, 3, indices_1)
                            ellipses = []
                            # plots the uncertainty ellipses for the gaussian case;
                            # ellipse orientation follows the velocity direction.
                            velocities = torch.index_select(output_plot, 3, indices_2)
                            velnorm = velocities.norm(p=2, dim=3, keepdim=True)
                            normalisedvel = velocities.div(velnorm.expand_as(velocities))
                            # v||.x is just the first term of the tensor
                            normalisedvelx = torch.index_select(normalisedvel, 3, indices_3)
                            # rotation angle Theta = acos(v||.x), converted to degrees
                            angle = torch.acos(normalisedvelx).squeeze() * 180 / 3.14159
                            for j in range(positions.size()[1]):
                                # first timestep component of (x, y) and angle
                                ellipses.append(
                                    Ellipse((positions.tolist()[i][j][0][0], positions.tolist()[i][j][0][1]),
                                            width=sigma_plot.tolist()[i][j][0][0],
                                            height=sigma_plot.tolist()[i][j][0][1], angle=angle.tolist()[i][j][0]))
                                # only draw an ellipse once the particle has moved
                                # clear of the previous one (|dr| > 2|dsigma|)
                                for k in range(positions.size()[2] - 1):
                                    deltar = (torch.from_numpy(positions.cpu().numpy()[i][j][k + 1]) - torch.from_numpy(
                                        positions.cpu().numpy()[i][j][k])).norm(p=2, dim=0, keepdim=True)
                                    deltasigma = (torch.from_numpy(sigma_plot.cpu().numpy()[i][j][k + 1])).norm(p=2, dim=0,
                                                                                                               keepdim=True)
                                    if (deltar.item() > 2 * deltasigma.item()):
                                        ellipses.append(
                                            Ellipse((positions.tolist()[i][j][k + 1][0], positions[i][j][k + 1][1]),
                                                    width=sigma_plot.tolist()[i][j][k + 1][0],
                                                    height=sigma_plot.tolist()[i][j][k + 1][1], angle=angle.tolist()[i][j][k + 1]))
                            fig1, ax1 = plt.subplots(subplot_kw={'aspect': 'equal'})
                            for e in ellipses:
                                ax1.add_artist(e)
                                e.set_clip_box(ax1.bbox)
                                e.set_alpha(0.6)
                            ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
                            ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
                            ax.set_xticks([])
                            ax.set_yticks([])
                            block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
                            # block_names = [ 'springs', 'charges' ]
                            acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
                                        for j in range(acc_blocks_batch.shape[1])]
                            acc_text = ', '.join(acc_text)
                            plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
                            # plt.savefig(os.path.join(args.load_folder,str(i)+'_pred_and_true.png'), dpi=300)
                            plt.show()
                        # for z score: guard against division by zero.
                        # FIX: clamp sigma_plot (the divisor), not output_plot.
                        if (torch.min(sigma_plot) < pow(10, -7)):
                            accuracy = np.full((sigma_plot.size(0), sigma_plot.size(1), sigma_plot.size(2), sigma_plot.size(3)), pow(10, -7), dtype=np.float32)
                            accuracy = torch.from_numpy(accuracy)
                            if args.cuda:
                                accuracy = accuracy.cuda()
                            sigma_plot = torch.max(sigma_plot, accuracy)
                        zscore = (output_plot - target) / sigma_plot
                        zscorelist.append(zscore)
                    loss_nll, loss_1, loss_2 = nll_gaussian_multivariatesigma_efficient(output, target, logsigmaone, accelone)
                    loss_nll_var = nll_gaussian_var_multivariatesigma_efficient(output, target, logsigmaone, accelone)
                    output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, args.prediction_steps)
                    loss_nll_M, loss_1_M, loss_2_M = nll_gaussian_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
                    loss_nll_M_var = nll_gaussian_var_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
                    logsigma = logsigmaone
                    perm_val.append(perm)
                    acc_val.append(acc_perm)
                    acc_blocks_val.append(acc_blocks)
                    acc_var_val.append(acc_var)
                    acc_var_blocks_val.append(acc_var_blocks)
                    mse_val.append(F.mse_loss(output_M, target).data.item())
                    nll_val.append(loss_nll.data.item())
                    nll_var_val.append(loss_nll_var.data.item())
                    kl_val.append(loss_kl.data.item())
                    kl_list_val.append([kl_loss.data.item() for kl_loss in loss_kl_split])
                    kl_var_list_val.append([kl_var.data.item() for kl_var in loss_kl_var_split])
                    nll_M_val.append(loss_nll_M.data.item())
                    nll_M_var_val.append(loss_nll_M_var.data.item())

    # z-score histogram: compare the empirical (prediction - target) / sigma
    # distribution with gaussian and lorentzian fits.
    if (args.plot):
        import matplotlib.pyplot as plt
        from scipy.optimize import curve_fit
        zscorelistint = np.empty(0)
        for i in range(len(zscorelist)):
            # FIX: move to CPU first so this also works when args.cuda is set
            zscorelistint = np.append(zscorelistint, zscorelist[i].cpu().numpy())
        bins = np.arange(-4, 4.1, 0.1)
        # get histogram distribution
        histdata, bin_edges, patches = plt.hist(zscorelistint, bins, density=True)
        # take the histdata point to be at the centre of the bin_edges:
        # a good model should give mean = 0 and sigma = 1
        xcoords = np.empty(len(bin_edges) - 1)
        for i in range(len(bin_edges) - 1):
            xcoords[i] = (bin_edges[i] + bin_edges[i + 1]) / 2
        numberofpoints = len(xcoords)
        # mean is 1/N SUM(xy)
        mean_gaussian = np.sum(xcoords * histdata) / numberofpoints
        # var = 1/N SUM(y*(x-mean) ** 2)
        sigma = np.sqrt(np.sum(histdata * (xcoords - mean_gaussian) ** 2) / numberofpoints)
        optimised_params, pcov = curve_fit(gaussian, xcoords, histdata, p0=[1, mean_gaussian, sigma])
        plt.plot(xcoords, gaussian(xcoords, *optimised_params), label='fit')
        optimised_params_lor, pcov = curve_fit(lorentzian, xcoords, histdata, p0=[1, mean_gaussian, sigma])
        plt.plot(xcoords, lorentzian(xcoords, *optimised_params_lor), 'k')
        plt.xlabel("z-score")
        plt.ylabel("frequency")
        # FIX: removed stray plt.text(60, .025, ...) copied from a matplotlib
        # example; it was drawn outside xlim(-4, 4) and showed wrong values.
        plt.xlim(-4, 4)
        plt.show()
        print("Gaussian Fit with mean: " + str(optimised_params[1]) + " and std: " + str(optimised_params[2]))
        print("Lorentzian Fit with mean: " + str(optimised_params_lor[1]) + " and std: " + str(optimised_params_lor[2]))

    # Console summary for this epoch.
    print('Epoch: {:03d}'.format(epoch),
          'perm_val: ' + str(np.around(np.mean(np.array(perm_val), axis=0), 4)),
          'time: {:.1f}s'.format(time.time() - t))
    print('nll_trn: {:.2f}'.format(np.mean(nll_train)),
          'kl_trn: {:.5f}'.format(np.mean(kl_train)),
          'mse_trn: {:.10f}'.format(np.mean(mse_train)),
          'acc_trn: {:.5f}'.format(np.mean(acc_train)),
          'KLb_trn: {:.5f}'.format(np.mean(KLb_train))
          )
    print('acc_b_trn: ' + str(np.around(np.mean(np.array(acc_blocks_train), axis=0), 4)),
          'kl_trn: ' + str(np.around(np.mean(np.array(kl_list_train), axis=0), 4))
          )
    print('nll_val: {:.2f}'.format(np.mean(nll_M_val)),
          'kl_val: {:.5f}'.format(np.mean(kl_val)),
          'mse_val: {:.10f}'.format(np.mean(mse_val)),
          'acc_val: {:.5f}'.format(np.mean(acc_val)),
          'KLb_val: {:.5f}'.format(np.mean(KLb_val))
          )
    print('acc_b_val: ' + str(np.around(np.mean(np.array(acc_blocks_val), axis=0), 4)),
          'kl_val: ' + str(np.around(np.mean(np.array(kl_list_val), axis=0), 4))
          )
    # Same summary into the log file.
    print('Epoch: {:04d}'.format(epoch),
          'perm_val: ' + str(np.around(np.mean(np.array(perm_val), axis=0), 4)),
          'time: {:.4f}s'.format(time.time() - t),
          file=log)
    print('nll_trn: {:.5f}'.format(np.mean(nll_train)),
          'kl_trn: {:.5f}'.format(np.mean(kl_train)),
          'mse_trn: {:.10f}'.format(np.mean(mse_train)),
          'acc_trn: {:.5f}'.format(np.mean(acc_train)),
          'KLb_trn: {:.5f}'.format(np.mean(KLb_train)),
          'acc_b_trn: ' + str(np.around(np.mean(np.array(acc_blocks_train), axis=0), 4)),
          'kl_trn: ' + str(np.around(np.mean(np.array(kl_list_train), axis=0), 4)),
          file=log)
    print('nll_val: {:.5f}'.format(np.mean(nll_M_val)),
          'kl_val: {:.5f}'.format(np.mean(kl_val)),
          'mse_val: {:.10f}'.format(np.mean(mse_val)),
          'acc_val: {:.5f}'.format(np.mean(acc_val)),
          'KLb_val: {:.5f}'.format(np.mean(KLb_val)),
          'acc_b_val: ' + str(np.around(np.mean(np.array(acc_blocks_val), axis=0), 4)),
          'kl_val: ' + str(np.around(np.mean(np.array(kl_list_val), axis=0), 4)),
          file=log)

    # Write CSV headers once, on the first epoch.
    if epoch == 0:
        labels = ['epoch', 'nll trn', 'kl trn', 'mse train', 'KLb trn', 'acc trn']
        labels += ['b' + str(i) + ' acc trn' for i in range(len(args.edge_types_list))] + ['nll var trn']
        labels += ['b' + str(i) + ' kl trn' for i in range(len(kl_list_train[0]))]
        labels += ['b' + str(i) + ' kl var trn' for i in range(len(kl_list_train[0]))]
        labels += ['acc var trn'] + ['b' + str(i) + ' acc var trn' for i in range(len(args.edge_types_list))]
        labels += ['nll val', 'nll_M_val', 'kl val', 'mse val', 'KLb val', 'acc val']
        labels += ['b' + str(i) + ' acc val' for i in range(len(args.edge_types_list))]
        labels += ['nll var val', 'nll_M var val']
        labels += ['b' + str(i) + ' kl val' for i in range(len(kl_list_val[0]))]
        labels += ['b' + str(i) + ' kl var val' for i in range(len(kl_list_val[0]))]
        labels += ['acc var val'] + ['b' + str(i) + ' acc var val' for i in range(len(args.edge_types_list))]
        csv_writer.writerow(labels)
        labels = ['trn ' + str(i) for i in range(len(perm_train[0]))]
        labels += ['val ' + str(i) for i in range(len(perm_val[0]))]
        perm_writer.writerow(labels)

    csv_writer.writerow([epoch, np.mean(nll_train), np.mean(kl_train),
                         np.mean(mse_train), np.mean(KLb_train), np.mean(acc_train)] +
                        list(np.mean(np.array(acc_blocks_train), axis=0)) +
                        [np.mean(nll_var_train)] +
                        list(np.mean(np.array(kl_list_train), axis=0)) +
                        list(np.mean(np.array(kl_var_list_train), axis=0)) +
                        # list(np.mean(np.array(KLb_blocks_train),axis=0)) +
                        [np.mean(acc_var_train)] + list(np.mean(np.array(acc_var_blocks_train), axis=0)) +
                        [np.mean(nll_val), np.mean(nll_M_val), np.mean(kl_val), np.mean(mse_val),
                         np.mean(KLb_val), np.mean(acc_val)] +
                        list(np.mean(np.array(acc_blocks_val), axis=0)) +
                        [np.mean(nll_var_val), np.mean(nll_M_var_val)] +
                        list(np.mean(np.array(kl_list_val), axis=0)) +
                        list(np.mean(np.array(kl_var_list_val), axis=0)) +
                        # list(np.mean(np.array(KLb_blocks_val),axis=0))
                        [np.mean(acc_var_val)] + list(np.mean(np.array(acc_var_blocks_val), axis=0))
                        )
    perm_writer.writerow(list(np.mean(np.array(perm_train), axis=0)) +
                         list(np.mean(np.array(perm_val), axis=0))
                         )
    log.flush()

    # Checkpoint when the multi-step validation NLL improves.
    if args.save_folder and np.mean(nll_M_val) < best_val_loss:
        torch.save(encoder.state_dict(), encoder_file)
        torch.save(decoder.state_dict(), decoder_file)
        print('Best model so far, saving...')
    return np.mean(nll_M_val)
def test():
t = time.time()
nll_test = []
nll_var_test = []
mse_1_test = []
mse_10_test = []
mse_20_test = []
kl_test = []
kl_list_test = []
kl_var_list_test = []
acc_test = []
acc_var_test = []
acc_blocks_test = []
acc_var_blocks_test = []
perm_test = []
KLb_test = []
KLb_blocks_test = [] # KL between blocks list
nll_M_test = []
nll_M_var_test = []
encoder.eval()
decoder.eval()
if not args.cuda:
encoder.load_state_dict(torch.load(encoder_file, map_location='cpu'))
decoder.load_state_dict(torch.load(decoder_file, map_location='cpu'))
else:
encoder.load_state_dict(torch.load(encoder_file))
decoder.load_state_dict(torch.load(decoder_file))
for batch_idx, (data, relations) in enumerate(test_loader):
with torch.no_grad():
if args.cuda:
data, relations = data.cuda(), relations.cuda()
assert (data.size(2) - args.timesteps) >= args.timesteps
data_encoder = data[:, :, :args.timesteps, :].contiguous()
data_decoder = data[:, :, -args.timesteps:, :].contiguous()
# stores the values of the uncertainty (log(sigma^2)). This will be an array of size [batchsize, no. of particles, time,no. of axes (isotropic = 1, anisotropic = 2)]
# initialise sigma to an array of large negative numbers which become small positive numbers when passted through softplus function.
logsigma = initlogsigma(len(data_decoder), len(data_decoder[0][0]), args.anisotropic, args.num_atoms, inversesoftplus(pow(args.var, 1/2), args.temp_softplus))
if args.cuda:
logsigma = logsigma.cuda()
# dim of logits, edges and prob are [batchsize, N^2-N, sum(edge_types_list)] where N = no. of particles
logits = encoder(data_encoder, rel_rec, rel_send)
if args.NRI:
edges = gumbel_softmax(logits, tau=args.temp, hard=args.hard)
prob = my_softmax(logits, -1)
loss_kl = kl_categorical_uniform(prob, args.num_atoms, edge_types)
loss_kl_split = [loss_kl]
loss_kl_var_split = [kl_categorical_uniform_var(prob, args.num_atoms, edge_types)]
KLb_test.append(0)
KLb_blocks_test.append([0])
acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_NRI(logits, relations, args.edge_types_list)
else:
logits_split = torch.split(logits, args.edge_types_list, dim=-1)
edges_split = tuple([gumbel_softmax(logits_i, tau=args.temp, hard=args.hard) for logits_i in logits_split])
edges = torch.cat(edges_split, dim=-1)
prob_split = [my_softmax(logits_i, -1) for logits_i in logits_split]
if args.prior:
loss_kl_split = [kl_categorical(prob_split[type_idx], log_prior[type_idx],
args.num_atoms) for type_idx in range(len(args.edge_types_list))]
loss_kl = sum(loss_kl_split)
else:
loss_kl_split = [kl_categorical_uniform(prob_split[type_idx], args.num_atoms,
args.edge_types_list[type_idx])
for type_idx in range(len(args.edge_types_list))]
loss_kl = sum(loss_kl_split)
loss_kl_var_split = [kl_categorical_uniform_var(prob_split[type_idx], args.num_atoms,
args.edge_types_list[type_idx])
for type_idx in range(len(args.edge_types_list))]
acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_fNRI(logits_split, relations,
args.edge_types_list, args.skip_first)
KLb_blocks = KL_between_blocks(prob_split, args.num_atoms)
KLb_test.append(sum(KLb_blocks).data.item())
KLb_blocks_test.append([KL.data.item() for KL in KLb_blocks])
if args.fixed_var:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 49)
from trajectory_plot import draw_lines
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
sigma_plot = torch.exp(logsigma_plot/2)
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i, linestyle='-')
ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
ax.set_xticks([])
ax.set_yticks([])
block_names = [str(j) for j in range(len(args.edge_types_list))]
acc_text = ['layer ' + block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
for j in range(acc_blocks_batch.shape[1])]
acc_text = ', '.join(acc_text)
plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
plt.show()
loss_nll = nll_gaussian(output, target, args.var) # compute the reconstruction loss. nll_gaussian is from utils.py
loss_nll_var = nll_gaussian_var(output, target, args.var)
output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, args.prediction_steps)
loss_nll_M = nll_gaussian(output_M, target, args.var)
loss_nll_M_var = nll_gaussian_var(output_M, target, args.var)
perm_test.append(perm)
acc_test.append(acc_perm)
acc_blocks_test.append(acc_blocks)
acc_var_test.append(acc_var)
acc_var_blocks_test.append(acc_var_blocks)
output_10, sigma_10, accel_10 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 10)
output_20, sigma_20, accel_20 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 20)
mse_1_test.append(F.mse_loss(output, target).data.item())
mse_10_test.append(F.mse_loss(output_10, target).data.item())
mse_20_test.append(F.mse_loss(output_20, target).data.item())
nll_test.append(loss_nll.data.item())
kl_test.append(loss_kl.data.item())
kl_list_test.append([kl_loss.data.item() for kl_loss in loss_kl_split])
nll_var_test.append(loss_nll_var.data.item())
kl_var_list_test.append([kl_var.data.item() for kl_var in loss_kl_var_split])
nll_M_test.append(loss_nll_M.data.item())
nll_M_var_test.append(loss_nll_M_var.data.item())
else:
if args.anisotropic:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 49)
output_plot_en, sigma_plot_en, accel_plot_en = decoder(data_encoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 49)
from trajectory_plot import draw_lines
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
sigma_plot = torch.exp(logsigma_plot / 2)
from trajectory_plot import draw_lines
from matplotlib.patches import Ellipse
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i,
linestyle='-')
indices_1 = torch.LongTensor([0, 1])
indices_2 = torch.LongTensor([2, 3])
indices_3 = torch.LongTensor([0])
if args.cuda:
indices_1, indices_2, indices_3 = indices_1.cuda(), indices_2.cuda(), indices_3.cuda()
positions = torch.index_select(output_plot, 3, indices_1)
ellipses = []
# plots the uncertainty ellipses for gaussian case.
# iterate through each of the atoms
# need to get the angles of the terms to be plotted:
velocities = torch.index_select(output_plot, 3, indices_2)
velnorm = velocities.norm(p=2, dim=3, keepdim=True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
# v||.x is just the first term of the tensor
normalisedvelx = torch.index_select(normalisedvel, 3, indices_3)
# angle of rotation is Theta = acos(v||.x) for normalised v|| and x (need angle in degrees not radians)
angle = torch.acos(normalisedvelx).squeeze() * 180 / 3.14159
for j in range(positions.size()[1]):
# get the first timestep component of (x,y) and angles
ellipses.append(
Ellipse((positions.tolist()[i][j][0][0], positions.tolist()[i][j][0][1]),
width=sigma_plot.tolist()[i][j][0][0],
height=sigma_plot.tolist()[i][j][0][1], angle=angle.tolist()[i][j][0]))
# if Deltax^2+Deltay^2>4*(DeltaSigmax^2+DeltaSigma^2) then plot, else do not plot
for k in range(positions.size()[2] - 1):
deltar = (torch.from_numpy(positions.cpu().numpy()[i][j][k + 1]) - torch.from_numpy(
positions.cpu().numpy()[i][j][k])).norm(p=2, dim=0, keepdim=True)
deltasigma = (torch.from_numpy(sigma_plot.cpu().numpy()[i][j][k + 1])).norm(p=2,
dim=0,
keepdim=True)
if (deltar.item() > 2 * deltasigma.item()):
ellipses.append(
Ellipse((positions.tolist()[i][j][k + 1][0], positions[i][j][k + 1][1]),
width=sigma_plot.tolist()[i][j][k + 1][0],
height=sigma_plot.tolist()[i][j][k + 1][1],
angle=angle.tolist()[i][j][k + 1]))
fig1, ax1 = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ellipses:
ax1.add_artist(e)
e.set_clip_box(ax1.bbox)
e.set_alpha(0.6)
ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
ax.set_xticks([])
ax.set_yticks([])
block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
# block_names = [ 'springs', 'charges' ]
acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
for j in range(acc_blocks_batch.shape[1])]
acc_text = ', '.join(acc_text)
plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
# plt.savefig(os.path.join(args.load_folder,str(i)+'_pred_and_true.png'), dpi=300)
plt.show()
loss_nll, loss_1, loss_2 = nll_gaussian_multivariatesigma_efficient(output, target, logsigmaone, accelone) # compute the reconstruction loss. nll_gaussian is from utils.py
loss_nll_var = nll_gaussian_var_multivariatesigma_efficient(output, target, logsigmaone, accelone)
output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, args.prediction_steps)
loss_nll_M, loss_1_M, loss_2_M = nll_gaussian_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
loss_nll_M_var = nll_gaussian_var_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
perm_test.append(perm)
acc_test.append(acc_perm)
acc_blocks_test.append(acc_blocks)
acc_var_test.append(acc_var)
acc_var_blocks_test.append(acc_var_blocks)
output_10, sigma_10, accel_10 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 10)
output_20, sigma_20, accel_20 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 20)
mse_1_test.append(F.mse_loss(output, target).data.item())
mse_10_test.append(F.mse_loss(output_10, target).data.item())
mse_20_test.append(F.mse_loss(output_20, target).data.item())
nll_test.append(loss_nll.data.item())
kl_test.append(loss_kl.data.item())
kl_list_test.append([kl_loss.data.item() for kl_loss in loss_kl_split])
nll_var_test.append(loss_nll_var.data.item())
kl_var_list_test.append([kl_var.data.item() for kl_var in loss_kl_var_split])
nll_M_test.append(loss_nll_M.data.item())
nll_M_var_test.append(loss_nll_M_var.data.item())
logsigma = logsigmaone
else:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 49)
output_plot_en, sigma_plot_en, accel_plot_en = decoder(data_encoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 49)
from trajectory_plot import draw_lines
sigma_plot = torch.exp(logsigma_plot / 2)
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
from trajectory_plot import draw_lines
from matplotlib.patches import Ellipse
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i,
linestyle='-')
# isotropic therefore the ellipses become circles
indices = torch.LongTensor([0, 1])
if args.cuda:
indices = indices.cuda()
positions = torch.index_select(output_plot, 3, indices)
ellipses = []
# iterate through each of the atoms
for j in range(positions.size()[1]):
# get the first timestep component of (x,y)
ellipses.append(
Ellipse((positions.tolist()[i][j][0][0], positions.tolist()[i][j][0][1]),
width=sigma_plot.tolist()[i][j][0][0],
height=sigma_plot.tolist()[i][j][0][0], angle=0.0))
# if Deltax^2+Deltay^2>4*(DeltaSigmax^2+DeltaSigma^2) then plot, else do not plot
for k in range(positions.size()[2] - 1):
deltar = (torch.from_numpy(positions.cpu().numpy()[i][j][k + 1]) - torch.from_numpy(
positions.cpu().numpy()[i][j][k])).norm(p=2, dim=0, keepdim=True)
deltasigma = (torch.from_numpy(sigma_plot.cpu().numpy()[i][j][k + 1])).norm(p=2,
dim=0,
keepdim=True)
if (deltar.item() > 2 * deltasigma.item()):
ellipses.append(
Ellipse((positions.tolist()[i][j][k + 1][0], positions[i][j][k + 1][1]),
width=sigma_plot.tolist()[i][j][k + 1][0],
height=sigma_plot.tolist()[i][j][k + 1][0], angle=0.0))
fig1, ax1 = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ellipses:
ax1.add_artist(e)
e.set_clip_box(ax1.bbox)
e.set_alpha(0.6)
ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
ax.set_xticks([])
ax.set_yticks([])
block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
# block_names = [ 'springs', 'charges' ]
acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
for j in range(acc_blocks_batch.shape[1])]
acc_text = ', '.join(acc_text)
plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
# plt.savefig(os.path.join(args.load_folder,str(i)+'_pred_and_true.png'), dpi=300)
plt.show()
# in case of isotropic we need to recast sigma to the same shape as output as it is required in the gaussian function
logsigmaone = tile(logsigmaone, 3, list(output.size())[3])
loss_nll, loss_1, loss_2 = nll_gaussian_variablesigma(output, target, logsigmaone) # compute the reconstruction loss. nll_gaussian is from utils.py
loss_nll_var = nll_gaussian_var__variablesigma(output, target, logsigmaone)
output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, args.prediction_steps)
loss_nll_M, loss_1_M, loss_2_M = nll_gaussian_variablesigma(output_M, target, sigma_M)
loss_nll_M_var = nll_gaussian_var__variablesigma(output_M, target, sigma_M)
perm_test.append(perm)
acc_test.append(acc_perm)
acc_blocks_test.append(acc_blocks)
acc_var_test.append(acc_var)
acc_var_blocks_test.append(acc_var_blocks)
output_10, sigma_10, accel_10 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 10)
output_20, sigma_20, accel_20 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 20)
mse_1_test.append(F.mse_loss(output, target).data.item())
mse_10_test.append(F.mse_loss(output_10, target).data.item())
mse_20_test.append(F.mse_loss(output_20, target).data.item())
nll_test.append(loss_nll.data.item())
kl_test.append(loss_kl.data.item())
kl_list_test.append([kl_loss.data.item() for kl_loss in loss_kl_split])
nll_var_test.append(loss_nll_var.data.item())
kl_var_list_test.append([kl_var.data.item() for kl_var in loss_kl_var_split])
nll_M_test.append(loss_nll_M.data.item())
nll_M_var_test.append(loss_nll_M_var.data.item())
logsigma = logsigmaone
print('--------------------------------')
print('------------Testing-------------')
print('--------------------------------')
print('nll_test: {:.2f}'.format(np.mean(nll_test)),
'nll_M_test: {:.2f}'.format(np.mean(nll_M_test)),
'kl_test: {:.5f}'.format(np.mean(kl_test)),
'mse_1_test: {:.10f}'.format(np.mean(mse_1_test)),
'mse_10_test: {:.10f}'.format(np.mean(mse_10_test)),
'mse_20_test: {:.10f}'.format(np.mean(mse_20_test)),
'acc_test: {:.5f}'.format(np.mean(acc_test)),
'acc_var_test: {:.5f}'.format(np.mean(acc_var_test)),
'KLb_test: {:.5f}'.format(np.mean(KLb_test)),
'time: {:.1f}s'.format(time.time() - t))
print('acc_b_test: ' + str(np.around(np.mean(np.array(acc_blocks_test), axis=0), 4)),
'acc_var_b: ' + str(np.around(np.mean(np.array(acc_var_blocks_test), axis=0), 4)),
'kl_test: ' + str(np.around(np.mean(np.array(kl_list_test), axis=0), 4))
)
if args.save_folder:
print('--------------------------------', file=log)
print('------------Testing-------------', file=log)
print('--------------------------------', file=log)
print('nll_test: {:.2f}'.format(np.mean(nll_test)),
'nll_M_test: {:.2f}'.format(np.mean(nll_M_test)),
'kl_test: {:.5f}'.format(np.mean(kl_test)),
'mse_1_test: {:.10f}'.format(np.mean(mse_1_test)),
'mse_10_test: {:.10f}'.format(np.mean(mse_10_test)),
'mse_20_test: {:.10f}'.format(np.mean(mse_20_test)),
'acc_test: {:.5f}'.format(np.mean(acc_test)),
'acc_var_test: {:.5f}'.format(np.mean(acc_var_test)),
'KLb_test: {:.5f}'.format(np.mean(KLb_test)),
'time: {:.1f}s'.format(time.time() - t),
file=log)
print('acc_b_test: ' + str(np.around(np.mean(np.array(acc_blocks_test), axis=0), 4)),
'acc_var_b_test: ' + str(np.around(np.mean(np.array(acc_var_blocks_test), axis=0), 4)),
'kl_test: ' + str(np.around(np.mean(np.array(kl_list_test), axis=0), 4)),
file=log)
log.flush()
# Train model
if not args.test:
    # Full training run: track the best validation loss seen so far and stop
    # early once validation has not improved for more than `args.patience`
    # epochs (early stopping only kicks in after epoch 99, guaranteeing a
    # minimum amount of training).
    t_total = time.time()  # wall-clock start of the whole optimisation
    best_val_loss = np.inf
    best_epoch = 0
    for epoch in range(args.epochs):
        # train() runs one epoch and returns the validation loss; the current
        # best is passed in — presumably so train() can decide whether to
        # checkpoint the model (TODO confirm against train()'s definition).
        val_loss = train(epoch, best_val_loss)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch
        if epoch - best_epoch > args.patience and epoch > 99:
            break  # early stop: no improvement for more than `patience` epochs
    print("Optimization Finished!")
    print("Best Epoch: {:04d}".format(best_epoch))
    if args.save_folder:
        # Mirror the result into the run's log file as well as stdout.
        print("Best Epoch: {:04d}".format(best_epoch), file=log)
        log.flush()
# Evaluate on the test set (runs whether training happened above or not).
test()
if log is not None:
    # Tear down the run artefacts opened at start-up.
    print(save_folder)
    log.close()
    log_csv.close()
    perm_csv.close()
"numpy.prod",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"torch.LongTensor",
"numpy.log",
"torch.max",
"torch.exp",
"torch.min",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"trajectory_plot.draw_lines",... | [((455, 480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (478, 480), False, 'import argparse\n'), ((8263, 8288), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (8277, 8288), True, 'import numpy as np\n'), ((8289, 8317), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8306, 8317), False, 'import torch\n'), ((10219, 10245), 'torch.FloatTensor', 'torch.FloatTensor', (['rel_rec'], {}), '(rel_rec)\n', (10236, 10245), False, 'import torch\n'), ((10257, 10284), 'torch.FloatTensor', 'torch.FloatTensor', (['rel_send'], {}), '(rel_send)\n', (10274, 10284), False, 'import torch\n'), ((12665, 12738), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.lr_decay', 'gamma': 'args.gamma'}), '(optimizer, step_size=args.lr_decay, gamma=args.gamma)\n', (12684, 12738), False, 'from torch.optim import lr_scheduler\n'), ((7058, 7083), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7081, 7083), False, 'import torch\n'), ((8336, 8369), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8358, 8369), False, 'import torch\n'), ((8484, 8507), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8505, 8507), False, 'import datetime\n'), ((8581, 8630), 'os.path.join', 'os.path.join', (['args.save_folder', "('exp' + timestamp)"], {}), "(args.save_folder, 'exp' + timestamp)\n", (8593, 8630), False, 'import os\n'), ((8635, 8659), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (8646, 8659), False, 'import os\n'), ((8676, 8717), 'os.path.join', 'os.path.join', (['save_folder', '"""metadata.pkl"""'], {}), "(save_folder, 'metadata.pkl')\n", (8688, 8717), False, 'import os\n'), ((8737, 8776), 'os.path.join', 'os.path.join', (['save_folder', '"""encoder.pt"""'], {}), "(save_folder, 'encoder.pt')\n", (8749, 8776), False, 'import 
os\n'), ((8796, 8835), 'os.path.join', 'os.path.join', (['save_folder', '"""decoder.pt"""'], {}), "(save_folder, 'decoder.pt')\n", (8808, 8835), False, 'import os\n'), ((8852, 8888), 'os.path.join', 'os.path.join', (['save_folder', '"""log.txt"""'], {}), "(save_folder, 'log.txt')\n", (8864, 8888), False, 'import os\n'), ((8908, 8948), 'os.path.join', 'os.path.join', (['save_folder', '"""log_csv.csv"""'], {}), "(save_folder, 'log_csv.csv')\n", (8920, 8948), False, 'import os\n'), ((9034, 9068), 'csv.writer', 'csv.writer', (['log_csv'], {'delimiter': '""","""'}), "(log_csv, delimiter=',')\n", (9044, 9068), False, 'import csv\n'), ((9281, 9322), 'os.path.join', 'os.path.join', (['save_folder', '"""perm_csv.csv"""'], {}), "(save_folder, 'perm_csv.csv')\n", (9293, 9322), False, 'import os\n'), ((9381, 9416), 'csv.writer', 'csv.writer', (['perm_csv'], {'delimiter': '""","""'}), "(perm_csv, delimiter=',')\n", (9391, 9416), False, 'import csv\n'), ((9991, 10032), 'numpy.ones', 'np.ones', (['[args.num_atoms, args.num_atoms]'], {}), '([args.num_atoms, args.num_atoms])\n', (9998, 10032), True, 'import numpy as np\n'), ((10035, 10057), 'numpy.eye', 'np.eye', (['args.num_atoms'], {}), '(args.num_atoms)\n', (10041, 10057), True, 'import numpy as np\n'), ((12094, 12138), 'os.path.join', 'os.path.join', (['args.load_folder', '"""encoder.pt"""'], {}), "(args.load_folder, 'encoder.pt')\n", (12106, 12138), False, 'import os\n'), ((12158, 12202), 'os.path.join', 'os.path.join', (['args.load_folder', '"""decoder.pt"""'], {}), "(args.load_folder, 'decoder.pt')\n", (12170, 12202), False, 'import os\n'), ((12987, 12998), 'time.time', 'time.time', ([], {}), '()\n', (12996, 12998), False, 'import time\n'), ((55563, 55581), 'numpy.mean', 'np.mean', (['nll_M_val'], {}), '(nll_M_val)\n', (55570, 55581), True, 'import numpy as np\n'), ((55604, 55615), 'time.time', 'time.time', ([], {}), '()\n', (55613, 55615), False, 'import time\n'), ((83028, 83039), 'time.time', 'time.time', ([], {}), '()\n', 
(83037, 83039), False, 'import time\n'), ((7294, 7323), 'numpy.prod', 'np.prod', (['args.edge_types_list'], {}), '(args.edge_types_list)\n', (7301, 7323), True, 'import numpy as np\n'), ((7943, 7961), 'numpy.array', 'np.array', (['prior[i]'], {}), '(prior[i])\n', (7951, 7961), True, 'import numpy as np\n'), ((8039, 8070), 'torch.unsqueeze', 'torch.unsqueeze', (['log_prior_i', '(0)'], {}), '(log_prior_i, 0)\n', (8054, 8070), False, 'import torch\n'), ((8093, 8124), 'torch.unsqueeze', 'torch.unsqueeze', (['log_prior_i', '(0)'], {}), '(log_prior_i, 0)\n', (8108, 8124), False, 'import torch\n'), ((9145, 9182), 'os.path.join', 'os.path.join', (['save_folder', '"""args.txt"""'], {}), "(save_folder, 'args.txt')\n", (9157, 9182), False, 'import os\n'), ((48815, 48826), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (48823, 48826), True, 'import numpy as np\n'), ((48961, 48984), 'numpy.arange', 'np.arange', (['(-4)', '(4.1)', '(0.1)'], {}), '(-4, 4.1, 0.1)\n', (48970, 48984), True, 'import numpy as np\n'), ((49061, 49104), 'matplotlib.pyplot.hist', 'plt.hist', (['zscorelistint', 'bins'], {'density': '(True)'}), '(zscorelistint, bins, density=True)\n', (49069, 49104), True, 'import matplotlib.pyplot as plt\n'), ((49711, 49779), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 'xcoords', 'histdata'], {'p0': '[1, mean_gaussian, sigma]'}), '(gaussian, xcoords, histdata, p0=[1, mean_gaussian, sigma])\n', (49720, 49779), False, 'from scipy.optimize import curve_fit\n'), ((49899, 49969), 'scipy.optimize.curve_fit', 'curve_fit', (['lorentzian', 'xcoords', 'histdata'], {'p0': '[1, mean_gaussian, sigma]'}), '(lorentzian, xcoords, histdata, p0=[1, mean_gaussian, sigma])\n', (49908, 49969), False, 'from scipy.optimize import curve_fit\n'), ((50053, 50074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z-score"""'], {}), "('z-score')\n", (50063, 50074), True, 'import matplotlib.pyplot as plt\n'), ((50083, 50106), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""frequency"""'], {}), "('frequency')\n", (50093, 50106), True, 'import matplotlib.pyplot as plt\n'), ((50115, 50162), 'matplotlib.pyplot.text', 'plt.text', (['(60)', '(0.025)', '"""$\\\\mu=100,\\\\ \\\\sigma=15$"""'], {}), "(60, 0.025, '$\\\\mu=100,\\\\ \\\\sigma=15$')\n", (50123, 50162), True, 'import matplotlib.pyplot as plt\n'), ((50168, 50183), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-4)', '(4)'], {}), '(-4, 4)\n', (50176, 50183), True, 'import matplotlib.pyplot as plt\n'), ((50192, 50202), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50200, 50202), True, 'import matplotlib.pyplot as plt\n'), ((8002, 8015), 'numpy.log', 'np.log', (['prior'], {}), '(prior)\n', (8008, 8015), True, 'import numpy as np\n'), ((10091, 10109), 'numpy.where', 'np.where', (['off_diag'], {}), '(off_diag)\n', (10099, 10109), True, 'import numpy as np\n'), ((10167, 10185), 'numpy.where', 'np.where', (['off_diag'], {}), '(off_diag)\n', (10175, 10185), True, 'import numpy as np\n'), ((12257, 12301), 'torch.load', 'torch.load', (['encoder_file'], {'map_location': '"""cpu"""'}), "(encoder_file, map_location='cpu')\n", (12267, 12301), False, 'import torch\n'), ((12335, 12379), 'torch.load', 'torch.load', (['decoder_file'], {'map_location': '"""cpu"""'}), "(decoder_file, map_location='cpu')\n", (12345, 12379), False, 'import torch\n'), ((12423, 12447), 'torch.load', 'torch.load', (['encoder_file'], {}), '(encoder_file)\n', (12433, 12447), False, 'import torch\n'), ((12481, 12505), 'torch.load', 'torch.load', (['decoder_file'], {}), '(decoder_file)\n', (12491, 12505), False, 'import torch\n'), ((22883, 22898), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22896, 22898), False, 'import torch\n'), ((49500, 49526), 'numpy.sum', 'np.sum', (['(xcoords * histdata)'], {}), '(xcoords * histdata)\n', (49506, 49526), True, 'import numpy as np\n'), ((50644, 50662), 'numpy.mean', 'np.mean', (['nll_train'], {}), '(nll_train)\n', (50651, 50662), True, 'import numpy as np\n'), ((50699, 
50716), 'numpy.mean', 'np.mean', (['kl_train'], {}), '(kl_train)\n', (50706, 50716), True, 'import numpy as np\n'), ((50755, 50773), 'numpy.mean', 'np.mean', (['mse_train'], {}), '(mse_train)\n', (50762, 50773), True, 'import numpy as np\n'), ((50811, 50829), 'numpy.mean', 'np.mean', (['acc_train'], {}), '(acc_train)\n', (50818, 50829), True, 'import numpy as np\n'), ((50867, 50885), 'numpy.mean', 'np.mean', (['KLb_train'], {}), '(KLb_train)\n', (50874, 50885), True, 'import numpy as np\n'), ((51119, 51137), 'numpy.mean', 'np.mean', (['nll_M_val'], {}), '(nll_M_val)\n', (51126, 51137), True, 'import numpy as np\n'), ((51174, 51189), 'numpy.mean', 'np.mean', (['kl_val'], {}), '(kl_val)\n', (51181, 51189), True, 'import numpy as np\n'), ((51228, 51244), 'numpy.mean', 'np.mean', (['mse_val'], {}), '(mse_val)\n', (51235, 51244), True, 'import numpy as np\n'), ((51282, 51298), 'numpy.mean', 'np.mean', (['acc_val'], {}), '(acc_val)\n', (51289, 51298), True, 'import numpy as np\n'), ((51336, 51352), 'numpy.mean', 'np.mean', (['KLb_val'], {}), '(KLb_val)\n', (51343, 51352), True, 'import numpy as np\n'), ((51775, 51793), 'numpy.mean', 'np.mean', (['nll_train'], {}), '(nll_train)\n', (51782, 51793), True, 'import numpy as np\n'), ((51830, 51847), 'numpy.mean', 'np.mean', (['kl_train'], {}), '(kl_train)\n', (51837, 51847), True, 'import numpy as np\n'), ((51886, 51904), 'numpy.mean', 'np.mean', (['mse_train'], {}), '(mse_train)\n', (51893, 51904), True, 'import numpy as np\n'), ((51942, 51960), 'numpy.mean', 'np.mean', (['acc_train'], {}), '(acc_train)\n', (51949, 51960), True, 'import numpy as np\n'), ((51998, 52016), 'numpy.mean', 'np.mean', (['KLb_train'], {}), '(KLb_train)\n', (52005, 52016), True, 'import numpy as np\n'), ((52248, 52266), 'numpy.mean', 'np.mean', (['nll_M_val'], {}), '(nll_M_val)\n', (52255, 52266), True, 'import numpy as np\n'), ((52303, 52318), 'numpy.mean', 'np.mean', (['kl_val'], {}), '(kl_val)\n', (52310, 52318), True, 'import numpy as np\n'), 
((52357, 52373), 'numpy.mean', 'np.mean', (['mse_val'], {}), '(mse_val)\n', (52364, 52373), True, 'import numpy as np\n'), ((52411, 52427), 'numpy.mean', 'np.mean', (['acc_val'], {}), '(acc_val)\n', (52418, 52427), True, 'import numpy as np\n'), ((52465, 52481), 'numpy.mean', 'np.mean', (['KLb_val'], {}), '(KLb_val)\n', (52472, 52481), True, 'import numpy as np\n'), ((55360, 55378), 'numpy.mean', 'np.mean', (['nll_M_val'], {}), '(nll_M_val)\n', (55367, 55378), True, 'import numpy as np\n'), ((56107, 56151), 'torch.load', 'torch.load', (['encoder_file'], {'map_location': '"""cpu"""'}), "(encoder_file, map_location='cpu')\n", (56117, 56151), False, 'import torch\n'), ((56185, 56229), 'torch.load', 'torch.load', (['decoder_file'], {'map_location': '"""cpu"""'}), "(decoder_file, map_location='cpu')\n", (56195, 56229), False, 'import torch\n'), ((56273, 56297), 'torch.load', 'torch.load', (['encoder_file'], {}), '(encoder_file)\n', (56283, 56297), False, 'import torch\n'), ((56331, 56355), 'torch.load', 'torch.load', (['decoder_file'], {}), '(decoder_file)\n', (56341, 56355), False, 'import torch\n'), ((56435, 56450), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (56448, 56450), False, 'import torch\n'), ((80973, 80990), 'numpy.mean', 'np.mean', (['nll_test'], {}), '(nll_test)\n', (80980, 80990), True, 'import numpy as np\n'), ((81031, 81050), 'numpy.mean', 'np.mean', (['nll_M_test'], {}), '(nll_M_test)\n', (81038, 81050), True, 'import numpy as np\n'), ((81088, 81104), 'numpy.mean', 'np.mean', (['kl_test'], {}), '(kl_test)\n', (81095, 81104), True, 'import numpy as np\n'), ((81146, 81165), 'numpy.mean', 'np.mean', (['mse_1_test'], {}), '(mse_1_test)\n', (81153, 81165), True, 'import numpy as np\n'), ((81208, 81228), 'numpy.mean', 'np.mean', (['mse_10_test'], {}), '(mse_10_test)\n', (81215, 81228), True, 'import numpy as np\n'), ((81271, 81291), 'numpy.mean', 'np.mean', (['mse_20_test'], {}), '(mse_20_test)\n', (81278, 81291), True, 'import numpy as np\n'), 
((81330, 81347), 'numpy.mean', 'np.mean', (['acc_test'], {}), '(acc_test)\n', (81337, 81347), True, 'import numpy as np\n'), ((81390, 81411), 'numpy.mean', 'np.mean', (['acc_var_test'], {}), '(acc_var_test)\n', (81397, 81411), True, 'import numpy as np\n'), ((81450, 81467), 'numpy.mean', 'np.mean', (['KLb_test'], {}), '(KLb_test)\n', (81457, 81467), True, 'import numpy as np\n'), ((16206, 16255), 'torch.split', 'torch.split', (['logits', 'args.edge_types_list'], {'dim': '(-1)'}), '(logits, args.edge_types_list, dim=-1)\n', (16217, 16255), False, 'import torch\n'), ((16441, 16471), 'torch.cat', 'torch.cat', (['edges_split'], {'dim': '(-1)'}), '(edges_split, dim=-1)\n', (16450, 16471), False, 'import torch\n'), ((22060, 22072), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22070, 22072), True, 'import matplotlib.pyplot as plt\n'), ((22089, 22138), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_1_array'], {'label': '"""loss 1"""'}), "(iteration, loss_1_array, label='loss 1')\n", (22097, 22138), True, 'import matplotlib.pyplot as plt\n'), ((22157, 22206), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_2_array'], {'label': '"""loss 2"""'}), "(iteration, loss_2_array, label='loss 2')\n", (22165, 22206), True, 'import matplotlib.pyplot as plt\n'), ((22225, 22248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (22235, 22248), True, 'import matplotlib.pyplot as plt\n'), ((22265, 22304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss Component/Total Loss"""'], {}), "('Loss Component/Total Loss')\n", (22275, 22304), True, 'import matplotlib.pyplot as plt\n'), ((22321, 22351), 'matplotlib.pyplot.legend', 'plt.legend', (['"""loss 1"""', '"""loss 2"""'], {}), "('loss 1', 'loss 2')\n", (22331, 22351), True, 'import matplotlib.pyplot as plt\n'), ((22368, 22378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22376, 22378), True, 'import matplotlib.pyplot as plt\n'), ((25522, 25571), 
'torch.split', 'torch.split', (['logits', 'args.edge_types_list'], {'dim': '(-1)'}), '(logits, args.edge_types_list, dim=-1)\n', (25533, 25571), False, 'import torch\n'), ((25757, 25787), 'torch.cat', 'torch.cat', (['edges_split'], {'dim': '(-1)'}), '(edges_split, dim=-1)\n', (25766, 25787), False, 'import torch\n'), ((49609, 49658), 'numpy.sum', 'np.sum', (['(histdata * (xcoords - mean_gaussian) ** 2)'], {}), '(histdata * (xcoords - mean_gaussian) ** 2)\n', (49615, 49658), True, 'import numpy as np\n'), ((50591, 50602), 'time.time', 'time.time', ([], {}), '()\n', (50600, 50602), False, 'import time\n'), ((51702, 51713), 'time.time', 'time.time', ([], {}), '()\n', (51711, 51713), False, 'import time\n'), ((58138, 58187), 'torch.split', 'torch.split', (['logits', 'args.edge_types_list'], {'dim': '(-1)'}), '(logits, args.edge_types_list, dim=-1)\n', (58149, 58187), False, 'import torch\n'), ((58336, 58366), 'torch.cat', 'torch.cat', (['edges_split'], {'dim': '(-1)'}), '(edges_split, dim=-1)\n', (58345, 58366), False, 'import torch\n'), ((81503, 81514), 'time.time', 'time.time', ([], {}), '()\n', (81512, 81514), False, 'import time\n'), ((82044, 82061), 'numpy.mean', 'np.mean', (['nll_test'], {}), '(nll_test)\n', (82051, 82061), True, 'import numpy as np\n'), ((82106, 82125), 'numpy.mean', 'np.mean', (['nll_M_test'], {}), '(nll_M_test)\n', (82113, 82125), True, 'import numpy as np\n'), ((82167, 82183), 'numpy.mean', 'np.mean', (['kl_test'], {}), '(kl_test)\n', (82174, 82183), True, 'import numpy as np\n'), ((82229, 82248), 'numpy.mean', 'np.mean', (['mse_1_test'], {}), '(mse_1_test)\n', (82236, 82248), True, 'import numpy as np\n'), ((82295, 82315), 'numpy.mean', 'np.mean', (['mse_10_test'], {}), '(mse_10_test)\n', (82302, 82315), True, 'import numpy as np\n'), ((82362, 82382), 'numpy.mean', 'np.mean', (['mse_20_test'], {}), '(mse_20_test)\n', (82369, 82382), True, 'import numpy as np\n'), ((82425, 82442), 'numpy.mean', 'np.mean', (['acc_test'], {}), '(acc_test)\n', 
(82432, 82442), True, 'import numpy as np\n'), ((82489, 82510), 'numpy.mean', 'np.mean', (['acc_var_test'], {}), '(acc_var_test)\n', (82496, 82510), True, 'import numpy as np\n'), ((82553, 82570), 'numpy.mean', 'np.mean', (['KLb_test'], {}), '(KLb_test)\n', (82560, 82570), True, 'import numpy as np\n'), ((55048, 55068), 'numpy.mean', 'np.mean', (['acc_var_val'], {}), '(acc_var_val)\n', (55055, 55068), True, 'import numpy as np\n'), ((55085, 55113), 'numpy.array', 'np.array', (['acc_var_blocks_val'], {}), '(acc_var_blocks_val)\n', (55093, 55113), True, 'import numpy as np\n'), ((55188, 55208), 'numpy.array', 'np.array', (['perm_train'], {}), '(perm_train)\n', (55196, 55208), True, 'import numpy as np\n'), ((55259, 55277), 'numpy.array', 'np.array', (['perm_val'], {}), '(perm_val)\n', (55267, 55277), True, 'import numpy as np\n'), ((82610, 82621), 'time.time', 'time.time', ([], {}), '()\n', (82619, 82621), False, 'import time\n'), ((15785, 15798), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (15793, 15798), True, 'import numpy as np\n'), ((17597, 17610), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17605, 17610), True, 'import numpy as np\n'), ((25101, 25114), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (25109, 25114), True, 'import numpy as np\n'), ((26913, 26926), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (26921, 26926), True, 'import numpy as np\n'), ((28695, 28721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (28705, 28721), True, 'import matplotlib.pyplot as plt\n'), ((28835, 28882), 'trajectory_plot.draw_lines', 'draw_lines', (['target', 'i'], {'linestyle': '""":"""', 'alpha': '(0.6)'}), "(target, i, linestyle=':', alpha=0.6)\n", (28845, 28882), False, 'from trajectory_plot import draw_lines\n'), ((29680, 29768), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, 
horizontalalignment='center', transform=ax.\n transAxes)\n", (29688, 29768), True, 'import matplotlib.pyplot as plt\n'), ((29896, 29906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29904, 29906), True, 'import matplotlib.pyplot as plt\n'), ((32537, 32565), 'torch.exp', 'torch.exp', (['(logsigma_plot / 2)'], {}), '(logsigma_plot / 2)\n', (32546, 32565), False, 'import torch\n'), ((41635, 41663), 'torch.exp', 'torch.exp', (['(logsigma_plot / 2)'], {}), '(logsigma_plot / 2)\n', (41644, 41663), False, 'import torch\n'), ((50524, 50542), 'numpy.array', 'np.array', (['perm_val'], {}), '(perm_val)\n', (50532, 50542), True, 'import numpy as np\n'), ((50947, 50973), 'numpy.array', 'np.array', (['acc_blocks_train'], {}), '(acc_blocks_train)\n', (50955, 50973), True, 'import numpy as np\n'), ((51034, 51057), 'numpy.array', 'np.array', (['kl_list_train'], {}), '(kl_list_train)\n', (51042, 51057), True, 'import numpy as np\n'), ((51414, 51438), 'numpy.array', 'np.array', (['acc_blocks_val'], {}), '(acc_blocks_val)\n', (51422, 51438), True, 'import numpy as np\n'), ((51499, 51520), 'numpy.array', 'np.array', (['kl_list_val'], {}), '(kl_list_val)\n', (51507, 51520), True, 'import numpy as np\n'), ((51635, 51653), 'numpy.array', 'np.array', (['perm_val'], {}), '(perm_val)\n', (51643, 51653), True, 'import numpy as np\n'), ((52067, 52093), 'numpy.array', 'np.array', (['acc_blocks_train'], {}), '(acc_blocks_train)\n', (52075, 52093), True, 'import numpy as np\n'), ((52154, 52177), 'numpy.array', 'np.array', (['kl_list_train'], {}), '(kl_list_train)\n', (52162, 52177), True, 'import numpy as np\n'), ((52532, 52556), 'numpy.array', 'np.array', (['acc_blocks_val'], {}), '(acc_blocks_val)\n', (52540, 52556), True, 'import numpy as np\n'), ((52617, 52638), 'numpy.array', 'np.array', (['kl_list_val'], {}), '(kl_list_val)\n', (52625, 52638), True, 'import numpy as np\n'), ((61022, 61050), 'torch.exp', 'torch.exp', (['(logsigma_plot / 2)'], {}), '(logsigma_plot / 2)\n', 
(61031, 61050), False, 'import torch\n'), ((81570, 81595), 'numpy.array', 'np.array', (['acc_blocks_test'], {}), '(acc_blocks_test)\n', (81578, 81595), True, 'import numpy as np\n'), ((81659, 81688), 'numpy.array', 'np.array', (['acc_var_blocks_test'], {}), '(acc_var_blocks_test)\n', (81667, 81688), True, 'import numpy as np\n'), ((81750, 81772), 'numpy.array', 'np.array', (['kl_list_test'], {}), '(kl_list_test)\n', (81758, 81772), True, 'import numpy as np\n'), ((32800, 32826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (32810, 32826), True, 'import matplotlib.pyplot as plt\n'), ((33587, 33645), 'matplotlib.patches.Rectangle', 'Rectangle', (['(-1, -1)', '(2)', '(2)'], {'edgecolor': '"""r"""', 'facecolor': '"""none"""'}), "((-1, -1), 2, 2, edgecolor='r', facecolor='none')\n", (33596, 33645), False, 'from matplotlib.patches import Ellipse, Rectangle\n'), ((37781, 37869), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.\n transAxes)\n", (37789, 37869), True, 'import matplotlib.pyplot as plt\n'), ((38112, 38127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (38122, 38127), True, 'import matplotlib.pyplot as plt\n'), ((38156, 38171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (38166, 38171), True, 'import matplotlib.pyplot as plt\n'), ((38200, 38210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38208, 38210), True, 'import matplotlib.pyplot as plt\n'), ((38337, 38358), 'torch.min', 'torch.min', (['sigma_plot'], {}), '(sigma_plot)\n', (38346, 38358), False, 'import torch\n'), ((38572, 38598), 'torch.from_numpy', 'torch.from_numpy', (['accuracy'], {}), '(accuracy)\n', (38588, 38598), False, 'import torch\n'), ((38742, 38774), 'torch.max', 'torch.max', (['output_plot', 'accuracy'], {}), 
'(output_plot, accuracy)\n', (38751, 38774), False, 'import torch\n'), ((41879, 41905), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (41889, 41905), True, 'import matplotlib.pyplot as plt\n'), ((42027, 42074), 'trajectory_plot.draw_lines', 'draw_lines', (['target', 'i'], {'linestyle': '""":"""', 'alpha': '(0.6)'}), "(target, i, linestyle=':', alpha=0.6)\n", (42037, 42074), False, 'from trajectory_plot import draw_lines\n'), ((42241, 42265), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1]'], {}), '([0, 1])\n', (42257, 42265), False, 'import torch\n'), ((42306, 42330), 'torch.LongTensor', 'torch.LongTensor', (['[2, 3]'], {}), '([2, 3])\n', (42322, 42330), False, 'import torch\n'), ((42370, 42391), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (42386, 42391), False, 'import torch\n'), ((42593, 42638), 'torch.index_select', 'torch.index_select', (['output_plot', '(3)', 'indices_1'], {}), '(output_plot, 3, indices_1)\n', (42611, 42638), False, 'import torch\n'), ((42947, 42992), 'torch.index_select', 'torch.index_select', (['output_plot', '(3)', 'indices_2'], {}), '(output_plot, 3, indices_2)\n', (42965, 42992), False, 'import torch\n'), ((43281, 43328), 'torch.index_select', 'torch.index_select', (['normalisedvel', '(3)', 'indices_3'], {}), '(normalisedvel, 3, indices_3)\n', (43299, 43328), False, 'import torch\n'), ((45236, 45280), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'aspect': 'equal'}"}), "(subplot_kw={'aspect': 'equal'})\n", (45248, 45280), True, 'import matplotlib.pyplot as plt\n'), ((46199, 46287), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.\n transAxes)\n", (46207, 46287), True, 'import matplotlib.pyplot as plt\n'), ((46422, 46432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (46430, 
46432), True, 'import matplotlib.pyplot as plt\n'), ((46567, 46588), 'torch.min', 'torch.min', (['sigma_plot'], {}), '(sigma_plot)\n', (46576, 46588), False, 'import torch\n'), ((46804, 46830), 'torch.from_numpy', 'torch.from_numpy', (['accuracy'], {}), '(accuracy)\n', (46820, 46830), False, 'import torch\n'), ((46974, 47006), 'torch.max', 'torch.max', (['output_plot', 'accuracy'], {}), '(output_plot, accuracy)\n', (46983, 47006), False, 'import torch\n'), ((54912, 54937), 'numpy.array', 'np.array', (['kl_var_list_val'], {}), '(kl_var_list_val)\n', (54920, 54937), True, 'import numpy as np\n'), ((61140, 61166), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (61150, 61166), True, 'import matplotlib.pyplot as plt\n'), ((61288, 61335), 'trajectory_plot.draw_lines', 'draw_lines', (['target', 'i'], {'linestyle': '""":"""', 'alpha': '(0.6)'}), "(target, i, linestyle=':', alpha=0.6)\n", (61298, 61335), False, 'from trajectory_plot import draw_lines\n'), ((62109, 62197), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.\n transAxes)\n", (62117, 62197), True, 'import matplotlib.pyplot as plt\n'), ((62221, 62231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (62229, 62231), True, 'import matplotlib.pyplot as plt\n'), ((65483, 65511), 'torch.exp', 'torch.exp', (['(logsigma_plot / 2)'], {}), '(logsigma_plot / 2)\n', (65492, 65511), False, 'import torch\n'), ((73745, 73773), 'torch.exp', 'torch.exp', (['(logsigma_plot / 2)'], {}), '(logsigma_plot / 2)\n', (73754, 73773), False, 'import torch\n'), ((82705, 82730), 'numpy.array', 'np.array', (['acc_blocks_test'], {}), '(acc_blocks_test)\n', (82713, 82730), True, 'import numpy as np\n'), ((82803, 82832), 'numpy.array', 'np.array', (['acc_var_blocks_test'], {}), '(acc_var_blocks_test)\n', (82811, 82832), True, 'import 
numpy as np\n'), ((82898, 82920), 'numpy.array', 'np.array', (['kl_list_test'], {}), '(kl_list_test)\n', (82906, 82920), True, 'import numpy as np\n'), ((54756, 54776), 'numpy.mean', 'np.mean', (['nll_var_val'], {}), '(nll_var_val)\n', (54763, 54776), True, 'import numpy as np\n'), ((54778, 54800), 'numpy.mean', 'np.mean', (['nll_M_var_val'], {}), '(nll_M_var_val)\n', (54785, 54800), True, 'import numpy as np\n'), ((54841, 54862), 'numpy.array', 'np.array', (['kl_list_val'], {}), '(kl_list_val)\n', (54849, 54862), True, 'import numpy as np\n'), ((65745, 65771), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (65755, 65771), True, 'import matplotlib.pyplot as plt\n'), ((65901, 65948), 'trajectory_plot.draw_lines', 'draw_lines', (['target', 'i'], {'linestyle': '""":"""', 'alpha': '(0.6)'}), "(target, i, linestyle=':', alpha=0.6)\n", (65911, 65948), False, 'from trajectory_plot import draw_lines\n'), ((66199, 66223), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1]'], {}), '([0, 1])\n', (66215, 66223), False, 'import torch\n'), ((66268, 66292), 'torch.LongTensor', 'torch.LongTensor', (['[2, 3]'], {}), '([2, 3])\n', (66284, 66292), False, 'import torch\n'), ((66337, 66358), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (66353, 66358), False, 'import torch\n'), ((66572, 66617), 'torch.index_select', 'torch.index_select', (['output_plot', '(3)', 'indices_1'], {}), '(output_plot, 3, indices_1)\n', (66590, 66617), False, 'import torch\n'), ((66946, 66991), 'torch.index_select', 'torch.index_select', (['output_plot', '(3)', 'indices_2'], {}), '(output_plot, 3, indices_2)\n', (66964, 66991), False, 'import torch\n'), ((67296, 67343), 'torch.index_select', 'torch.index_select', (['normalisedvel', '(3)', 'indices_3'], {}), '(normalisedvel, 3, indices_3)\n', (67314, 67343), False, 'import torch\n'), ((69501, 69545), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'aspect': 'equal'}"}), 
"(subplot_kw={'aspect': 'equal'})\n", (69513, 69545), True, 'import matplotlib.pyplot as plt\n'), ((70520, 70608), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.\n transAxes)\n", (70528, 70608), True, 'import matplotlib.pyplot as plt\n'), ((70751, 70761), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (70759, 70761), True, 'import matplotlib.pyplot as plt\n'), ((74561, 74587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (74571, 74587), True, 'import matplotlib.pyplot as plt\n'), ((74717, 74764), 'trajectory_plot.draw_lines', 'draw_lines', (['target', 'i'], {'linestyle': '""":"""', 'alpha': '(0.6)'}), "(target, i, linestyle=':', alpha=0.6)\n", (74727, 74764), False, 'from trajectory_plot import draw_lines\n'), ((75095, 75119), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1]'], {}), '([0, 1])\n', (75111, 75119), False, 'import torch\n'), ((75271, 75314), 'torch.index_select', 'torch.index_select', (['output_plot', '(3)', 'indices'], {}), '(output_plot, 3, indices)\n', (75289, 75314), False, 'import torch\n'), ((77245, 77289), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'aspect': 'equal'}"}), "(subplot_kw={'aspect': 'equal'})\n", (77257, 77289), True, 'import matplotlib.pyplot as plt\n'), ((78264, 78352), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', 'acc_text'], {'horizontalalignment': '"""center"""', 'transform': 'ax.transAxes'}), "(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.\n transAxes)\n", (78272, 78352), True, 'import matplotlib.pyplot as plt\n'), ((78495, 78505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (78503, 78505), True, 'import matplotlib.pyplot as plt\n'), ((54521, 54537), 'numpy.mean', 'np.mean', (['nll_val'], {}), '(nll_val)\n', (54528, 54537), True, 'import 
numpy as np\n'), ((54539, 54557), 'numpy.mean', 'np.mean', (['nll_M_val'], {}), '(nll_M_val)\n', (54546, 54557), True, 'import numpy as np\n'), ((54559, 54574), 'numpy.mean', 'np.mean', (['kl_val'], {}), '(kl_val)\n', (54566, 54574), True, 'import numpy as np\n'), ((54576, 54592), 'numpy.mean', 'np.mean', (['mse_val'], {}), '(mse_val)\n', (54583, 54592), True, 'import numpy as np\n'), ((54619, 54635), 'numpy.mean', 'np.mean', (['KLb_val'], {}), '(KLb_val)\n', (54626, 54635), True, 'import numpy as np\n'), ((54637, 54653), 'numpy.mean', 'np.mean', (['acc_val'], {}), '(acc_val)\n', (54644, 54653), True, 'import numpy as np\n'), ((54694, 54718), 'numpy.array', 'np.array', (['acc_blocks_val'], {}), '(acc_blocks_val)\n', (54702, 54718), True, 'import numpy as np\n'), ((43497, 43523), 'torch.acos', 'torch.acos', (['normalisedvelx'], {}), '(normalisedvelx)\n', (43507, 43523), False, 'import torch\n'), ((54414, 54436), 'numpy.mean', 'np.mean', (['acc_var_train'], {}), '(acc_var_train)\n', (54421, 54436), True, 'import numpy as np\n'), ((54453, 54483), 'numpy.array', 'np.array', (['acc_var_blocks_train'], {}), '(acc_var_blocks_train)\n', (54461, 54483), True, 'import numpy as np\n'), ((67520, 67546), 'torch.acos', 'torch.acos', (['normalisedvelx'], {}), '(normalisedvelx)\n', (67530, 67546), False, 'import torch\n'), ((54272, 54299), 'numpy.array', 'np.array', (['kl_var_list_train'], {}), '(kl_var_list_train)\n', (54280, 54299), True, 'import numpy as np\n'), ((54136, 54158), 'numpy.mean', 'np.mean', (['nll_var_train'], {}), '(nll_var_train)\n', (54143, 54158), True, 'import numpy as np\n'), ((54199, 54222), 'numpy.array', 'np.array', (['kl_list_train'], {}), '(kl_list_train)\n', (54207, 54222), True, 'import numpy as np\n'), ((53909, 53927), 'numpy.mean', 'np.mean', (['nll_train'], {}), '(nll_train)\n', (53916, 53927), True, 'import numpy as np\n'), ((53929, 53946), 'numpy.mean', 'np.mean', (['kl_train'], {}), '(kl_train)\n', (53936, 53946), True, 'import numpy as np\n'), 
((53973, 53991), 'numpy.mean', 'np.mean', (['mse_train'], {}), '(mse_train)\n', (53980, 53991), True, 'import numpy as np\n'), ((53993, 54011), 'numpy.mean', 'np.mean', (['KLb_train'], {}), '(KLb_train)\n', (54000, 54011), True, 'import numpy as np\n'), ((54013, 54031), 'numpy.mean', 'np.mean', (['acc_train'], {}), '(acc_train)\n', (54020, 54031), True, 'import numpy as np\n'), ((54072, 54098), 'numpy.array', 'np.array', (['acc_blocks_train'], {}), '(acc_blocks_train)\n', (54080, 54098), True, 'import numpy as np\n')] |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unit tests for discrete-action Policy Gradient functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import discrete_policy_gradient_ops as pg_ops
class EntropyCostTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for discrete_policy_entropy op."""

  @parameterized.named_parameters(('SingleAction', False),
                                  ('MultiActions', True))
  def testEntropy(self, is_multi_actions):
    """Checks entropy values against hand-computed references.

    For the multi-action case the op receives a list of logit tensors and the
    per-component entropies sum, so the expectation scales linearly with the
    number of action components.
    """
    with self.test_session() as sess:
      # Large values check numerical stability through the logs
      policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
                                   [0, 1000]])
      if is_multi_actions:
        num_action_components = 3
        policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
                              for _ in xrange(num_action_components)]
      else:
        num_action_components = 1
        policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)

      entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
      entropy = entropy_op.extra.entropy
      # One entropy value per batch row.
      self.assertEqual(entropy.get_shape(), tf.TensorShape(6))
      # Get these reference values in Torch with:
      #   c = nnd.EntropyCriterion()
      #   s = nn.LogSoftMax()
      #   result = c:forward(s:forward(logits))
      expected_entropy = num_action_components * np.array(
          [0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0])
      self.assertAllClose(sess.run(entropy),
                          expected_entropy,
                          atol=1e-4)

  @parameterized.named_parameters(('SingleAction', False),
                                  ('MultiActions', True))
  def testGradient(self, is_multi_actions):
    """Checks d(entropy)/d(logits) against hand-computed references."""
    with self.test_session() as sess:
      policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
                                   [0, 1000]])
      if is_multi_actions:
        num_action_components = 3
        policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
                              for _ in xrange(num_action_components)]
      else:
        num_action_components = 1
        policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)

      entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
      entropy = entropy_op.extra.entropy
      # Counterintuitively, the gradient->0 as policy->deterministic, that's why
      # the gradients for the large logit cases are `[0, 0]`. They should
      # strictly be >0, but they get truncated when we run out of precision.
      expected_gradients = np.array([[0.1966119, -0.1966119],
                                     [0.1966119, -0.1966119],
                                     [0.2099872, -0.2099872],
                                     [0, 0],
                                     [0, 0],
                                     [0, 0]])
      # Every component in the nest receives the same gradient since the
      # components are identical copies of the same logits.
      for policy_logits in nest.flatten(policy_logits_nest):
        gradients = tf.gradients(entropy, policy_logits)
        grad_policy_logits = sess.run(gradients[0])
        self.assertAllClose(grad_policy_logits,
                            expected_gradients,
                            atol=1e-4)

  @parameterized.named_parameters(('TwoActions', 2),
                                  ('FiveActions', 5),
                                  ('TenActions', 10),
                                  ('MixedMultiActions', [2, 5, 10]))
  def testNormalisation(self, num_actions):
    """With normalise=True, a uniform policy yields a loss of exactly -1."""
    with self.test_session() as sess:
      if isinstance(num_actions, list):
        # Multi-action case: one logits tensor per action component.
        policy_logits = [tf.constant([[1.0] * n], dtype=tf.float32)
                         for n in num_actions]
      else:
        policy_logits = tf.constant(
            [[1.0] * num_actions], dtype=tf.float32)
      entropy_op = pg_ops.discrete_policy_entropy_loss(
          policy_logits, normalise=True)
      self.assertAllClose(sess.run(entropy_op.loss), [-1.0])

  @parameterized.named_parameters(
      ('Fixed', 5, 4, 3, False),
      ('DynamicLength', None, 4, 3, False),
      ('DynamicBatch', 5, None, 3, False),
      ('DynamicBatchAndLength', None, None, 3, False),
      ('DynamicAll', None, None, None, False),
      ('NormFixed', 5, 4, 3, True),
      ('NormDynamicLength', None, 4, 3, True),
      ('NormDynamicBatch', 5, None, 3, True),
      ('NormDynamicBatchAndLength', None, None, 3, True),
      ('NormDynamicAll', None, None, None, True))
  def testShapeInference3D(self, sequence_length, batch_size, num_actions,
                           normalise):
    """Static loss/entropy shapes are [T, B] for rank-3 logit inputs."""
    T, B, A = sequence_length, batch_size, num_actions  # pylint: disable=invalid-name
    op = pg_ops.discrete_policy_entropy_loss(
        policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
        normalise=normalise)
    op.extra.entropy.get_shape().assert_is_compatible_with([T, B])
    op.loss.get_shape().assert_is_compatible_with([T, B])

  @parameterized.named_parameters(
      ('Fixed2D', 4, 3, False),
      ('DynamicBatch2D', None, 3, False),
      ('DynamicAll2D', None, None, False),
      ('NormFixed2D', 4, 3, True),
      ('NormDynamicBatch2D', None, 3, True),
      ('NormDynamicAll2D', None, None, True))
  def testShapeInference2D(self, batch_size, num_actions, normalise):
    """Static loss/entropy shapes are [B] for rank-2 logit inputs."""
    policy_logits = tf.placeholder(tf.float32, shape=[batch_size, num_actions])
    op = pg_ops.discrete_policy_entropy_loss(policy_logits, normalise=normalise)
    op.extra.entropy.get_shape().assert_is_compatible_with([batch_size])
    op.loss.get_shape().assert_is_compatible_with([batch_size])
@parameterized.named_parameters(('SingleAction', False),
                                ('MultiActions', True))
class DiscretePolicyGradientLossTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for discrete_policy_gradient_loss op."""

  def _setUpLoss(self, is_multi_actions):
    # Fixture: logits shaped [T=2, B=2, A=2]; the loss is summed over T,
    # so its shape is [B] (checked in testLoss below).
    policy_logits_np = np.array([[[0, 1], [0, 1]],
                                 [[1, 1], [0, 100]]])
    actions_np = np.array([[0, 0],
                           [1, 1]], dtype=np.int32)

    if is_multi_actions:
      # Replicate the same logits/actions across three action components.
      self._num_action_components = 3
      self._policy_logits_nest = [
          tf.constant(policy_logits_np, dtype=tf.float32)
          for _ in xrange(self._num_action_components)]
      self._actions_nest = [tf.constant(actions_np, dtype=tf.int32)
                            for _ in xrange(self._num_action_components)]
    else:
      self._num_action_components = 1
      self._policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
      self._actions_nest = tf.constant(actions_np, dtype=tf.int32)

    self._action_values = tf.constant([[0, 1], [2, 1]], dtype=tf.float32)
    self._loss = pg_ops.discrete_policy_gradient_loss(
        self._policy_logits_nest, self._actions_nest, self._action_values)

  def testLoss(self, is_multi_actions):
    """Loss has shape [B] and matches hand-computed per-sequence sums."""
    self._setUpLoss(is_multi_actions)
    with self.test_session() as sess:
      self.assertEqual(self._loss.get_shape(), tf.TensorShape(2))  # [B]
      self.assertAllClose(
          sess.run(self._loss),
          # computed by summing expected losses from DiscretePolicyGradientTest
          # over the two sequences of length two which I've split the batch
          # into:
          self._num_action_components * np.array([1.386294, 1.313262]))

  def testGradients(self, is_multi_actions):
    """Gradients reach the logits; actions/action-values get none."""
    self._setUpLoss(is_multi_actions)
    with self.test_session() as sess:
      total_loss = tf.reduce_sum(self._loss)
      gradients = tf.gradients(
          [total_loss], nest.flatten(self._policy_logits_nest))
      grad_policy_logits_nest = sess.run(gradients)
      for grad_policy_logits in grad_policy_logits_nest:
        self.assertAllClose(grad_policy_logits,
                            [[[0, 0], [-0.731, 0.731]],
                             [[1, -1], [0, 0]]], atol=1e-4)
      # Actions and action-values must be treated as non-differentiable inputs.
      dead_grads = tf.gradients(
          [total_loss],
          nest.flatten(self._actions_nest) + [self._action_values])
      for grad in dead_grads:
        self.assertIsNone(grad)
class DiscretePolicyGradientTest(tf.test.TestCase):
  """Tests for discrete_policy_gradient op."""

  def testLoss(self):
    """Per-timestep loss matches hand-computed cross-entropy targets."""
    with self.test_session() as sess:
      logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
                           dtype=tf.float32)
      advantages = tf.constant([0, 1, 2, 1], dtype=tf.float32)
      taken_actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
      loss = pg_ops.discrete_policy_gradient(logits, taken_actions, advantages)
      self.assertEqual(loss.get_shape(), tf.TensorShape(4))
      # Targets computed as:
      #   loss = action_value * (-logits[action] + log(sum_a(exp(logits[a]))))
      # The final row (very large logit) runs out of precision and truncates
      # to 0, but must not be `nan`.
      self.assertAllClose(sess.run(loss), [0, 1.313262, 1.386294, 0])

  def testGradients(self):
    """Gradients flow into the logits and into nothing else."""
    with self.test_session() as sess:
      logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
                           dtype=tf.float32)
      advantages = tf.constant([0, 1, 2, 1], dtype=tf.float32)
      taken_actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
      loss = pg_ops.discrete_policy_gradient(logits, taken_actions, advantages)
      summed_loss = tf.reduce_sum(loss)
      (logits_grad,) = tf.gradients([summed_loss], [logits])
      # The final row (very large logit) runs out of precision and truncates
      # to 0, but must not be `nan`.
      self.assertAllClose(sess.run(logits_grad),
                          [[0, 0], [-0.731, 0.731], [1, -1], [0, 0]],
                          atol=1e-4)
      # Actions and advantages are non-differentiable inputs.
      self.assertAllEqual(
          tf.gradients([summed_loss], [taken_actions, advantages]),
          [None, None])

  def testDynamicBatchSize(self):
    """Static shapes are inferred with an unknown batch dimension."""
    logits = tf.placeholder(tf.float32, shape=[None, 3])
    advantages = tf.placeholder(tf.float32, shape=[None])
    taken_actions = tf.placeholder(tf.int32, shape=[None])
    loss = pg_ops.discrete_policy_gradient(logits, taken_actions, advantages)
    self.assertEqual(loss.get_shape().as_list(), [None])
    (logits_grad,) = tf.gradients(tf.reduce_sum(loss), [logits])
    self.assertAllEqual(logits_grad.get_shape().as_list(), [None, 3])
class SequenceAdvantageActorCriticLossTest(parameterized.TestCase,
                                           tf.test.TestCase):
  """Tests for sequence_advantage_actor_critic_loss op."""

  @parameterized.named_parameters(
      ('SingleActionEntropyNormalise', False, True),
      ('SingleActionNoEntropyNormalise', False, False),
      ('MultiActionsEntropyNormalise', True, True),
      ('MultiActionsNoEntropyNormalise', True, False),
  )
  def testLossSequence(self, is_multi_actions, normalise_entropy):
    """Checks every component of the loss against hand-derived values."""
    # A sequence of length 2, batch size 1, 3 possible actions.
    num_actions = 3
    policy_logits = [[[0., 0., 1.]], [[0., 1., 0.]]]
    actions = [[0], [1]]
    baseline_values = [[0.2], [0.3]]
    rewards = [[0.4], [0.5]]
    pcontinues = [[0.9], [0.8]]
    bootstrap_value = [0.1]
    baseline_cost = 0.15
    entropy_cost = 0.25

    if is_multi_actions:
      num_action_components = 3
      policy_logits_nest = [tf.constant(policy_logits, dtype=tf.float32)
                            for _ in xrange(num_action_components)]
      actions_nest = [tf.constant(actions, dtype=tf.int32)
                      for _ in xrange(num_action_components)]
    else:
      num_action_components = 1
      policy_logits_nest = tf.constant(policy_logits, dtype=tf.float32)
      actions_nest = tf.constant(actions, dtype=tf.int32)

    loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
        policy_logits_nest,
        tf.constant(baseline_values, dtype=tf.float32),
        actions_nest,
        tf.constant(rewards, dtype=tf.float32),
        tf.constant(pcontinues, dtype=tf.float32),
        tf.constant(bootstrap_value, dtype=tf.float32),
        baseline_cost=baseline_cost,
        entropy_cost=entropy_cost,
        normalise_entropy=normalise_entropy)

    # Manually calculate the discounted returns.
    return1 = 0.5 + 0.8 * 0.1
    return0 = 0.4 + 0.9 * return1
    with self.test_session() as sess:
      # Discounted returns
      self.assertAllClose(sess.run(extra.discounted_returns),
                          [[return0], [return1]])

      # Advantages
      advantages = [return0 - baseline_values[0][0],
                    return1 - baseline_values[1][0]]
      self.assertAllClose(sess.run(extra.advantages),
                          [[adv] for adv in advantages])

      # Baseline
      expected_baseline_loss = baseline_cost*sum([0.5 * adv**2 for adv in
                                                  advantages])
      self.assertAllClose(
          sess.run(extra.baseline_loss), [expected_baseline_loss])

      # Policy Gradient loss
      #   loss = sum_t(action_value*(-logits[action] +
      #                              log(sum_a(exp(logits[a])))))
      #
      # The below takes advantage of there only being one minibatch dim.
      normalise = lambda logits: np.log(np.exp(logits).sum())
      batch = 0
      expected_policy_gradient_loss = num_action_components * sum([
          advantages[0]*(-(policy_logits[0][batch][actions[0][batch]]) +
                         normalise(policy_logits[0])),
          advantages[1]*(-(policy_logits[1][batch][actions[1][batch]]) +
                         normalise(policy_logits[1])),
      ])
      self.assertAllClose(sess.run(extra.policy_gradient_loss),
                          [expected_policy_gradient_loss])

      # Entropy, calculated as per discrete_policy_entropy tests.
      expected_entropy = num_action_components*0.97533*2
      expected_entropy_loss = -entropy_cost*expected_entropy
      if normalise_entropy:
        # Normalisation divides by the maximum possible entropy, log(A),
        # summed over components.
        expected_entropy_loss /= (num_action_components * np.log(num_actions))
      self.assertAllClose(sess.run(extra.entropy),
                          [expected_entropy], atol=1e-4)
      self.assertAllClose(sess.run(extra.entropy_loss), [expected_entropy_loss],
                          atol=1e-4)

      # Total loss
      expected_loss = [expected_entropy_loss + expected_policy_gradient_loss +
                       expected_baseline_loss]
      self.assertAllClose(sess.run(loss), expected_loss, atol=1e-4)

  @parameterized.named_parameters(('Fixed', 5, 4, 3),
                                  ('DynamicLength', None, 4, 3),
                                  ('DynamicBatch', 5, None, 3),
                                  ('DynamicBatchAndLength', None, None, 3),
                                  ('DynamicAll', None, None, None))
  def testShapeInference(self, sequence_length, batch_size, num_actions):
    """Static shapes are inferred with partially-unknown input shapes."""
    T, B, A = sequence_length, batch_size, num_actions  # pylint: disable=invalid-name
    loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
        policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
        baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
        actions=tf.placeholder(tf.int32, shape=[T, B]),
        rewards=tf.placeholder(tf.float32, shape=[T, B]),
        pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
        bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
        entropy_cost=1)
    extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
    extra.advantages.get_shape().assert_is_compatible_with([T, B])
    extra.baseline_loss.get_shape().assert_is_compatible_with([B])
    extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
    extra.entropy.get_shape().assert_is_compatible_with([B])
    extra.entropy_loss.get_shape().assert_is_compatible_with([B])
    loss.get_shape().assert_is_compatible_with([B])

  @parameterized.named_parameters(('Fixed', 5, 4, 3),
                                  ('DynamicLength', None, 4, 3),
                                  ('DynamicBatch', 5, None, 3),
                                  ('DynamicBatchAndLength', None, None, 3),
                                  ('DynamicAll', None, None, None))
  def testShapeInferenceGAE(self, sequence_length, batch_size, num_actions):
    """Same shape checks as above, but with GAE (lambda_ < 1) enabled."""
    T, B, A = sequence_length, batch_size, num_actions  # pylint: disable=invalid-name
    loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
        policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
        baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
        actions=tf.placeholder(tf.int32, shape=[T, B]),
        rewards=tf.placeholder(tf.float32, shape=[T, B]),
        pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
        bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
        lambda_=0.9,
        entropy_cost=1)
    extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
    extra.advantages.get_shape().assert_is_compatible_with([T, B])
    extra.baseline_loss.get_shape().assert_is_compatible_with([B])
    extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
    extra.entropy.get_shape().assert_is_compatible_with([B])
    extra.entropy_loss.get_shape().assert_is_compatible_with([B])
    loss.get_shape().assert_is_compatible_with([B])
class SequenceAdvantageActorCriticLossGradientTest(parameterized.TestCase,
tf.test.TestCase):
  def setUp(self):
    """Builds single-action and multi-action loss ops over a T=2, B=1 batch."""
    super(SequenceAdvantageActorCriticLossGradientTest, self).setUp()
    self.num_actions = 3
    self.num_action_components = 5
    policy_logits_np = np.array([[[0., 0., 1.]], [[0., 1., 0.]]])
    self.policy_logits = tf.constant(policy_logits_np, dtype=tf.float32)
    # Multi-action case: the same logits replicated per action component.
    self.multi_policy_logits = [tf.constant(policy_logits_np, dtype=tf.float32)
                                for _ in xrange(self.num_action_components)]
    self.baseline_values = tf.constant([[0.2], [0.3]])
    actions_np = np.array([[0], [1]])
    actions = tf.constant(actions_np)
    multi_actions = [tf.constant(actions_np)
                     for _ in xrange(self.num_action_components)]
    rewards = tf.constant([[0.4], [0.5]])
    pcontinues = tf.constant([[0.9], [0.8]])
    bootstrap_value = tf.constant([0.1])
    baseline_cost = 0.15
    entropy_cost = 0.25
    self.op = pg_ops.sequence_advantage_actor_critic_loss(
        self.policy_logits, self.baseline_values, actions, rewards, pcontinues,
        bootstrap_value, baseline_cost=baseline_cost, entropy_cost=entropy_cost)
    self.multi_op = pg_ops.sequence_advantage_actor_critic_loss(
        self.multi_policy_logits, self.baseline_values, multi_actions, rewards,
        pcontinues, bootstrap_value, baseline_cost=baseline_cost,
        entropy_cost=entropy_cost)
    # Inputs that must never receive gradients from any part of the loss.
    self.invalid_grad_inputs = [actions, rewards, pcontinues, bootstrap_value]
    self.invalid_grad_outputs = [None]*len(self.invalid_grad_inputs)
  @parameterized.named_parameters(('SingleAction', False),
                                  ('MultiActions', True))
  def testPolicyGradients(self, is_multi_actions):
    """Policy-gradient loss backprops into the logits, not the baseline."""
    if is_multi_actions:
      loss = self.multi_op.extra.policy_gradient_loss
      policy_logits_nest = self.multi_policy_logits
    else:
      loss = self.op.extra.policy_gradient_loss
      policy_logits_nest = self.policy_logits

    grad_policy_list = [
        tf.gradients(loss, policy_logits)[0] * self.num_actions
        for policy_logits in nest.flatten(policy_logits_nest)]

    # Each gradient tensor keeps the [T, B, A] = [2, 1, 3] logits shape.
    for grad_policy in grad_policy_list:
      self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
    self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
    self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
                        self.invalid_grad_outputs)
def testNonDifferentiableDiscountedReturns(self):
self.assertAllEqual(tf.gradients(self.op.extra.discounted_returns,
self.invalid_grad_inputs),
self.invalid_grad_outputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testEntropyGradients(self, is_multi_actions):
if is_multi_actions:
loss = self.multi_op.extra.entropy_loss
policy_logits_nest = self.multi_policy_logits
else:
loss = self.op.extra.entropy_loss
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(loss, policy_logits)[0] * self.num_actions
for policy_logits in nest.flatten(policy_logits_nest)]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
def testBaselineGradients(self):
loss = self.op.extra.baseline_loss
grad_baseline = tf.gradients(loss, self.baseline_values)[0]
self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
self.assertAllEqual(tf.gradients(loss, self.policy_logits), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
  @parameterized.named_parameters(('SingleAction', False),
                                  ('MultiActions', True))
  def testTotalLossGradients(self, is_multi_actions):
    """Checks total-loss gradients against hard-coded regression values."""
    with self.test_session() as sess:
      if is_multi_actions:
        total_loss = tf.reduce_sum(self.multi_op.loss)
        policy_logits_nest = self.multi_policy_logits
      else:
        total_loss = tf.reduce_sum(self.op.loss)
        policy_logits_nest = self.policy_logits
      # One gradient tensor per action component.
      grad_policy_list = [
          tf.gradients(total_loss, policy_logits)[0]
          for policy_logits in nest.flatten(policy_logits_nest)]
      grad_baseline = tf.gradients(total_loss, self.baseline_values)[0]
      for grad_policy in grad_policy_list:
        self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
        # These values were just generated once and hard-coded here to check for
        # regressions. Calculating by hand would be too time-consuming,
        # error-prone and unreadable.
        self.assertAllClose(sess.run(grad_policy),
                            [[[-0.5995, 0.1224, 0.4770]],
                             [[0.0288, -0.0576, 0.0288]]],
                            atol=1e-4)
      self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
      self.assertAllClose(sess.run(grad_baseline), [[-0.1083], [-0.0420]],
                          atol=1e-4)
      # The total loss must not differentiate through the
      # non-differentiable inputs.
      self.assertAllEqual(tf.gradients(total_loss, self.invalid_grad_inputs),
                          self.invalid_grad_outputs)
if __name__ == '__main__':
  tf.test.main()  # Run every TestCase defined in this module.
| [
"tensorflow.compat.v1.placeholder",
"tree.flatten",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.TensorShape",
"numpy.log",
"absl.testing.parameterized.named_parameters",
"trfl.discrete_policy_gradient_ops.sequence_advantage_actor_critic_loss",
"trfl.discrete_policy_gradient_ops.discrete_p... | [((6516, 6595), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (6546, 6595), False, 'from absl.testing import parameterized\n'), ((1228, 1307), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (1258, 1307), False, 'from absl.testing import parameterized\n'), ((2545, 2624), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (2575, 2624), False, 'from absl.testing import parameterized\n'), ((4179, 4308), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('TwoActions', 2)", "('FiveActions', 5)", "('TenActions', 10)", "('MixedMultiActions', [2, 5, 10])"], {}), "(('TwoActions', 2), ('FiveActions', 5), (\n 'TenActions', 10), ('MixedMultiActions', [2, 5, 10]))\n", (4209, 4308), False, 'from absl.testing import parameterized\n'), ((4907, 5362), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Fixed', 5, 4, 3, False)", "('DynamicLength', None, 4, 3, False)", "('DynamicBatch', 5, None, 3, False)", "('DynamicBatchAndLength', None, None, 3, False)", "('DynamicAll', None, None, None, False)", "('NormFixed', 5, 4, 3, True)", "('NormDynamicLength', None, 4, 3, True)", "('NormDynamicBatch', 5, None, 3, True)", "('NormDynamicBatchAndLength', None, None, 3, True)", "('NormDynamicAll', None, None, None, True)"], {}), "(('Fixed', 5, 4, 3, False), ('DynamicLength',\n None, 4, 3, False), ('DynamicBatch', 5, None, 3, False), (\n 'DynamicBatchAndLength', None, None, 3, False), ('DynamicAll', None,\n None, None, False), ('NormFixed', 
5, 4, 3, True), ('NormDynamicLength',\n None, 4, 3, True), ('NormDynamicBatch', 5, None, 3, True), (\n 'NormDynamicBatchAndLength', None, None, 3, True), ('NormDynamicAll',\n None, None, None, True))\n", (4937, 5362), False, 'from absl.testing import parameterized\n'), ((5870, 6119), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Fixed2D', 4, 3, False)", "('DynamicBatch2D', None, 3, False)", "('DynamicAll2D', None, None, False)", "('NormFixed2D', 4, 3, True)", "('NormDynamicBatch2D', None, 3, True)", "('NormDynamicAll2D', None, None, True)"], {}), "(('Fixed2D', 4, 3, False), ('DynamicBatch2D',\n None, 3, False), ('DynamicAll2D', None, None, False), ('NormFixed2D', 4,\n 3, True), ('NormDynamicBatch2D', None, 3, True), ('NormDynamicAll2D',\n None, None, True))\n", (5900, 6119), False, 'from absl.testing import parameterized\n'), ((11497, 11734), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleActionEntropyNormalise', False, True)", "('SingleActionNoEntropyNormalise', False, False)", "('MultiActionsEntropyNormalise', True, True)", "('MultiActionsNoEntropyNormalise', True, False)"], {}), "(('SingleActionEntropyNormalise', False, True\n ), ('SingleActionNoEntropyNormalise', False, False), (\n 'MultiActionsEntropyNormalise', True, True), (\n 'MultiActionsNoEntropyNormalise', True, False))\n", (11527, 11734), False, 'from absl.testing import parameterized\n'), ((15367, 15563), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Fixed', 5, 4, 3)", "('DynamicLength', None, 4, 3)", "('DynamicBatch', 5, None, 3)", "('DynamicBatchAndLength', None, None, 3)", "('DynamicAll', None, None, None)"], {}), "(('Fixed', 5, 4, 3), ('DynamicLength', None, \n 4, 3), ('DynamicBatch', 5, None, 3), ('DynamicBatchAndLength', None,\n None, 3), ('DynamicAll', None, None, None))\n", (15397, 15563), False, 'from absl.testing import parameterized\n'), ((16778, 16974), 
'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Fixed', 5, 4, 3)", "('DynamicLength', None, 4, 3)", "('DynamicBatch', 5, None, 3)", "('DynamicBatchAndLength', None, None, 3)", "('DynamicAll', None, None, None)"], {}), "(('Fixed', 5, 4, 3), ('DynamicLength', None, \n 4, 3), ('DynamicBatch', 5, None, 3), ('DynamicBatchAndLength', None,\n None, 3), ('DynamicAll', None, None, None))\n", (16808, 16974), False, 'from absl.testing import parameterized\n'), ((19842, 19921), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (19872, 19921), False, 'from absl.testing import parameterized\n'), ((20951, 21030), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (20981, 21030), False, 'from absl.testing import parameterized\n'), ((22210, 22289), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('SingleAction', False)", "('MultiActions', True)"], {}), "(('SingleAction', False), ('MultiActions', True))\n", (22240, 22289), False, 'from absl.testing import parameterized\n'), ((23746, 23760), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (23758, 23760), True, 'import tensorflow.compat.v1 as tf\n'), ((6235, 6294), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[batch_size, num_actions]'}), '(tf.float32, shape=[batch_size, num_actions])\n', (6249, 6294), True, 'import tensorflow.compat.v1 as tf\n'), ((6304, 6375), 'trfl.discrete_policy_gradient_ops.discrete_policy_entropy_loss', 'pg_ops.discrete_policy_entropy_loss', (['policy_logits'], {'normalise': 'normalise'}), '(policy_logits, normalise=normalise)\n', (6339, 6375), True, 'from trfl import 
discrete_policy_gradient_ops as pg_ops\n'), ((6826, 6874), 'numpy.array', 'np.array', (['[[[0, 1], [0, 1]], [[1, 1], [0, 100]]]'], {}), '([[[0, 1], [0, 1]], [[1, 1], [0, 100]]])\n', (6834, 6874), True, 'import numpy as np\n'), ((6925, 6967), 'numpy.array', 'np.array', (['[[0, 0], [1, 1]]'], {'dtype': 'np.int32'}), '([[0, 0], [1, 1]], dtype=np.int32)\n', (6933, 6967), True, 'import numpy as np\n'), ((7573, 7620), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 1], [2, 1]]'], {'dtype': 'tf.float32'}), '([[0, 1], [2, 1]], dtype=tf.float32)\n', (7584, 7620), True, 'import tensorflow.compat.v1 as tf\n'), ((7639, 7747), 'trfl.discrete_policy_gradient_ops.discrete_policy_gradient_loss', 'pg_ops.discrete_policy_gradient_loss', (['self._policy_logits_nest', 'self._actions_nest', 'self._action_values'], {}), '(self._policy_logits_nest, self.\n _actions_nest, self._action_values)\n', (7675, 7747), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((10884, 10927), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 3]'}), '(tf.float32, shape=[None, 3])\n', (10898, 10927), True, 'import tensorflow.compat.v1 as tf\n'), ((10948, 10988), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (10962, 10988), True, 'import tensorflow.compat.v1 as tf\n'), ((11003, 11041), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]'}), '(tf.int32, shape=[None])\n', (11017, 11041), True, 'import tensorflow.compat.v1 as tf\n'), ((11053, 11123), 'trfl.discrete_policy_gradient_ops.discrete_policy_gradient', 'pg_ops.discrete_policy_gradient', (['policy_logits', 'actions', 'action_values'], {}), '(policy_logits, actions, action_values)\n', (11084, 11123), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((18529, 18577), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]'], {}), '([[[0.0, 0.0, 
1.0]], [[0.0, 1.0, 0.0]]])\n', (18537, 18577), True, 'import numpy as np\n'), ((18597, 18644), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (18608, 18644), True, 'import tensorflow.compat.v1 as tf\n'), ((18829, 18856), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0.2], [0.3]]'], {}), '([[0.2], [0.3]])\n', (18840, 18856), True, 'import tensorflow.compat.v1 as tf\n'), ((18874, 18894), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (18882, 18894), True, 'import numpy as np\n'), ((18909, 18932), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions_np'], {}), '(actions_np)\n', (18920, 18932), True, 'import tensorflow.compat.v1 as tf\n'), ((19058, 19085), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0.4], [0.5]]'], {}), '([[0.4], [0.5]])\n', (19069, 19085), True, 'import tensorflow.compat.v1 as tf\n'), ((19103, 19130), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0.9], [0.8]]'], {}), '([[0.9], [0.8]])\n', (19114, 19130), True, 'import tensorflow.compat.v1 as tf\n'), ((19153, 19171), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.1]'], {}), '([0.1])\n', (19164, 19171), True, 'import tensorflow.compat.v1 as tf\n'), ((19236, 19433), 'trfl.discrete_policy_gradient_ops.sequence_advantage_actor_critic_loss', 'pg_ops.sequence_advantage_actor_critic_loss', (['self.policy_logits', 'self.baseline_values', 'actions', 'rewards', 'pcontinues', 'bootstrap_value'], {'baseline_cost': 'baseline_cost', 'entropy_cost': 'entropy_cost'}), '(self.policy_logits, self.\n baseline_values, actions, rewards, pcontinues, bootstrap_value,\n baseline_cost=baseline_cost, entropy_cost=entropy_cost)\n', (19279, 19433), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((19463, 19672), 'trfl.discrete_policy_gradient_ops.sequence_advantage_actor_critic_loss', 'pg_ops.sequence_advantage_actor_critic_loss', (['self.multi_policy_logits', 
'self.baseline_values', 'multi_actions', 'rewards', 'pcontinues', 'bootstrap_value'], {'baseline_cost': 'baseline_cost', 'entropy_cost': 'entropy_cost'}), '(self.multi_policy_logits, self.\n baseline_values, multi_actions, rewards, pcontinues, bootstrap_value,\n baseline_cost=baseline_cost, entropy_cost=entropy_cost)\n', (19506, 19672), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((1512, 1577), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000], [0, 1000]]'], {}), '([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000], [0, 1000]])\n', (1520, 1577), True, 'import numpy as np\n'), ((1965, 2020), 'trfl.discrete_policy_gradient_ops.discrete_policy_entropy_loss', 'pg_ops.discrete_policy_entropy_loss', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (2000, 2020), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((2766, 2831), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000], [0, 1000]]'], {}), '([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000], [0, 1000]])\n', (2774, 2831), True, 'import numpy as np\n'), ((3219, 3274), 'trfl.discrete_policy_gradient_ops.discrete_policy_entropy_loss', 'pg_ops.discrete_policy_entropy_loss', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (3254, 3274), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((3575, 3689), 'numpy.array', 'np.array', (['[[0.1966119, -0.1966119], [0.1966119, -0.1966119], [0.2099872, -0.2099872],\n [0, 0], [0, 0], [0, 0]]'], {}), '([[0.1966119, -0.1966119], [0.1966119, -0.1966119], [0.2099872, -\n 0.2099872], [0, 0], [0, 0], [0, 0]])\n', (3583, 3689), True, 'import numpy as np\n'), ((3897, 3929), 'tree.flatten', 'nest.flatten', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (3909, 3929), True, 'import tree as nest\n'), ((4764, 4830), 'trfl.discrete_policy_gradient_ops.discrete_policy_entropy_loss', 'pg_ops.discrete_policy_entropy_loss', (['policy_logits'], {'normalise': '(True)'}), '(policy_logits, 
normalise=True)\n', (4799, 4830), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((7431, 7478), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (7442, 7478), True, 'import tensorflow.compat.v1 as tf\n'), ((7506, 7545), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions_np'], {'dtype': 'tf.int32'}), '(actions_np, dtype=tf.int32)\n', (7517, 7545), True, 'import tensorflow.compat.v1 as tf\n'), ((8388, 8413), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['self._loss'], {}), '(self._loss)\n', (8401, 8413), True, 'import tensorflow.compat.v1 as tf\n'), ((9154, 9219), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 1], [0, 1], [1, 1], [0, 100]]'], {'dtype': 'tf.float32'}), '([[0, 1], [0, 1], [1, 1], [0, 100]], dtype=tf.float32)\n', (9165, 9219), True, 'import tensorflow.compat.v1 as tf\n'), ((9276, 9319), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 1]'], {'dtype': 'tf.float32'}), '([0, 1, 2, 1], dtype=tf.float32)\n', (9287, 9319), True, 'import tensorflow.compat.v1 as tf\n'), ((9336, 9377), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 1, 1]'], {'dtype': 'tf.int32'}), '([0, 0, 1, 1], dtype=tf.int32)\n', (9347, 9377), True, 'import tensorflow.compat.v1 as tf\n'), ((9391, 9461), 'trfl.discrete_policy_gradient_ops.discrete_policy_gradient', 'pg_ops.discrete_policy_gradient', (['policy_logits', 'actions', 'action_values'], {}), '(policy_logits, actions, action_values)\n', (9422, 9461), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((9959, 10024), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 1], [0, 1], [1, 1], [0, 100]]'], {'dtype': 'tf.float32'}), '([[0, 1], [0, 1], [1, 1], [0, 100]], dtype=tf.float32)\n', (9970, 10024), True, 'import tensorflow.compat.v1 as tf\n'), ((10081, 10124), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 1]'], {'dtype': 'tf.float32'}), '([0, 
1, 2, 1], dtype=tf.float32)\n', (10092, 10124), True, 'import tensorflow.compat.v1 as tf\n'), ((10141, 10182), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 1, 1]'], {'dtype': 'tf.int32'}), '([0, 0, 1, 1], dtype=tf.int32)\n', (10152, 10182), True, 'import tensorflow.compat.v1 as tf\n'), ((10196, 10266), 'trfl.discrete_policy_gradient_ops.discrete_policy_gradient', 'pg_ops.discrete_policy_gradient', (['policy_logits', 'actions', 'action_values'], {}), '(policy_logits, actions, action_values)\n', (10227, 10266), True, 'from trfl import discrete_policy_gradient_ops as pg_ops\n'), ((10331, 10350), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (10344, 10350), True, 'import tensorflow.compat.v1 as tf\n'), ((10369, 10412), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['[total_loss]', '[policy_logits]'], {}), '([total_loss], [policy_logits])\n', (10381, 10412), True, 'import tensorflow.compat.v1 as tf\n'), ((11253, 11272), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (11266, 11272), True, 'import tensorflow.compat.v1 as tf\n'), ((12542, 12586), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits'], {'dtype': 'tf.float32'}), '(policy_logits, dtype=tf.float32)\n', (12553, 12586), True, 'import tensorflow.compat.v1 as tf\n'), ((12608, 12644), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions'], {'dtype': 'tf.int32'}), '(actions, dtype=tf.int32)\n', (12619, 12644), True, 'import tensorflow.compat.v1 as tf\n'), ((12745, 12791), 'tensorflow.compat.v1.constant', 'tf.constant', (['baseline_values'], {'dtype': 'tf.float32'}), '(baseline_values, dtype=tf.float32)\n', (12756, 12791), True, 'import tensorflow.compat.v1 as tf\n'), ((12823, 12861), 'tensorflow.compat.v1.constant', 'tf.constant', (['rewards'], {'dtype': 'tf.float32'}), '(rewards, dtype=tf.float32)\n', (12834, 12861), True, 'import tensorflow.compat.v1 as tf\n'), ((12871, 12912), 'tensorflow.compat.v1.constant', 
'tf.constant', (['pcontinues'], {'dtype': 'tf.float32'}), '(pcontinues, dtype=tf.float32)\n', (12882, 12912), True, 'import tensorflow.compat.v1 as tf\n'), ((12922, 12968), 'tensorflow.compat.v1.constant', 'tf.constant', (['bootstrap_value'], {'dtype': 'tf.float32'}), '(bootstrap_value, dtype=tf.float32)\n', (12933, 12968), True, 'import tensorflow.compat.v1 as tf\n'), ((18677, 18724), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (18688, 18724), True, 'import tensorflow.compat.v1 as tf\n'), ((18954, 18977), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions_np'], {}), '(actions_np)\n', (18965, 18977), True, 'import tensorflow.compat.v1 as tf\n'), ((20537, 20577), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.baseline_values'], {}), '(loss, self.baseline_values)\n', (20549, 20577), True, 'import tensorflow.compat.v1 as tf\n'), ((20611, 20655), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.invalid_grad_inputs'], {}), '(loss, self.invalid_grad_inputs)\n', (20623, 20655), True, 'import tensorflow.compat.v1 as tf\n'), ((20785, 20857), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['self.op.extra.discounted_returns', 'self.invalid_grad_inputs'], {}), '(self.op.extra.discounted_returns, self.invalid_grad_inputs)\n', (20797, 20857), True, 'import tensorflow.compat.v1 as tf\n'), ((21631, 21671), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.baseline_values'], {}), '(loss, self.baseline_values)\n', (21643, 21671), True, 'import tensorflow.compat.v1 as tf\n'), ((21705, 21749), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.invalid_grad_inputs'], {}), '(loss, self.invalid_grad_inputs)\n', (21717, 21749), True, 'import tensorflow.compat.v1 as tf\n'), ((21897, 21937), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.baseline_values'], {}), '(loss, self.baseline_values)\n', 
(21909, 21937), True, 'import tensorflow.compat.v1 as tf\n'), ((21989, 22011), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 1]'], {}), '([2, 1])\n', (22003, 22011), True, 'import tensorflow.compat.v1 as tf\n'), ((22037, 22075), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.policy_logits'], {}), '(loss, self.policy_logits)\n', (22049, 22075), True, 'import tensorflow.compat.v1 as tf\n'), ((22109, 22153), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'self.invalid_grad_inputs'], {}), '(loss, self.invalid_grad_inputs)\n', (22121, 22153), True, 'import tensorflow.compat.v1 as tf\n'), ((1897, 1944), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (1908, 1944), True, 'import tensorflow.compat.v1 as tf\n'), ((2106, 2123), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['(6)'], {}), '(6)\n', (2120, 2123), True, 'import tensorflow.compat.v1 as tf\n'), ((2339, 2403), 'numpy.array', 'np.array', (['[0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0]'], {}), '([0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0])\n', (2347, 2403), True, 'import numpy as np\n'), ((3151, 3198), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (3162, 3198), True, 'import tensorflow.compat.v1 as tf\n'), ((3951, 3987), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['entropy', 'policy_logits'], {}), '(entropy, policy_logits)\n', (3963, 3987), True, 'import tensorflow.compat.v1 as tf\n'), ((4679, 4731), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1.0] * num_actions]'], {'dtype': 'tf.float32'}), '([[1.0] * num_actions], dtype=tf.float32)\n', (4690, 4731), True, 'import tensorflow.compat.v1 as tf\n'), ((5667, 5710), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B, A]'}), '(tf.float32, shape=[T, B, A])\n', 
(5681, 5710), True, 'import tensorflow.compat.v1 as tf\n'), ((7104, 7151), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (7115, 7151), True, 'import tensorflow.compat.v1 as tf\n'), ((7236, 7275), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions_np'], {'dtype': 'tf.int32'}), '(actions_np, dtype=tf.int32)\n', (7247, 7275), True, 'import tensorflow.compat.v1 as tf\n'), ((7916, 7933), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['(2)'], {}), '(2)\n', (7930, 7933), True, 'import tensorflow.compat.v1 as tf\n'), ((8470, 8508), 'tree.flatten', 'nest.flatten', (['self._policy_logits_nest'], {}), '(self._policy_logits_nest)\n', (8482, 8508), True, 'import tree as nest\n'), ((9548, 9565), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['(4)'], {}), '(4)\n', (9562, 9565), True, 'import tensorflow.compat.v1 as tf\n'), ((10735, 10787), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['[total_loss]', '[actions, action_values]'], {}), '([total_loss], [actions, action_values])\n', (10747, 10787), True, 'import tensorflow.compat.v1 as tf\n'), ((12239, 12283), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits'], {'dtype': 'tf.float32'}), '(policy_logits, dtype=tf.float32)\n', (12250, 12283), True, 'import tensorflow.compat.v1 as tf\n'), ((12374, 12410), 'tensorflow.compat.v1.constant', 'tf.constant', (['actions'], {'dtype': 'tf.int32'}), '(actions, dtype=tf.int32)\n', (12385, 12410), True, 'import tensorflow.compat.v1 as tf\n'), ((15938, 15981), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B, A]'}), '(tf.float32, shape=[T, B, A])\n', (15952, 15981), True, 'import tensorflow.compat.v1 as tf\n'), ((16007, 16047), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (16021, 16047), True, 'import tensorflow.compat.v1 as tf\n'), ((16065, 
16103), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[T, B]'}), '(tf.int32, shape=[T, B])\n', (16079, 16103), True, 'import tensorflow.compat.v1 as tf\n'), ((16121, 16161), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (16135, 16161), True, 'import tensorflow.compat.v1 as tf\n'), ((16182, 16222), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (16196, 16222), True, 'import tensorflow.compat.v1 as tf\n'), ((16248, 16285), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[B]'}), '(tf.float32, shape=[B])\n', (16262, 16285), True, 'import tensorflow.compat.v1 as tf\n'), ((17352, 17395), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B, A]'}), '(tf.float32, shape=[T, B, A])\n', (17366, 17395), True, 'import tensorflow.compat.v1 as tf\n'), ((17421, 17461), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (17435, 17461), True, 'import tensorflow.compat.v1 as tf\n'), ((17479, 17517), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[T, B]'}), '(tf.int32, shape=[T, B])\n', (17493, 17517), True, 'import tensorflow.compat.v1 as tf\n'), ((17535, 17575), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (17549, 17575), True, 'import tensorflow.compat.v1 as tf\n'), ((17596, 17636), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[T, B]'}), '(tf.float32, shape=[T, B])\n', (17610, 17636), True, 'import tensorflow.compat.v1 as tf\n'), ((17662, 17699), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[B]'}), '(tf.float32, shape=[B])\n', (17676, 17699), True, 'import 
tensorflow.compat.v1 as tf\n'), ((18766, 18800), 'six.moves.xrange', 'xrange', (['self.num_action_components'], {}), '(self.num_action_components)\n', (18772, 18800), False, 'from six.moves import xrange\n'), ((19008, 19042), 'six.moves.xrange', 'xrange', (['self.num_action_components'], {}), '(self.num_action_components)\n', (19014, 19042), False, 'from six.moves import xrange\n'), ((20361, 20393), 'tree.flatten', 'nest.flatten', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (20373, 20393), True, 'import tree as nest\n'), ((20485, 20510), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 1, 3]'], {}), '([2, 1, 3])\n', (20499, 20510), True, 'import tensorflow.compat.v1 as tf\n'), ((21455, 21487), 'tree.flatten', 'nest.flatten', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (21467, 21487), True, 'import tree as nest\n'), ((21579, 21604), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 1, 3]'], {}), '([2, 1, 3])\n', (21593, 21604), True, 'import tensorflow.compat.v1 as tf\n'), ((22464, 22497), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['self.multi_op.loss'], {}), '(self.multi_op.loss)\n', (22477, 22497), True, 'import tensorflow.compat.v1 as tf\n'), ((22585, 22612), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['self.op.loss'], {}), '(self.op.loss)\n', (22598, 22612), True, 'import tensorflow.compat.v1 as tf\n'), ((22829, 22875), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['total_loss', 'self.baseline_values'], {}), '(total_loss, self.baseline_values)\n', (22841, 22875), True, 'import tensorflow.compat.v1 as tf\n'), ((23448, 23470), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 1]'], {}), '([2, 1])\n', (23462, 23470), True, 'import tensorflow.compat.v1 as tf\n'), ((23611, 23661), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['total_loss', 'self.invalid_grad_inputs'], {}), '(total_loss, self.invalid_grad_inputs)\n', (23623, 23661), True, 'import tensorflow.compat.v1 as tf\n'), 
((1704, 1751), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (1715, 1751), True, 'import tensorflow.compat.v1 as tf\n'), ((2958, 3005), 'tensorflow.compat.v1.constant', 'tf.constant', (['policy_logits_np'], {'dtype': 'tf.float32'}), '(policy_logits_np, dtype=tf.float32)\n', (2969, 3005), True, 'import tensorflow.compat.v1 as tf\n'), ((4553, 4595), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1.0] * n]'], {'dtype': 'tf.float32'}), '([[1.0] * n], dtype=tf.float32)\n', (4564, 4595), True, 'import tensorflow.compat.v1 as tf\n'), ((7171, 7206), 'six.moves.xrange', 'xrange', (['self._num_action_components'], {}), '(self._num_action_components)\n', (7177, 7206), False, 'from six.moves import xrange\n'), ((7313, 7348), 'six.moves.xrange', 'xrange', (['self._num_action_components'], {}), '(self._num_action_components)\n', (7319, 7348), False, 'from six.moves import xrange\n'), ((8215, 8245), 'numpy.array', 'np.array', (['[1.386294, 1.313262]'], {}), '([1.386294, 1.313262])\n', (8223, 8245), True, 'import numpy as np\n'), ((8850, 8882), 'tree.flatten', 'nest.flatten', (['self._actions_nest'], {}), '(self._actions_nest)\n', (8862, 8882), True, 'import tree as nest\n'), ((12321, 12350), 'six.moves.xrange', 'xrange', (['num_action_components'], {}), '(num_action_components)\n', (12327, 12350), False, 'from six.moves import xrange\n'), ((12442, 12471), 'six.moves.xrange', 'xrange', (['num_action_components'], {}), '(num_action_components)\n', (12448, 12471), False, 'from six.moves import xrange\n'), ((14902, 14921), 'numpy.log', 'np.log', (['num_actions'], {}), '(num_actions)\n', (14908, 14921), True, 'import numpy as np\n'), ((20276, 20309), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['loss', 'policy_logits'], {}), '(loss, policy_logits)\n', (20288, 20309), True, 'import tensorflow.compat.v1 as tf\n'), ((21370, 21403), 'tensorflow.compat.v1.gradients', 'tf.gradients', 
(['loss', 'policy_logits'], {}), '(loss, policy_logits)\n', (21382, 21403), True, 'import tensorflow.compat.v1 as tf\n'), ((22699, 22738), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['total_loss', 'policy_logits'], {}), '(total_loss, policy_logits)\n', (22711, 22738), True, 'import tensorflow.compat.v1 as tf\n'), ((22773, 22805), 'tree.flatten', 'nest.flatten', (['policy_logits_nest'], {}), '(policy_logits_nest)\n', (22785, 22805), True, 'import tree as nest\n'), ((22973, 22998), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 1, 3]'], {}), '([2, 1, 3])\n', (22987, 22998), True, 'import tensorflow.compat.v1 as tf\n'), ((1791, 1820), 'six.moves.xrange', 'xrange', (['num_action_components'], {}), '(num_action_components)\n', (1797, 1820), False, 'from six.moves import xrange\n'), ((3045, 3074), 'six.moves.xrange', 'xrange', (['num_action_components'], {}), '(num_action_components)\n', (3051, 3074), False, 'from six.moves import xrange\n'), ((14137, 14151), 'numpy.exp', 'np.exp', (['logits'], {}), '(logits)\n', (14143, 14151), True, 'import numpy as np\n')] |
from __future__ import print_function
import logging
import pprint
import math
import numpy
import traceback
import operator
import theano
from six.moves import input
from picklable_itertools.extras import equizip
from theano import tensor
from blocks.bricks import Tanh, Initializable
from blocks.bricks.base import application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import SimpleRecurrent, Bidirectional
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.parallel import Fork
from blocks.bricks.sequence_generators import (
SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback)
from blocks.config import config
from blocks.graph import ComputationGraph
from fuel.transformers import Mapping, Batch, Padding, Filter
from fuel.datasets import OneBillionWord, TextFile
from fuel.schemes import ConstantScheme
from blocks.serialization import load_parameter_values
from blocks.algorithms import (GradientDescent, Scale,
StepClipping, CompositeRule)
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.filter import VariableFilter
from blocks.utils import dict_union
from blocks.search import BeamSearch
config.recursion_limit = 100000
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
# Dictionaries
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
char2code = {v: k for k, v in code2char.items()}
def reverse_words(sample):
sentence = sample[0]
result = []
word_start = -1
for i, code in enumerate(sentence):
if code >= char2code[' ']:
if word_start >= 0:
result.extend(sentence[i - 1:word_start - 1:-1])
word_start = -1
result.append(code)
else:
if word_start == -1:
word_start = i
return (result,)
def _lower(s):
return s.lower()
def _transpose(data):
return tuple(array.T for array in data)
def _filter_long(data):
return len(data[0]) <= 100
def _is_nan(log):
return math.isnan(log.current_row['total_gradient_norm'])
class WordReverser(Initializable):
"""The top brick.
It is often convenient to gather all bricks of the model under the
roof of a single top brick.
"""
def __init__(self, dimension, alphabet_size, **kwargs):
super(WordReverser, self).__init__(**kwargs)
encoder = Bidirectional(
SimpleRecurrent(dim=dimension, activation=Tanh()))
fork = Fork([name for name in encoder.prototype.apply.sequences
if name != 'mask'])
fork.input_dim = dimension
fork.output_dims = [encoder.prototype.get_dim(name) for name in fork.input_names]
lookup = LookupTable(alphabet_size, dimension)
transition = SimpleRecurrent(
activation=Tanh(),
dim=dimension, name="transition")
attention = SequenceContentAttention(
state_names=transition.apply.states,
attended_dim=2 * dimension, match_dim=dimension, name="attention")
readout = Readout(
readout_dim=alphabet_size,
source_names=[transition.apply.states[0],
attention.take_glimpses.outputs[0]],
emitter=SoftmaxEmitter(name="emitter"),
feedback_brick=LookupFeedback(alphabet_size, dimension),
name="readout")
generator = SequenceGenerator(
readout=readout, transition=transition, attention=attention,
name="generator")
self.lookup = lookup
self.fork = fork
self.encoder = encoder
self.generator = generator
self.children = [lookup, fork, encoder, generator]
@application
def cost(self, chars, chars_mask, targets, targets_mask):
return self.generator.cost_matrix(
targets, targets_mask,
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True),
mask=chars_mask)),
attended_mask=chars_mask)
@application
def generate(self, chars):
return self.generator.generate(
n_steps=3 * chars.shape[0], batch_size=chars.shape[1],
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True))),
attended_mask=tensor.ones(chars.shape))
def main(mode, save_path, num_batches, data_path=None):
reverser = WordReverser(100, len(char2code), name="reverser")
if mode == "train":
# Data processing pipeline
dataset_options = dict(dictionary=char2code, level="character",
preprocess=_lower)
if data_path:
dataset = TextFile(data_path, **dataset_options)
else:
dataset = OneBillionWord("training", [99], **dataset_options)
data_stream = dataset.get_example_stream()
data_stream = Filter(data_stream, _filter_long)
data_stream = Mapping(data_stream, reverse_words,
add_sources=("targets",))
data_stream = Batch(data_stream, iteration_scheme=ConstantScheme(10))
data_stream = Padding(data_stream)
data_stream = Mapping(data_stream, _transpose)
# Initialization settings
reverser.weights_init = IsotropicGaussian(0.1)
reverser.biases_init = Constant(0.0)
reverser.push_initialization_config()
reverser.encoder.weights_init = Orthogonal()
reverser.generator.transition.weights_init = Orthogonal()
# Build the cost computation graph
chars = tensor.lmatrix("features")
chars_mask = tensor.matrix("features_mask")
targets = tensor.lmatrix("targets")
targets_mask = tensor.matrix("targets_mask")
batch_cost = reverser.cost(
chars, chars_mask, targets, targets_mask).sum()
batch_size = chars.shape[1].copy(name="batch_size")
cost = aggregation.mean(batch_cost, batch_size)
cost.name = "sequence_log_likelihood"
logger.info("Cost graph is built")
# Give an idea of what's going on
model = Model(cost)
parameters = model.get_parameter_dict()
logger.info("Parameters:\n" +
pprint.pformat(
[(key, value.get_value().shape) for key, value
in parameters.items()],
width=120))
# Initialize parameters
for brick in model.get_top_bricks():
brick.initialize()
# Define the training algorithm.
cg = ComputationGraph(cost)
algorithm = GradientDescent(
cost=cost, parameters=cg.parameters,
step_rule=CompositeRule([StepClipping(10.0), Scale(0.01)]))
# Fetch variables useful for debugging
generator = reverser.generator
(energies,) = VariableFilter(
applications=[generator.readout.readout],
name_regex="output")(cg.variables)
(activations,) = VariableFilter(
applications=[generator.transition.apply],
name=generator.transition.apply.states[0])(cg.variables)
max_length = chars.shape[0].copy(name="max_length")
cost_per_character = aggregation.mean(
batch_cost, batch_size * max_length).copy(
name="character_log_likelihood")
min_energy = energies.min().copy(name="min_energy")
max_energy = energies.max().copy(name="max_energy")
mean_activation = abs(activations).mean().copy(
name="mean_activation")
observables = [
cost, min_energy, max_energy, mean_activation,
batch_size, max_length, cost_per_character,
algorithm.total_step_norm, algorithm.total_gradient_norm]
for name, parameter in parameters.items():
observables.append(parameter.norm(2).copy(name + "_norm"))
observables.append(algorithm.gradients[parameter].norm(2).copy(
name + "_grad_norm"))
# Construct the main loop and start training!
average_monitoring = TrainingDataMonitoring(
observables, prefix="average", every_n_batches=10)
main_loop = MainLoop(
model=model,
data_stream=data_stream,
algorithm=algorithm,
extensions=[
Timing(),
TrainingDataMonitoring(observables, after_batch=True),
average_monitoring,
FinishAfter(after_n_batches=num_batches)
# This shows a way to handle NaN emerging during
# training: simply finish it.
.add_condition(["after_batch"], _is_nan),
# Saving the model and the log separately is convenient,
# because loading the whole pickle takes quite some time.
Checkpoint(save_path, every_n_batches=500,
save_separately=["model", "log"]),
Printing(every_n_batches=1)])
main_loop.run()
elif mode == "sample" or mode == "beam_search":
chars = tensor.lmatrix("input")
generated = reverser.generate(chars)
model = Model(generated)
logger.info("Loading the model..")
model.set_parameter_values(load_parameter_values(save_path))
def generate(input_):
"""Generate output sequences for an input sequence.
Incapsulates most of the difference between sampling and beam
search.
Returns
-------
outputs : list of lists
Trimmed output sequences.
costs : list
The negative log-likelihood of generating the respective
sequences.
"""
if mode == "beam_search":
samples, = VariableFilter(
applications=[reverser.generator.generate], name="outputs")(
ComputationGraph(generated[1]))
# NOTE: this will recompile beam search functions
# every time user presses Enter. Do not create
# a new `BeamSearch` object every time if
# speed is important for you.
beam_search = BeamSearch(samples)
outputs, costs = beam_search.search(
{chars: input_}, char2code['</S>'],
3 * input_.shape[0])
else:
_1, outputs, _2, _3, costs = (
model.get_theano_function()(input_))
outputs = list(outputs.T)
costs = list(costs.T)
for i in range(len(outputs)):
outputs[i] = list(outputs[i])
try:
true_length = outputs[i].index(char2code['</S>']) + 1
except ValueError:
true_length = len(outputs[i])
outputs[i] = outputs[i][:true_length]
costs[i] = costs[i][:true_length].sum()
return outputs, costs
while True:
try:
line = input("Enter a sentence\n")
message = ("Enter the number of samples\n" if mode == "sample"
else "Enter the beam size\n")
batch_size = int(input(message))
except EOFError:
break
except Exception:
traceback.print_exc()
continue
encoded_input = [char2code.get(char, char2code["<UNK>"])
for char in line.lower().strip()]
encoded_input = ([char2code['<S>']] + encoded_input +
[char2code['</S>']])
print("Encoder input:", encoded_input)
target = reverse_words((encoded_input,))[0]
print("Target: ", target)
samples, costs = generate(
numpy.repeat(numpy.array(encoded_input)[:, None],
batch_size, axis=1))
messages = []
for sample, cost in equizip(samples, costs):
message = "({})".format(cost)
message += "".join(code2char[code] for code in sample)
if sample == target:
message += " CORRECT!"
messages.append((cost, message))
messages.sort(key=operator.itemgetter(0), reverse=True)
for _, message in messages:
print(message)
| [
"logging.getLogger",
"blocks.initialization.Orthogonal",
"theano.tensor.ones",
"blocks.bricks.attention.SequenceContentAttention",
"fuel.datasets.OneBillionWord",
"numpy.array",
"blocks.bricks.parallel.Fork",
"blocks.graph.ComputationGraph",
"fuel.transformers.Filter",
"operator.itemgetter",
"si... | [((1600, 1627), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1617, 1627), False, 'import logging\n'), ((2539, 2589), 'math.isnan', 'math.isnan', (["log.current_row['total_gradient_norm']"], {}), "(log.current_row['total_gradient_norm'])\n", (2549, 2589), False, 'import math\n'), ((2986, 3062), 'blocks.bricks.parallel.Fork', 'Fork', (["[name for name in encoder.prototype.apply.sequences if name != 'mask']"], {}), "([name for name in encoder.prototype.apply.sequences if name != 'mask'])\n", (2990, 3062), False, 'from blocks.bricks.parallel import Fork\n'), ((3225, 3262), 'blocks.bricks.lookup.LookupTable', 'LookupTable', (['alphabet_size', 'dimension'], {}), '(alphabet_size, dimension)\n', (3236, 3262), False, 'from blocks.bricks.lookup import LookupTable\n'), ((3398, 3531), 'blocks.bricks.attention.SequenceContentAttention', 'SequenceContentAttention', ([], {'state_names': 'transition.apply.states', 'attended_dim': '(2 * dimension)', 'match_dim': 'dimension', 'name': '"""attention"""'}), "(state_names=transition.apply.states, attended_dim=\n 2 * dimension, match_dim=dimension, name='attention')\n", (3422, 3531), False, 'from blocks.bricks.attention import SequenceContentAttention\n'), ((3904, 4005), 'blocks.bricks.sequence_generators.SequenceGenerator', 'SequenceGenerator', ([], {'readout': 'readout', 'transition': 'transition', 'attention': 'attention', 'name': '"""generator"""'}), "(readout=readout, transition=transition, attention=\n attention, name='generator')\n", (3921, 4005), False, 'from blocks.bricks.sequence_generators import SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback\n'), ((5497, 5530), 'fuel.transformers.Filter', 'Filter', (['data_stream', '_filter_long'], {}), '(data_stream, _filter_long)\n', (5503, 5530), False, 'from fuel.transformers import Mapping, Batch, Padding, Filter\n'), ((5553, 5614), 'fuel.transformers.Mapping', 'Mapping', (['data_stream', 'reverse_words'], {'add_sources': 
"('targets',)"}), "(data_stream, reverse_words, add_sources=('targets',))\n", (5560, 5614), False, 'from fuel.transformers import Mapping, Batch, Padding, Filter\n'), ((5745, 5765), 'fuel.transformers.Padding', 'Padding', (['data_stream'], {}), '(data_stream)\n', (5752, 5765), False, 'from fuel.transformers import Mapping, Batch, Padding, Filter\n'), ((5788, 5820), 'fuel.transformers.Mapping', 'Mapping', (['data_stream', '_transpose'], {}), '(data_stream, _transpose)\n', (5795, 5820), False, 'from fuel.transformers import Mapping, Batch, Padding, Filter\n'), ((5888, 5910), 'blocks.initialization.IsotropicGaussian', 'IsotropicGaussian', (['(0.1)'], {}), '(0.1)\n', (5905, 5910), False, 'from blocks.initialization import Orthogonal, IsotropicGaussian, Constant\n'), ((5942, 5955), 'blocks.initialization.Constant', 'Constant', (['(0.0)'], {}), '(0.0)\n', (5950, 5955), False, 'from blocks.initialization import Orthogonal, IsotropicGaussian, Constant\n'), ((6042, 6054), 'blocks.initialization.Orthogonal', 'Orthogonal', ([], {}), '()\n', (6052, 6054), False, 'from blocks.initialization import Orthogonal, IsotropicGaussian, Constant\n'), ((6108, 6120), 'blocks.initialization.Orthogonal', 'Orthogonal', ([], {}), '()\n', (6118, 6120), False, 'from blocks.initialization import Orthogonal, IsotropicGaussian, Constant\n'), ((6181, 6207), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""features"""'], {}), "('features')\n", (6195, 6207), False, 'from theano import tensor\n'), ((6229, 6259), 'theano.tensor.matrix', 'tensor.matrix', (['"""features_mask"""'], {}), "('features_mask')\n", (6242, 6259), False, 'from theano import tensor\n'), ((6278, 6303), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""targets"""'], {}), "('targets')\n", (6292, 6303), False, 'from theano import tensor\n'), ((6327, 6356), 'theano.tensor.matrix', 'tensor.matrix', (['"""targets_mask"""'], {}), "('targets_mask')\n", (6340, 6356), False, 'from theano import tensor\n'), ((6528, 6568), 
'blocks.monitoring.aggregation.mean', 'aggregation.mean', (['batch_cost', 'batch_size'], {}), '(batch_cost, batch_size)\n', (6544, 6568), False, 'from blocks.monitoring import aggregation\n'), ((6717, 6728), 'blocks.model.Model', 'Model', (['cost'], {}), '(cost)\n', (6722, 6728), False, 'from blocks.model import Model\n'), ((7171, 7193), 'blocks.graph.ComputationGraph', 'ComputationGraph', (['cost'], {}), '(cost)\n', (7187, 7193), False, 'from blocks.graph import ComputationGraph\n'), ((8699, 8772), 'blocks.extensions.monitoring.TrainingDataMonitoring', 'TrainingDataMonitoring', (['observables'], {'prefix': '"""average"""', 'every_n_batches': '(10)'}), "(observables, prefix='average', every_n_batches=10)\n", (8721, 8772), False, 'from blocks.extensions.monitoring import TrainingDataMonitoring\n'), ((5297, 5335), 'fuel.datasets.TextFile', 'TextFile', (['data_path'], {}), '(data_path, **dataset_options)\n', (5305, 5335), False, 'from fuel.datasets import OneBillionWord, TextFile\n'), ((5372, 5423), 'fuel.datasets.OneBillionWord', 'OneBillionWord', (['"""training"""', '[99]'], {}), "('training', [99], **dataset_options)\n", (5386, 5423), False, 'from fuel.datasets import OneBillionWord, TextFile\n'), ((7461, 7538), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'applications': '[generator.readout.readout]', 'name_regex': '"""output"""'}), "(applications=[generator.readout.readout], name_regex='output')\n", (7475, 7538), False, 'from blocks.filter import VariableFilter\n'), ((7603, 7708), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'applications': '[generator.transition.apply]', 'name': 'generator.transition.apply.states[0]'}), '(applications=[generator.transition.apply], name=generator.\n transition.apply.states[0])\n', (7617, 7708), False, 'from blocks.filter import VariableFilter\n'), ((9701, 9724), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""input"""'], {}), "('input')\n", (9715, 9724), False, 'from theano import tensor\n'), ((9786, 9802), 
'blocks.model.Model', 'Model', (['generated'], {}), '(generated)\n', (9791, 9802), False, 'from blocks.model import Model\n'), ((3324, 3330), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (3328, 3330), False, 'from blocks.bricks import Tanh, Initializable\n'), ((3755, 3785), 'blocks.bricks.sequence_generators.SoftmaxEmitter', 'SoftmaxEmitter', ([], {'name': '"""emitter"""'}), "(name='emitter')\n", (3769, 3785), False, 'from blocks.bricks.sequence_generators import SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback\n'), ((3814, 3854), 'blocks.bricks.sequence_generators.LookupFeedback', 'LookupFeedback', (['alphabet_size', 'dimension'], {}), '(alphabet_size, dimension)\n', (3828, 3854), False, 'from blocks.bricks.sequence_generators import SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback\n'), ((4921, 4945), 'theano.tensor.ones', 'tensor.ones', (['chars.shape'], {}), '(chars.shape)\n', (4932, 4945), False, 'from theano import tensor\n'), ((5703, 5721), 'fuel.schemes.ConstantScheme', 'ConstantScheme', (['(10)'], {}), '(10)\n', (5717, 5721), False, 'from fuel.schemes import ConstantScheme\n'), ((7832, 7885), 'blocks.monitoring.aggregation.mean', 'aggregation.mean', (['batch_cost', '(batch_size * max_length)'], {}), '(batch_cost, batch_size * max_length)\n', (7848, 7885), False, 'from blocks.monitoring import aggregation\n'), ((9881, 9913), 'blocks.serialization.load_parameter_values', 'load_parameter_values', (['save_path'], {}), '(save_path)\n', (9902, 9913), False, 'from blocks.serialization import load_parameter_values\n'), ((12686, 12709), 'picklable_itertools.extras.equizip', 'equizip', (['samples', 'costs'], {}), '(samples, costs)\n', (12693, 12709), False, 'from picklable_itertools.extras import equizip\n'), ((2962, 2968), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (2966, 2968), False, 'from blocks.bricks import Tanh, Initializable\n'), ((8952, 8960), 'blocks.extensions.Timing', 'Timing', ([], {}), '()\n', (8958, 8960), False, 'from 
blocks.extensions import FinishAfter, Printing, Timing\n'), ((8978, 9031), 'blocks.extensions.monitoring.TrainingDataMonitoring', 'TrainingDataMonitoring', (['observables'], {'after_batch': '(True)'}), '(observables, after_batch=True)\n', (9000, 9031), False, 'from blocks.extensions.monitoring import TrainingDataMonitoring\n'), ((9458, 9534), 'blocks.extensions.saveload.Checkpoint', 'Checkpoint', (['save_path'], {'every_n_batches': '(500)', 'save_separately': "['model', 'log']"}), "(save_path, every_n_batches=500, save_separately=['model', 'log'])\n", (9468, 9534), False, 'from blocks.extensions.saveload import Checkpoint\n'), ((9579, 9606), 'blocks.extensions.Printing', 'Printing', ([], {'every_n_batches': '(1)'}), '(every_n_batches=1)\n', (9587, 9606), False, 'from blocks.extensions import FinishAfter, Printing, Timing\n'), ((10847, 10866), 'blocks.search.BeamSearch', 'BeamSearch', (['samples'], {}), '(samples)\n', (10857, 10866), False, 'from blocks.search import BeamSearch\n'), ((11724, 11751), 'six.moves.input', 'input', (['"""Enter a sentence\n"""'], {}), "('Enter a sentence\\n')\n", (11729, 11751), False, 'from six.moves import input\n'), ((7317, 7335), 'blocks.algorithms.StepClipping', 'StepClipping', (['(10.0)'], {}), '(10.0)\n', (7329, 7335), False, 'from blocks.algorithms import GradientDescent, Scale, StepClipping, CompositeRule\n'), ((7337, 7348), 'blocks.algorithms.Scale', 'Scale', (['(0.01)'], {}), '(0.01)\n', (7342, 7348), False, 'from blocks.algorithms import GradientDescent, Scale, StepClipping, CompositeRule\n'), ((10431, 10505), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'applications': '[reverser.generator.generate]', 'name': '"""outputs"""'}), "(applications=[reverser.generator.generate], name='outputs')\n", (10445, 10505), False, 'from blocks.filter import VariableFilter\n'), ((10552, 10582), 'blocks.graph.ComputationGraph', 'ComputationGraph', (['generated[1]'], {}), '(generated[1])\n', (10568, 10582), False, 'from blocks.graph 
import ComputationGraph\n'), ((11918, 11932), 'six.moves.input', 'input', (['message'], {}), '(message)\n', (11923, 11932), False, 'from six.moves import input\n'), ((12031, 12052), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12050, 12052), False, 'import traceback\n'), ((12987, 13009), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (13006, 13009), False, 'import operator\n'), ((9085, 9125), 'blocks.extensions.FinishAfter', 'FinishAfter', ([], {'after_n_batches': 'num_batches'}), '(after_n_batches=num_batches)\n', (9096, 9125), False, 'from blocks.extensions import FinishAfter, Printing, Timing\n'), ((12541, 12567), 'numpy.array', 'numpy.array', (['encoded_input'], {}), '(encoded_input)\n', (12552, 12567), False, 'import numpy\n')] |
import math
import random
import matplotlib.pyplot as plt
import numpy as np
from collections import deque, namedtuple
from PIL import Image
from th10.game import TH10
from config import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
game = TH10()
memory = deque()
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(IMG_CHANNELS, 32, kernel_size=8, stride=2)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, kernel_size=8, stride=2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2)
self.bn2 = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(128, 256, kernel_size=4, stride=2)
self.conv5 = nn.Conv2d(256, 512, kernel_size=4, stride=1)
self.fc1 = nn.Linear(2048, 512)
self.head = nn.Linear(512, NUM_OF_ACTIONS)
def forward(self, x):
x = self.bn1(F.relu(self.conv1(x)))
x = F.relu(self.conv2(x))
x = self.bn2(F.relu(self.conv3(x)))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = x.view(x.size(0), -1) # <-- flatten
x = F.relu(self.fc1(x))
x = self.head(x)
return x
class ToTensorWithoutScaling(object):
def __call__(self, picture):
return torch.ByteTensor(np.array(picture)).unsqueeze(0)
transform = T.Compose([T.Grayscale(), T.Resize((TRANSFORM_HEIGHT, TRANSFORM_WIDTH)), ToTensorWithoutScaling()])
def transform_state(single_state):
# PIL -> Grayscale -> Resize -> ToTensor
single_state = transform(single_state)
single_state = single_state.unsqueeze(0)
single_state = single_state.to(device, dtype=torch.float)
return single_state
if __name__ == '__main__':
policy_net = DQN().to(device)
policy_net.load_state_dict(torch.load(f'./weights_{NUM_OF_ACTIONS}'))
target_net = DQN().to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
"""
test_input = torch.rand(1, 4, 128, 128).to(device, dtype=torch.float)
policy_net(test_input)
"""
optimizer = optim.Adam(policy_net.parameters())
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'is_terminal'))
state, _, _ = game.play(-1)
state = transform_state(state)
state = torch.cat((state, state, state, state), 1)
steps = 0
while True:
loss = 0
train_q = torch.tensor([0])
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps / EPS_DECAY)
if random.random() <= eps_threshold:
# choose a random action
action = torch.tensor([[random.randrange(NUM_OF_ACTIONS)]], device=device, dtype=torch.long)
q_val = 0
else:
# input a stack of 4 images, get the prediction
q = policy_net(state).max(1)
action = q[1].view(1, 1)
q_val = q[0].item()
next_state, reward, is_terminal = game.play(action.item())
if next_state is None:
continue
next_state = transform_state(next_state)
next_state = torch.cat((next_state, state[:, :3]), 1)
reward = torch.tensor([reward], device=device, dtype=torch.float)
is_terminal = torch.tensor([is_terminal], device=device, dtype=torch.uint8)
'''
We need enough states in our experience replay deque so that we can take a random sample from it of the size we declared.
Therefore we wait until a certain number and observe the environment until we're ready.
'''
memory.append((state, action, next_state, reward, is_terminal))
if len(memory) > EXP_REPLAY_MEMORY:
memory.popleft()
# Optimize
if len(memory) > BATCH_SIZE:
# Batches
transitions = random.sample(memory, BATCH_SIZE)
batch = Transition(*zip(*transitions))
# Current results
state_batch = torch.cat(batch.state)
next_state_batch = torch.cat(batch.next_state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
is_terminal_batch = torch.cat(batch.is_terminal)
state_action_values = policy_net(state_batch).gather(1, action_batch)
# Non-final next state
non_final_mask = is_terminal_batch == 0
non_final_next_states = next_state_batch[non_final_mask]
# Non-final next state reward
next_state_values = torch.zeros(BATCH_SIZE, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
# Compute the expected Q values
# (current state reward) + (next state reward) * gamma
expected_state_action_values = reward_batch + (next_state_values * GAMMA)
train_q = expected_state_action_values
# Optimize with mean squared error
# loss = F.mse_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize with Huber loss
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
state = next_state
steps += 1
if steps % STEPS_SAVE == 0:
torch.save(policy_net.state_dict(), f'./weights_{NUM_OF_ACTIONS}')
if steps % TARGET_UPDATE == 0:
target_net.load_state_dict(policy_net.state_dict())
'''
img = state[0, 0:3]
img = img.data.cpu().numpy()
img = img.transpose((1, 2, 0))
plt.imshow(img)
plt.savefig(f'steps/{steps}.png')
'''
print("Timestep: %d, Action: %d, Reward: %.2f, q: %.2f, train_q_min: %.2f, train_q_max: %.2f, Loss: %.2f" %
(steps, action.item(), reward.item(), q_val, torch.min(train_q), torch.max(train_q), loss))
| [
"torch.max",
"torchvision.transforms.Grayscale",
"torch.min",
"numpy.array",
"torch.cuda.is_available",
"math.exp",
"torch.nn.BatchNorm2d",
"collections.deque",
"random.sample",
"collections.namedtuple",
"random.randrange",
"torchvision.transforms.Resize",
"torch.cat",
"torch.load",
"tor... | [((400, 406), 'th10.game.TH10', 'TH10', ([], {}), '()\n', (404, 406), False, 'from th10.game import TH10\n'), ((416, 423), 'collections.deque', 'deque', ([], {}), '()\n', (421, 423), False, 'from collections import deque, namedtuple\n'), ((2297, 2385), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward', 'is_terminal')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward',\n 'is_terminal'))\n", (2307, 2385), False, 'from collections import deque, namedtuple\n'), ((2463, 2505), 'torch.cat', 'torch.cat', (['(state, state, state, state)', '(1)'], {}), '((state, state, state, state), 1)\n', (2472, 2505), False, 'import torch\n'), ((355, 380), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (378, 380), False, 'import torch\n'), ((529, 581), 'torch.nn.Conv2d', 'nn.Conv2d', (['IMG_CHANNELS', '(32)'], {'kernel_size': '(8)', 'stride': '(2)'}), '(IMG_CHANNELS, 32, kernel_size=8, stride=2)\n', (538, 581), True, 'import torch.nn as nn\n'), ((601, 619), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (615, 619), True, 'import torch.nn as nn\n'), ((641, 683), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(8)', 'stride': '(2)'}), '(32, 64, kernel_size=8, stride=2)\n', (650, 683), True, 'import torch.nn as nn\n'), ((706, 749), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(4)', 'stride': '(2)'}), '(64, 128, kernel_size=4, stride=2)\n', (715, 749), True, 'import torch.nn as nn\n'), ((769, 788), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (783, 788), True, 'import torch.nn as nn\n'), ((810, 854), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(4)', 'stride': '(2)'}), '(128, 256, kernel_size=4, stride=2)\n', (819, 854), True, 'import torch.nn as nn\n'), ((876, 920), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(4)', 'stride': '(1)'}), '(256, 512, 
kernel_size=4, stride=1)\n', (885, 920), True, 'import torch.nn as nn\n'), ((941, 961), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(512)'], {}), '(2048, 512)\n', (950, 961), True, 'import torch.nn as nn\n'), ((982, 1012), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'NUM_OF_ACTIONS'], {}), '(512, NUM_OF_ACTIONS)\n', (991, 1012), True, 'import torch.nn as nn\n'), ((1517, 1530), 'torchvision.transforms.Grayscale', 'T.Grayscale', ([], {}), '()\n', (1528, 1530), True, 'import torchvision.transforms as T\n'), ((1532, 1577), 'torchvision.transforms.Resize', 'T.Resize', (['(TRANSFORM_HEIGHT, TRANSFORM_WIDTH)'], {}), '((TRANSFORM_HEIGHT, TRANSFORM_WIDTH))\n', (1540, 1577), True, 'import torchvision.transforms as T\n'), ((1956, 1997), 'torch.load', 'torch.load', (['f"""./weights_{NUM_OF_ACTIONS}"""'], {}), "(f'./weights_{NUM_OF_ACTIONS}')\n", (1966, 1997), False, 'import torch\n'), ((2572, 2589), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2584, 2589), False, 'import torch\n'), ((3266, 3306), 'torch.cat', 'torch.cat', (['(next_state, state[:, :3])', '(1)'], {}), '((next_state, state[:, :3]), 1)\n', (3275, 3306), False, 'import torch\n'), ((3324, 3380), 'torch.tensor', 'torch.tensor', (['[reward]'], {'device': 'device', 'dtype': 'torch.float'}), '([reward], device=device, dtype=torch.float)\n', (3336, 3380), False, 'import torch\n'), ((3403, 3464), 'torch.tensor', 'torch.tensor', (['[is_terminal]'], {'device': 'device', 'dtype': 'torch.uint8'}), '([is_terminal], device=device, dtype=torch.uint8)\n', (3415, 3464), False, 'import torch\n'), ((2694, 2709), 'random.random', 'random.random', ([], {}), '()\n', (2707, 2709), False, 'import random\n'), ((3965, 3998), 'random.sample', 'random.sample', (['memory', 'BATCH_SIZE'], {}), '(memory, BATCH_SIZE)\n', (3978, 3998), False, 'import random\n'), ((4107, 4129), 'torch.cat', 'torch.cat', (['batch.state'], {}), '(batch.state)\n', (4116, 4129), False, 'import torch\n'), ((4161, 4188), 'torch.cat', 'torch.cat', 
(['batch.next_state'], {}), '(batch.next_state)\n', (4170, 4188), False, 'import torch\n'), ((4216, 4239), 'torch.cat', 'torch.cat', (['batch.action'], {}), '(batch.action)\n', (4225, 4239), False, 'import torch\n'), ((4267, 4290), 'torch.cat', 'torch.cat', (['batch.reward'], {}), '(batch.reward)\n', (4276, 4290), False, 'import torch\n'), ((4323, 4351), 'torch.cat', 'torch.cat', (['batch.is_terminal'], {}), '(batch.is_terminal)\n', (4332, 4351), False, 'import torch\n'), ((4666, 4704), 'torch.zeros', 'torch.zeros', (['BATCH_SIZE'], {'device': 'device'}), '(BATCH_SIZE, device=device)\n', (4677, 4704), False, 'import torch\n'), ((2649, 2683), 'math.exp', 'math.exp', (['(-1.0 * steps / EPS_DECAY)'], {}), '(-1.0 * steps / EPS_DECAY)\n', (2657, 2683), False, 'import math\n'), ((1460, 1477), 'numpy.array', 'np.array', (['picture'], {}), '(picture)\n', (1468, 1477), True, 'import numpy as np\n'), ((6161, 6179), 'torch.min', 'torch.min', (['train_q'], {}), '(train_q)\n', (6170, 6179), False, 'import torch\n'), ((6181, 6199), 'torch.max', 'torch.max', (['train_q'], {}), '(train_q)\n', (6190, 6199), False, 'import torch\n'), ((2801, 2833), 'random.randrange', 'random.randrange', (['NUM_OF_ACTIONS'], {}), '(NUM_OF_ACTIONS)\n', (2817, 2833), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
eTOX ALLIES Applicability Domain Analysis
As described in:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>. and <NAME>. "eTOX ALLIES: an automated pipeLine for linear
interaction energy-based simulations" Journal of Cheminformatics, 2017, 9, 58.
http://doi.org/10.1186/s13321-017-0243-x
"""
import pandas
import re
import logging
import numpy
from sklearn.covariance import EmpiricalCovariance
from pylie.model.liemdframe import LIEMDFrame
def parse_gromacs_decomp(ene_file, parse_rest=True):
    """
    Parse Gromacs per residue decomposition file into a pandas DataFrame
    and calculate trajectory average energy values
    :param ene_file: Gromacs per-residue energy decomposition file
    :type ene_file: :py:file-like object
    :param parse_rest: parse the residual column (*-rest-*)
    :type parse_rest: :py:bool
    :return: pandas DataFrame with one row per residue holding the trajectory
             average 'vdw' and 'ele' energy for that residue
    :rtype: :pandas:DataFrame
    """
    # Import file. sep=r'\s+' is the non-deprecated equivalent of the
    # delim_whitespace=True keyword.
    df = pandas.read_csv(ene_file, sep=r'\s+')
    # LIEMDFrame adds regex-aware column selection on top of DataFrame
    decomp = LIEMDFrame(df)
    # Select columns containing residue numbers. Raw string literals avoid
    # invalid escape sequence warnings for patterns such as '\D'.
    residue_cols = decomp.get_columns(r'(\D+)?[0-9]+(\D+)?', regex=True)
    # Parse the residuals column
    if parse_rest:
        residue_cols.extend(decomp.get_columns(r'(.*?)rest(.*?)', regex=True, flags=re.I))
    # Select residue containing columns and calculate trajectory average
    decomp = decomp[residue_cols]
    select = decomp.mean()
    # Reformat DataFrame, VdW and Elec in seperate columns
    vdw_col = decomp.get_columns(r'(.*?)vdw(.*?)', regex=True, flags=re.I)
    ele_col = decomp.get_columns(r'(.*?)ele(.*?)', regex=True, flags=re.I)
    # Strip every non-digit character to recover the residue number
    r = re.compile(r'\D')
    numbers = [r.sub('', n) for n in ele_col]
    return pandas.DataFrame({'residues': numbers, 'vdw': select[vdw_col].values, 'ele': select[ele_col].values})
def ad_residue_decomp(decomp_df_list, pca_vdw, pca_ele, cases=None):
    """
    eTOX ALLIES per-residue energy decomposition AD analysis
    - Note:
    The `pca_*` argument needs to be a dictionary of a trained eTOX ALLIES
    PCA model for the VdW and Elec component of the residue decomposition
    energy profile. Expected keys: 'scaler', 'pca', 'n_pc', 'sdev',
    'critSD' and 'critOD'.
    :param decomp_df_list: list of DataFrames with average per-residue
           decomposition energy values
    :type decomp_df_list: :py:list
    :param pca_vdw: VdW principle component model based on training set
    :type pca_vdw: :py:dict
    :param pca_ele: Ele principle component model based on training set
    :type pca_ele: :py:dict
    :param cases: List of case ID's
    :type cases: :py:list
    :return: decomposition AD test results, one row per case with the score
             distance (sd), orthogonal distance (od) and CI flag for each of
             the VdW and Ele components
    :rtype: :pandas:DataFrame
    """
    # Create DataFrame to hold results; default case IDs are 1..N
    if cases is None or not len(cases):
        cases = range(1, len(decomp_df_list) + 1)
    assert len(cases) == len(decomp_df_list), AssertionError('Number of cases does not match number of data sets')
    results = pandas.DataFrame({'cases': cases})
    # PCA based decomposition AD analysis, once for VdW and once for Ele
    columns = ('vdw', 'ele')
    for i, pca in enumerate((pca_vdw, pca_ele)):
        # Stack the per-case energy vectors into one matrix (cases x residues)
        data_collection = []
        for df in decomp_df_list:
            data_collection.append(df[columns[i]].values)
        # Presumably a compatibility shim for scaler objects pickled with a
        # different sklearn version — TODO confirm why these attributes are
        # forced to None when absent.
        if 'scale_' not in dir(pca['scaler']):
            pca['scaler'].scale_ = None
        if 'std_' not in dir(pca['scaler']):
            pca['scaler'].std_ = None
        x_sc = pca['scaler'].transform(numpy.array(data_collection))
        # Keep only the first n_pc principal components
        p = pca['pca'].components_[:pca['n_pc']]
        transform = pca['pca'].transform(x_sc)
        transform = numpy.array(transform)[:, :pca['n_pc']]
        # sd: distance within the PCA model space, scaled by the training
        # set standard deviations per component
        sd = numpy.sqrt(numpy.sum(numpy.divide(transform**2, pca['sdev']**2), axis=1))
        # od: orthogonal (reconstruction residual) distance from the model plane
        od = numpy.sqrt(numpy.sum(numpy.subtract(numpy.dot(transform, p), x_sc)**2, axis=1))
        results['{0}_sd'.format(columns[i])] = sd
        results['{0}_od'.format(columns[i])] = od
        # Calculate applicability domain CI value: 1 (outside domain) when
        # either distance exceeds its critical value, else 0
        results['{0}_CI'.format(columns[i])] = ((results['{0}_od'.format(columns[i])] > pca['critOD']) | (
            results['{0}_sd'.format(columns[i])] > pca['critSD'])).astype(int)
    return results
def ad_dene(ene_df, cov, center=None, ci_cutoff=None, columns=None):
    """
    eTOX ALLIES deltaG VdW/Elec energy applicability domain analysis
    Calculates the Mahalanobis distance for a set of deltaG VdW/Elec energy
    values with respect to the distribution in the training set.
    Requires a pandas DataFrame with a VdW and Elec column in it and
    returns the frame (modified in place) with two new columns added to it:
    * mahal: the Mahalanobis distance with respect to the training set
    * CI: the result of the AD test if a cutoff value is provided.
          If the Mahalanobis distance is smaller than the cutoff
          a 0 is listed (test passed) else 1.
    - Note:
    The `cov` argument needs to be a Sklearn EmpiricalCovariance instance that
    is pre-trained using the training data.
    :param ene_df: pandas DataFrame with energy values
    :type ene_df: :pandas:DataFrame
    :param cov: the emperical covariance matrix used to calculate the
                Mahalanobis distance
    :type cov: :sklearn:covariance:EmpiricalCovariance
    :param center: Center each VdW/Elec value pair by subtracting a fixed
                   [VdW, Elec] value pair from it.
    :type center: :py:list
    :param ci_cutoff: The maximum Mahalanobis distance used as cutoff value for
                      the applicability domain test
    :type ci_cutoff: :py:float
    :param columns: pandas DataFrame column names for the VdW and Elec columns.
                    Defaults to ['w_vdw', 'w_coul'].
    :type columns: :py:list
    :return: Mahalanobis distance and AD test results
    :rtype: :pandas:DataFrame
    """
    # Use a None sentinel instead of a mutable (list) default argument
    if columns is None:
        columns = ['w_vdw', 'w_coul']
    # Check if VdW and Elec are present in the DataFrame
    assert set(columns).issubset(set(ene_df.columns)),\
        KeyError('Energy DataFrame has no columns: {0}'.format(', '.join(columns)))
    # Center data if needed; note this mutates the caller's DataFrame
    if center:
        assert len(center) == 2, ValueError('Center should be list of length 2')
        ene_df[columns] = ene_df[columns] - center
    # Calculate Mahalanobis distance
    assert isinstance(cov, EmpiricalCovariance),\
        AssertionError('cov not of type EmpiricalCovariance: {0}'.format(type(cov)))
    ene_df['mahal'] = cov.mahalanobis(ene_df[columns])
    # Calculate applicability domain CI value if ci_cutoff defined.
    # NOTE: a cutoff of 0 is falsy and is treated as "no cutoff".
    if ci_cutoff:
        ene_df['CI'] = (ene_df['mahal'] >= ci_cutoff).astype(int)
        logging.info('DeltaG AD analysis with cutoff {0}'.format(ci_cutoff))
    return ene_df
def ad_dene_yrange(ene_df, ymin, ymax, column='dg_calc'):
    """
    eTOX ALLIES deltaG range applicability domain test
    Flags each case according to whether its calculated deltaG lies strictly
    inside the open interval (`ymin`, `ymax`). The result is written to a
    'CI' column on the frame (1 = inside range, 0 = outside).
    :param ene_df: pandas DataFrame with energy values
    :type ene_df: :pandas:DataFrame
    :param ymin: lower value for deltaG range
    :type ymin: :py:float
    :param ymax: upper value for deltaG range
    :type ymax: :py:float
    :param column: pandas DataFrame column name for deltaG
    :type column: :py:str
    :return: AD test results (same frame, modified in place)
    :rtype: :pandas:DataFrame
    """
    # Guard: the deltaG column must exist before the range test is applied
    assert column in ene_df.columns, KeyError('DataFrame contains no {0} column'.format(column))
    # A case passes (CI == 1) when its deltaG is strictly between the bounds
    within_range = (ene_df[column] > ymin) & (ene_df[column] < ymax)
    ene_df['CI'] = within_range.astype(int)
    logging.info('DeltaG distribution AD analysis between {0} - {1}'.format(ymin, ymax))
    return ene_df
| [
"pylie.model.liemdframe.LIEMDFrame",
"pandas.read_csv",
"re.compile",
"numpy.array",
"numpy.dot",
"pandas.DataFrame",
"numpy.divide"
] | [((988, 1036), 'pandas.read_csv', 'pandas.read_csv', (['ene_file'], {'delim_whitespace': '(True)'}), '(ene_file, delim_whitespace=True)\n', (1003, 1036), False, 'import pandas\n'), ((1094, 1108), 'pylie.model.liemdframe.LIEMDFrame', 'LIEMDFrame', (['df'], {}), '(df)\n', (1104, 1108), False, 'from pylie.model.liemdframe import LIEMDFrame\n'), ((1714, 1731), 're.compile', 're.compile', (['"""\\\\D"""'], {}), "('\\\\D')\n", (1724, 1731), False, 'import re\n'), ((1789, 1894), 'pandas.DataFrame', 'pandas.DataFrame', (["{'residues': numbers, 'vdw': select[vdw_col].values, 'ele': select[ele_col]\n .values}"], {}), "({'residues': numbers, 'vdw': select[vdw_col].values, 'ele':\n select[ele_col].values})\n", (1805, 1894), False, 'import pandas\n'), ((3059, 3093), 'pandas.DataFrame', 'pandas.DataFrame', (["{'cases': cases}"], {}), "({'cases': cases})\n", (3075, 3093), False, 'import pandas\n'), ((3548, 3576), 'numpy.array', 'numpy.array', (['data_collection'], {}), '(data_collection)\n', (3559, 3576), False, 'import numpy\n'), ((3694, 3716), 'numpy.array', 'numpy.array', (['transform'], {}), '(transform)\n', (3705, 3716), False, 'import numpy\n'), ((3769, 3815), 'numpy.divide', 'numpy.divide', (['(transform ** 2)', "(pca['sdev'] ** 2)"], {}), "(transform ** 2, pca['sdev'] ** 2)\n", (3781, 3815), False, 'import numpy\n'), ((3871, 3894), 'numpy.dot', 'numpy.dot', (['transform', 'p'], {}), '(transform, p)\n', (3880, 3894), False, 'import numpy\n')] |
import numpy as np
import argparse
import imutils
import sys
import cv2
from utils.recorder import Recorder
import os
from detectors.get_path import getPath
# Parse command line arguments; --video optionally points at a video file,
# otherwise the camera at device index 1 is opened below.
ap = argparse.ArgumentParser()
ap.add_argument("--video", type=str, default="",
	help="optional path to video file")
args = vars(ap.parse_args())
# Recorder writes annotated frames to a benchmark video file.
recorder = Recorder()
# Load the Kinetics-400 activity labels, one label per line.
labelsPath = getPath("kinetics-400/action_recognition_kinetics.txt")
CLASSES = open(labelsPath).read().strip().split("\n")
# Number of consecutive frames fed to the network per prediction, and the
# square spatial size each frame is resized to inside the blob.
SAMPLE_DURATION = 16
SAMPLE_SIZE = 112
print("[INFO] loading human activity recognition model...")
net = cv2.dnn.readNet(getPath("kinetics-400/resnet-34_kinetics.onnx"))
print("[INFO] accessing video stream...")
# Use the provided video file, or fall back to camera index 1.
video = cv2.VideoCapture(args["video"] if args["video"] else 1)
while True:
    # Collect a clip of SAMPLE_DURATION consecutive frames.
    frames = []
    for i in range(0, SAMPLE_DURATION):
        (grabbed, frame) = video.read()
        if not grabbed:
            print("[INFO] no frame read from stream - exiting")
            sys.exit(0)
        frame = imutils.resize(frame, width=400)
        frames.append(frame)
    # Build a 4D blob from the clip; the mean subtraction values presumably
    # match the model's training preprocessing — TODO confirm.
    blob = cv2.dnn.blobFromImages(frames, 1.0,
        (SAMPLE_SIZE, SAMPLE_SIZE), (114.7748, 107.7354, 99.4750),
        swapRB=True, crop=True)
    # Reorder to (channels, frames, height, width) and add a batch dimension.
    blob = np.transpose(blob, (1, 0, 2, 3))
    blob = np.expand_dims(blob, axis=0)
    net.setInput(blob)
    outputs = net.forward()
    # The highest scoring class is taken as the recognised activity.
    label = CLASSES[np.argmax(outputs)]
    for frame in frames:
        # Draw a filled banner and the predicted label on each frame.
        cv2.rectangle(frame, (0, 0), (300, 40), (0, 0, 0), -1)
        cv2.putText(frame, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 2)
        recorder.record(frame, os.path.join(os.path.dirname(__file__), '../benchmarks/video.kinetics.avi'))
        cv2.imshow("Activity Recognition", frame)
        key = cv2.waitKey(1) & 0xFF
        # NOTE(review): 'q' only breaks out of the per-frame display loop;
        # the outer while loop keeps capturing — confirm this is intended.
        if key == ord("q"):
            recorder.stop()
            cv2.destroyAllWindows()
            break
"cv2.rectangle",
"argparse.ArgumentParser",
"utils.recorder.Recorder",
"numpy.argmax",
"cv2.dnn.blobFromImages",
"cv2.putText",
"cv2.imshow",
"imutils.resize",
"os.path.dirname",
"detectors.get_path.getPath",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.expand_dims",
"sys.exit",
"... | [((164, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (187, 189), False, 'import argparse\n'), ((317, 327), 'utils.recorder.Recorder', 'Recorder', ([], {}), '()\n', (325, 327), False, 'from utils.recorder import Recorder\n'), ((342, 397), 'detectors.get_path.getPath', 'getPath', (['"""kinetics-400/action_recognition_kinetics.txt"""'], {}), "('kinetics-400/action_recognition_kinetics.txt')\n", (349, 397), False, 'from detectors.get_path import getPath\n'), ((674, 729), 'cv2.VideoCapture', 'cv2.VideoCapture', (["(args['video'] if args['video'] else 1)"], {}), "(args['video'] if args['video'] else 1)\n", (690, 729), False, 'import cv2\n'), ((574, 621), 'detectors.get_path.getPath', 'getPath', (['"""kinetics-400/resnet-34_kinetics.onnx"""'], {}), "('kinetics-400/resnet-34_kinetics.onnx')\n", (581, 621), False, 'from detectors.get_path import getPath\n'), ((1003, 1125), 'cv2.dnn.blobFromImages', 'cv2.dnn.blobFromImages', (['frames', '(1.0)', '(SAMPLE_SIZE, SAMPLE_SIZE)', '(114.7748, 107.7354, 99.475)'], {'swapRB': '(True)', 'crop': '(True)'}), '(frames, 1.0, (SAMPLE_SIZE, SAMPLE_SIZE), (114.7748, \n 107.7354, 99.475), swapRB=True, crop=True)\n', (1025, 1125), False, 'import cv2\n'), ((1134, 1166), 'numpy.transpose', 'np.transpose', (['blob', '(1, 0, 2, 3)'], {}), '(blob, (1, 0, 2, 3))\n', (1146, 1166), True, 'import numpy as np\n'), ((1175, 1203), 'numpy.expand_dims', 'np.expand_dims', (['blob'], {'axis': '(0)'}), '(blob, axis=0)\n', (1189, 1203), True, 'import numpy as np\n'), ((938, 970), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (952, 970), False, 'import imutils\n'), ((1269, 1287), 'numpy.argmax', 'np.argmax', (['outputs'], {}), '(outputs)\n', (1278, 1287), True, 'import numpy as np\n'), ((1314, 1368), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(0, 0)', '(300, 40)', '(0, 0, 0)', '(-1)'], {}), '(frame, (0, 0), (300, 40), (0, 0, 0), -1)\n', (1327, 1368), False, 'import 
cv2\n'), ((1371, 1462), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(10, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(2)'], {}), '(frame, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, \n 255, 255), 2)\n', (1382, 1462), False, 'import cv2\n'), ((1567, 1608), 'cv2.imshow', 'cv2.imshow', (['"""Activity Recognition"""', 'frame'], {}), "('Activity Recognition', frame)\n", (1577, 1608), False, 'import cv2\n'), ((912, 923), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (920, 923), False, 'import sys\n'), ((1617, 1631), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1628, 1631), False, 'import cv2\n'), ((1684, 1707), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1705, 1707), False, 'import cv2\n'), ((1500, 1525), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1515, 1525), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for psychrometric_calculations PhaseChangeLevel."""
import unittest
import iris
import numpy as np
import pytest
from cf_units import Unit
from iris.cube import CubeList
from iris.tests import IrisTest
from improver.psychrometric_calculations.psychrometric_calculations import (
PhaseChangeLevel,
)
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_variable_cube,
)
from improver.utilities.cube_manipulation import sort_coord_in_cube
class Test__init__(IrisTest):
    """Tests for the configuration performed by the PhaseChangeLevel
    constructor."""
    def test_snow_sleet(self):
        """A snow-sleet plugin is configured with the 90.0 threshold and the
        snow_falling phase change name."""
        plugin = PhaseChangeLevel("snow-sleet", grid_point_radius=3)
        self.assertEqual(plugin.falling_level_threshold, 90.0)
        self.assertEqual(plugin.phase_change_name, "snow_falling")
        self.assertEqual(plugin.grid_point_radius, 3)
    def test_sleet_rain(self):
        """A sleet-rain plugin is configured with the 202.5 threshold and the
        rain_falling phase change name."""
        plugin = PhaseChangeLevel("sleet-rain", grid_point_radius=3)
        self.assertEqual(plugin.falling_level_threshold, 202.5)
        self.assertEqual(plugin.phase_change_name, "rain_falling")
        self.assertEqual(plugin.grid_point_radius, 3)
    def test_unknown_phase_change(self):
        """An unrecognised phase change raises an informative ValueError."""
        msg = (
            "Unknown phase change 'kittens-puppies' requested.\n"
            "Available options are: snow-sleet, sleet-rain"
        )
        with self.assertRaisesRegex(ValueError, msg):
            PhaseChangeLevel("kittens-puppies")
class Test__repr__(IrisTest):
    """Tests for the string representation of PhaseChangeLevel."""
    def test_basic(self):
        """The repr should report the default threshold and radius."""
        expected = (
            "<PhaseChangeLevel: "
            "falling_level_threshold:90.0,"
            " grid_point_radius: 2>"
        )
        plugin_repr = str(PhaseChangeLevel(phase_change="snow-sleet"))
        self.assertEqual(plugin_repr, expected)
class Test_find_falling_level(IrisTest):
    """Test the find_falling_level method."""
    def setUp(self):
        """Set up arrays."""
        # stratify is an optional dependency used for vertical interpolation;
        # skip these tests when it is not installed.
        pytest.importorskip("stratify")
        # Wet bulb temperature integral on three height levels for a 2x2 grid.
        self.wb_int_data = np.array(
            [
                [[80.0, 80.0], [70.0, 50.0]],
                [[90.0, 100.0], [80.0, 60.0]],
                [[100.0, 110.0], [90.0, 100.0]],
            ]
        )
        self.orog_data = np.array([[0.0, 0.0], [5.0, 3.0]])
        self.height_points = np.array([5.0, 10.0, 20.0])
    def test_basic(self):
        """Test method returns an array with correct data"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        expected = np.array([[10.0, 7.5], [25.0, 20.5]])
        result = plugin.find_falling_level(
            self.wb_int_data, self.orog_data, self.height_points
        )
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayEqual(result, expected)
    def test_outside_range(self):
        """Test method returns a nan if data outside range"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        wb_int_data = self.wb_int_data
        # Drop the topmost integral value below the snow-sleet threshold (90)
        # so the threshold is never crossed at this point, forcing a NaN.
        wb_int_data[2, 1, 1] = 70.0
        result = plugin.find_falling_level(
            wb_int_data, self.orog_data, self.height_points
        )
        self.assertTrue(np.isnan(result[1, 1]))
class Test_fill_in_high_phase_change_falling_levels(IrisTest):
    """Test the fill_in_high_phase_change_falling_levels method."""
    def setUp(self):
        """ Set up arrays for testing."""
        # Phase change level field with a single missing (NaN) centre point.
        self.phase_change_level_data = np.array(
            [[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 2.0]]
        )
        # NOTE(review): this fixture is unused by the tests below — confirm
        # whether it is still needed.
        self.phase_change_data_no_interp = np.array(
            [[np.nan, np.nan, np.nan], [1.0, np.nan, 2.0], [1.0, 2.0, np.nan]]
        )
        self.orog = np.ones((3, 3))
        self.highest_wb_int = np.ones((3, 3))
        self.highest_height = 300.0
    def test_basic(self):
        """Test fills in missing data with orography + highest height"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        # A large wet bulb integral at the missing point indicates the phase
        # change level lies above the highest level considered, so it is
        # filled with orography (1) + highest height (300) = 301.
        self.highest_wb_int[1, 1] = 100.0
        expected = np.array([[1.0, 1.0, 2.0], [1.0, 301.0, 2.0], [1.0, 2.0, 2.0]])
        plugin.fill_in_high_phase_change_falling_levels(
            self.phase_change_level_data,
            self.orog,
            self.highest_wb_int,
            self.highest_height,
        )
        self.assertArrayEqual(self.phase_change_level_data, expected)
    def test_no_fill_if_conditions_not_met(self):
        """Test it doesn't fill in NaN if the highest wet bulb integral value
        is less than the threshold."""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        expected = np.array([[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 2.0]])
        plugin.fill_in_high_phase_change_falling_levels(
            self.phase_change_level_data,
            self.orog,
            self.highest_wb_int,
            self.highest_height,
        )
        self.assertArrayEqual(self.phase_change_level_data, expected)
class Test_linear_wet_bulb_fit(IrisTest):
    """Test the linear_wet_bulb_fit method."""
    def setUp(self):
        """
        Set up arrays for testing.
        Set up a wet bulb temperature array with a linear trend near sea
        level. Some of the straight line fits of wet bulb temperature will
        cross the height axis above zero and some below.
        """
        # Wet bulb temperature falls linearly with height at -0.8 per metre;
        # the left column is offset by -10 and the right column by +20 so the
        # fitted lines have different height-axis intercepts.
        data = np.ones((5, 3, 3)) * -0.8
        self.heights = np.array([5, 10, 20, 30, 50])
        for i in range(5):
            data[i] = data[i] * self.heights[i]
        data[:, :, 0] = data[:, :, 0] - 10
        data[:, :, 2] = data[:, :, 2] + 20
        self.wet_bulb_temperature = data
        # Middle row is land; the fit is only performed at sea points.
        self.sea_points = np.array(
            [[True, True, True], [False, False, False], [True, True, True]]
        )
        self.expected_gradients = np.array(
            [[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]]
        )
        self.expected_intercepts = np.array(
            [[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]]
        )
    def test_basic(self):
        """Test we find the correct gradient and intercepts for simple case"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        gradients, intercepts = plugin.linear_wet_bulb_fit(
            self.wet_bulb_temperature, self.heights, self.sea_points
        )
        self.assertArrayAlmostEqual(self.expected_gradients, gradients)
        self.assertArrayAlmostEqual(self.expected_intercepts, intercepts)
    def test_land_points(self):
        """Test it returns arrays of zeros if points are land."""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        # No sea points at all: the fit is skipped and zeros are returned.
        sea_points = np.ones((3, 3)) * False
        gradients, intercepts = plugin.linear_wet_bulb_fit(
            self.wet_bulb_temperature, self.heights, sea_points
        )
        self.assertArrayAlmostEqual(np.zeros((3, 3)), gradients)
        self.assertArrayAlmostEqual(np.zeros((3, 3)), intercepts)
class Test_find_extrapolated_falling_level(IrisTest):
    """Test the find_extrapolated_falling_level method."""
    def setUp(self):
        """
        Set up arrays for testing.
        Set up a wet bulb temperature array with a linear trend near sea
        level. Some of the straight line fits of wet bulb temperature will
        cross the height axis above zero and some below.
        """
        # All phase change levels start unset (NaN); the method fills in the
        # sea points by extrapolating the linear fit below sea level.
        self.phase_change_level = np.ones((3, 3)) * np.nan
        self.max_wb_integral = np.array(
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]]
        )
        # Middle row is land and is expected to remain NaN.
        self.sea_points = np.array(
            [[True, True, True], [False, False, False], [True, True, True]]
        )
        self.gradients = np.array(
            [[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]]
        )
        self.intercepts = np.array(
            [[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]]
        )
        self.expected_phase_change_level = np.array(
            [
                [-27.5, -15.0, -4.154759],
                [np.nan, np.nan, np.nan],
                [-26.642136, -14.142136, -3.722813],
            ]
        )
    def test_basic(self):
        """Test we fill in the correct snow falling levels for a simple case"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        plugin.find_extrapolated_falling_level(
            self.max_wb_integral,
            self.gradients,
            self.intercepts,
            self.phase_change_level,
            self.sea_points,
        )
        self.assertArrayAlmostEqual(
            self.expected_phase_change_level, self.phase_change_level
        )
    def test_gradients_zero(self):
        """Test we do nothing if all gradients are zero"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        gradients = np.zeros((3, 3))
        plugin.find_extrapolated_falling_level(
            self.max_wb_integral,
            gradients,
            self.intercepts,
            self.phase_change_level,
            self.sea_points,
        )
        # With no usable gradient the levels remain entirely unset.
        expected_phase_change_level = np.ones((3, 3)) * np.nan
        self.assertArrayAlmostEqual(
            expected_phase_change_level, self.phase_change_level
        )
class Test_fill_sea_points(IrisTest):
    """Test the fill_in_sea_points method."""
    def setUp(self):
        """ Set up arrays for testing."""
        self.phase_change_level = np.ones((3, 3)) * np.nan
        self.max_wb_integral = np.array(
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]]
        )
        # Zeros mark sea points; the middle row is land.
        self.land_sea = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
        # Wet bulb temperature decreasing linearly with height (-0.8 per
        # metre), offset by -10 in the left column and +20 in the right.
        data = np.ones((5, 3, 3)) * -0.8
        self.heights = np.array([5, 10, 20, 30, 50])
        for i in range(5):
            data[i] = data[i] * self.heights[i]
        data[:, :, 0] = data[:, :, 0] - 10
        data[:, :, 2] = data[:, :, 2] + 20
        self.wet_bulb_temperature = data
        # Land points (middle row) are expected to stay NaN.
        self.expected_phase_change_level = np.array(
            [
                [-27.5, -15.0, -4.154759],
                [np.nan, np.nan, np.nan],
                [-26.642136, -14.142136, -3.722813],
            ]
        )
    def test_basic(self):
        """Test it fills in the points it's meant to."""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        plugin.fill_in_sea_points(
            self.phase_change_level,
            self.land_sea,
            self.max_wb_integral,
            self.wet_bulb_temperature,
            self.heights,
        )
        self.assertArrayAlmostEqual(
            self.phase_change_level.data, self.expected_phase_change_level
        )
    def test_no_sea(self):
        """Test it only fills in sea points, and ignores a land point"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        expected = np.ones((3, 3)) * np.nan
        # All-land mask: nothing should be filled in.
        land_sea = np.ones((3, 3))
        plugin.fill_in_sea_points(
            self.phase_change_level,
            land_sea,
            self.max_wb_integral,
            self.wet_bulb_temperature,
            self.heights,
        )
        self.assertArrayAlmostEqual(self.phase_change_level.data, expected)
    def test_all_above_threshold(self):
        """Test it doesn't change points that are all above the threshold"""
        plugin = PhaseChangeLevel(phase_change="snow-sleet")
        # This point's integral exceeds the falling level threshold, so its
        # pre-set phase change level must be left untouched.
        self.max_wb_integral[0, 1] = 100
        self.phase_change_level[0, 1] = 100
        self.expected_phase_change_level[0, 1] = 100
        plugin.fill_in_sea_points(
            self.phase_change_level,
            self.land_sea,
            self.max_wb_integral,
            self.wet_bulb_temperature,
            self.heights,
        )
        self.assertArrayAlmostEqual(
            self.phase_change_level.data, self.expected_phase_change_level
        )
class Test_find_max_in_nbhood_orography(IrisTest):
    """Test the find_max_in_nbhood_orography method"""
    def setUp(self):
        """Set up a cube with x and y coordinates"""
        data = np.array(
            [
                [0, 10, 20, 5, 0],
                [0, 50, 20, 5, 0],
                [0, 80, 90, 0, 0],
                [0, 20, 5, 10, 0],
                [0, 5, 10, 10, 0],
            ]
        )
        self.cube = set_up_variable_cube(
            data,
            name="orographic_height",
            units="m",
            spatial_grid="equalarea",
            grid_spacing=2000.0,
        )
        # Maximum over each point's 3x3 neighbourhood (grid_point_radius=1).
        self.expected_data = [
            [50, 50, 50, 20, 5],
            [80, 90, 90, 90, 5],
            [80, 90, 90, 90, 10],
            [80, 90, 90, 90, 10],
            [20, 20, 20, 10, 10],
        ]
        # Same data on a lat-lon grid, used to check behaviour when the grid
        # spacing cannot be expressed in metres.
        self.cube_latlon = set_up_variable_cube(
            self.cube.data,
            name="orographic_height",
            units="m",
            spatial_grid="latlon",
            grid_spacing=0.01,
        )
    def test_basic(self):
        """Test the function does what it's meant to in a simple case."""
        plugin = PhaseChangeLevel(phase_change="snow-sleet", grid_point_radius=1)
        result = plugin.find_max_in_nbhood_orography(self.cube)
        self.assertArrayAlmostEqual(result.data, self.expected_data)
    def test_null(self):
        """Test the function does nothing when radius is zero."""
        plugin = PhaseChangeLevel(phase_change="snow-sleet", grid_point_radius=0)
        expected_data = self.cube.data.copy()
        result = plugin.find_max_in_nbhood_orography(self.cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_null_lat_lon(self):
        """Test the function succeeds and does nothing when radius is zero and grid is
        lat-lon."""
        cube = self.cube_latlon
        plugin = PhaseChangeLevel(phase_change="snow-sleet", grid_point_radius=0)
        expected_data = self.cube.data.copy()
        result = plugin.find_max_in_nbhood_orography(cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_error_lat_lon(self):
        """Test the function fails when radius is not zero and grid is lat-lon."""
        cube = self.cube_latlon
        plugin = PhaseChangeLevel(phase_change="snow-sleet", grid_point_radius=1)
        # A non-zero radius requires converting grid spacing to metres, which
        # is impossible for degrees, hence the units conversion error.
        with self.assertRaisesRegex(
            ValueError,
            r"Unable to convert from 'Unit\('degrees'\)' to 'Unit\('metres'\)'.",
        ):
            plugin.find_max_in_nbhood_orography(cube)
class Test_process(IrisTest):
    """Test the PhaseChangeLevel processing works"""
    def setUp(self):
        """Set up orography and land-sea mask cubes. Also create wet bulb
        temperature and wet bulb temperature integral cubes that contain
        multiple height levels."""
        pytest.importorskip("stratify")
        self.setup_cubes_for_process()
    def setup_cubes_for_process(self, spatial_grid="equalarea"):
        # 5x5 orography of 1 m with an unrealistic 100 m point-hill centre.
        data = np.ones((5, 5), dtype=np.float32)
        data[2, 2] = 100.0
        self.orog = set_up_variable_cube(
            data, name="surface_altitude", units="m", spatial_grid=spatial_grid
        )
        # All points are flagged as land.
        self.land_sea = set_up_variable_cube(
            np.ones_like(data, dtype=np.int8),
            name="land_binary_mask",
            units=1,
            spatial_grid=spatial_grid,
        )
        # Note the values below are ordered at [5, 195, 200] m.
        wbt_0 = np.full_like(data, fill_value=271.46216)
        wbt_0[2, 2] = 270.20343
        wbt_1 = np.full_like(data, fill_value=274.4207)
        wbt_1[2, 2] = 271.46216
        wbt_2 = np.full_like(data, fill_value=275.0666)
        wbt_2[2, 2] = 274.4207
        # Broadcast each level over the three realizations.
        wbt_data = np.array(
            [
                np.broadcast_to(wbt_0, (3, 5, 5)),
                np.broadcast_to(wbt_1, (3, 5, 5)),
                np.broadcast_to(wbt_2, (3, 5, 5)),
            ],
            dtype=np.float32,
        )
        # Note the values below are ordered at [5, 195] m.
        wbti_0 = np.full_like(data, fill_value=128.68324)
        wbti_0[2, 2] = 3.1767120
        # NOTE(review): this slice also overwrites the [2, 2] value assigned
        # on the previous line — confirm that is intended.
        wbti_0[1:4, 1:4] = 100.0
        wbti_1 = np.full_like(data, fill_value=7.9681854)
        wbti_1[2, 2] = 3.1767120
        wbti_data = np.array(
            [np.broadcast_to(wbti_0, (3, 5, 5)), np.broadcast_to(wbti_1, (3, 5, 5))],
            dtype=np.float32,
        )
        height_points = [5.0, 195.0, 200.0]
        height_attribute = {"positive": "up"}
        wet_bulb_temperature = set_up_variable_cube(
            data, spatial_grid=spatial_grid, name="wet_bulb_temperature"
        )
        wet_bulb_temperature = add_coordinate(
            wet_bulb_temperature, [0, 1, 2], "realization"
        )
        self.wet_bulb_temperature_cube = add_coordinate(
            wet_bulb_temperature,
            height_points,
            "height",
            coord_units="m",
            attributes=height_attribute,
        )
        self.wet_bulb_temperature_cube.data = wbt_data
        # Note that the iris cubelist merge_cube operation sorts the coordinate
        # being merged into ascending order. The cube created below is thus
        # in the incorrect height order, i.e. [5, 195] instead of [195, 5].
        # There is a function in the PhaseChangeLevel plugin that ensures
        # the height coordinate is in descending order. This is tested here by
        # creating test cubes with both orders.
        height_attribute = {"positive": "down"}
        wet_bulb_integral = set_up_variable_cube(
            data,
            spatial_grid=spatial_grid,
            name="wet_bulb_temperature_integral",
            units="K m",
        )
        wet_bulb_integral = add_coordinate(wet_bulb_integral, [0, 1, 2], "realization")
        self.wet_bulb_integral_cube_inverted = add_coordinate(
            wet_bulb_integral,
            height_points[0:2],
            "height",
            coord_units="m",
            attributes=height_attribute,
        )
        self.wet_bulb_integral_cube_inverted.data = wbti_data
        self.wet_bulb_integral_cube = sort_coord_in_cube(
            self.wet_bulb_integral_cube_inverted, "height", descending=True
        )
        self.expected_snow_sleet = np.full(
            (3, 5, 5), fill_value=66.88566, dtype=np.float32
        )
        self.expected_snow_sleet[:, 1:4, 1:4] = 26.645035
        self.expected_snow_sleet[:, 2, 2] = 124.623375
    def test_snow_sleet_phase_change(self):
        """Test that process returns a cube with the right name, units and
        values. In this instance the phase change is from snow to sleet. The
        returned level has three values, all above orography."""
        result = PhaseChangeLevel(phase_change="snow-sleet").process(
            CubeList(
                [
                    self.wet_bulb_temperature_cube,
                    self.wet_bulb_integral_cube,
                    self.orog,
                    self.land_sea,
                ]
            )
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.name(), "altitude_of_snow_falling_level")
        self.assertEqual(result.units, Unit("m"))
        self.assertArrayAlmostEqual(result.data, self.expected_snow_sleet)
        # Masked output would indicate points that could not be filled in.
        if hasattr(result.data, "mask"):
            self.assertFalse(result.data.mask.any())
    def test_snow_sleet_phase_change_reorder_cubes(self):
        """Same test as test_snow_sleet_phase_change but the cubes are in a
        different order"""
        result = PhaseChangeLevel(phase_change="snow-sleet").process(
            CubeList(
                [
                    self.wet_bulb_integral_cube,
                    self.wet_bulb_temperature_cube,
                    self.orog,
                    self.land_sea,
                ]
            )
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.name(), "altitude_of_snow_falling_level")
        self.assertEqual(result.units, Unit("m"))
        self.assertArrayAlmostEqual(result.data, self.expected_snow_sleet)
    def test_sleet_rain_phase_change(self):
        """Test that process returns a cube with the right name, units and
        values. In this instance the phase change is from sleet to rain. Note
        that the wet bulb temperature integral values are doubled such that the
        rain threshold is reached above the surface.
        The result has an odd pattern of 49.178673 around the edge and at the centre
        point with a value of 1 forming a ring around the centre point. This arises
        because the input data are not entirely realistic in this case. The ring
        [:, 1:4, 1:4] has a sleet-rain-phase-level below the orography (1 m) but the
        centre point is an unrealistic point-hill of 100m which is interpolated
        from the outer ring due to the grid_point_radius default value of 2."""
        self.wet_bulb_integral_cube.data *= 2.0
        result = PhaseChangeLevel(phase_change="sleet-rain").process(
            CubeList(
                [
                    self.wet_bulb_temperature_cube,
                    self.wet_bulb_integral_cube,
                    self.orog,
                    self.land_sea,
                ]
            )
        )
        expected = np.full_like(
            self.expected_snow_sleet, fill_value=49.178673, dtype=np.float32
        )
        expected[:, 1:4, 1:4] = 1.0
        expected[:, 2, 2] = 49.178673
        self.assertIsInstance(result, iris.cube.Cube)
        if hasattr(result.data, "mask"):
            self.assertFalse(result.data.mask.any())
        self.assertEqual(result.name(), "altitude_of_rain_falling_level")
        self.assertEqual(result.units, Unit("m"))
        self.assertArrayAlmostEqual(result.data, expected)
    def test_inverted_input_cube(self):
        """Test that the phase change level process returns a cube
        containing the expected data when the height coordinate is in
        ascending order rather than the expected descending order."""
        # NOTE(review): the docstring describes the ascending-order cube but
        # the sorted (descending) cube is passed here — confirm whether
        # wet_bulb_integral_cube_inverted was intended.
        result = PhaseChangeLevel(phase_change="snow-sleet").process(
            CubeList(
                [
                    self.wet_bulb_temperature_cube,
                    self.wet_bulb_integral_cube,
                    self.orog,
                    self.land_sea,
                ]
            )
        )
        self.assertArrayAlmostEqual(result.data, self.expected_snow_sleet)
    def test_interpolation_from_sea_points(self):
        """Test that the phase change level process returns a cube
        containing the expected data. In this case there is a single
        non-sea-level point in the orography. The snow falling level is below
        the surface of the sea, so for the single high point falling level is
        interpolated from the surrounding sea-level points."""
        orog = self.orog
        orog.data = np.zeros_like(orog.data)
        orog.data[2, 2] = 100.0
        land_sea = self.land_sea
        # NOTE(review): the mask is already all ones, so this assignment is a
        # no-op — confirm whether 0 (sea) was intended here.
        land_sea.data[1, 1] = 1
        result = PhaseChangeLevel(
            phase_change="snow-sleet", grid_point_radius=1
        ).process(
            CubeList(
                [
                    self.wet_bulb_temperature_cube,
                    self.wet_bulb_integral_cube,
                    orog,
                    land_sea,
                ]
            )
        )
        expected = self.expected_snow_sleet - 1
        expected[:, 2, 2] += 1
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayAlmostEqual(result.data, expected)
    def test_too_many_cubes(self):
        """Tests that an error is raised if there are too many cubes."""
        msg = "Expected 4"
        with self.assertRaisesRegex(ValueError, msg):
            PhaseChangeLevel(phase_change="snow-sleet").process(
                CubeList(
                    [
                        self.wet_bulb_temperature_cube,
                        self.wet_bulb_integral_cube,
                        self.orog,
                        self.land_sea,
                        self.orog,
                    ]
                )
            )
    def test_empty_cube_list(self):
        """Tests that an error is raised if there is an empty list."""
        msg = "Expected 4"
        with self.assertRaisesRegex(ValueError, msg):
            PhaseChangeLevel(phase_change="snow-sleet").process(CubeList([]))
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"numpy.ones_like",
"numpy.broadcast_to",
"numpy.ones",
"numpy.full_like",
"improver.synthetic_data.set_up_test_cubes.set_up_variable_cube",
"iris.cube.CubeList",
"improver.synthetic_data.set_up_test_cubes.add_coordinate",
"cf_units.Unit",
"improver.psychrometric_calculations.psychrometric_calculatio... | [((26262, 26277), 'unittest.main', 'unittest.main', ([], {}), '()\n', (26275, 26277), False, 'import unittest\n'), ((2424, 2475), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', (['phase_change'], {'grid_point_radius': '(3)'}), '(phase_change, grid_point_radius=3)\n', (2440, 2475), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((2866, 2917), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', (['phase_change'], {'grid_point_radius': '(3)'}), '(phase_change, grid_point_radius=3)\n', (2882, 2917), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((4100, 4131), 'pytest.importorskip', 'pytest.importorskip', (['"""stratify"""'], {}), "('stratify')\n", (4119, 4131), False, 'import pytest\n'), ((4159, 4268), 'numpy.array', 'np.array', (['[[[80.0, 80.0], [70.0, 50.0]], [[90.0, 100.0], [80.0, 60.0]], [[100.0, \n 110.0], [90.0, 100.0]]]'], {}), '([[[80.0, 80.0], [70.0, 50.0]], [[90.0, 100.0], [80.0, 60.0]], [[\n 100.0, 110.0], [90.0, 100.0]]])\n', (4167, 4268), True, 'import numpy as np\n'), ((4375, 4409), 'numpy.array', 'np.array', (['[[0.0, 0.0], [5.0, 3.0]]'], {}), '([[0.0, 0.0], [5.0, 3.0]])\n', (4383, 4409), True, 'import numpy as np\n'), ((4439, 4466), 'numpy.array', 'np.array', (['[5.0, 10.0, 20.0]'], {}), '([5.0, 10.0, 20.0])\n', (4447, 4466), True, 'import numpy as np\n'), ((4572, 4615), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (4588, 4615), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((4635, 4672), 'numpy.array', 'np.array', (['[[10.0, 7.5], [25.0, 20.5]]'], {}), '([[10.0, 7.5], [25.0, 
20.5]])\n', (4643, 4672), True, 'import numpy as np\n'), ((5005, 5048), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (5021, 5048), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((5523, 5587), 'numpy.array', 'np.array', (['[[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 2.0]]'], {}), '([[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 2.0]])\n', (5531, 5587), True, 'import numpy as np\n'), ((5653, 5729), 'numpy.array', 'np.array', (['[[np.nan, np.nan, np.nan], [1.0, np.nan, 2.0], [1.0, 2.0, np.nan]]'], {}), '([[np.nan, np.nan, np.nan], [1.0, np.nan, 2.0], [1.0, 2.0, np.nan]])\n', (5661, 5729), True, 'import numpy as np\n'), ((5772, 5787), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5779, 5787), True, 'import numpy as np\n'), ((5818, 5833), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5825, 5833), True, 'import numpy as np\n'), ((5987, 6030), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (6003, 6030), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((6092, 6155), 'numpy.array', 'np.array', (['[[1.0, 1.0, 2.0], [1.0, 301.0, 2.0], [1.0, 2.0, 2.0]]'], {}), '([[1.0, 1.0, 2.0], [1.0, 301.0, 2.0], [1.0, 2.0, 2.0]])\n', (6100, 6155), True, 'import numpy as np\n'), ((6613, 6656), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (6629, 6656), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((6676, 6740), 'numpy.array', 'np.array', (['[[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 
2.0]]'], {}), '([[1.0, 1.0, 2.0], [1.0, np.nan, 2.0], [1.0, 2.0, 2.0]])\n', (6684, 6740), True, 'import numpy as np\n'), ((7452, 7481), 'numpy.array', 'np.array', (['[5, 10, 20, 30, 50]'], {}), '([5, 10, 20, 30, 50])\n', (7460, 7481), True, 'import numpy as np\n'), ((7710, 7783), 'numpy.array', 'np.array', (['[[True, True, True], [False, False, False], [True, True, True]]'], {}), '([[True, True, True], [False, False, False], [True, True, True]])\n', (7718, 7783), True, 'import numpy as np\n'), ((7840, 7907), 'numpy.array', 'np.array', (['[[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]]'], {}), '([[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]])\n', (7848, 7907), True, 'import numpy as np\n'), ((7965, 8028), 'numpy.array', 'np.array', (['[[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]]'], {}), '([[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]])\n', (7973, 8028), True, 'import numpy as np\n'), ((8174, 8217), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (8190, 8217), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((8620, 8663), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (8636, 8663), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((9466, 9530), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]])\n', (9474, 9530), True, 'import numpy as np\n'), ((9579, 9652), 'numpy.array', 'np.array', (['[[True, True, True], [False, False, False], [True, True, True]]'], {}), '([[True, True, True], [False, False, False], [True, True, True]])\n', (9587, 9652), True, 'import numpy 
as np\n'), ((9700, 9767), 'numpy.array', 'np.array', (['[[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]]'], {}), '([[-0.8, -0.8, -0.8], [0.0, 0.0, 0.0], [-0.8, -0.8, -0.8]])\n', (9708, 9767), True, 'import numpy as np\n'), ((9816, 9879), 'numpy.array', 'np.array', (['[[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]]'], {}), '([[-10, 0.0, 20.0], [0.0, 0.0, 0.0], [-10, 0.0, 20.0]])\n', (9824, 9879), True, 'import numpy as np\n'), ((9945, 10049), 'numpy.array', 'np.array', (['[[-27.5, -15.0, -4.154759], [np.nan, np.nan, np.nan], [-26.642136, -\n 14.142136, -3.722813]]'], {}), '([[-27.5, -15.0, -4.154759], [np.nan, np.nan, np.nan], [-26.642136,\n -14.142136, -3.722813]])\n', (9953, 10049), True, 'import numpy as np\n'), ((10255, 10298), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (10271, 10298), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((10744, 10787), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (10760, 10787), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((10808, 10824), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (10816, 10824), True, 'import numpy as np\n'), ((11451, 11515), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]])\n', (11459, 11515), True, 'import numpy as np\n'), ((11563, 11606), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 1, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 1, 1], [0, 0, 0]])\n', (11571, 11606), True, 'import numpy as np\n'), ((11671, 11700), 'numpy.array', 'np.array', (['[5, 10, 20, 30, 50]'], {}), '([5, 10, 20, 30, 50])\n', 
(11679, 11700), True, 'import numpy as np\n'), ((11946, 12050), 'numpy.array', 'np.array', (['[[-27.5, -15.0, -4.154759], [np.nan, np.nan, np.nan], [-26.642136, -\n 14.142136, -3.722813]]'], {}), '([[-27.5, -15.0, -4.154759], [np.nan, np.nan, np.nan], [-26.642136,\n -14.142136, -3.722813]])\n', (11954, 12050), True, 'import numpy as np\n'), ((12233, 12276), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (12249, 12276), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((12725, 12768), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (12741, 12768), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((12832, 12847), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (12839, 12847), True, 'import numpy as np\n'), ((13262, 13305), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (13278, 13305), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((13973, 14083), 'numpy.array', 'np.array', (['[[0, 10, 20, 5, 0], [0, 50, 20, 5, 0], [0, 80, 90, 0, 0], [0, 20, 5, 10, 0],\n [0, 5, 10, 10, 0]]'], {}), '([[0, 10, 20, 5, 0], [0, 50, 20, 5, 0], [0, 80, 90, 0, 0], [0, 20, \n 5, 10, 0], [0, 5, 10, 10, 0]])\n', (13981, 14083), True, 'import numpy as np\n'), ((14216, 14330), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['data'], {'name': '"""orographic_height"""', 'units': '"""m"""', 'spatial_grid': '"""equalarea"""', 'grid_spacing': '(2000.0)'}), "(data, name='orographic_height', 
units='m',\n spatial_grid='equalarea', grid_spacing=2000.0)\n", (14236, 14330), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((14634, 14753), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['self.cube.data'], {'name': '"""orographic_height"""', 'units': '"""m"""', 'spatial_grid': '"""latlon"""', 'grid_spacing': '(0.01)'}), "(self.cube.data, name='orographic_height', units='m',\n spatial_grid='latlon', grid_spacing=0.01)\n", (14654, 14753), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((14939, 15003), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""', 'grid_point_radius': '(1)'}), "(phase_change='snow-sleet', grid_point_radius=1)\n", (14955, 15003), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((15246, 15310), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""', 'grid_point_radius': '(0)'}), "(phase_change='snow-sleet', grid_point_radius=0)\n", (15262, 15310), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((15675, 15739), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""', 'grid_point_radius': '(0)'}), "(phase_change='snow-sleet', grid_point_radius=0)\n", (15691, 15739), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((16076, 16140), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""', 'grid_point_radius': '(1)'}), "(phase_change='snow-sleet', grid_point_radius=1)\n", 
(16092, 16140), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((16637, 16668), 'pytest.importorskip', 'pytest.importorskip', (['"""stratify"""'], {}), "('stratify')\n", (16656, 16668), False, 'import pytest\n'), ((16789, 16822), 'numpy.ones', 'np.ones', (['(5, 5)'], {'dtype': 'np.float32'}), '((5, 5), dtype=np.float32)\n', (16796, 16822), True, 'import numpy as np\n'), ((16870, 16964), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['data'], {'name': '"""surface_altitude"""', 'units': '"""m"""', 'spatial_grid': 'spatial_grid'}), "(data, name='surface_altitude', units='m', spatial_grid\n =spatial_grid)\n", (16890, 16964), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((17262, 17302), 'numpy.full_like', 'np.full_like', (['data'], {'fill_value': '(271.46216)'}), '(data, fill_value=271.46216)\n', (17274, 17302), True, 'import numpy as np\n'), ((17351, 17390), 'numpy.full_like', 'np.full_like', (['data'], {'fill_value': '(274.4207)'}), '(data, fill_value=274.4207)\n', (17363, 17390), True, 'import numpy as np\n'), ((17439, 17478), 'numpy.full_like', 'np.full_like', (['data'], {'fill_value': '(275.0666)'}), '(data, fill_value=275.0666)\n', (17451, 17478), True, 'import numpy as np\n'), ((17837, 17877), 'numpy.full_like', 'np.full_like', (['data'], {'fill_value': '(128.68324)'}), '(data, fill_value=128.68324)\n', (17849, 17877), True, 'import numpy as np\n'), ((17961, 18001), 'numpy.full_like', 'np.full_like', (['data'], {'fill_value': '(7.9681854)'}), '(data, fill_value=7.9681854)\n', (17973, 18001), True, 'import numpy as np\n'), ((18312, 18399), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['data'], {'spatial_grid': 'spatial_grid', 'name': '"""wet_bulb_temperature"""'}), "(data, spatial_grid=spatial_grid, name=\n 'wet_bulb_temperature')\n", (18332, 18399), False, 
'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((18448, 18510), 'improver.synthetic_data.set_up_test_cubes.add_coordinate', 'add_coordinate', (['wet_bulb_temperature', '[0, 1, 2]', '"""realization"""'], {}), "(wet_bulb_temperature, [0, 1, 2], 'realization')\n", (18462, 18510), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((18574, 18686), 'improver.synthetic_data.set_up_test_cubes.add_coordinate', 'add_coordinate', (['wet_bulb_temperature', 'height_points', '"""height"""'], {'coord_units': '"""m"""', 'attributes': 'height_attribute'}), "(wet_bulb_temperature, height_points, 'height', coord_units=\n 'm', attributes=height_attribute)\n", (18588, 18686), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((19321, 19430), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['data'], {'spatial_grid': 'spatial_grid', 'name': '"""wet_bulb_temperature_integral"""', 'units': '"""K m"""'}), "(data, spatial_grid=spatial_grid, name=\n 'wet_bulb_temperature_integral', units='K m')\n", (19341, 19430), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((19513, 19572), 'improver.synthetic_data.set_up_test_cubes.add_coordinate', 'add_coordinate', (['wet_bulb_integral', '[0, 1, 2]', '"""realization"""'], {}), "(wet_bulb_integral, [0, 1, 2], 'realization')\n", (19527, 19572), False, 'from improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((19620, 19734), 'improver.synthetic_data.set_up_test_cubes.add_coordinate', 'add_coordinate', (['wet_bulb_integral', 'height_points[0:2]', '"""height"""'], {'coord_units': '"""m"""', 'attributes': 'height_attribute'}), "(wet_bulb_integral, height_points[0:2], 'height', coord_units\n ='m', attributes=height_attribute)\n", (19634, 19734), False, 'from 
improver.synthetic_data.set_up_test_cubes import add_coordinate, set_up_variable_cube\n'), ((19901, 19988), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.wet_bulb_integral_cube_inverted', '"""height"""'], {'descending': '(True)'}), "(self.wet_bulb_integral_cube_inverted, 'height',\n descending=True)\n", (19919, 19988), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((20042, 20099), 'numpy.full', 'np.full', (['(3, 5, 5)'], {'fill_value': '(66.88566)', 'dtype': 'np.float32'}), '((3, 5, 5), fill_value=66.88566, dtype=np.float32)\n', (20049, 20099), True, 'import numpy as np\n'), ((23122, 23200), 'numpy.full_like', 'np.full_like', (['self.expected_snow_sleet'], {'fill_value': '(49.178673)', 'dtype': 'np.float32'}), '(self.expected_snow_sleet, fill_value=49.178673, dtype=np.float32)\n', (23134, 23200), True, 'import numpy as np\n'), ((24721, 24745), 'numpy.zeros_like', 'np.zeros_like', (['orog.data'], {}), '(orog.data)\n', (24734, 24745), True, 'import numpy as np\n'), ((3517, 3547), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', (['phase_change'], {}), '(phase_change)\n', (3533, 3547), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((3727, 3770), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (3743, 3770), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((5262, 5284), 'numpy.isnan', 'np.isnan', (['result[1, 1]'], {}), '(result[1, 1])\n', (5270, 5284), True, 'import numpy as np\n'), ((7403, 7421), 'numpy.ones', 'np.ones', (['(5, 3, 3)'], {}), '((5, 3, 3))\n', (7410, 7421), True, 'import numpy as np\n'), ((8685, 8700), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (8692, 8700), True, 
'import numpy as np\n'), ((8879, 8895), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8887, 8895), True, 'import numpy as np\n'), ((8944, 8960), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8952, 8960), True, 'import numpy as np\n'), ((9410, 9425), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (9417, 9425), True, 'import numpy as np\n'), ((11073, 11088), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (11080, 11088), True, 'import numpy as np\n'), ((11395, 11410), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (11402, 11410), True, 'import numpy as np\n'), ((11622, 11640), 'numpy.ones', 'np.ones', (['(5, 3, 3)'], {}), '((5, 3, 3))\n', (11629, 11640), True, 'import numpy as np\n'), ((12788, 12803), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (12795, 12803), True, 'import numpy as np\n'), ((17040, 17073), 'numpy.ones_like', 'np.ones_like', (['data'], {'dtype': 'np.int8'}), '(data, dtype=np.int8)\n', (17052, 17073), True, 'import numpy as np\n'), ((20579, 20681), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self.orog,\n self.land_sea]'], {}), '([self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self\n .orog, self.land_sea])\n', (20587, 20681), False, 'from iris.cube import CubeList\n'), ((20983, 20992), 'cf_units.Unit', 'Unit', (['"""m"""'], {}), "('m')\n", (20987, 20992), False, 'from cf_units import Unit\n'), ((21407, 21509), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_integral_cube, self.wet_bulb_temperature_cube, self.orog,\n self.land_sea]'], {}), '([self.wet_bulb_integral_cube, self.wet_bulb_temperature_cube, self\n .orog, self.land_sea])\n', (21415, 21509), False, 'from iris.cube import CubeList\n'), ((21811, 21820), 'cf_units.Unit', 'Unit', (['"""m"""'], {}), "('m')\n", (21815, 21820), False, 'from cf_units import Unit\n'), ((22866, 22968), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_temperature_cube, 
self.wet_bulb_integral_cube, self.orog,\n self.land_sea]'], {}), '([self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self\n .orog, self.land_sea])\n', (22874, 22968), False, 'from iris.cube import CubeList\n'), ((23558, 23567), 'cf_units.Unit', 'Unit', (['"""m"""'], {}), "('m')\n", (23562, 23567), False, 'from cf_units import Unit\n'), ((23958, 24060), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self.orog,\n self.land_sea]'], {}), '([self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self\n .orog, self.land_sea])\n', (23966, 24060), False, 'from iris.cube import CubeList\n'), ((24968, 25059), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, orog, land_sea]'], {}), '([self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, orog,\n land_sea])\n', (24976, 25059), False, 'from iris.cube import CubeList\n'), ((17569, 17602), 'numpy.broadcast_to', 'np.broadcast_to', (['wbt_0', '(3, 5, 5)'], {}), '(wbt_0, (3, 5, 5))\n', (17584, 17602), True, 'import numpy as np\n'), ((17620, 17653), 'numpy.broadcast_to', 'np.broadcast_to', (['wbt_1', '(3, 5, 5)'], {}), '(wbt_1, (3, 5, 5))\n', (17635, 17653), True, 'import numpy as np\n'), ((17671, 17704), 'numpy.broadcast_to', 'np.broadcast_to', (['wbt_2', '(3, 5, 5)'], {}), '(wbt_2, (3, 5, 5))\n', (17686, 17704), True, 'import numpy as np\n'), ((18078, 18112), 'numpy.broadcast_to', 'np.broadcast_to', (['wbti_0', '(3, 5, 5)'], {}), '(wbti_0, (3, 5, 5))\n', (18093, 18112), True, 'import numpy as np\n'), ((18114, 18148), 'numpy.broadcast_to', 'np.broadcast_to', (['wbti_1', '(3, 5, 5)'], {}), '(wbti_1, (3, 5, 5))\n', (18129, 18148), True, 'import numpy as np\n'), ((20514, 20557), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (20530, 20557), False, 'from 
improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((21342, 21385), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (21358, 21385), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((22801, 22844), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""sleet-rain"""'}), "(phase_change='sleet-rain')\n", (22817, 22844), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((23893, 23936), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (23909, 23936), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((24860, 24924), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""', 'grid_point_radius': '(1)'}), "(phase_change='snow-sleet', grid_point_radius=1)\n", (24876, 24924), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((25658, 25771), 'iris.cube.CubeList', 'CubeList', (['[self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self.orog,\n self.land_sea, self.orog]'], {}), '([self.wet_bulb_temperature_cube, self.wet_bulb_integral_cube, self\n .orog, self.land_sea, self.orog])\n', (25666, 25771), False, 'from iris.cube import CubeList\n'), ((26215, 26227), 'iris.cube.CubeList', 'CubeList', (['[]'], {}), '([])\n', (26223, 26227), False, 'from iris.cube import CubeList\n'), ((25589, 25632), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', 
([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (25605, 25632), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n'), ((26163, 26206), 'improver.psychrometric_calculations.psychrometric_calculations.PhaseChangeLevel', 'PhaseChangeLevel', ([], {'phase_change': '"""snow-sleet"""'}), "(phase_change='snow-sleet')\n", (26179, 26206), False, 'from improver.psychrometric_calculations.psychrometric_calculations import PhaseChangeLevel\n')] |
"""
author:zhangyu
email:<EMAIL>
"""
from __future__ import division
from scipy.sparse import coo_matrix
import numpy as np
import PR.read as read
import sys
def graph_to_m(graph):
    """Convert a user-item graph into a sparse transition matrix.

    Args:
        graph: dict mapping each vertex to an iterable of its neighbour
            vertices (the user-item graph).
    Return:
        coo_matrix: transition matrix m where m[i, j] = 1/out_degree(i),
            rounded to 3 decimal places, for every edge i -> j.
        list: the vertices, in the order matching the matrix row/col indices.
        dict: mapping from vertex to its matrix index.
    """
    # list() is required: in Python 3, dict.keys() returns a view that does
    # not support indexing, so the original vertex[index] raised TypeError.
    vertex = list(graph.keys())
    total_len = len(vertex)
    address_dict = {node: index for index, node in enumerate(vertex)}
    row = []
    col = []
    data = []
    for element_i in graph:
        # Each outgoing edge carries an equal share of the vertex's weight.
        weight = round(1 / len(graph[element_i]), 3)
        row_index = address_dict[element_i]
        for element_j in graph[element_i]:
            row.append(row_index)
            col.append(address_dict[element_j])
            data.append(weight)
    m = coo_matrix(
        (np.array(data), (np.array(row), np.array(col))),
        shape=(total_len, total_len),
    )
    return m, vertex, address_dict
def mat_all_point(m_mat, vertex, alpha):
    """Form the personal-rank system matrix E - alpha * m_mat.T.

    Args:
        m_mat: sparse transition matrix from graph_to_m
        vertex: all vertices (only its length is used, to size the identity)
        alpha: random-walk damping factor
    Return:
        a sparse CSR matrix equal to E - alpha * m_mat.T
    """
    total_len = len(vertex)
    # Build the identity from index arrays in one shot instead of appending
    # row/col/data entries in a Python loop; the result is identical.
    diag = np.arange(total_len)
    eye_t = coo_matrix(
        (np.ones(total_len), (diag, diag)), shape=(total_len, total_len)
    )
    return eye_t.tocsr() - alpha * m_mat.tocsr().transpose()
if __name__ == "__main__":
    # Demo entry point: build the user-item graph from the sample log,
    # convert it to a transition matrix, then form E - alpha * M^T.
    # NOTE(review): the data path is relative to the working directory —
    # presumably the script is run from inside the PR/ package; confirm.
    graph = read.get_graph_from_data("../data/log.txt")
    m, vertex, address_dict = graph_to_m(graph)
    mat_all_point(m, vertex, 0.8)
| [
"PR.read.get_graph_from_data",
"numpy.array",
"scipy.sparse.coo_matrix"
] | [((805, 818), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (813, 818), True, 'import numpy as np\n'), ((829, 842), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (837, 842), True, 'import numpy as np\n'), ((854, 868), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (862, 868), True, 'import numpy as np\n'), ((877, 937), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (row, col))'], {'shape': '(total_len, total_len)'}), '((data, (row, col)), shape=(total_len, total_len))\n', (887, 937), False, 'from scipy.sparse import coo_matrix\n'), ((1348, 1361), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (1356, 1361), True, 'import numpy as np\n'), ((1372, 1385), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (1380, 1385), True, 'import numpy as np\n'), ((1397, 1411), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1405, 1411), True, 'import numpy as np\n'), ((1424, 1484), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (row, col))'], {'shape': '(total_len, total_len)'}), '((data, (row, col)), shape=(total_len, total_len))\n', (1434, 1484), False, 'from scipy.sparse import coo_matrix\n'), ((1584, 1627), 'PR.read.get_graph_from_data', 'read.get_graph_from_data', (['"""../data/log.txt"""'], {}), "('../data/log.txt')\n", (1608, 1627), True, 'import PR.read as read\n')] |
__all__ = ['FixedDelay']
import numpy as np
from .core import Signal, signal
from .misc import Ramp # NOQA
class CircularBuffer:
    """Fixed-capacity ring buffer over a 1-D numpy array.

    ``add`` writes a frame at the current position and advances ``buffer_p``
    modulo ``buffer_size``; ``head`` then reads starting at ``buffer_p``,
    i.e. the oldest stored samples.
    """

    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self.buffer = np.zeros([self.buffer_size])
        self.buffer_p = 0

    def add(self, samples):
        """Write ``samples`` into the ring, wrapping past the end of storage."""
        count = samples.shape[0]
        start = self.buffer_p
        end = start + count
        if end <= self.buffer_size:
            # Contiguous case: the whole frame fits before the end.
            self.buffer[start:end] = samples
        else:
            # Wrapped case: fill to the end, then continue at the front.
            split = self.buffer_size - start
            self.buffer[start:] = samples[:split]
            self.buffer[:end - self.buffer_size] = samples[split:]
        self.buffer_p = end % self.buffer_size

    def head(self, out):
        """Fill ``out`` with the oldest samples, reading from the pointer on."""
        count = out.shape[0]
        start = self.buffer_p
        tail = self.buffer_size - start
        if count <= tail:
            out[:] = self.buffer[start:start + count]
        else:
            out[:tail] = self.buffer[start:]
            out[tail:] = self.buffer[:count - tail]

    def index(self, array):
        """Return samples at ring offsets ``array`` relative to the pointer."""
        positions = (array + self.buffer_p) % self.buffer_size
        return self.buffer[positions]
@signal("fixed_delay")
class FixedDelay(Signal):
    """Delay a signal by a fixed amount of time.
    >>> one = Ramp(1)
    >>> delayed = one.fixed_delay(0.5)
    >>> delayed.configure(10, 10)
    >>> more_delayed = one.fixed_delay(1.5)
    >>> more_delayed.configure(10, 10)
    >>> one(); delayed(); more_delayed()
    >>> one.output
    array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ], dtype=float32)
    >>> delayed.output
    array([0. , 0. , 0. , 0. , 0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float32)
    >>> more_delayed.output
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
    >>> one(); delayed(); more_delayed()
    >>> more_delayed.output
    array([0. , 0. , 0. , 0. , 0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float32)
    """
    # `signal` is the upstream source; `delay_s` is the delay in seconds.
    signal: Signal
    delay_s: float
    def setup(self):
        # Ring capacity = delay in samples + one frame, so that writing a
        # frame and immediately reading one yields data written delay_s ago
        # (the doctest above demonstrates this timing).
        # NOTE(review): assumes self.samplerate and self.framesize are set
        # by Signal.configure() before setup() runs — confirm in .core.
        self.buffer_size = int(self.delay_s * self.samplerate) + self.framesize
        self.buffer = CircularBuffer(self.buffer_size)
    def __call__(self):
        # Push the newest input frame, then read the oldest frame into
        # self.output; with the sizing above this is the delayed signal.
        self.buffer.add(self.signal.output)
        self.buffer.head(self.output)
| [
"numpy.zeros"
] | [((232, 260), 'numpy.zeros', 'np.zeros', (['[self.buffer_size]'], {}), '([self.buffer_size])\n', (240, 260), True, 'import numpy as np\n')] |
import sys
import numpy as np
from gcodeBuddy import angle, Arc, centers_from_params
class Command:
    """
    Represents one line of Marlin g-code (e.g. ``"G1 X1.5 Y2"``).

    The command word is kept in ``self.command``; every parameter letter is
    stored upper-cased in ``self.params``, mapped to its float value
    (``{"X": 1.5, "Y": 2.0}``).  All parameter lookups are case-insensitive.

    :param init_string: line of Marlin g-code
    :type init_string: str
    """

    def __init__(self, init_string):
        """
        Parse ``init_string`` into a command word and a parameter dict.
        On malformed input, prints an error and exits (module convention).
        """
        err_msg = "Error in marlin.gcode_command.__init__(): "
        # commands whose parameter letters carry no numeric value (e.g. "M84 X")
        no_parameter_commands = ["M84"]
        if len(init_string) == 0:
            print(err_msg + "argument passed to 'init_string' can't be empty string")
            sys.exit(1)

        # removing extraneous spaces (leading, trailing, and repeated interior)
        command_string = init_string
        while command_string[0] == " ":
            command_string = command_string[1:]
        while command_string[-1] == " ":
            command_string = command_string[:-1]
        ind = 0
        while (ind + 1) < len(command_string):
            if command_string[ind] == " " and command_string[ind + 1] == " ":
                command_string = command_string[:ind] + command_string[(ind + 1):]
            else:
                ind += 1

        # the first token must be a recognized Marlin command word
        command_list = command_string.split(" ")
        if command_list[0] in marlin_commands():
            self.command = command_list[0]
            command_list = command_list[1:]
        else:
            print(err_msg + "Unrecognized Marlin command passed in argument 'init_string'")
            sys.exit(1)

        self.params = dict()  # parameter letter -> value, e.g. {"X": 0.0, ...}
        for parameter_str in command_list:
            if parameter_str[0].isalpha():
                if self.command in no_parameter_commands:
                    self.params[parameter_str.upper()] = 0
                else:
                    try:
                        float(parameter_str[1:])
                    except ValueError:
                        print(err_msg + "Marlin parameter passed in argument 'init_string' of non-int/non-float type")
                        sys.exit(1)
                    else:
                        self.params[parameter_str[0].upper()] = float(parameter_str[1:])
            else:
                print(err_msg + "Unrecognized Marlin parameter passed in argument 'init_string'")
                sys.exit(1)

    def get_command(self):
        """
        :return: g-code command word (e.g. ``"G1"``)
        :rtype: str
        """
        return self.command

    def has_param(self, param_char):
        """
        :param param_char: parameter character to search for (case-insensitive)
        :type param_char: str
        :return: whether the Command object has the given parameter
        :rtype: bool
        """
        err_msg = "Error in marlin.gcode_command.has_param(): "
        if isinstance(param_char, str):
            return param_char.upper() in self.params
        else:
            print(err_msg + "Argument 'param_char' of non-string type")
            sys.exit(1)

    def get_param(self, param_char):
        """
        :param param_char: parameter character to search for (case-insensitive)
        :type param_char: str
        :return: value of parameter character stored in g-code command
        :rtype: float
        """
        err_msg = "Error in marlin.gcode_command.get_param(): "
        if isinstance(param_char, str):
            # keys are stored upper-cased in __init__, so normalize the query.
            # BUGFIX: previously a lower-case query passed has_param() but
            # failed here because only has_param() upper-cased its argument.
            param_char = param_char.upper()
            if param_char in self.params:
                return self.params[param_char]
            else:
                print(err_msg + "Command does not contain Marlin parameter given in argument 'param_char'")
                sys.exit(1)
        else:
            print(err_msg + "Argument 'param_char' of non-string type")
            sys.exit(1)

    def set_param(self, param_char, param_val):
        """
        sets parameter value

        :param param_char: parameter character to change value (case-insensitive)
        :type param_char: str
        :param param_val: parameter value to set
        :type param_val: int, float
        """
        err_msg = "Error in marlin.gcode_command.set_param(): "
        if isinstance(param_char, str):
            if isinstance(param_val, (int, float)):
                # normalize to the upper-cased key used by __init__/has_param
                param_char = param_char.upper()
                if param_char in self.params:
                    self.params[param_char] = param_val
                else:
                    print(err_msg + "Command does not contain Marlin parameter given in argument 'param_char'")
                    sys.exit(1)
            else:
                print(err_msg + "Argument 'param_val' of non-int/non-float type")
                sys.exit(1)
        else:
            print(err_msg + "Argument 'param_char' of non-string type")
            sys.exit(1)

    def get_string(self):
        """
        :return: entire g-code command in line form (e.g. ``"G1 X1.5"``)
        :rtype: str
        """
        ret_val = self.command
        for param_key in self.params:
            ret_val += " " + param_key + str(self.params[param_key])
        return ret_val
def command_to_arc(curr_pos, command):
    """
    converts G2/G3 Marlin g-code command to Arc object

    Supported parameter combinations (mirroring Marlin's arc moves):
    I/J center offsets (optionally with X/Y endpoint), or an R radius
    together with an X and/or Y endpoint.  Mixing R with I/J is rejected.
    Invalid input prints an error and exits (module convention).

    :param curr_pos: position of toolhead before given command
    :type curr_pos: list[int, float], tuple(int, float)
    :param command: G2/G3 command
    :type command: Command
    :return: arc toolpath travel corresponding to given g-code command
    :rtype: Arc
    """
    err_msg = "Error in marlin.command_to_arc(): "

    # error checking curr_pos: must be a 2-element list/tuple of numbers
    if isinstance(curr_pos, (list, tuple)):
        if len(curr_pos) == 2:
            valid_types = True
            for coord in curr_pos:
                if not isinstance(coord, (int, float)):
                    valid_types = False
            if not valid_types:
                print(err_msg + "Element in argument 'curr_pos' of non-int/non-float type")
                sys.exit(1)
        else:
            print(err_msg + "Argument 'curr_pos' does not contain two elements")
            sys.exit(1)
    else:
        print(err_msg + "Argument 'curr_pos' of non-list/non-tuple type")
        sys.exit(1)

    # error checking command - error checking done in Command.__init__(), just need to make sure command is passed
    if not isinstance(command, Command):
        print(err_msg + "Argument 'command' of non-Command type")
        sys.exit(1)
    if command.get_command() not in ("G2", "G3"):
        print(err_msg + "Command must be 'G2' or 'G3' for arc conversion")
        sys.exit(1)

    # collect which arc-relevant parameters are present, in XYIJR order
    # (branching below relies on this fixed ordering, e.g. ["X", "I"])
    param_list =[]
    for letter in "XYIJR":
        if command.has_param(letter):
            param_list.append(letter)

    # setting direction: G2 is clockwise ("c"), G3 counter-clockwise ("cc")
    direction = "c"
    if command.get_command() == "G3":
        direction = "cc"

    if ("I" in param_list) or ("J" in param_list): # center given via I/J offsets
        # more error checking: Marlin forbids mixing R with I/J
        if "R" in param_list:
            print(err_msg + "Command cannot mix parameter 'R' with parameters 'I' and 'J' for arc conversion")
            sys.exit(1)

        # No endpoint given: the arc starts and ends at curr_pos, i.e. a
        # full circle (start_angle == end_angle in all three cases below).
        if ("X" not in param_list) and ("Y" not in param_list):
            if param_list == ["I"]: # center offset along x only
                I = command.get_param("I")
                center = [curr_pos[0] + I, curr_pos[1]]
                # NOTE(review): radius takes the sign of I here, unlike the
                # X+I branch below which uses abs(I); confirm negative
                # offsets are handled (or rejected) upstream.
                radius = I
                start_angle = angle(center, curr_pos)
                end_angle = angle(center, curr_pos)
                return Arc(center=center,
                           radius=radius,
                           start_angle=start_angle,
                           end_angle=end_angle,
                           direction=direction)
            elif param_list == ["J"]: # center offset along y only
                J = command.get_param("J")
                center = [curr_pos[0], curr_pos[1] + J]
                # NOTE(review): same sign concern as the I-only branch above.
                radius = J
                start_angle = angle(center, curr_pos)
                end_angle = angle(center, curr_pos)
                return Arc(center=center,
                           radius=radius,
                           start_angle=start_angle,
                           end_angle=end_angle,
                           direction=direction)
            else: # both I and J: center offset along the diagonal
                I = command.get_param("I")
                J = command.get_param("J")
                center = [curr_pos[0] + I, curr_pos[1] + J]
                radius = np.sqrt(I**2 + J**2)
                start_angle = angle(center, curr_pos)
                end_angle = angle(center, curr_pos)
                return Arc(center=center,
                           radius=radius,
                           start_angle=start_angle,
                           end_angle=end_angle,
                           direction=direction)

        # Endpoint on the same horizontal/vertical line as the start:
        # only consistent if X (resp. Y) lies diametrically opposite.
        if param_list == ["X", "I"]:
            X = command.get_param("X")
            I = command.get_param("I")
            # NOTE(review): this consistency check presumably wants
            # abs(curr_pos[0] + 2*I - X) < 0.001 -- as written, any large
            # negative difference also passes.  TODO confirm intent.
            if curr_pos[0] + (2 * I) - X < 0.001:
                center = [curr_pos[0] + I, curr_pos[1]]
                radius = abs(I)
                start_angle = angle(center, curr_pos)
                end_angle = angle(center, [X, curr_pos[1]])
                return Arc(center=center,
                           radius=radius,
                           start_angle=start_angle,
                           end_angle=end_angle,
                           direction=direction)
            else:
                print(err_msg + "Invalid Command parameters for arc conversion (cannot create arc from given X and I values)")
                sys.exit(1)
        elif param_list == ["Y", "J"]:
            Y = command.get_param("Y")
            J = command.get_param("J")
            # NOTE(review): same missing-abs() concern as the X+I branch.
            if curr_pos[1] + (2 * J) - Y < 0.001:
                center = [curr_pos[0], curr_pos[1] + J]
                radius = abs(J)
                start_angle = angle(center, curr_pos)
                end_angle = angle(center, [curr_pos[0], Y])
                return Arc(center=center,
                           radius=radius,
                           start_angle=start_angle,
                           end_angle=end_angle,
                           direction=direction)
            else:
                print(err_msg + "Invalid Command parameters for arc conversion (cannot create arc from given Y and J values)")
                sys.exit(1)

        # General case: at least one of X/Y and at least one of I/J.
        # Missing offsets default to 0; missing endpoint coordinates
        # default to the current position.
        # setting I parameter
        I = 0
        if "I" in param_list:
            I = command.get_param("I")
        # setting J parameter
        J = 0
        if "J" in param_list:
            J = command.get_param("J")
        # setting X parameter
        X = curr_pos[0]
        if "X" in param_list:
            X = command.get_param("X")
        # setting Y parameter
        Y = curr_pos[1]
        if "Y" in param_list:
            Y = command.get_param("Y")
        # returning arc object centered at curr_pos + (I, J)
        center = [curr_pos[0] + I, curr_pos[1] + J]
        radius = np.sqrt(I**2 + J**2)
        start_angle = angle(center, curr_pos)
        end_angle = angle(center, [X, Y])
        return Arc(center=center,
                   radius=radius,
                   start_angle=start_angle,
                   end_angle=end_angle,
                   direction=direction)

    elif "R" in param_list: # radius form: center must be solved for
        if "X" in param_list or "Y" in param_list:
            # setting X parameter (defaults to current position)
            X = curr_pos[0]
            if "X" in param_list:
                X = command.get_param("X")
            # setting Y parameter (defaults to current position)
            Y = curr_pos[1]
            if "Y" in param_list:
                Y = command.get_param("Y")
            # setting R parameter; a positive R selects the minor (<=180 deg)
            # arc, a negative R the major arc -- presumably mirroring
            # Marlin's sign convention for R.  TODO confirm.
            R = command.get_param("R")
            need_smaller = R > 0 # if smaller angle arc necessary
            R = np.abs(R)
            # If the chord between start and end is longer than 2R no circle
            # of radius R passes through both points; clamp R up to the
            # half-chord so centers_from_params() has a solution.
            if (np.abs(np.sqrt((X - curr_pos[0])**2 + (Y - curr_pos[1])**2)) / 2) > R: # distance between points greater than radius
                R = (np.abs(np.sqrt((X - curr_pos[0])**2 + (Y - curr_pos[1])**2)) / 2)
            # the two candidate centers equidistant (R) from start and end
            centers = centers_from_params(curr_pos, (X, Y), R)
            # creating test arc around the first candidate center; its swept
            # angle decides which center yields the minor vs. major arc
            test_arc = Arc(center=centers[0],
                           radius=R,
                           start_angle=angle(centers[0], curr_pos),
                           end_angle=angle(centers[0], (X, Y)),
                           direction=direction)
            if need_smaller:
                if test_arc.get_angle() <= 180:
                    return test_arc
                else:
                    return Arc(center=centers[1],
                               radius=R,
                               start_angle=angle(centers[1], curr_pos),
                               end_angle=angle(centers[1], (X, Y)),
                               direction=direction)
            else:
                if test_arc.get_angle() <= 180:
                    return Arc(center=centers[1],
                               radius=R,
                               start_angle=angle(centers[1], curr_pos),
                               end_angle=angle(centers[1], (X, Y)),
                               direction=direction)
                else:
                    return test_arc
        else:
            print(err_msg + "Invalid Command parameters for arc conversion (X or Y required with R)")
            sys.exit(1)
    else: # none of I, J, R present: not an arc specification
        print(err_msg + "Invalid Command parameters for arc conversion (I, J, or R is required)")
        sys.exit(1)
def marlin_commands():
    """Return the tuple of recognized Marlin g-code command words.

    :returns: up-to-date Marlin commands, periodically scraped from the
        Marlin website and hard-coded below
    :rtype: tuple(str)
    """
    # Maintenance note: the scraper below should be uncommented and run via
    # this file's __main__ station from time to time.  It pulls the command
    # index from the Marlin website and prints it as a tuple literal, which
    # is then pasted back into the return statement below.  Scraping on every
    # call would be extremely slow and an easy source of hard-to-detect
    # errors, so the result is hard-coded instead.
    #
    # # opening site and getting BeautifulSoup object
    # gcode_index_url = "https://marlinfw.org/meta/gcode/"
    # gcode_index_client = urlopen(gcode_index_url)
    # gcode_index_html = gcode_index_client.read()
    # gcode_index_client.close()
    #
    # first_command = "G0"
    # last_command = "T6"
    #
    # # parsing through website and extracting commands into list
    # gcode_index_soup = soup(gcode_index_html, "html.parser")
    # commands = gcode_index_soup.findAll("strong")
    # i = 0
    # while True:
    #     if not isinstance(commands[i], str):  # get text from tag and convert
    #         commands[i] = str(commands[i].get_text())
    #     # splitting up website entries that encompass multiple commands.
    #     # Will change as the Marlin site is updated.
    #     multiple_command_entries = (
    #         ( "G0-G1", "G2-G3", "G17-G19", "G38.2-G38.5", "G54-G59.3", "M0-M1", "M7-M9", "M10-M11", "M18, M84", "M810-M819", "M860-M869", "M993-M994", "T0-T6"),
    #         (("G1", "G0"), ("G3", "G2"), ("G19", "G18", "G17"), ("G38.5", "G38.4", "G38.3", "G38.2"), ("G59.3", "G59.2", "G59.1", "G59", "G58", "G57", "G56", "G55", "G54"), ("M1", "M0"), ("M9", "M8", "M7"), ("M11", "M10"), ("M84", "M18"), ("M819", "M818", "M817", "M816", "M815", "M814", "M813", "M812", "M811", "M810"), ("M869", "M868", "M867", "M866", "M865", "M864", "M863", "M862", "M861", "M860"), ("M994", "M993"), ("T6", "T5", "T4", "T3", "T2", "T1", "T0"))
    #     )
    #     if commands[i] in multiple_command_entries[0]:
    #         specific_commands = multiple_command_entries[1][multiple_command_entries[0].index(commands[i])]
    #         for command in specific_commands:
    #             commands.insert(i, command)
    #         commands.pop(i + len(specific_commands))
    #     if (len(commands) > (i + 1)) and commands[i] == last_command:
    #         commands = commands[:(i + 1)]
    #         break
    #     if i >= len(commands) - 1:  # safety, in case of unexpected updates
    #         break
    #     i += 1
    #
    # return (tuple(commands))
    # ________________________________________
    return (
        "G0", "G1", "G2", "G3", "G4", "G5", "G6", "G10", "G11", "G12",
        "G17", "G18", "G19", "G20", "G21", "G26", "G27", "G28",
        "G29", "G29", "G29", "G29", "G29", "G29",
        "G30", "G31", "G32", "G33", "G34", "G35",
        "G38.2", "G38.3", "G38.4", "G38.5", "G42", "G53",
        "G54", "G55", "G56", "G57", "G58", "G59", "G59.1", "G59.2", "G59.3",
        "G60", "G61", "G76", "G80", "G90", "G91", "G92", "G425",
        "M0", "M1", "M3", "M4", "M5", "M7", "M8", "M9", "M10", "M11",
        "M16", "M17", "M18", "M84", "M20", "M21", "M22", "M23", "M24", "M25",
        "M26", "M27", "M28", "M29", "M30", "M31", "M32", "M33", "M34",
        "M42", "M43", "M43 T", "M48", "M73", "M75", "M76", "M77", "M78",
        "M80", "M81", "M82", "M83", "M85", "M92", "M100", "M104", "M105",
        "M106", "M107", "M108", "M109", "M110", "M111", "M112", "M113",
        "M114", "M115", "M117", "M118", "M119", "M120", "M121", "M122",
        "M125", "M126", "M127", "M128", "M129", "M140", "M141", "M143",
        "M145", "M149", "M150", "M154", "M155", "M163", "M164", "M165",
        "M166", "M190", "M191", "M192", "M193", "M200", "M201", "M203",
        "M204", "M205", "M206", "M207", "M208", "M209", "M211", "M217",
        "M218", "M220", "M221", "M226", "M240", "M250", "M256", "M260",
        "M261", "M280", "M281", "M282", "M290", "M300", "M301", "M302",
        "M303", "M304", "M305", "M350", "M351", "M355", "M360", "M361",
        "M362", "M363", "M364", "M380", "M381", "M400", "M401", "M402",
        "M403", "M404", "M405", "M406", "M407", "M410", "M412", "M413",
        "M420", "M421", "M422", "M425", "M428", "M430", "M486", "M500",
        "M501", "M502", "M503", "M504", "M510", "M511", "M512", "M524",
        "M540", "M569", "M575", "M600", "M603", "M605",
        "M665", "M665", "M666", "M666", "M672", "M701", "M702", "M710",
        "M808", "M810", "M811", "M812", "M813", "M814", "M815", "M816",
        "M817", "M818", "M819", "M851", "M852", "M860", "M861", "M862",
        "M863", "M864", "M865", "M866", "M867", "M868", "M869", "M871",
        "M876", "M900", "M906", "M907", "M908", "M909", "M910", "M911",
        "M912", "M913", "M914", "M915", "M916", "M917", "M918", "M928",
        "M951", "M993", "M994", "M995", "M997", "M999", "M7219",
        "T0", "T1", "T2", "T3", "T4", "T5", "T6",
    )
# station to pull commands periodically, to update return value of marlin_commands
if __name__ == "__main__":
    # Emit the known commands as a tuple literal (one entry per line) that
    # can be pasted straight back into marlin_commands() after re-scraping.
    known = marlin_commands()
    listing = "(" + "".join('"%s", \n' % name for name in known[:-1])
    listing += '"%s")' % known[-1]
    print(listing)
"numpy.abs",
"numpy.sqrt",
"sys.exit",
"gcodeBuddy.centers_from_params",
"gcodeBuddy.angle",
"gcodeBuddy.Arc"
] | [((6212, 6223), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6220, 6223), False, 'import sys\n'), ((6455, 6466), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6463, 6466), False, 'import sys\n'), ((6600, 6611), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6608, 6611), False, 'import sys\n'), ((11068, 11092), 'numpy.sqrt', 'np.sqrt', (['(I ** 2 + J ** 2)'], {}), '(I ** 2 + J ** 2)\n', (11075, 11092), True, 'import numpy as np\n'), ((11111, 11134), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (11116, 11134), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((11155, 11176), 'gcodeBuddy.angle', 'angle', (['center', '[X, Y]'], {}), '(center, [X, Y])\n', (11160, 11176), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((11192, 11297), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (11195, 11297), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((629, 640), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (637, 640), False, 'import sys\n'), ((1498, 1509), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1506, 1509), False, 'import sys\n'), ((3037, 3048), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3045, 3048), False, 'import sys\n'), ((3821, 3832), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3829, 3832), False, 'import sys\n'), ((4850, 4861), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4858, 4861), False, 'import sys\n'), ((6108, 6119), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6116, 6119), False, 'import sys\n'), ((7159, 7170), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7167, 7170), False, 'import sys\n'), ((13727, 13738), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13735, 13738), False, 'import sys\n'), ((2345, 2356), 
'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2353, 2356), False, 'import sys\n'), ((3711, 3722), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3719, 3722), False, 'import sys\n'), ((4740, 4751), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4748, 4751), False, 'import sys\n'), ((5989, 6000), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5997, 6000), False, 'import sys\n'), ((7473, 7496), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (7478, 7496), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((7525, 7548), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (7530, 7548), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((7572, 7677), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (7575, 7677), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((9198, 9221), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (9203, 9221), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((9250, 9281), 'gcodeBuddy.angle', 'angle', (['center', '[X, curr_pos[1]]'], {}), '(center, [X, curr_pos[1]])\n', (9255, 9281), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((9305, 9410), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (9308, 9410), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((9675, 9686), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9683, 9686), False, 'import sys\n'), ((11897, 11906), 'numpy.abs', 'np.abs', (['R'], {}), 
'(R)\n', (11903, 11906), True, 'import numpy as np\n'), ((12284, 12324), 'gcodeBuddy.centers_from_params', 'centers_from_params', (['curr_pos', '(X, Y)', 'R'], {}), '(curr_pos, (X, Y), R)\n', (12303, 12324), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((13573, 13584), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13581, 13584), False, 'import sys\n'), ((4612, 4623), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4620, 4623), False, 'import sys\n'), ((7980, 8003), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (7985, 8003), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((8032, 8055), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (8037, 8055), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((8079, 8184), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (8082, 8184), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((8484, 8508), 'numpy.sqrt', 'np.sqrt', (['(I ** 2 + J ** 2)'], {}), '(I ** 2 + J ** 2)\n', (8491, 8508), True, 'import numpy as np\n'), ((8535, 8558), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (8540, 8558), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((8587, 8610), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (8592, 8610), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((8634, 8739), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (8637, 8739), False, 'from 
gcodeBuddy import angle, Arc, centers_from_params\n'), ((9972, 9995), 'gcodeBuddy.angle', 'angle', (['center', 'curr_pos'], {}), '(center, curr_pos)\n', (9977, 9995), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((10024, 10055), 'gcodeBuddy.angle', 'angle', (['center', '[curr_pos[0], Y]'], {}), '(center, [curr_pos[0], Y])\n', (10029, 10055), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((10079, 10184), 'gcodeBuddy.Arc', 'Arc', ([], {'center': 'center', 'radius': 'radius', 'start_angle': 'start_angle', 'end_angle': 'end_angle', 'direction': 'direction'}), '(center=center, radius=radius, start_angle=start_angle, end_angle=\n end_angle, direction=direction)\n', (10082, 10184), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((10449, 10460), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10457, 10460), False, 'import sys\n'), ((12479, 12506), 'gcodeBuddy.angle', 'angle', (['centers[0]', 'curr_pos'], {}), '(centers[0], curr_pos)\n', (12484, 12506), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((12545, 12570), 'gcodeBuddy.angle', 'angle', (['centers[0]', '(X, Y)'], {}), '(centers[0], (X, Y))\n', (12550, 12570), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((2086, 2097), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2094, 2097), False, 'import sys\n'), ((12064, 12120), 'numpy.sqrt', 'np.sqrt', (['((X - curr_pos[0]) ** 2 + (Y - curr_pos[1]) ** 2)'], {}), '((X - curr_pos[0]) ** 2 + (Y - curr_pos[1]) ** 2)\n', (12071, 12120), True, 'import numpy as np\n'), ((12203, 12259), 'numpy.sqrt', 'np.sqrt', (['((X - curr_pos[0]) ** 2 + (Y - curr_pos[1]) ** 2)'], {}), '((X - curr_pos[0]) ** 2 + (Y - curr_pos[1]) ** 2)\n', (12210, 12259), True, 'import numpy as np\n'), ((12889, 12916), 'gcodeBuddy.angle', 'angle', (['centers[1]', 'curr_pos'], {}), '(centers[1], curr_pos)\n', (12894, 12916), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((12959, 
12984), 'gcodeBuddy.angle', 'angle', (['centers[1]', '(X, Y)'], {}), '(centers[1], (X, Y))\n', (12964, 12984), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((13238, 13265), 'gcodeBuddy.angle', 'angle', (['centers[1]', 'curr_pos'], {}), '(centers[1], curr_pos)\n', (13243, 13265), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n'), ((13308, 13333), 'gcodeBuddy.angle', 'angle', (['centers[1]', '(X, Y)'], {}), '(centers[1], (X, Y))\n', (13313, 13333), False, 'from gcodeBuddy import angle, Arc, centers_from_params\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 12:43:03 2019
@author: bressler
"""
import SBCcode as sbc
import numpy as np
import matplotlib.pyplot as plt
# Histogram, over the first 90 events of one run, of the time between each
# PMT trace and the most recent camera-off transition preceding it.
dt = []
datadir = '/bluearc/storage/SBC-17-data'
run = '20170710_0'
for en in range(90):
    e = sbc.DataHandling.GetSBCEvent.GetEvent(datadir + '/' + run, en)
    # Edges of the camera gate signal mark camera on/off transitions.
    cgate = e["fastDAQ"]["CAMgate"]
    dcam = np.diff(cgate)
    fdt = e["fastDAQ"]["time"]
    # camOnTimes kept for reference/symmetry; only the off-times are used below
    camOnTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] < -0.5]
    camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]
    # Align PMT trace timestamps onto the fastDAQ time axis.
    pmttracetime = e["PMTtraces"]["t0_sec"][:, 0] + e["PMTtraces"]["t0_frac"][:, 0]
    d = sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)
    pmtalign = d["PMT_trigt0_sec"] + d["PMT_trigt0_frac"]
    tracetimes = pmttracetime - pmtalign
    for t in tracetimes:
        if -0.15 < t < 0.1:
            # Find the last camera-off edge before this trace
            # (camOffTimes is in increasing time order).
            lastCamOff = 0
            for camOff in camOffTimes:
                if t > camOff:
                    lastCamOff = camOff
                elif t < camOff:
                    break
            # BUGFIX: this append previously sat inside the loop above, so a
            # partial (t - lastCamOff) was recorded at every scanned edge;
            # only the final difference is meaningful.
            dt.append(t - lastCamOff)
plt.figure()
counts, bins, _ = plt.hist(dt, 800)
plt.show()  # BUGFIX: was "plt.show" (bare attribute access, never displayed)
print("bin width: " + str((bins[1] - bins[0]) * 1e6) + " microseconds")
| [
"matplotlib.pyplot.hist",
"numpy.diff",
"matplotlib.pyplot.figure",
"SBCcode.DataHandling.GetSBCEvent.GetEvent",
"SBCcode.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment"
] | [((1299, 1311), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1309, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1337), 'matplotlib.pyplot.hist', 'plt.hist', (['dt', '(800)'], {}), '(dt, 800)\n', (1328, 1337), True, 'import matplotlib.pyplot as plt\n'), ((308, 370), 'SBCcode.DataHandling.GetSBCEvent.GetEvent', 'sbc.DataHandling.GetSBCEvent.GetEvent', (["(datadir + '/' + run)", 'en'], {}), "(datadir + '/' + run, en)\n", (345, 370), True, 'import SBCcode as sbc\n'), ((413, 427), 'numpy.diff', 'np.diff', (['cgate'], {}), '(cgate)\n', (420, 427), True, 'import numpy as np\n'), ((808, 873), 'SBCcode.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment', 'sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment', (['e'], {}), '(e)\n', (870, 873), True, 'import SBCcode as sbc\n')] |
from neuronmi.simulators.solver.aux import SiteCurrent, surface_normal
from neuronmi.simulators.solver.linear_algebra import LinearSystemSolver
from neuronmi.simulators.solver.transferring import SubMeshTransfer
from neuronmi.simulators.solver.embedding import EmbeddedMesh
from neuronmi.simulators.solver.membrane import MembraneODESolver
from neuronmi.mesh.mesh_utils import load_h5_mesh
import numpy as np
import itertools
from dolfin import *
# Optimizations: global DOLFIN/FEniCS form-compiler settings.
# 'uflacs' representation plus aggressive C++ flags speed up the generated
# assembly kernels; note that -ffast-math/-march=native trade strict IEEE
# float behavior and binary portability for speed.
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['cpp_optimize_flags'] = '-O3 -ffast-math -march=native'
# NOTE(review): 'shared_facet' ghost mode is presumably set for interior-facet
# integrals under MPI parallel runs -- confirm against the solver's measures.
parameters['ghost_mode'] = 'shared_facet'
def neuron_solver(mesh_path, emi_map, problem_parameters, scale_factor=None, verbose=False):
'''
Solver for the Hdiv formulation of the EMI equations
mesh_path: str that is the path to HDF5File containing mesh, ...
emi_map: EMIEntityMap of the mesh
problem_parameters: dict specifying the following
For each neuron (neuron_i) the I_ion, cond[uctivity], Cm, parameters for stim[_*]ulation params
For exterior (external) cond[uctivity], names of insulated exterior boundaries
For probe stimulated_sites (named) and their currents
solver_parameters: time_step, dt (of EMI), dt_ode
'''
mesh_path = str(mesh_path)
mesh, volume_marking_f, facet_marking_f = load_h5_mesh(mesh_path, scale_factor)
solver_parameters = problem_parameters['solver']
neurons_parameters = problem_parameters['neurons']
ext_parameters = problem_parameters['ext']
probe_parameters = problem_parameters['probe']
# TODO use v_rest to initialize intracellular potential initial condition
v_rest = -75
I_ion = Constant(0)
num_neurons = emi_map.num_neurons
# Do we have properties for each one
if isinstance(neurons_parameters, list):
assert len(
neurons_parameters) == num_neurons, "If 'neurons' parameter is a list, the lentgh must be the same as" \
"the number of neurons in the mesh"
else:
neurons_parameters = [neurons_parameters] * num_neurons
# neuron_props = [problem_parameters['neuron_%d' % i] for i in range(num_neurons)]
# ext_props = problem_parameters['external']
cell = mesh.ufl_cell()
# We have 3 spaces S for sigma = -kappa*grad(u) [~electric field]
# U for potential u
# Q for transmebrane potential p;
Sel = FiniteElement('RT', cell, 1)
Vel = FiniteElement('DG', cell, 0)
Qel = FiniteElement('Discontinuous Lagrange Trace', cell, 0)
W = FunctionSpace(mesh, MixedElement([Sel, Vel, Qel]))
print('PDE part will be solved for %d unknowns' % W.dim())
sigma, u, p = TrialFunctions(W)
tau, v, q = TestFunctions(W)
# To integrate over inside and outside of the neurons we define a volume
dx = Measure('dx', domain=mesh, subdomain_data=volume_marking_f)
# We will also need a measure for integratin over the neuron surfaces
dS = Measure('dS', domain=mesh, subdomain_data=facet_marking_f)
# Orient normal so that it is outer normal of neurons
n = FacetNormal(mesh)('+')
# Everything is driven by membrane response. This will be updated
# by the ode solver. The ode solver will work on proper space defined
# only on the neuron. The solution shall then be taked to a facet
# space Q (think 3rd component of W). Finally W mu
Q = FunctionSpace(mesh, Qel) # Everywhere
p0 = Function(Q) # Previous transm potential now 0
# The weak form
# kappa**-1 * (sigma, tau)*dx - (div tau, u)*dx + (tau.n, p)*dS = 0
# -(div sigma, v)*dx = 0
# (sigma.n - Cm/dt*p, q)*dS = (I_ion - Cm/dt*p0)*dS
# Extract volumes tags for volume and neurons
ext_Vtag = emi_map.volume_physical_tags('external')['all']
n_Vtags = [emi_map.volume_physical_tags('neuron_%d' % i)['all'] for i in range(num_neurons)]
a = ((1 / Constant(ext_parameters['cond_ext'])) * inner(sigma, tau) * dx(ext_Vtag)
- inner(div(tau), u) * dx(ext_Vtag)
- inner(div(sigma), v) * dx(ext_Vtag))
# Add neurons
for n_Vtag, n_props in zip(n_Vtags, neurons_parameters):
a += ((1 / Constant(n_props['cond_int'])) * inner(sigma, tau) * dx(n_Vtag) +
- inner(div(tau), u) * dx(n_Vtag)
- inner(div(sigma), v) * dx(n_Vtag))
dt_fem = Constant(solver_parameters['dt_fem'])
# Extract surface tags for surface contribs of the neurons.
# NOTE: here the distanction between surfaces does of neuron does
# not matter
n_Stags = map(list,
[emi_map.surface_physical_tags('neuron_%d' % i).values() for i in range(num_neurons)])
n_Stags = list(n_Stags)
for n_Stag, n_props in zip(n_Stags, neurons_parameters):
a += sum(inner(p('+'), dot(tau('+'), n)) * dS(i) for i in n_Stag)
a += sum(inner(q('+'), dot(sigma('+'), n)) * dS(i) for i in n_Stag)
a += -sum(Constant(n_props['Cm'] / dt_fem) * inner(q('+'), p('+')) * dS(i) for i in n_Stag)
iterator = iter(zip(n_Stags, neurons_parameters))
# Rhs contributions
n_Stag, n_props = next(iterator)
L = sum(inner(q('+'), I_ion - Constant(n_props['Cm'] / dt_fem) * p0('+')) * dS(i)
for i in n_Stag)
for n_Stag, n_props in iterator:
L += sum(inner(q('+'), I_ion - Constant(n_props['Cm'] / dt_fem) * p0('+')) * dS(i)
for i in n_Stag)
# Boundary conditions: grounded surfaces are neumann and we don't do
# anything special there. Insulated sites and the stimated site(s) of
# the probe are Dirichlet. Additional Dirichlet bcs contrain DLT dofs
insulated_tags = [emi_map.surface_physical_tags('box')[name] for name in ext_parameters['insulated_bcs']]
# NOTE: (0, 0, 0) means that the dof is set based on (0, 0, 0).n
bc_insulated = [DirichletBC(W.sub(0), Constant((0, 0, 0)), facet_marking_f, tag)
for tag in insulated_tags]
# The site current are normal*magnitude where the normal is pointing
# into the inside of the probe. That is, wrt box that contains it it
# is outer normal.
inside_point = mesh.coordinates().min(axis=0) # In the exterior of probe
site_currents = []
# Add the stimulated site
if 'probe' in emi_map.surfaces:
probe_surfaces = emi_map.surface_physical_tags('probe') # Dict
stim_sites = [] # names
# Stimulated sites must be a list of contact_names
if 'stimulated_sites' in probe_parameters.keys():
if probe_parameters['stimulated_sites'] is not None:
if len(probe_parameters['stimulated_sites']) > 0:
site_currents = probe_parameters['current']
if isinstance(site_currents, (int, float)):
site_currents = [site_currents] * len(probe_parameters['stimulated_sites'])
else:
assert len(site_currents) == len(probe_parameters['stimulated_sites']), "Length of probe " \
"'currents' and " \
"'stimulated_sites' " \
"should correspond"
for name in probe_parameters['stimulated_sites']:
tag = probe_surfaces[name]
stim_sites.append(tag)
# Construct normal*I expressions for every site
site_currents = [SiteCurrent(I=current, n=surface_normal(site, facet_marking_f, inside_point),
degree=1)
for site, current in zip(stim_sites, site_currents)]
# Now the bcs
bc_stimulated = [DirichletBC(W.sub(0), current, facet_marking_f, site)
for site, current in zip(stim_sites, site_currents)]
# From the system they are the same
bc_insulated.extend(bc_stimulated)
# Sites of the probe that are not stimulated are insulated
insulated_probe_sites = set(probe_surfaces.values()) - set(stim_sites)
# Enforce on PDE
bc_insulated.extend(DirichletBC(W.sub(0), Constant((0, 0, 0)), facet_marking_f, site)
for site in insulated_probe_sites)
all_neuron_surfaces = set(sum(n_Stags, []))
not_neuron_surfaces = set(facet_marking_f.array()) - all_neuron_surfaces
# A specific of the setup is that the facet space is too large. It
# should be defined only on the neuron surfaces but it is defined
# everywhere instead. So the not neuron part should be set to 0
bc_constrained = [DirichletBC(W.sub(2), Constant(0), facet_marking_f, tag) for tag in not_neuron_surfaces]
assembler = SystemAssembler(a, L, bcs=bc_insulated + bc_constrained)
A, b = Matrix(), Vector()
assembler.assemble(A)
assembler.assemble(b)
# Not singular
# import numpy as np
# print np.min(np.abs(np.linalg.eigvalsh(A.array())))
la_solver = LinearSystemSolver(A, W, solver_parameters)
dt_ode = solver_parameters['dt_ode']
assert dt_ode <= dt_fem(0)
# Setup neuron
fem_ode_sync = int(dt_fem(0) / dt_ode)
# Mesh of all neurons; individual are its submesh
neuron_surf_mesh = EmbeddedMesh(facet_marking_f, list(all_neuron_surfaces))
neurons_subdomains = neuron_surf_mesh.marking_function
# It is on this mesh that ode will update transmemebrane current and
# talk with pde
Q_neuron = FunctionSpace(neuron_surf_mesh, 'DG', 0) # P0 on surface <-> DLT on facets
transfer = SubMeshTransfer(mesh, neuron_surf_mesh)
# The ODE solver talks to the worlk via chain: Q_neuron <-> Q <- W
p0_neuron = Function(Q_neuron)
# Between DLT mesh and submesh space
assign_toQ_neuron_fromQ = transfer.compute_map(Q_neuron, Q, strict=False)
assign_toQ_fromQ_neuron = transfer.compute_map(Q, Q_neuron, strict=False)
# From component to DLT on mesh
toQ_fromW2 = FunctionAssigner(Q, W.sub(2))
toQin_fromQns, toQn_fromQins, p0is = [], [], []
# p0i \in Qi <-> Q_neuron \ni p0_neuron
neuron_solutions = []
for i, neuron_surfaces in enumerate(n_Stags):
# Pick the nueuron from neuron collection
ni_mesh = EmbeddedMesh(neurons_subdomains, neuron_surfaces)
ni_subdomains = ni_mesh.marking_function
map_ = emi_map.surface_physical_tags('neuron_%d' % i)
soma = tuple(map_[k] for k in map_ if 'soma' in k)
dendrite = tuple(map_[k] for k in map_ if 'dend' in k)
axon = tuple(map_[k] for k in map_ if 'axon' in k)
ode_solver = MembraneODESolver(ni_subdomains,
soma=soma, axon=axon, dendrite=dendrite,
problem_parameters=neurons_parameters[i],
scale_factor=scale_factor)
sim_duration = solver_parameters['sim_duration']
assert sim_duration > 0.0
interval = (0.0, sim_duration)
# NOTE: a generator; nothing is computed so far
ode_solutions = ode_solver.solve(interval, dt_ode) # Potentials only
neuron_solutions.append(ode_solutions)
transfer = SubMeshTransfer(neuron_surf_mesh, ni_mesh)
# Communication between neuron and the collection
Qi_neuron = ode_solver.V
p0i_neuron = Function(Qi_neuron)
# Between DLT mesh and submesh space
assign_toQin_fromQn = transfer.compute_map(Qi_neuron, Q_neuron, strict=False)
assign_toQn_fromQin = transfer.compute_map(Q_neuron, Qi_neuron, strict=False)
toQin_fromQns.append(assign_toQin_fromQn)
toQn_fromQins.append(assign_toQn_fromQin)
p0is.append(p0i_neuron)
V = FunctionSpace(mesh, Vel)
# Finally for postprocessing we return the current time, potential
# and membrane current
u_out = Function(V)
u_out_values = u_out.vector().get_local()
array = volume_marking_f.array()
# Set potential inside neurons ...
for n_Vtag in n_Vtags:
# Rest if inside else the value that was there. So this is like or
u_out_values[:] = np.where(array == n_Vtag, v_rest, u_out_values)
u_out.vector().set_local(u_out_values)
u_out.vector().apply('insert')
w = Function(W)
FunctionAssigner(W.sub(1), V).assign(w.sub(1), u_out)
# Keep the assigner around as we'll go out in the loop
toV_fromW1 = FunctionAssigner(V, W.sub(1))
toV_fromW1.assign(u_out, w.sub(1))
# One value per cell of the neuron surface mesh
current_out, current_aux = map(Function, (Q_neuron, Q))
w_aux = Function(W)
current_form = sum(1. / FacetArea(mesh)('+') * inner(dot(w.sub(0)('+'), n), q('+')) * dS(i)
for i in all_neuron_surfaces)
current_form += inner(Constant(0), v) * dx(ext_Vtag) # Fancy zero for orientation
# The idea here is that assembling the current form gives the right
# dof values to assign to the DLT space (evals at cell midpoints).
# Then we reduce as normal to the subcomponent and submesh space
w_aux.vector()[:] = assemble(current_form)
toQ_fromW2.assign(current_aux, w_aux.sub(2))
assign_toQ_neuron_fromQ(current_out, current_aux)
# To get initial state
yield 0, u_out, current_out, p0_neuron
neuron_solutions = itertools.izip(*neuron_solutions)
step_count = 0
for odes in neuron_solutions:
step_count += 1
(t0, t1) = odes[0][0]
print('Time is (%g, %g)' % (t0, t1))
if step_count == fem_ode_sync:
step_count = 0
# From individual neuron to collection
for i in range(num_neurons):
# FIXME: does this override?
toQn_fromQins[i](p0_neuron, odes[i][1])
# Upscale p0_neuron->p0
assign_toQ_fromQ_neuron(p0, p0_neuron)
# We could have changing in time simulation
for I in site_currents:
if 't' in I:
I.t = float(t1)
# Assemble right-hand side (changes with time, so need to reassemble)
assembler.assemble(b) # Also applies bcs
# New (sigma, u, p) ...
if verbose:
print('\tSolving linear system of size %d' % A.size(0))
la_solver.solve(w.vector(), b)
# Update u_out and current_out for output
toV_fromW1.assign(u_out, w.sub(1))
# NOTE: the current_form is points to w which has been updated by solve
w_aux.vector()[:] = assemble(current_form)
toQ_fromW2.assign(current_aux, w_aux.sub(2))
assign_toQ_neuron_fromQ(current_out, current_aux)
# Now transfer the new transm potential down to ode ...
toQ_fromW2.assign(p0, w.sub(2)) # Compt to Q
assign_toQ_neuron_fromQ(p0_neuron, p0) # To membrane space
yield t1, u_out, current_out, p0_neuron
for i in range(num_neurons):
toQin_fromQns[i](p0is[i], p0_neuron)
odes[i][1].assign(p0is[i])
| [
"numpy.where",
"neuronmi.simulators.solver.transferring.SubMeshTransfer",
"neuronmi.simulators.solver.embedding.EmbeddedMesh",
"neuronmi.mesh.mesh_utils.load_h5_mesh",
"itertools.izip",
"neuronmi.simulators.solver.aux.surface_normal",
"neuronmi.simulators.solver.linear_algebra.LinearSystemSolver",
"ne... | [((1413, 1450), 'neuronmi.mesh.mesh_utils.load_h5_mesh', 'load_h5_mesh', (['mesh_path', 'scale_factor'], {}), '(mesh_path, scale_factor)\n', (1425, 1450), False, 'from neuronmi.mesh.mesh_utils import load_h5_mesh\n'), ((9421, 9464), 'neuronmi.simulators.solver.linear_algebra.LinearSystemSolver', 'LinearSystemSolver', (['A', 'W', 'solver_parameters'], {}), '(A, W, solver_parameters)\n', (9439, 9464), False, 'from neuronmi.simulators.solver.linear_algebra import LinearSystemSolver\n'), ((9994, 10033), 'neuronmi.simulators.solver.transferring.SubMeshTransfer', 'SubMeshTransfer', (['mesh', 'neuron_surf_mesh'], {}), '(mesh, neuron_surf_mesh)\n', (10009, 10033), False, 'from neuronmi.simulators.solver.transferring import SubMeshTransfer\n'), ((13738, 13771), 'itertools.izip', 'itertools.izip', (['*neuron_solutions'], {}), '(*neuron_solutions)\n', (13752, 13771), False, 'import itertools\n'), ((10661, 10710), 'neuronmi.simulators.solver.embedding.EmbeddedMesh', 'EmbeddedMesh', (['neurons_subdomains', 'neuron_surfaces'], {}), '(neurons_subdomains, neuron_surfaces)\n', (10673, 10710), False, 'from neuronmi.simulators.solver.embedding import EmbeddedMesh\n'), ((11027, 11173), 'neuronmi.simulators.solver.membrane.MembraneODESolver', 'MembraneODESolver', (['ni_subdomains'], {'soma': 'soma', 'axon': 'axon', 'dendrite': 'dendrite', 'problem_parameters': 'neurons_parameters[i]', 'scale_factor': 'scale_factor'}), '(ni_subdomains, soma=soma, axon=axon, dendrite=dendrite,\n problem_parameters=neurons_parameters[i], scale_factor=scale_factor)\n', (11044, 11173), False, 'from neuronmi.simulators.solver.membrane import MembraneODESolver\n'), ((11620, 11662), 'neuronmi.simulators.solver.transferring.SubMeshTransfer', 'SubMeshTransfer', (['neuron_surf_mesh', 'ni_mesh'], {}), '(neuron_surf_mesh, ni_mesh)\n', (11635, 11662), False, 'from neuronmi.simulators.solver.transferring import SubMeshTransfer\n'), ((12557, 12604), 'numpy.where', 'np.where', (['(array == n_Vtag)', 'v_rest', 
'u_out_values'], {}), '(array == n_Vtag, v_rest, u_out_values)\n', (12565, 12604), True, 'import numpy as np\n'), ((7842, 7893), 'neuronmi.simulators.solver.aux.surface_normal', 'surface_normal', (['site', 'facet_marking_f', 'inside_point'], {}), '(site, facet_marking_f, inside_point)\n', (7856, 7893), False, 'from neuronmi.simulators.solver.aux import SiteCurrent, surface_normal\n')] |
import properties
import numpy as np
import matplotlib.pyplot as plt
import warnings
import os
import scipy.sparse as sp
from ..data_misfit import BaseDataMisfit
from ..objective_function import ComboObjectiveFunction
from ..maps import IdentityMap, Wires
from ..regularization import (
BaseComboRegularization,
BaseRegularization,
SimpleSmall,
Small,
SparseSmall,
Simple,
Tikhonov,
Sparse,
PGIsmallness,
PGIwithNonlinearRelationshipsSmallness,
PGI,
SmoothDeriv,
SimpleSmoothDeriv,
SparseDeriv,
PGIwithRelationships,
)
from ..utils import (
mkvc,
setKwargs,
sdiag,
diagEst,
spherical2cartesian,
cartesian2spherical,
Zero,
eigenvalue_by_power_iteration,
)
from ..utils.code_utils import deprecate_property
class InversionDirective(properties.HasProperties):
    """Base class for directives executed during an inversion.

    A directive hooks into the inversion loop at three points:
    ``initialize`` (before the first iteration), ``endIter`` (after each
    optimization iteration) and ``finish`` (after the inversion stops).
    Subclasses override whichever hooks they need; the base implementations
    are no-ops. The ``inversion`` attribute is injected by the framework and
    gives access to the inverse problem, optimizer, regularization and
    data misfit through the convenience properties below.
    """

    _REGISTRY = {}

    debug = False  #: Print debugging information

    # Accepted types for the ``reg`` and ``dmisfit`` setters below.
    _regPair = [BaseComboRegularization, BaseRegularization, ComboObjectiveFunction]
    _dmisfitPair = [BaseDataMisfit, ComboObjectiveFunction]

    def __init__(self, **kwargs):
        # Assign any keyword arguments as attributes (project utility).
        setKwargs(self, **kwargs)

    @property
    def inversion(self):
        """This is the inversion of the InversionDirective instance."""
        return getattr(self, "_inversion", None)

    @inversion.setter
    def inversion(self, i):
        # Warn when a directive instance is reused across inversions, since
        # any state it accumulated belongs to the previous run.
        if getattr(self, "_inversion", None) is not None:
            warnings.warn(
                "InversionDirective {0!s} has switched to a new inversion.".format(
                    self.__class__.__name__
                )
            )
        self._inversion = i

    @property
    def invProb(self):
        """Inverse problem attached to the inversion (shortcut)."""
        return self.inversion.invProb

    @property
    def opt(self):
        """Optimization object of the inverse problem (shortcut)."""
        return self.invProb.opt

    @property
    def reg(self):
        """Regularization, lazily pulled from the inverse problem."""
        if getattr(self, "_reg", None) is None:
            self.reg = self.invProb.reg  # go through the setter
        return self._reg

    @reg.setter
    def reg(self, value):
        assert any(
            [isinstance(value, regtype) for regtype in self._regPair]
        ), "Regularization must be in {}, not {}".format(self._regPair, type(value))

        # Normalize to a ComboObjectiveFunction so downstream code can always
        # iterate ``self.reg.objfcts``.
        if isinstance(value, BaseComboRegularization):
            value = 1 * value  # turn it into a combo objective function
        self._reg = value

    @property
    def dmisfit(self):
        """Data misfit, lazily pulled from the inverse problem."""
        if getattr(self, "_dmisfit", None) is None:
            self.dmisfit = self.invProb.dmisfit  # go through the setter
        return self._dmisfit

    @dmisfit.setter
    def dmisfit(self, value):
        assert any(
            [isinstance(value, dmisfittype) for dmisfittype in self._dmisfitPair]
        ), "Misfit must be in {}, not {}".format(self._dmisfitPair, type(value))

        # Normalize to a ComboObjectiveFunction (see ``reg`` setter).
        if not isinstance(value, ComboObjectiveFunction):
            value = 1 * value  # turn it into a combo objective function
        self._dmisfit = value

    @property
    def survey(self):
        """
        Assuming that dmisfit is always a ComboObjectiveFunction,
        return a list of surveys for each dmisfit [survey1, survey2, ... ]
        """
        return [objfcts.simulation.survey for objfcts in self.dmisfit.objfcts]

    @property
    def simulation(self):
        """
        Assuming that dmisfit is always a ComboObjectiveFunction,
        return a list of problems for each dmisfit [prob1, prob2, ...]
        """
        return [objfcts.simulation for objfcts in self.dmisfit.objfcts]

    # Deprecated alias kept for backward compatibility; use ``simulation``.
    prob = deprecate_property(
        simulation,
        "prob",
        new_name="simulation",
        removal_version="0.16.0",
        future_warn=True,
    )

    def initialize(self):
        """Hook called once before the first iteration; override as needed."""
        pass

    def endIter(self):
        """Hook called after every optimization iteration; override as needed."""
        pass

    def finish(self):
        """Hook called after the inversion terminates; override as needed."""
        pass

    def validate(self, directiveList=None):
        """Check compatibility with the other directives; True if valid."""
        return True
class DirectiveList(object):
    """Ordered collection of :class:`InversionDirective` objects.

    Forwards the ``initialize`` / ``endIter`` / ``finish`` hooks to every
    directive it holds, and propagates ``debug`` and ``inversion``
    assignments to each member.
    """

    dList = None  #: The list of Directives

    def __init__(self, *directives, **kwargs):
        self.dList = []
        for directive in directives:
            assert isinstance(
                directive, InversionDirective
            ), "All directives must be InversionDirectives not {}".format(
                type(directive)
            )
            self.dList.append(directive)
        setKwargs(self, **kwargs)

    @property
    def debug(self):
        """Debug flag; setting it mirrors the value onto every directive."""
        return getattr(self, "_debug", False)

    @debug.setter
    def debug(self, value):
        for directive in self.dList:
            directive.debug = value
        self._debug = value

    @property
    def inversion(self):
        """This is the inversion of the InversionDirective instance."""
        return getattr(self, "_inversion", None)

    @inversion.setter
    def inversion(self, i):
        # Re-assigning the same inversion is a no-op.
        if self.inversion is i:
            return
        if getattr(self, "_inversion", None) is not None:
            warnings.warn(
                "{0!s} has switched to a new inversion.".format(self.__class__.__name__)
            )
        for directive in self.dList:
            directive.inversion = i
        self._inversion = i

    def call(self, ruleType):
        """Invoke hook ``ruleType`` on every directive, in order."""
        if self.dList is None:
            if self.debug:
                print("DirectiveList is None, no directives to call!")
            return

        directives = ["initialize", "endIter", "finish"]
        assert ruleType in directives, 'Directive type must be in ["{0!s}"]'.format(
            '", "'.join(directives)
        )
        for directive in self.dList:
            getattr(directive, ruleType)()

    def validate(self):
        """Validate every directive against this list; True when all pass."""
        for directive in self.dList:
            directive.validate(self)
        return True
class BetaEstimate_ByEig(InversionDirective):
    r"""
    Estimate the trade-off parameter beta between the data misfit(s) and the
    regularization as a multiple of the ratio between the highest eigenvalue of the
    data misfit term and the highest eigenvalue of the regularization.
    The highest eigenvalues are estimated through power iterations and Rayleigh quotient.
    """

    beta0_ratio = 1.0  #: the estimated ratio is multiplied by this to obtain beta
    n_pw_iter = 4  #: number of power iterations for estimation.
    seed = None  #: Random seed for the directive

    def initialize(self):
        r"""
        The initial beta is calculated by comparing the estimated
        eigenvalues of JtJ and WtW.
        To estimate the eigenvector of **A**, we will use one iteration
        of the *Power Method*:

        .. math::
            \mathbf{x_1 = A x_0}

        Given this (very coarse) approximation of the eigenvector, we can
        use the *Rayleigh quotient* to approximate the largest eigenvalue.

        .. math::
            \lambda_0 = \frac{\mathbf{x^\top A x}}{\mathbf{x^\top x}}

        We will approximate the largest eigenvalue for both JtJ and WtW,
        and use some ratio of the quotient to estimate beta0.

        .. math::
            \beta_0 = \gamma \frac{\mathbf{x^\top J^\top J x}}{\mathbf{x^\top W^\top W x}}

        :rtype: float
        :return: beta0
        """
        # Seed numpy's RNG so the power-iteration start vector is reproducible.
        if self.seed is not None:
            np.random.seed(self.seed)

        if self.debug:
            print("Calculating the beta0 parameter.")

        m = self.invProb.model

        # Largest eigenvalue of the data misfit (~ J^T J) at the current model.
        dm_eigenvalue = eigenvalue_by_power_iteration(
            self.dmisfit, m, n_pw_iter=self.n_pw_iter,
        )
        # Largest eigenvalue of the regularization (~ W^T W).
        reg_eigenvalue = eigenvalue_by_power_iteration(
            self.reg, m, n_pw_iter=self.n_pw_iter,
        )

        self.ratio = dm_eigenvalue / reg_eigenvalue
        self.beta0 = self.beta0_ratio * self.ratio
        self.invProb.beta = self.beta0
class BetaSchedule(InversionDirective):
    """Cooling schedule for the trade-off parameter beta.

    Every ``coolingRate`` iterations, ``invProb.beta`` is divided by
    ``coolingFactor``.
    """

    coolingFactor = 8.0  # divisor applied to beta at each cooling step
    coolingRate = 3  # cool every this-many iterations

    def endIter(self):
        iteration = self.opt.iter
        # Nothing to do on iteration 0 or between cooling steps.
        if iteration <= 0 or iteration % self.coolingRate != 0:
            return
        if self.debug:
            print(
                "BetaSchedule is cooling Beta. Iteration: {0:d}".format(iteration)
            )
        self.invProb.beta /= self.coolingFactor
class AlphasSmoothEstimate_ByEig(InversionDirective):
    """
    Estimate the alphas multipliers for the smoothness terms of the regularization
    as a multiple of the ratio between the highest eigenvalue of the
    smallness term and the highest eigenvalue of each smoothness term of the regularization.
    The highest eigenvalue are estimated through power iterations and Rayleigh quotient.
    """

    alpha0_ratio = (
        1.0  #: the estimated Alpha_smooth is multiplied by this ratio (int or array)
    )
    n_pw_iter = 4  #: number of power iterations for the estimate
    verbose = False  #: print the estimated alphas at the initialization
    debug = False  #: print the current process
    seed = None  # random seed for the directive

    def initialize(self):
        """Estimate and assign the smoothness multipliers before iterating.

        Handles two layouts of ``self.reg``:

        * mode 1 -- a two-level combo (e.g. several PGI/joint regularizations,
          each itself a combo of smallness/smoothness parts);
        * mode 2 -- a flat, single-level combo of regularization parts.
        """
        # Seed numpy's RNG so the power-iteration start vector is reproducible.
        if self.seed is not None:
            np.random.seed(self.seed)

        # Two-level combo: reg.objfcts[i] has its own objfcts list.
        if getattr(self.reg.objfcts[0], "objfcts", None) is not None:
            # Total number of (i, j) regularization parts.
            nbr = np.sum(
                [len(self.reg.objfcts[i].objfcts) for i in range(len(self.reg.objfcts))]
            )

            # Find the smallness terms in a two-levels combo-regularization.
            # Each row of ``smallness`` is [outer index, inner index, is-smallness].
            smallness = []
            alpha0 = []
            for i, regobjcts in enumerate(self.reg.objfcts):
                for j, regpart in enumerate(regobjcts.objfcts):
                    alpha0 += [self.reg.multipliers[i] * regobjcts.multipliers[j]]
                    smallness += [
                        [
                            i,
                            j,
                            isinstance(
                                regpart,
                                (
                                    SimpleSmall,
                                    Small,
                                    SparseSmall,
                                    PGIsmallness,
                                    PGIwithNonlinearRelationshipsSmallness,
                                ),
                            ),
                        ]
                    ]
            smallness = np.r_[smallness]
            # Select the first, only considered, smallness term.
            smallness = smallness[smallness[:, 2] == 1][:, :2][0]

            # Find the smoothness terms in a two-levels combo-regularization.
            # Rows are [outer index, inner index, is-smoothness].
            smoothness = []
            for i, regobjcts in enumerate(self.reg.objfcts):
                for j, regpart in enumerate(regobjcts.objfcts):
                    smoothness += [
                        [
                            i,
                            j,
                            isinstance(
                                regpart, (SmoothDeriv, SimpleSmoothDeriv, SparseDeriv)
                            ),
                        ]
                    ]
            smoothness = np.r_[smoothness]
            mode = 1
        else:
            # Flat combo: one level of regularization parts.
            nbr = len(self.reg.objfcts)
            alpha0 = self.reg.multipliers
            smoothness = np.r_[
                [
                    isinstance(regpart, (SmoothDeriv, SimpleSmoothDeriv, SparseDeriv))
                    for regpart in self.reg.objfcts
                ]
            ]
            mode = 2

        # Broadcast a scalar ratio to one value per regularization part.
        if not isinstance(self.alpha0_ratio, np.ndarray):
            self.alpha0_ratio = self.alpha0_ratio * np.ones(nbr)

        if self.debug:
            print("Calculating the Alpha0 parameter.")

        m = self.invProb.model

        if mode == 2:
            # Flat combo: objfcts[0] is taken as the smallness reference.
            smallness_eigenvalue = eigenvalue_by_power_iteration(
                self.reg.objfcts[0], m, n_pw_iter=self.n_pw_iter,
            )
            for i in range(nbr):
                if smoothness[i]:
                    smooth_i_eigenvalue = eigenvalue_by_power_iteration(
                        self.reg.objfcts[i], m, n_pw_iter=self.n_pw_iter,
                    )
                    # Scale each smoothness multiplier by the eigenvalue ratio.
                    ratio = smallness_eigenvalue / smooth_i_eigenvalue

                    alpha0[i] *= self.alpha0_ratio[i] * ratio
                    mtype = self.reg.objfcts[i]._multiplier_pair
                    setattr(self.reg, mtype, alpha0[i])

        elif mode == 1:
            # Two-level combo: reference is the selected (outer, inner) smallness.
            smallness_eigenvalue = eigenvalue_by_power_iteration(
                self.reg.objfcts[smallness[0]].objfcts[smallness[1]],
                m,
                n_pw_iter=self.n_pw_iter,
            )
            for i in range(nbr):
                ratio = []
                if smoothness[i, 2]:
                    idx = smoothness[i, :2]
                    smooth_i_eigenvalue = eigenvalue_by_power_iteration(
                        self.reg.objfcts[idx[0]].objfcts[idx[1]],
                        m,
                        n_pw_iter=self.n_pw_iter,
                    )

                    # Guarded division: a zero smoothness eigenvalue yields 0
                    # instead of a divide-by-zero warning/inf.
                    ratio = np.divide(
                        smallness_eigenvalue,
                        smooth_i_eigenvalue,
                        out=np.zeros_like(smallness_eigenvalue),
                        where=smooth_i_eigenvalue != 0,
                    )

                    alpha0[i] *= self.alpha0_ratio[i] * ratio
                    mtype = self.reg.objfcts[idx[0]].objfcts[idx[1]]._multiplier_pair
                    setattr(self.reg.objfcts[idx[0]], mtype, alpha0[i])

        if self.verbose:
            print("Alpha scales: ", self.reg.multipliers)
            if mode == 1:
                for objf in self.reg.objfcts:
                    print("Alpha scales: ", objf.multipliers)
class ScalingMultipleDataMisfits_ByEig(InversionDirective):
    """
    For multiple data misfits only: multiply each data misfit term
    by the inverse of its highest eigenvalue and then
    normalize the sum of the data misfit multipliers to one.
    The highest eigenvalue are estimated through power iterations and Rayleigh quotient.
    """

    n_pw_iter = 4  #: number of power iterations for the estimate
    chi0_ratio = None  #: The initial scaling ratio (default is data misfit multipliers)
    verbose = False  #: print the estimated data misfits multipliers
    debug = False  #: print the current process
    seed = None  # random seed for the directive

    def initialize(self):
        """Estimate and set the initial data-misfit multipliers ``chi0``.

        Raises
        ------
        TypeError
            If the data misfit does not hold at least two objective functions
            (this directive only makes sense for joint inversion).
        """
        # Seed numpy's RNG so the power-iteration start vector is reproducible.
        if self.seed is not None:
            np.random.seed(self.seed)

        if self.debug:
            print("Calculating the scaling parameter.")

        if (
            getattr(self.dmisfit, "objfcts", None) is None
            or len(self.dmisfit.objfcts) == 1
        ):
            raise TypeError(
                "ScalingMultipleDataMisfits_ByEig only applies to joint inversion"
            )

        ndm = len(self.dmisfit.objfcts)
        if self.chi0_ratio is not None:
            # Broadcast a scalar ratio to one value per data misfit.
            self.chi0_ratio = self.chi0_ratio * np.ones(ndm)
        else:
            self.chi0_ratio = self.dmisfit.multipliers

        m = self.invProb.model

        # BUGFIX: ``n_pw_iter`` was declared on the class but never forwarded
        # to the eigenvalue estimator (unlike the other *_ByEig directives),
        # so user-set values were silently ignored.
        dm_eigenvalue_list = [
            eigenvalue_by_power_iteration(dm, m, n_pw_iter=self.n_pw_iter)
            for dm in self.dmisfit.objfcts
        ]

        # Scale each misfit by the inverse of its top eigenvalue, then
        # normalize so the multipliers sum to one.
        self.chi0 = self.chi0_ratio / np.r_[dm_eigenvalue_list]
        self.chi0 = self.chi0 / np.sum(self.chi0)
        self.dmisfit.multipliers = self.chi0

        if self.verbose:
            print("Scale Multipliers: ", self.dmisfit.multipliers)
class JointScalingSchedule(InversionDirective):
    """
    For multiple data misfits only: rebalance each data misfit term
    during the inversion when some datasets are fit, and others not
    using the ratios of current misfits and their respective target.
    It implements the strategy described in https://doi.org/10.1093/gji/ggaa378.
    """

    verbose = False  #: print the updated multipliers
    warmingFactor = 1.0  #: extra factor applied when warming a misfit's weight
    mode = 1  #: 1 = no dataset fit yet; 2 = at least one dataset fit
    chimax = 1e10  #: upper bound on any multiplier before rebalancing
    chimin = 1e-10  #: lower bound on any multiplier before rebalancing
    update_rate = 1  #: rebalance every this-many iterations

    def initialize(self):
        """Locate the MultiTargetMisfits directive and cache its targets.

        Raises
        ------
        TypeError
            If the data misfit does not hold at least two objective functions.
        """
        if (
            getattr(self.dmisfit, "objfcts", None) is None
            or len(self.dmisfit.objfcts) == 1
        ):
            raise TypeError("JointScalingSchedule only applies to joint inversion")

        # Boolean mask over the directive list marking MultiTargetMisfits entries.
        targetclass = np.r_[
            [
                isinstance(dirpart, MultiTargetMisfits)
                for dirpart in self.inversion.directiveList.dList
            ]
        ]
        if ~np.any(targetclass):
            # NOTE(review): in this branch ``self.targetclass`` is never set,
            # so ``endIter`` below would raise AttributeError — this directive
            # appears to require a MultiTargetMisfits directive in the list.
            self.DMtarget = None
        else:
            # Use the last MultiTargetMisfits directive found.
            self.targetclass = np.where(targetclass)[0][-1]
            self.DMtarget = self.inversion.directiveList.dList[
                self.targetclass
            ].DMtarget

        if self.verbose:
            print("Initial data misfit scales: ", self.dmisfit.multipliers)

    def endIter(self):
        """Rebalance the data-misfit multipliers once any dataset is fit."""
        # Current per-dataset misfits, recomputed by the target directive.
        self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist

        # Switch to mode 2 as soon as at least one dataset reaches its target.
        if np.any(self.dmlist < self.DMtarget):
            self.mode = 2
        else:
            self.mode = 1

        if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0:
            if self.mode == 2:
                # Only rebalance while all multipliers stay within bounds.
                if np.all(np.r_[self.dmisfit.multipliers] > self.chimin) and np.all(
                    np.r_[self.dmisfit.multipliers] < self.chimax
                ):
                    # ``indx`` marks datasets that are NOT yet fit.
                    indx = self.dmlist > self.DMtarget
                    if np.any(indx):
                        # Warm the unfit datasets by the median target/misfit
                        # ratio of the already-fit datasets.
                        multipliers = self.warmingFactor * np.median(
                            self.DMtarget[~indx] / self.dmlist[~indx]
                        )
                        if np.sum(indx) == 1:
                            indx = np.where(indx)[0][0]
                        self.dmisfit.multipliers[indx] *= multipliers
                        # Renormalize so the multipliers keep summing to one.
                        self.dmisfit.multipliers /= np.sum(self.dmisfit.multipliers)

                        if self.verbose:
                            print("Updating scaling for data misfits by ", multipliers)
                            print("New scales:", self.dmisfit.multipliers)
class TargetMisfit(InversionDirective):
    """Stop the inversion once the data misfit reaches its target value.

    ... note:: Currently this target misfit is not set up for joint inversion.
        Check out MultiTargetMisfits
    """

    chifact = 1.0  # multiplier applied to phi_d_star to get the target
    phi_d_star = None  # expected misfit; defaults to half the data count

    @property
    def target(self):
        """Target data misfit, computed lazily on first access."""
        if getattr(self, "_target", None) is not None:
            return self._target
        # the factor of 0.5 is because we do phid = 0.5*||dpred - dobs||^2
        if self.phi_d_star is None:
            self.phi_d_star = 0.5 * sum(survey.nD for survey in self.survey)
        self._target = self.chifact * self.phi_d_star
        return self._target

    @target.setter
    def target(self, val):
        self._target = val

    def endIter(self):
        # Flag the optimizer to stop once the misfit drops below target.
        if self.invProb.phi_d < self.target:
            self.opt.stopNextIteration = True
            self.print_final_misfit()

    def print_final_misfit(self):
        # UBC-style printout reporting the achieved target misfit.
        if self.opt.print_type == "ubc":
            self.opt.print_target = (
                ">> Target misfit: %.1f (# of data) is achieved"
            ) % (self.target * self.invProb.opt.factor)
class MultiTargetMisfits(InversionDirective):
    """Multi-criteria target directive for (joint/PGI) inversions.

    Tracks three stopping criteria: the geophysical data misfit(s) (DM),
    the petrophysical/clustering smallness misfit (CL), and the distance
    of the GMM parameters from their prior values (DP).
    """

    WeightsInTarget = 0  # 1: normalize the smallness target by its weights
    verbose = False
    # Chi factor for Geophysical Data Misfit
    chifact = 1.0
    phi_d_star = None
    # Chifact for Clustering/Smallness
    TriggerSmall = True
    chiSmall = 1.0
    phi_ms_star = None
    # Tolerance for parameters difference with their priors
    TriggerTheta = False  # deactivated by default
    ToleranceTheta = 1.0
    distance_norm = np.inf

    AllStop = False  # all active conditions met
    DM = False  # geophysical fit condition
    CL = False  # petrophysical fit condition
    DP = False  # parameters difference with their priors condition
    def initialize(self):
        """Locate the PGI smallness term inside the regularization.

        Sets ``self.smallness`` (its index, or -1 if absent),
        ``self.pgi_smallness`` (the term itself) and ``self._regmode``
        (1 for a two-level combo regularization, 2 for a flat one).
        """
        # Current per-dataset misfits at the starting model.
        self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]]

        # Two-level combo: reg.objfcts[i] has its own objfcts list.
        if getattr(self.invProb.reg.objfcts[0], "objfcts", None) is not None:
            # Rows are [outer index, inner index, is-PGI-smallness].
            smallness = np.r_[
                [
                    (
                        np.r_[
                            i,
                            j,
                            (
                                isinstance(
                                    regpart,
                                    PGIwithNonlinearRelationshipsSmallness,
                                )
                                or isinstance(regpart, PGIsmallness)
                            ),
                        ]
                    )
                    for i, regobjcts in enumerate(self.invProb.reg.objfcts)
                    for j, regpart in enumerate(regobjcts.objfcts)
                ]
            ]
            if smallness[smallness[:, 2] == 1][:, :2].size == 0:
                # No PGI term found: disable the smallness criterion.
                warnings.warn(
                    "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)"
                )
                self.smallness = -1
                self.pgi_smallness = None

            else:
                # Keep the first PGI smallness term found.
                self.smallness = smallness[smallness[:, 2] == 1][:, :2][0]
                self.pgi_smallness = self.invProb.reg.objfcts[
                    self.smallness[0]
                ].objfcts[self.smallness[1]]

                if self.debug:
                    print(
                        type(
                            self.invProb.reg.objfcts[self.smallness[0]].objfcts[
                                self.smallness[1]
                            ]
                        )
                    )

            self._regmode = 1

        else:
            # Flat combo: rows are [index, is-PGI-smallness].
            smallness = np.r_[
                [
                    (
                        np.r_[
                            j,
                            (
                                isinstance(
                                    regpart,
                                    PGIwithNonlinearRelationshipsSmallness,
                                )
                                or isinstance(regpart, PGIsmallness)
                            ),
                        ]
                    )
                    for j, regpart in enumerate(self.invProb.reg.objfcts)
                ]
            ]
            if smallness[smallness[:, 1] == 1][:, :1].size == 0:
                if self.TriggerSmall:
                    warnings.warn(
                        "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)."
                    )
                    self.TriggerSmall = False
                self.smallness = -1
            else:
                self.smallness = smallness[smallness[:, 1] == 1][:, :1][0]
                self.pgi_smallness = self.invProb.reg.objfcts[self.smallness[0]]

                if self.debug:
                    print(type(self.invProb.reg.objfcts[self.smallness[0]]))

            self._regmode = 2
    @property
    def DMtarget(self):
        """Per-dataset data-misfit targets (chifact * phi_d_star), lazy."""
        if getattr(self, "_DMtarget", None) is None:
            # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2
            if self.phi_d_star is None:
                # Check if it is a ComboObjective
                if isinstance(self.dmisfit, ComboObjectiveFunction):
                    self.phi_d_star = np.r_[[0.5 * survey.nD for survey in self.survey]]
                else:
                    self.phi_d_star = np.r_[[0.5 * self.survey.nD]]

            self._DMtarget = self.chifact * self.phi_d_star
        return self._DMtarget

    @DMtarget.setter
    def DMtarget(self, val):
        self._DMtarget = val
@property
def CLtarget(self):
if not getattr(self.pgi_smallness, "approx_eval", True):
# if nonlinear prior, compute targer numerically at each GMM update
samples, _ = self.pgi_smallness.gmm.sample(
len(self.pgi_smallness.gmm.cell_volumes)
)
self.phi_ms_star = self.pgi_smallness(
mkvc(samples), externalW=self.WeightsInTarget
)
self._CLtarget = self.chiSmall * self.phi_ms_star
elif getattr(self, "_CLtarget", None) is None:
# the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2
if self.phi_ms_star is None:
# Expected value is number of active cells * number of physical
# properties
self.phi_ms_star = 0.5 * len(self.invProb.model)
self._CLtarget = self.chiSmall * self.phi_ms_star
return self._CLtarget
@property
def CLnormalizedConstant(self):
if ~self.WeightsInTarget:
return 1.0
elif np.any(self.smallness == -1):
return np.sum(
sp.csr_matrix.diagonal(self.invProb.reg.objfcts[0].W) ** 2.0
) / len(self.invProb.model)
else:
return np.sum(sp.csr_matrix.diagonal(self.pgi_smallness.W) ** 2.0) / len(
self.invProb.model
)
    @CLtarget.setter
    def CLtarget(self, val):
        # Allow callers to override the cached smallness target directly.
        self._CLtarget = val
def phims(self):
if np.any(self.smallness == -1):
return self.invProb.reg.objfcts[0](self.invProb.model)
else:
return (
self.pgi_smallness(self.invProb.model, externalW=self.WeightsInTarget,)
/ self.CLnormalizedConstant
)
    def ThetaTarget(self):
        """Return the largest relative change of the GMM parameters (means,
        covariances, proportions) with respect to the reference GMM, measured
        with ``distance_norm``. Used to decide whether the learned
        petrophysical distribution has stabilized."""
        maxdiff = 0.0
        for i in range(self.invProb.reg.gmm.n_components):
            # relative change of the means of component i
            meandiff = np.linalg.norm(
                (self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.means_[i])
                / self.invProb.reg.gmmref.means_[i],
                ord=self.distance_norm,
            )
            maxdiff = np.maximum(maxdiff, meandiff)
            if (
                self.invProb.reg.gmm.covariance_type == "full"
                or self.invProb.reg.gmm.covariance_type == "spherical"
            ):
                # per-component covariance matrices
                covdiff = np.linalg.norm(
                    (
                        self.invProb.reg.gmm.covariances_[i]
                        - self.invProb.reg.gmmref.covariances_[i]
                    )
                    / self.invProb.reg.gmmref.covariances_[i],
                    ord=self.distance_norm,
                )
            else:
                # tied covariance: a single matrix shared by all components
                covdiff = np.linalg.norm(
                    (
                        self.invProb.reg.gmm.covariances_
                        - self.invProb.reg.gmmref.covariances_
                    )
                    / self.invProb.reg.gmmref.covariances_,
                    ord=self.distance_norm,
                )
            maxdiff = np.maximum(maxdiff, covdiff)
            # relative change of the mixing proportion of component i
            pidiff = np.linalg.norm(
                [
                    (
                        self.invProb.reg.gmm.weights_[i]
                        - self.invProb.reg.gmmref.weights_[i]
                    )
                    / self.invProb.reg.gmmref.weights_[i]
                ],
                ord=self.distance_norm,
            )
            maxdiff = np.maximum(maxdiff, pidiff)
        return maxdiff
    def endIter(self):
        """Evaluate the three stopping criteria at the end of an iteration:
        geophysical data misfits (DM), smallness misfit (CL), and GMM
        parameter stability (DP). All three must hold to stop the inversion.
        """
        self.AllStop = False
        self.DM = False
        self.CL = True
        self.DP = True
        # one data-misfit value per objective in the (combo) misfit
        self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]]
        self.targetlist = np.r_[
            [dm < tgt for dm, tgt in zip(self.dmlist, self.DMtarget)]
        ]
        if np.all(self.targetlist):
            self.DM = True
        # smallness criterion only applies when a PGI term is present
        if self.TriggerSmall and np.any(self.smallness != -1):
            if self.phims() > self.CLtarget:
                self.CL = False
        if self.TriggerTheta:
            if self.ThetaTarget() > self.ToleranceTheta:
                self.DP = False
        self.AllStop = self.DM and self.CL and self.DP
        if self.verbose:
            message = "geophys. misfits: " + "; ".join(
                map(
                    str,
                    [
                        "{0} (target {1} [{2}])".format(val, tgt, cond)
                        for val, tgt, cond in zip(
                            np.round(self.dmlist, 1),
                            np.round(self.DMtarget, 1),
                            self.targetlist,
                        )
                    ],
                )
            )
            if self.TriggerSmall:
                message += " | smallness misfit: {0:.1f} (target: {1:.1f} [{2}])".format(
                    self.phims(), self.CLtarget, self.CL
                )
            if self.TriggerTheta:
                message += " | GMM parameters within tolerance: {}".format(self.DP)
            print(message)
        if self.AllStop:
            # signal the optimizer to terminate after the current iteration
            self.opt.stopNextIteration = True
            if self.verbose:
                print("All targets have been reached")
class SaveEveryIteration(InversionDirective):
    """SaveEveryIteration
    This directive saves an array at each iteration. The default
    directory is the current directory and the models are saved as
    ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy``
    """

    # Output directory; validated (and created) on assignment below.
    directory = properties.String("directory to save results in", default=".")
    # Root of every saved file name; a run timestamp is appended lazily.
    name = properties.String(
        "root of the filename to be saved", default="InversionModel"
    )

    @properties.validator("directory")
    def _ensure_abspath(self, change):
        # Expand '~' and relative paths; create the directory if missing.
        val = change["value"]
        fullpath = os.path.abspath(os.path.expanduser(val))
        if not os.path.isdir(fullpath):
            os.mkdir(fullpath)

    @property
    def fileName(self):
        # Built once per run ("<name>-YYYY-MM-DD-HH-MM") and cached, so every
        # artifact written during the same run shares one timestamp.
        if getattr(self, "_fileName", None) is None:
            from datetime import datetime

            self._fileName = "{0!s}-{1!s}".format(
                self.name, datetime.now().strftime("%Y-%m-%d-%H-%M")
            )
        return self._fileName
class SaveModelEveryIteration(SaveEveryIteration):
    """SaveModelEveryIteration

    Write the current model vector to disk as a numpy ``.npy`` file at the
    end of every iteration. Files land in ``directory`` and are named
    ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy``.
    """

    def initialize(self):
        """Announce the naming pattern the saved models will follow."""
        print(
            "SimPEG.SaveModelEveryIteration will save your models as: "
            "'{0!s}###-{1!s}.npy'".format(self.directory + os.path.sep, self.fileName)
        )

    def endIter(self):
        """Dump the optimizer's current model (``opt.xc``) for this iteration."""
        destination = "{0!s}{1:03d}-{2!s}".format(
            self.directory + os.path.sep, self.opt.iter, self.fileName
        )
        np.save(destination, self.opt.xc)
class SaveOutputEveryIteration(SaveEveryIteration):
    """Log beta, data misfit and each regularization term to a text file at
    every iteration, and provide helpers to reload and plot the histories.
    """

    header = None
    save_txt = True  # write the .txt log in addition to the in-memory lists
    beta = None
    phi_d = None
    phi_m = None
    phi_m_small = None
    phi_m_smooth_x = None
    phi_m_smooth_y = None
    phi_m_smooth_z = None
    phi = None

    def initialize(self):
        """Reset the in-memory histories and (optionally) start the log file."""
        if self.save_txt is True:
            print(
                "SimPEG.SaveOutputEveryIteration will save your inversion "
                "progress as: '###-{0!s}.txt'".format(self.fileName)
            )
            # context manager guarantees the handle is released
            with open(self.fileName + ".txt", "w") as f:
                self.header = "  # beta phi_d phi_m phi_m_small phi_m_smoomth_x phi_m_smoomth_y phi_m_smoomth_z phi\n"
                f.write(self.header)
        # Create a list of each
        self.beta = []
        self.phi_d = []
        self.phi_m = []
        self.phi_m_small = []
        self.phi_m_smooth_x = []
        self.phi_m_smooth_y = []
        self.phi_m_smooth_z = []
        self.phi = []

    def endIter(self):
        """Evaluate the misfit terms for this iteration and append them to the
        in-memory lists and (optionally) the log file."""
        phi_s, phi_x, phi_y, phi_z = 0, 0, 0, 0
        if getattr(self.reg.objfcts[0], "objfcts", None) is not None:
            # combo regularization: accumulate over each sub-regularization
            for reg in self.reg.objfcts:
                phi_s += reg.objfcts[0](self.invProb.model) * reg.alpha_s
                phi_x += reg.objfcts[1](self.invProb.model) * reg.alpha_x
                if reg.regmesh.dim == 2:
                    phi_y += reg.objfcts[2](self.invProb.model) * reg.alpha_y
                elif reg.regmesh.dim == 3:
                    phi_y += reg.objfcts[2](self.invProb.model) * reg.alpha_y
                    phi_z += reg.objfcts[3](self.invProb.model) * reg.alpha_z
        elif getattr(self.reg.objfcts[0], "objfcts", None) is None:
            # single (flat) regularization
            phi_s += self.reg.objfcts[0](self.invProb.model) * self.reg.alpha_s
            phi_x += self.reg.objfcts[1](self.invProb.model) * self.reg.alpha_x
            if self.reg.regmesh.dim == 2:
                phi_y += self.reg.objfcts[2](self.invProb.model) * self.reg.alpha_y
            elif self.reg.regmesh.dim == 3:
                phi_y += self.reg.objfcts[2](self.invProb.model) * self.reg.alpha_y
                phi_z += self.reg.objfcts[3](self.invProb.model) * self.reg.alpha_z
        self.beta.append(self.invProb.beta)
        self.phi_d.append(self.invProb.phi_d)
        self.phi_m.append(self.invProb.phi_m)
        self.phi_m_small.append(phi_s)
        self.phi_m_smooth_x.append(phi_x)
        self.phi_m_smooth_y.append(phi_y)
        self.phi_m_smooth_z.append(phi_z)
        self.phi.append(self.opt.f)
        if self.save_txt:
            with open(self.fileName + ".txt", "a") as f:
                f.write(
                    " {0:3d} {1:1.4e} {2:1.4e} {3:1.4e} {4:1.4e} {5:1.4e} "
                    "{6:1.4e} {7:1.4e} {8:1.4e}\n".format(
                        self.opt.iter,
                        self.beta[self.opt.iter - 1],
                        self.phi_d[self.opt.iter - 1],
                        self.phi_m[self.opt.iter - 1],
                        self.phi_m_small[self.opt.iter - 1],
                        self.phi_m_smooth_x[self.opt.iter - 1],
                        self.phi_m_smooth_y[self.opt.iter - 1],
                        self.phi_m_smooth_z[self.opt.iter - 1],
                        self.phi[self.opt.iter - 1],
                    )
                )

    def _update_target_index(self):
        """Set ``target_misfit`` and ``i_target`` (first iteration index whose
        phi_d is at or below the target, or None if never reached)."""
        self.target_misfit = self.invProb.dmisfit.simulation.survey.nD / 2.0
        self.i_target = None
        if self.invProb.phi_d < self.target_misfit:
            i_target = 0
            while self.phi_d[i_target] > self.target_misfit:
                i_target += 1
            self.i_target = i_target

    def load_results(self):
        """Reload the histories from the log file written by this directive.

        Column layout (matching the write format): 0 iter, 1 beta, 2 phi_d,
        3 phi_m, 4 phi_m_small, 5 phi_m_smooth_x, 6 phi_m_smooth_y,
        7 phi_m_smooth_z, 8 phi.
        """
        results = np.loadtxt(self.fileName + str(".txt"), comments="#")
        self.beta = results[:, 1]
        self.phi_d = results[:, 2]
        self.phi_m = results[:, 3]
        self.phi_m_small = results[:, 4]
        self.phi_m_smooth_x = results[:, 5]
        self.phi_m_smooth_y = results[:, 6]
        self.phi_m_smooth_z = results[:, 7]
        self.phi_m_smooth = (
            self.phi_m_smooth_x + self.phi_m_smooth_y + self.phi_m_smooth_z
        )
        # BUG FIX: the total objective `phi` lives in column 8; the original
        # read column 7, which duplicated phi_m_smooth_z.
        self.f = results[:, 8]
        self._update_target_index()

    def plot_misfit_curves(
        self,
        fname=None,
        dpi=300,
        plot_small_smooth=False,
        plot_phi_m=True,
        plot_small=False,
        plot_smooth=False,
    ):
        """Plot phi_d (and optionally phi_m and its components) vs. iteration."""
        self._update_target_index()
        fig = plt.figure(figsize=(5, 2))
        ax = plt.subplot(111)
        ax_1 = ax.twinx()
        ax.semilogy(
            np.arange(len(self.phi_d)), self.phi_d, "k-", lw=2, label="$\phi_d$"
        )
        if plot_phi_m:
            ax_1.semilogy(
                np.arange(len(self.phi_d)), self.phi_m, "r", lw=2, label="$\phi_m$"
            )
        if plot_small_smooth or plot_small:
            ax_1.semilogy(
                np.arange(len(self.phi_d)), self.phi_m_small, "ro", label="small"
            )
        if plot_small_smooth or plot_smooth:
            ax_1.semilogy(
                np.arange(len(self.phi_d)), self.phi_m_smooth_x, "rx", label="smooth_x"
            )
            ax_1.semilogy(
                np.arange(len(self.phi_d)), self.phi_m_smooth_y, "rx", label="smooth_y"
            )
            ax_1.semilogy(
                np.arange(len(self.phi_d)), self.phi_m_smooth_z, "rx", label="smooth_z"
            )
        ax.legend(loc=1)
        ax_1.legend(loc=2)
        # horizontal line marking the target data misfit
        ax.plot(
            np.r_[ax.get_xlim()[0], ax.get_xlim()[1]],
            np.ones(2) * self.target_misfit,
            "k:",
        )
        ax.set_xlabel("Iteration")
        ax.set_ylabel("$\phi_d$")
        ax_1.set_ylabel("$\phi_m$", color="r")
        ax_1.tick_params(axis="y", which="both", colors="red")
        plt.show()
        if fname is not None:
            fig.savefig(fname, dpi=dpi)

    def plot_tikhonov_curves(self, fname=None, dpi=200):
        """Plot the Tikhonov trade-off curves: phi_d(beta), phi_m(beta) and
        phi_d(phi_m), with the target-misfit iteration starred if reached."""
        self._update_target_index()
        fig = plt.figure(figsize=(5, 8))
        ax1 = plt.subplot(311)
        ax2 = plt.subplot(312)
        ax3 = plt.subplot(313)
        ax1.plot(self.beta, self.phi_d, "k-", lw=2, ms=4)
        ax1.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max())
        ax1.set_xlabel("$\\beta$", fontsize=14)
        ax1.set_ylabel("$\phi_d$", fontsize=14)
        ax2.plot(self.beta, self.phi_m, "k-", lw=2)
        ax2.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max())
        ax2.set_xlabel("$\\beta$", fontsize=14)
        ax2.set_ylabel("$\phi_m$", fontsize=14)
        ax3.plot(self.phi_m, self.phi_d, "k-", lw=2)
        ax3.set_xlim(np.hstack(self.phi_m).min(), np.hstack(self.phi_m).max())
        ax3.set_xlabel("$\phi_m$", fontsize=14)
        ax3.set_ylabel("$\phi_d$", fontsize=14)
        if self.i_target is not None:
            ax1.plot(self.beta[self.i_target], self.phi_d[self.i_target], "k*", ms=10)
            ax2.plot(self.beta[self.i_target], self.phi_m[self.i_target], "k*", ms=10)
            ax3.plot(self.phi_m[self.i_target], self.phi_d[self.i_target], "k*", ms=10)
        for ax in [ax1, ax2, ax3]:
            ax.set_xscale("linear")
            ax.set_yscale("linear")
        plt.tight_layout()
        plt.show()
        if fname is not None:
            fig.savefig(fname, dpi=dpi)
class SaveOutputDictEveryIteration(SaveEveryIteration):
    """
    Saves inversion parameters at every iteration.
    """

    # Initialize the output dict
    outDict = None
    # Also write each iteration's dict to a .npz file when True.
    saveOnDisk = False

    def initialize(self):
        # Fresh dictionary, keyed by iteration number in endIter.
        self.outDict = {}
        if self.saveOnDisk:
            print(
                "SimPEG.SaveOutputDictEveryIteration will save your inversion progress as dictionary: '###-{0!s}.npz'".format(
                    self.fileName
                )
            )

    def endIter(self):
        # regCombo = ["phi_ms", "phi_msx"]
        # if self.simulation[0].mesh.dim >= 2:
        #     regCombo += ["phi_msy"]
        # if self.simulation[0].mesh.dim == 3:
        #     regCombo += ["phi_msz"]
        # Initialize the output dict
        iterDict = {}
        # Save the data.
        iterDict["iter"] = self.opt.iter
        iterDict["beta"] = self.invProb.beta
        iterDict["phi_d"] = self.invProb.phi_d
        iterDict["phi_m"] = self.invProb.phi_m
        # for label, fcts in zip(regCombo, self.reg.objfcts[0].objfcts):
        #     iterDict[label] = fcts(self.invProb.model)
        iterDict["f"] = self.opt.f
        iterDict["m"] = self.invProb.model
        iterDict["dpred"] = self.invProb.dpred
        # sparse-norm regularizations additionally expose IRLS thresholds/norms
        if hasattr(self.reg.objfcts[0], "eps_p") is True:
            iterDict["eps_p"] = self.reg.objfcts[0].eps_p
            iterDict["eps_q"] = self.reg.objfcts[0].eps_q
        if hasattr(self.reg.objfcts[0], "norms") is True:
            iterDict["lps"] = self.reg.objfcts[0].norms[0][0]
            iterDict["lpx"] = self.reg.objfcts[0].norms[0][1]
        # Save the file as a npz
        if self.saveOnDisk:
            np.savez("{:03d}-{:s}".format(self.opt.iter, self.fileName), iterDict)
        self.outDict[self.opt.iter] = iterDict
class Update_IRLS(InversionDirective):
    """Orchestrate Iteratively Re-weighted Least Squares (IRLS) for sparse /
    blocky norms: run an l2 phase (mode 1) until the starting chi-factor is
    reached, then switch to IRLS cycles (mode 2), cooling beta and the
    threshold parameters ``eps_p`` / ``eps_q`` along the way.
    """

    f_old = 0
    f_min_change = 1e-2  # minimum relative change of the IRLS functional
    beta_tol = 1e-1  # tolerance on |1 - phi_d/target| before rescaling beta
    beta_ratio_l2 = None
    prctile = 100  # percentile of |m| used to seed eps_p / eps_q
    chifact_start = 1.0
    chifact_target = 1.0
    # Solving parameter for IRLS (mode:2)
    irls_iteration = 0
    minGNiter = 1  # Gauss-Newton iterations between IRLS updates
    max_irls_iterations = properties.Integer("maximum irls iterations", default=20)
    iterStart = 0
    sphericalDomain = False
    # Beta schedule
    update_beta = properties.Bool("Update beta", default=True)
    beta_search = properties.Bool("Do a beta search", default=False)
    coolingFactor = properties.Float("Cooling factor", default=2.0)
    coolingRate = properties.Integer("Cooling rate", default=1)
    ComboObjFun = False
    mode = 1  # 1: l2 phase, 2: IRLS phase
    coolEpsOptimized = True
    coolEps_p = True
    coolEps_q = True
    floorEps_p = 1e-8
    floorEps_q = 1e-8
    coolEpsFact = 1.2
    silent = False
    fix_Jmatrix = False
    # Deprecated camelCase aliases kept for backward compatibility.
    maxIRLSiters = deprecate_property(
        max_irls_iterations,
        "maxIRLSiters",
        new_name="max_irls_iterations",
        removal_version="0.16.0",
        future_warn=True,
    )
    updateBeta = deprecate_property(
        update_beta,
        "updateBeta",
        new_name="update_beta",
        removal_version="0.16.0",
        future_warn=True,
    )
    betaSearch = deprecate_property(
        beta_search,
        "betaSearch",
        new_name="beta_search",
        removal_version="0.16.0",
        future_warn=True,
    )

    @property
    def target(self):
        # Final data-misfit target: 0.5 * (total nD) * chifact_target.
        if getattr(self, "_target", None) is None:
            nD = 0
            for survey in self.survey:
                nD += survey.nD
            self._target = nD * 0.5 * self.chifact_target
        return self._target

    @target.setter
    def target(self, val):
        self._target = val

    @property
    def start(self):
        # Misfit level at which IRLS is switched on: 0.5 * nD * chifact_start.
        if getattr(self, "_start", None) is None:
            if isinstance(self.survey, list):
                self._start = 0
                for survey in self.survey:
                    self._start += survey.nD * 0.5 * self.chifact_start
            else:
                self._start = self.survey.nD * 0.5 * self.chifact_start
        return self._start

    @start.setter
    def start(self, val):
        self._start = val

    def initialize(self):
        # Begin in l2 mode: stash the user-requested norms and solve with
        # plain l2 (2,2,2,2) norms until the starting misfit level is hit.
        if self.mode == 1:
            self.norms = []
            for reg in self.reg.objfcts:
                self.norms.append(reg.norms)
                reg.norms = np.c_[2.0, 2.0, 2.0, 2.0]
                reg.model = self.invProb.model
        # Update the model used by the regularization
        for reg in self.reg.objfcts:
            reg.model = self.invProb.model
        if self.sphericalDomain:
            self.angleScale()

    def endIter(self):
        if self.sphericalDomain:
            self.angleScale()
        # Check if misfit is within the tolerance, otherwise scale beta
        if np.all(
            [
                np.abs(1.0 - self.invProb.phi_d / self.target) > self.beta_tol,
                self.update_beta,
                self.mode != 1,
            ]
        ):
            ratio = self.target / self.invProb.phi_d
            # damp the correction: average the ratio with a fixed anchor
            if ratio > 1:
                ratio = np.mean([2.0, ratio])
            else:
                ratio = np.mean([0.75, ratio])
            self.invProb.beta = self.invProb.beta * ratio
            if np.all([self.mode != 1, self.beta_search]):
                print("Beta search step")
                # self.update_beta = False
                # Re-use previous model and continue with new beta
                self.invProb.model = self.reg.objfcts[0].model
                self.opt.xc = self.reg.objfcts[0].model
                self.opt.iter -= 1
                return
        elif np.all([self.mode == 1, self.opt.iter % self.coolingRate == 0]):
            self.invProb.beta = self.invProb.beta / self.coolingFactor
        # Evaluate the current IRLS functional (sparse-norm approximation)
        phim_new = 0
        for reg in self.reg.objfcts:
            for comp, multipier in zip(reg.objfcts, reg.multipliers):
                if multipier > 0:
                    phim_new += np.sum(
                        comp.f_m ** 2.0
                        / (comp.f_m ** 2.0 + comp.epsilon ** 2.0)
                        ** (1 - comp.norm / 2.0)
                    )
        # Update the model used by the regularization
        phi_m_last = []
        for reg in self.reg.objfcts:
            reg.model = self.invProb.model
            phi_m_last += [reg(self.invProb.model)]
        # After reaching target misfit with l2-norm, switch to IRLS (mode:2)
        if np.all([self.invProb.phi_d < self.start, self.mode == 1]):
            self.startIRLS()
        # Only update after GN iterations
        if np.all(
            [(self.opt.iter - self.iterStart) % self.minGNiter == 0, self.mode != 1]
        ):
            if self.fix_Jmatrix:
                print(">> Fix Jmatrix")
                self.invProb.dmisfit.simulation.fix_Jmatrix = True
            # Check for maximum number of IRLS cycles
            if self.irls_iteration == self.max_irls_iterations:
                if not self.silent:
                    print(
                        "Reach maximum number of IRLS cycles:"
                        + " {0:d}".format(self.max_irls_iterations)
                    )
                self.opt.stopNextIteration = True
                return
            # Cool the IRLS thresholds down to their floors
            for reg in self.reg.objfcts:
                if reg.eps_p > self.floorEps_p and self.coolEps_p:
                    reg.eps_p /= self.coolEpsFact
                    # print('Eps_p: ' + str(reg.eps_p))
                if reg.eps_q > self.floorEps_q and self.coolEps_q:
                    reg.eps_q /= self.coolEpsFact
                    # print('Eps_q: ' + str(reg.eps_q))
            # Remember the value of the norm from previous R matrices
            # self.f_old = self.reg(self.invProb.model)
            self.irls_iteration += 1
            # Reset the regularization matrices so that it is
            # recalculated for current model. Do it to all levels of comboObj
            for reg in self.reg.objfcts:
                # If comboObj, go down one more level
                for comp in reg.objfcts:
                    comp.stashedR = None
            for dmis in self.dmisfit.objfcts:
                if getattr(dmis, "stashedR", None) is not None:
                    dmis.stashedR = None
            # Compute new model objective function value
            f_change = np.abs(self.f_old - phim_new) / (self.f_old + 1e-12)
            # Check if the function has changed enough
            if np.all(
                [
                    f_change < self.f_min_change,
                    self.irls_iteration > 1,
                    np.abs(1.0 - self.invProb.phi_d / self.target) < self.beta_tol,
                ]
            ):
                print("Minimum decrease in regularization." + "End of IRLS")
                self.opt.stopNextIteration = True
                return
            self.f_old = phim_new
            self.update_beta = True
        self.invProb.phi_m_last = self.reg(self.invProb.model)

    def startIRLS(self):
        # Transition from the l2 phase into IRLS: restore the user norms,
        # seed eps_p/eps_q from the model distribution, keep the l2 model.
        if not self.silent:
            print(
                "Reached starting chifact with l2-norm regularization:"
                + " Start IRLS steps..."
            )
        self.mode = 2
        if getattr(self.opt, "iter", None) is None:
            self.iterStart = 0
        else:
            self.iterStart = self.opt.iter
        self.invProb.phi_m_last = self.reg(self.invProb.model)
        # Either use the supplied epsilon, or fix base on distribution of
        # model values
        for reg in self.reg.objfcts:
            if getattr(reg, "eps_p", None) is None:
                reg.eps_p = np.percentile(
                    np.abs(reg.mapping * reg._delta_m(self.invProb.model)), self.prctile
                )
            if getattr(reg, "eps_q", None) is None:
                reg.eps_q = np.percentile(
                    np.abs(reg.mapping * reg._delta_m(self.invProb.model)), self.prctile
                )
        # Re-assign the norms supplied by user l2 -> lp
        for reg, norms in zip(self.reg.objfcts, self.norms):
            reg.norms = norms
        # Save l2-model
        self.invProb.l2model = self.invProb.model.copy()
        # Print to screen
        for reg in self.reg.objfcts:
            if not self.silent:
                print("eps_p: " + str(reg.eps_p) + " eps_q: " + str(reg.eps_q))

    def angleScale(self):
        """
        Update the scales used by regularization for the
        different block of models
        """
        # Currently implemented for MVI-S only
        max_p = []
        for reg in self.reg.objfcts[0].objfcts:
            eps_p = reg.epsilon
            f_m = abs(reg.f_m)
            max_p += [np.max(f_m)]
        max_p = np.asarray(max_p).max()
        # angle blocks (theta, phi) are rescaled relative to the amplitude block
        max_s = [np.pi, np.pi]
        for obj, var in zip(self.reg.objfcts[1:3], max_s):
            obj.scales = np.ones(obj.scales.shape) * max_p / var

    def validate(self, directiveList):
        # check if a linear preconditioner is in the list, if not warn else
        # assert that it is listed after the IRLS directive
        dList = directiveList.dList
        self_ind = dList.index(self)
        lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList]
        if any(lin_precond_ind):
            assert lin_precond_ind.index(True) > self_ind, (
                "The directive 'UpdatePreconditioner' must be after Update_IRLS "
                "in the directiveList"
            )
        else:
            warnings.warn(
                "Without a Linear preconditioner, convergence may be slow. "
                "Consider adding `Directives.UpdatePreconditioner` to your "
                "directives list"
            )
        return True
class UpdatePreconditioner(InversionDirective):
    """
    Create a Jacobi preconditioner for the linear problem
    """

    update_every_iteration = True  #: Update every iterations if False

    def initialize(self):
        """Build and install the initial Jacobi preconditioner."""
        self.opt.approxHinv = self._build_preconditioner()

    def endIter(self):
        """Rebuild the preconditioner for the new model (unless disabled)."""
        if self.update_every_iteration is False:
            return
        self.opt.approxHinv = self._build_preconditioner()

    def _build_preconditioner(self):
        """Assemble diag(JtJ + beta * d2R)^-1 as a sparse diagonal operator.

        Factored out of ``initialize``/``endIter``, which previously held two
        near-identical copies of this code; the ``Zero`` guard (present only
        in the original ``initialize``) is now applied in both paths, so a
        regularization whose second derivative is ``Zero`` no longer breaks
        the per-iteration update.
        """
        m = self.invProb.model
        # Diagonal of the regularization Hessian
        regDiag = np.zeros_like(m)
        for reg in self.reg.objfcts:
            rdg = reg.deriv2(m)
            if not isinstance(rdg, Zero):
                regDiag += rdg.diagonal()
        # Diagonal of JtJ, either via the simulation's fast path or explicitly
        JtJdiag = np.zeros_like(m)
        for sim, dmisfit in zip(self.simulation, self.dmisfit.objfcts):
            if getattr(sim, "getJtJdiag", None) is None:
                assert getattr(sim, "getJ", None) is not None, (
                    "Simulation does not have a getJ attribute."
                    + "Cannot form the sensitivity explicitly"
                )
                JtJdiag += np.sum(np.power((dmisfit.W * sim.getJ(m)), 2), axis=0)
            else:
                JtJdiag += sim.getJtJdiag(m, W=dmisfit.W)
        diagA = JtJdiag + self.invProb.beta * regDiag
        # invert only the nonzero entries; zeros stay zero
        diagA[diagA != 0] = diagA[diagA != 0] ** -1.0
        return sdiag(diagA)
class Update_Wj(InversionDirective):
    """Build approximate-sensitivity weights with the stochastic probing
    method and store them on the regularization (``reg.wght``)."""

    k = None  # Number of probing cycles
    itr = None  # Iteration number to update Wj, or always update if None

    def endIter(self):
        # Run only on the requested iteration (or every one when itr is None).
        if self.itr is not None and self.itr != self.opt.iter:
            return
        m = self.invProb.model
        if self.k is None:
            self.k = int(self.survey.nD / 10)

        def apply_JtJ(v):
            # J^T (J v) without ever forming J explicitly
            Jv = self.simulation.Jvec(m, v)
            return self.simulation.Jtvec(m, Jv)

        estimate = diagEst(apply_JtJ, len(m), k=self.k)
        # normalize by the largest entry before storing
        self.reg.wght = estimate / max(estimate)
class UpdateSensitivityWeights(InversionDirective):
    """
    Directive to take care of re-weighting
    the non-linear problems. Assumes that the map of the regularization
    function is either Wires or Identity.
    Good for any problem where J is formed explicitly.
    """

    # Recompute the weights at every iteration when True.
    everyIter = True
    # Floor added to the normalized weights so none are exactly zero.
    threshold = 1e-12
    normalization: bool = True

    def initialize(self):
        """
        Calculate and update sensitivity
        for optimization and regularization
        """
        # Fail fast if a regularization mapping cannot broadcast the weights.
        for reg in self.reg.objfcts:
            if not isinstance(getattr(reg, "mapping"), (IdentityMap, Wires)):
                raise TypeError(
                    f"Mapping for the regularization must be of type {IdentityMap} or {Wires}. "
                    + f"Input mapping of type {type(reg.mapping)}."
                )
        self.update()

    def endIter(self):
        """
        Update inverse problem
        """
        if self.everyIter:
            self.update()

    def update(self):
        """
        Compute explicitly the main diagonal of JtJ
        """
        jtj_diag = np.zeros_like(self.invProb.model)
        m = self.invProb.model
        for sim, dmisfit in zip(self.simulation, self.dmisfit.objfcts):
            if getattr(sim, "getJtJdiag", None) is None:
                if getattr(sim, "getJ", None) is None:
                    raise AttributeError(
                        "Simulation does not have a getJ attribute."
                        + "Cannot form the sensitivity explicitly"
                    )
                # explicit fall-back: row-sum of squares of the weighted J
                jtj_diag += mkvc(np.sum((dmisfit.W * sim.getJ(m)) ** 2.0, axis=0))
            else:
                jtj_diag += sim.getJtJdiag(m, W=dmisfit.W)
        # Normalize and threshold weights
        wr = np.zeros_like(self.invProb.model)
        for reg in self.reg.objfcts:
            # map JtJ back to model space, scaled by squared cell volumes
            wr += reg.mapping.deriv(self.invProb.model).T * (
                (reg.mapping * jtj_diag) / reg.objfcts[0].regmesh.vol ** 2.
            )
        wr /= wr.max()
        wr += self.threshold
        wr **= 0.5
        for reg in self.reg.objfcts:
            reg.cell_weights = reg.mapping * wr

    def validate(self, directiveList):
        # check if a beta estimator is in the list after setting the weights
        dList = directiveList.dList
        self_ind = dList.index(self)
        beta_estimator_ind = [isinstance(d, BetaEstimate_ByEig) for d in dList]
        lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList]
        if any(beta_estimator_ind):
            assert beta_estimator_ind.index(True) > self_ind, (
                "The directive 'BetaEstimate_ByEig' must be after UpdateSensitivityWeights "
                "in the directiveList"
            )
        if any(lin_precond_ind):
            assert lin_precond_ind.index(True) > self_ind, (
                "The directive 'UpdatePreconditioner' must be after UpdateSensitivityWeights "
                "in the directiveList"
            )
        return True
class ProjectSphericalBounds(InversionDirective):
    """
    Trick for spherical coordinate system.
    Project \theta and \phi angles back to [-\pi,\pi] using
    back and forth conversion.
    spherical->cartesian->spherical
    """

    def initialize(self):
        x = self.invProb.model
        # Convert to cartesian than back to avoid over rotation
        nC = int(len(x) / 3)
        xyz = spherical2cartesian(x.reshape((nC, 3), order="F"))
        m = cartesian2spherical(xyz.reshape((nC, 3), order="F"))
        self.invProb.model = m
        # push the wrapped model to every simulation and to the optimizer
        for sim in self.simulation:
            sim.model = m
        self.opt.xc = m

    def endIter(self):
        x = self.invProb.model
        nC = int(len(x) / 3)
        # Convert to cartesian than back to avoid over rotation
        xyz = spherical2cartesian(x.reshape((nC, 3), order="F"))
        m = cartesian2spherical(xyz.reshape((nC, 3), order="F"))
        self.invProb.model = m
        # re-evaluate each regularization at the wrapped model so phi_m_last
        # stays consistent with the projected angles
        phi_m_last = []
        for reg in self.reg.objfcts:
            reg.model = self.invProb.model
            phi_m_last += [reg(self.invProb.model)]
        self.invProb.phi_m_last = phi_m_last
        for sim in self.simulation:
            sim.model = m
        self.opt.xc = m
| [
"numpy.hstack",
"properties.Bool",
"numpy.linalg.norm",
"numpy.mean",
"numpy.where",
"numpy.asarray",
"properties.Float",
"numpy.max",
"os.path.isdir",
"numpy.random.seed",
"os.mkdir",
"warnings.warn",
"numpy.maximum",
"scipy.sparse.csr_matrix.diagonal",
"os.path.expanduser",
"properti... | [((28606, 28668), 'properties.String', 'properties.String', (['"""directory to save results in"""'], {'default': '"""."""'}), "('directory to save results in', default='.')\n", (28623, 28668), False, 'import properties\n'), ((28681, 28760), 'properties.String', 'properties.String', (['"""root of the filename to be saved"""'], {'default': '"""InversionModel"""'}), "('root of the filename to be saved', default='InversionModel')\n", (28698, 28760), False, 'import properties\n'), ((28781, 28814), 'properties.validator', 'properties.validator', (['"""directory"""'], {}), "('directory')\n", (28801, 28814), False, 'import properties\n'), ((39982, 40039), 'properties.Integer', 'properties.Integer', (['"""maximum irls iterations"""'], {'default': '(20)'}), "('maximum irls iterations', default=20)\n", (40000, 40039), False, 'import properties\n'), ((40125, 40169), 'properties.Bool', 'properties.Bool', (['"""Update beta"""'], {'default': '(True)'}), "('Update beta', default=True)\n", (40140, 40169), False, 'import properties\n'), ((40188, 40238), 'properties.Bool', 'properties.Bool', (['"""Do a beta search"""'], {'default': '(False)'}), "('Do a beta search', default=False)\n", (40203, 40238), False, 'import properties\n'), ((40259, 40306), 'properties.Float', 'properties.Float', (['"""Cooling factor"""'], {'default': '(2.0)'}), "('Cooling factor', default=2.0)\n", (40275, 40306), False, 'import properties\n'), ((40325, 40370), 'properties.Integer', 'properties.Integer', (['"""Cooling rate"""'], {'default': '(1)'}), "('Cooling rate', default=1)\n", (40343, 40370), False, 'import properties\n'), ((16513, 16548), 'numpy.any', 'np.any', (['(self.dmlist < self.DMtarget)'], {}), '(self.dmlist < self.DMtarget)\n', (16519, 16548), True, 'import numpy as np\n'), ((24612, 24640), 'numpy.any', 'np.any', (['(self.smallness == -1)'], {}), '(self.smallness == -1)\n', (24618, 24640), True, 'import numpy as np\n'), ((26948, 26971), 'numpy.all', 'np.all', (['self.targetlist'], 
{}), '(self.targetlist)\n', (26954, 26971), True, 'import numpy as np\n'), ((34741, 34767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2)'}), '(figsize=(5, 2))\n', (34751, 34767), True, 'import matplotlib.pyplot as plt\n'), ((34781, 34797), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (34792, 34797), True, 'import matplotlib.pyplot as plt\n'), ((36072, 36082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36080, 36082), True, 'import matplotlib.pyplot as plt\n'), ((36539, 36565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 8)'}), '(figsize=(5, 8))\n', (36549, 36565), True, 'import matplotlib.pyplot as plt\n'), ((36580, 36596), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (36591, 36596), True, 'import matplotlib.pyplot as plt\n'), ((36611, 36627), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (36622, 36627), True, 'import matplotlib.pyplot as plt\n'), ((36642, 36658), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (36653, 36658), True, 'import matplotlib.pyplot as plt\n'), ((37763, 37781), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (37779, 37781), True, 'import matplotlib.pyplot as plt\n'), ((37790, 37800), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37798, 37800), True, 'import matplotlib.pyplot as plt\n'), ((44258, 44315), 'numpy.all', 'np.all', (['[self.invProb.phi_d < self.start, self.mode == 1]'], {}), '([self.invProb.phi_d < self.start, self.mode == 1])\n', (44264, 44315), True, 'import numpy as np\n'), ((44400, 44485), 'numpy.all', 'np.all', (['[(self.opt.iter - self.iterStart) % self.minGNiter == 0, self.mode != 1]'], {}), '([(self.opt.iter - self.iterStart) % self.minGNiter == 0, self.mode != 1]\n )\n', (44406, 44485), True, 'import numpy as np\n'), ((49870, 49903), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', 
(49883, 49903), True, 'import numpy as np\n'), ((50163, 50196), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', (50176, 50196), True, 'import numpy as np\n'), ((51054, 51087), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', (51067, 51087), True, 'import numpy as np\n'), ((51259, 51292), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', (51272, 51292), True, 'import numpy as np\n'), ((53765, 53798), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', (53778, 53798), True, 'import numpy as np\n'), ((54431, 54464), 'numpy.zeros_like', 'np.zeros_like', (['self.invProb.model'], {}), '(self.invProb.model)\n', (54444, 54464), True, 'import numpy as np\n'), ((6990, 7015), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (7004, 7015), True, 'import numpy as np\n'), ((8816, 8841), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (8830, 8841), True, 'import numpy as np\n'), ((14116, 14141), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (14130, 14141), True, 'import numpy as np\n'), ((14978, 14995), 'numpy.sum', 'np.sum', (['self.chi0'], {}), '(self.chi0)\n', (14984, 14995), True, 'import numpy as np\n'), ((16044, 16063), 'numpy.any', 'np.any', (['targetclass'], {}), '(targetclass)\n', (16050, 16063), True, 'import numpy as np\n'), ((24176, 24204), 'numpy.any', 'np.any', (['(self.smallness == -1)'], {}), '(self.smallness == -1)\n', (24182, 24204), True, 'import numpy as np\n'), ((25023, 25172), 'numpy.linalg.norm', 'np.linalg.norm', (['((self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.means_[i]) /\n self.invProb.reg.gmmref.means_[i])'], {'ord': 'self.distance_norm'}), '((self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.\n means_[i]) / self.invProb.reg.gmmref.means_[i], ord=self.distance_norm)\n', (25037, 
25172), True, 'import numpy as np\n'), ((25253, 25282), 'numpy.maximum', 'np.maximum', (['maxdiff', 'meandiff'], {}), '(maxdiff, meandiff)\n', (25263, 25282), True, 'import numpy as np\n'), ((26157, 26185), 'numpy.maximum', 'np.maximum', (['maxdiff', 'covdiff'], {}), '(maxdiff, covdiff)\n', (26167, 26185), True, 'import numpy as np\n'), ((26208, 26370), 'numpy.linalg.norm', 'np.linalg.norm', (['[(self.invProb.reg.gmm.weights_[i] - self.invProb.reg.gmmref.weights_[i]) /\n self.invProb.reg.gmmref.weights_[i]]'], {'ord': 'self.distance_norm'}), '([(self.invProb.reg.gmm.weights_[i] - self.invProb.reg.gmmref\n .weights_[i]) / self.invProb.reg.gmmref.weights_[i]], ord=self.\n distance_norm)\n', (26222, 26370), True, 'import numpy as np\n'), ((26558, 26585), 'numpy.maximum', 'np.maximum', (['maxdiff', 'pidiff'], {}), '(maxdiff, pidiff)\n', (26568, 26585), True, 'import numpy as np\n'), ((27034, 27062), 'numpy.any', 'np.any', (['(self.smallness != -1)'], {}), '(self.smallness != -1)\n', (27040, 27062), True, 'import numpy as np\n'), ((28919, 28942), 'os.path.expanduser', 'os.path.expanduser', (['val'], {}), '(val)\n', (28937, 28942), False, 'import os\n'), ((28960, 28983), 'os.path.isdir', 'os.path.isdir', (['fullpath'], {}), '(fullpath)\n', (28973, 28983), False, 'import os\n'), ((28997, 29015), 'os.mkdir', 'os.mkdir', (['fullpath'], {}), '(fullpath)\n', (29005, 29015), False, 'import os\n'), ((43054, 43096), 'numpy.all', 'np.all', (['[self.mode != 1, self.beta_search]'], {}), '([self.mode != 1, self.beta_search])\n', (43060, 43096), True, 'import numpy as np\n'), ((43441, 43504), 'numpy.all', 'np.all', (['[self.mode == 1, self.opt.iter % self.coolingRate == 0]'], {}), '([self.mode == 1, self.opt.iter % self.coolingRate == 0])\n', (43447, 43504), True, 'import numpy as np\n'), ((49354, 49512), 'warnings.warn', 'warnings.warn', (['"""Without a Linear preconditioner, convergence may be slow. 
Consider adding `Directives.UpdatePreconditioner` to your directives list"""'], {}), "(\n 'Without a Linear preconditioner, convergence may be slow. Consider adding `Directives.UpdatePreconditioner` to your directives list'\n )\n", (49367, 49512), False, 'import warnings\n'), ((11245, 11257), 'numpy.ones', 'np.ones', (['nbr'], {}), '(nbr)\n', (11252, 11257), True, 'import numpy as np\n'), ((14607, 14619), 'numpy.ones', 'np.ones', (['ndm'], {}), '(ndm)\n', (14614, 14619), True, 'import numpy as np\n'), ((20346, 20455), 'warnings.warn', 'warnings.warn', (['"""There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)"""'], {}), "(\n 'There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)'\n )\n", (20359, 20455), False, 'import warnings\n'), ((25476, 25648), 'numpy.linalg.norm', 'np.linalg.norm', (['((self.invProb.reg.gmm.covariances_[i] - self.invProb.reg.gmmref.\n covariances_[i]) / self.invProb.reg.gmmref.covariances_[i])'], {'ord': 'self.distance_norm'}), '((self.invProb.reg.gmm.covariances_[i] - self.invProb.reg.\n gmmref.covariances_[i]) / self.invProb.reg.gmmref.covariances_[i], ord=\n self.distance_norm)\n', (25490, 25648), True, 'import numpy as np\n'), ((25832, 25995), 'numpy.linalg.norm', 'np.linalg.norm', (['((self.invProb.reg.gmm.covariances_ - self.invProb.reg.gmmref.covariances_) /\n self.invProb.reg.gmmref.covariances_)'], {'ord': 'self.distance_norm'}), '((self.invProb.reg.gmm.covariances_ - self.invProb.reg.gmmref\n .covariances_) / self.invProb.reg.gmmref.covariances_, ord=self.\n distance_norm)\n', (25846, 25995), True, 'import numpy as np\n'), ((35823, 35833), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (35830, 35833), True, 'import numpy as np\n'), ((42891, 42912), 'numpy.mean', 'np.mean', (['[2.0, ratio]'], {}), '([2.0, ratio])\n', (42898, 42912), True, 'import numpy as np\n'), ((42956, 42978), 'numpy.mean', 'np.mean', (['[0.75, ratio]'], {}), '([0.75, ratio])\n', (42963, 42978), True, 
'import numpy as np\n'), ((46189, 46218), 'numpy.abs', 'np.abs', (['(self.f_old - phim_new)'], {}), '(self.f_old - phim_new)\n', (46195, 46218), True, 'import numpy as np\n'), ((48559, 48570), 'numpy.max', 'np.max', (['f_m'], {}), '(f_m)\n', (48565, 48570), True, 'import numpy as np\n'), ((48589, 48606), 'numpy.asarray', 'np.asarray', (['max_p'], {}), '(max_p)\n', (48599, 48606), True, 'import numpy as np\n'), ((16143, 16164), 'numpy.where', 'np.where', (['targetclass'], {}), '(targetclass)\n', (16151, 16164), True, 'import numpy as np\n'), ((16741, 16794), 'numpy.all', 'np.all', (['(np.r_[self.dmisfit.multipliers] > self.chimin)'], {}), '(np.r_[self.dmisfit.multipliers] > self.chimin)\n', (16747, 16794), True, 'import numpy as np\n'), ((16799, 16852), 'numpy.all', 'np.all', (['(np.r_[self.dmisfit.multipliers] < self.chimax)'], {}), '(np.r_[self.dmisfit.multipliers] < self.chimax)\n', (16805, 16852), True, 'import numpy as np\n'), ((16971, 16983), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (16977, 16983), True, 'import numpy as np\n'), ((21885, 21995), 'warnings.warn', 'warnings.warn', (['"""There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)."""'], {}), "(\n 'There is no PGI regularization. 
Smallness target is turned off (TriggerSmall flag).'\n )\n", (21898, 21995), False, 'import warnings\n'), ((36739, 36759), 'numpy.hstack', 'np.hstack', (['self.beta'], {}), '(self.beta)\n', (36748, 36759), True, 'import numpy as np\n'), ((36767, 36787), 'numpy.hstack', 'np.hstack', (['self.beta'], {}), '(self.beta)\n', (36776, 36787), True, 'import numpy as np\n'), ((36965, 36985), 'numpy.hstack', 'np.hstack', (['self.beta'], {}), '(self.beta)\n', (36974, 36985), True, 'import numpy as np\n'), ((36993, 37013), 'numpy.hstack', 'np.hstack', (['self.beta'], {}), '(self.beta)\n', (37002, 37013), True, 'import numpy as np\n'), ((37192, 37213), 'numpy.hstack', 'np.hstack', (['self.phi_m'], {}), '(self.phi_m)\n', (37201, 37213), True, 'import numpy as np\n'), ((37221, 37242), 'numpy.hstack', 'np.hstack', (['self.phi_m'], {}), '(self.phi_m)\n', (37230, 37242), True, 'import numpy as np\n'), ((42631, 42677), 'numpy.abs', 'np.abs', (['(1.0 - self.invProb.phi_d / self.target)'], {}), '(1.0 - self.invProb.phi_d / self.target)\n', (42637, 42677), True, 'import numpy as np\n'), ((43773, 43868), 'numpy.sum', 'np.sum', (['(comp.f_m ** 2.0 / (comp.f_m ** 2.0 + comp.epsilon ** 2.0) ** (1 - comp.\n norm / 2.0))'], {}), '(comp.f_m ** 2.0 / (comp.f_m ** 2.0 + comp.epsilon ** 2.0) ** (1 - \n comp.norm / 2.0))\n', (43779, 43868), True, 'import numpy as np\n'), ((48730, 48755), 'numpy.ones', 'np.ones', (['obj.scales.shape'], {}), '(obj.scales.shape)\n', (48737, 48755), True, 'import numpy as np\n'), ((17375, 17407), 'numpy.sum', 'np.sum', (['self.dmisfit.multipliers'], {}), '(self.dmisfit.multipliers)\n', (17381, 17407), True, 'import numpy as np\n'), ((29229, 29243), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29241, 29243), False, 'from datetime import datetime\n'), ((46454, 46500), 'numpy.abs', 'np.abs', (['(1.0 - self.invProb.phi_d / self.target)'], {}), '(1.0 - self.invProb.phi_d / self.target)\n', (46460, 46500), True, 'import numpy as np\n'), ((17044, 17096), 
'numpy.median', 'np.median', (['(self.DMtarget[~indx] / self.dmlist[~indx])'], {}), '(self.DMtarget[~indx] / self.dmlist[~indx])\n', (17053, 17096), True, 'import numpy as np\n'), ((17178, 17190), 'numpy.sum', 'np.sum', (['indx'], {}), '(indx)\n', (17184, 17190), True, 'import numpy as np\n'), ((24249, 24302), 'scipy.sparse.csr_matrix.diagonal', 'sp.csr_matrix.diagonal', (['self.invProb.reg.objfcts[0].W'], {}), '(self.invProb.reg.objfcts[0].W)\n', (24271, 24302), True, 'import scipy.sparse as sp\n'), ((24390, 24434), 'scipy.sparse.csr_matrix.diagonal', 'sp.csr_matrix.diagonal', (['self.pgi_smallness.W'], {}), '(self.pgi_smallness.W)\n', (24412, 24434), True, 'import scipy.sparse as sp\n'), ((12803, 12838), 'numpy.zeros_like', 'np.zeros_like', (['smallness_eigenvalue'], {}), '(smallness_eigenvalue)\n', (12816, 12838), True, 'import numpy as np\n'), ((17232, 17246), 'numpy.where', 'np.where', (['indx'], {}), '(indx)\n', (17240, 17246), True, 'import numpy as np\n'), ((27617, 27641), 'numpy.round', 'np.round', (['self.dmlist', '(1)'], {}), '(self.dmlist, 1)\n', (27625, 27641), True, 'import numpy as np\n'), ((27671, 27697), 'numpy.round', 'np.round', (['self.DMtarget', '(1)'], {}), '(self.DMtarget, 1)\n', (27679, 27697), True, 'import numpy as np\n')] |
#Laget av <NAME> og <NAME>
import numpy as np
import matplotlib.pyplot as plt
#beregner generell polynomfunksjon
def g(koeffisienter, x):
    """Evaluate a general polynomial at x.

    Coefficients run from the highest power down to the constant term,
    i.e. [a, b, c] represents a*x**2 + b*x + c. Works for scalar x as
    well as numpy arrays. An empty coefficient list yields 0.
    """
    degree_plus_one = len(koeffisienter)
    total = 0
    for i, coeff in enumerate(koeffisienter):
        total += coeff * x ** (degree_plus_one - i - 1)
    return total
#plotter generell polynomfunksjon
def plotFunc(params):
    """Draw the polynomial described by *params* over the interval [-5, 5]."""
    xs = np.linspace(-5, 5, 1000)
    plt.plot(xs, g(params, xs))
#plotter punkter (x,y)
def plotPunkt(x,y):
    """Mark the sample points (x, y); "rx" = red x-markers, no connecting line."""
    plt.plot(x,y,"rx")
# Interactive demo: the user defines a quadratic, we sample it with noise,
# then fit a polynomial to the noisy samples by least-squares regression.
print("Hei! Nå skal vi lage et andregradspolynom på formen ax\u00b2+bx+c") #\u00b2 is the unicode "superscript two"
# Read the quadratic's coefficients from the user.
a = float(input("Skriv inn din a: "))
b = float(input("Skriv inn din b: "))
c = float(input("Skriv inn din c: "))
# Show the exact quadratic the user chose.
plotFunc([a,b,c])
plt.grid()
plt.show()
N = int(input("Skriv inn antall punkter du ønsker: "))
# Pin the two endpoints, then draw the remaining N-2 x-values at random.
sample_x = [-4.5,4.5]
sample_x = np.append(sample_x, np.random.uniform(-5,5,N-2)) #sampling interval chosen as [-5,5]
sample_y = np.round(g([a,b,c],sample_x)) + np.random.uniform(-1,1,N) #round to nearest integer, then add noise in [-1,1)
# Show the noisy samples against the true quadratic.
plotPunkt(sample_x,sample_y)
plotFunc([a,b,c])
plt.grid()
plt.show()
n_regresjon = int(input("Hvilken orden skal det være på regresjonsfunksjonen?"))
f_reg = np.polyfit(sample_x,sample_y,n_regresjon) #least-squares fit of a degree-n_regresjon polynomial
# Overlay samples, the true quadratic and the fitted polynomial.
plotPunkt(sample_x,sample_y)
plotFunc([a,b,c])
plotFunc(f_reg)
plt.grid()
plt.show()
| [
"matplotlib.pyplot.grid",
"numpy.polyfit",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.random.uniform",
"matplotlib.pyplot.show"
] | [((710, 720), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (718, 720), True, 'import matplotlib.pyplot as plt\n'), ((721, 731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (729, 731), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1075), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1073, 1075), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1086), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1084, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1222), 'numpy.polyfit', 'np.polyfit', (['sample_x', 'sample_y', 'n_regresjon'], {}), '(sample_x, sample_y, n_regresjon)\n', (1189, 1222), True, 'import numpy as np\n'), ((1340, 1350), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1348, 1350), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1359, 1361), True, 'import matplotlib.pyplot as plt\n'), ((332, 356), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(1000)'], {}), '(-5, 5, 1000)\n', (343, 356), True, 'import numpy as np\n'), ((384, 403), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'plot_y'], {}), '(x, plot_y)\n', (392, 403), True, 'import matplotlib.pyplot as plt\n'), ((460, 480), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""rx"""'], {}), "(x, y, 'rx')\n", (468, 480), True, 'import matplotlib.pyplot as plt\n'), ((843, 874), 'numpy.random.uniform', 'np.random.uniform', (['(-5)', '(5)', '(N - 2)'], {}), '(-5, 5, N - 2)\n', (860, 874), True, 'import numpy as np\n'), ((957, 984), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'N'], {}), '(-1, 1, N)\n', (974, 984), True, 'import numpy as np\n')] |
import quicklb
import numpy as np
import time
import copy
class Loadbalancer():
  """Offloads per-cell computation across processors through quicklb.

  Keeps a per-cell timing weight (seconds spent in ``compute``) that the
  quicklb partitioner uses to decide which cells to ship elsewhere; the
  four ``*_object`` methods are the (de)serialization callbacks quicklb
  invokes during the data/result exchange.
  """
  def __init__(self,objects, algorithm, max_abs_li, max_rel_li, max_it):
    """
    Create a Loadbalancer object

    Parameters
    ----------
    objects: list<Cell>
      A list of objects which implement all functions specified in the
      mock **Cell** object.
    algorithm: string
      Specifies the loadbalancing algorithm, valid values are:
      - `"GREEDY"`
      - `"SORT"`
      - `"SORT2"`
    max_abs_li: float
      Maximum absolute load-imbalance for the partitioning algorithm.
      When this threshold is reached the partitioning concludes.
      `0.0` would be no load imbalance, `1.` would be a load imbalance
      of 100%.
    max_rel_li: float
      Maximum relative load-imbalance for the partitioning algorithm.
      Not used in the GREEDY partitioning scheme. When this threshold is
      reached the partitioning concludes.
    max_it: int
      Maximum number of iterations for the loadbalancing algorithm. Set this
      somewhere between 1 and the number of processors used (or larger, it is
      limited to the maximum number of processors anyway).

    Returns
    -------
    Loadbalancer:
      A very fresh loadbalancing object !
    """
    # Remote ranks rebuild cells from this class, so all cells must share it.
    self.cell_class = type(objects[0])
    self.objects = objects
    # Per-cell compute-time weight (float32 seconds); uniform until measured.
    self.weights = np.array([1.]*len(objects),dtype=np.float32)
    # Which local cells were shipped to another rank this iteration.
    self.offloaded = [False for _ in range(len(objects))]
    # Fixed byte sizes quicklb needs for its exchange buffers.
    self.object_size = len( objects[0].serialize())
    self.weight_size = len( self.weights[0].tobytes())
    self.remote_objects = []
    # Result buffers carry the serialized cell plus its measured weight.
    self.lb = quicklb.create(self.object_size, self.object_size
        + self.weight_size, len(objects), quicklb.init())
    quicklb.set_partition_algorithm(self.lb, algorithm, max_abs_li, max_rel_li, max_it)
    quicklb.info(self.lb)
  def serialize_data_object(self,buffer, ids, buffer_size, ids_size):
    # quicklb callback: pack the requested local cells into `buffer`,
    # one column per cell.
    # NOTE(review): ids appear to be 1-based (ids[i]-1 indexes self.objects)
    # — matches deserialize_result_object below; confirm against quicklb docs.
    for i in range(len(ids)):
      id = ids[i]-1
      buffer[:,i] = np.frombuffer(self.objects[id].serialize(),dtype=np.byte)
      # Mark as offloaded so iterate() skips computing this cell locally.
      self.offloaded[id] = True
    return buffer
  def deserialize_data_object(self,buffer, ids, buffer_size, ids_size):
    # quicklb callback: unpack cells received from other ranks into fresh
    # Cell instances; they are computed locally in iterate().
    for i in range(ids_size):
      self.remote_objects.append(copy.deepcopy(self.cell_class()))
      self.remote_objects[-1].deserialize(buffer[:,i])
  def serialize_result_object(self,buffer, ids, buffer_size, ids_size):
    # quicklb callback: pack each computed remote cell followed by its
    # measured compute-time weight into the same column.
    for i in range(ids_size):
      buffer[:self.object_size,i] = np.frombuffer(self.remote_objects[i].serialize(),dtype=np.byte)
      buffer[self.object_size:,i] = np.frombuffer(self.remote_weights[i],dtype=np.byte)
    return buffer
  def deserialize_result_object(self,buffer, ids, buffer_size, ids_size):
    # quicklb callback: write results of remotely-computed cells (and their
    # fresh timing weights) back into the local arrays.
    for i in range(ids_size):
      id = ids[i]-1
      self.objects[id].deserialize(buffer[:self.object_size,i])
      self.weights[id] = np.frombuffer(buffer[self.object_size:,i],dtype=np.float32)
  def iterate(self):
    """
    Perform a single iteration, with computation offloading.
    This eventually calls **compute** on every single cell,
    timing each call to refresh the load-balancing weights.

    Returns
    -------
    None
    """
    # Exchange cells: quicklb decides which cells move where and drives
    # the serialize/deserialize callbacks.
    quicklb.communicate_data(self.lb
        , lambda buffer,ids,buffer_size,ids_size:
              Loadbalancer.serialize_data_object(self,buffer,ids,buffer_size,ids_size)
        , lambda buffer,ids,buffer_size,ids_size:
              Loadbalancer.deserialize_data_object(self,buffer,ids,buffer_size,ids_size)
        )
    self.remote_weights = np.empty(len(self.objects),dtype=np.float32)
    # Compute the cells that stayed local, timing each one.
    for i in range(len(self.objects)):
      if self.offloaded[i]:
        continue
      start = time.monotonic()
      self.objects[i].compute()
      self.weights[i] = time.monotonic() - start
    # Compute the cells received from other ranks, timing each one.
    for i in range(len(self.remote_objects)):
      start = time.monotonic()
      self.remote_objects[i].compute()
      self.remote_weights[i] = time.monotonic() - start
    # Send results (and measured weights) back to the owning ranks.
    quicklb.communicate_result(self.lb
        , lambda buffer,ids,buffer_size,ids_size:
              Loadbalancer.serialize_result_object(self,buffer,ids,buffer_size,ids_size)
        , lambda buffer,ids,buffer_size,ids_size:
              Loadbalancer.deserialize_result_object(self,buffer,ids,buffer_size,ids_size)
        )
    # Restore lists for the next iteration.
    self.remote_objects = []
    self.offloaded = [False for _ in range(len(self.objects))]
  def partition(self):
    """
    Call this function to (re)-partition the cells over the processors, it is
    recommended to call this once before **iterate()**

    Returns
    -------
    None
    """
    quicklb.set_weights(self.lb,self.weights)
    quicklb.partition(self.lb)
  def partitioning_info(self,detailed=False):
    """
    Writes out partitioning info to the loadbalance.info file

    Parameters
    ----------
    detailed: bool
      When set to `True` write detailed information about every cell to
      loadbalance.info

    Returns
    -------
    None
    """
    quicklb.partitioning_info(self.lb,detailed)
| [
"quicklb.partitioning_info",
"quicklb.info",
"quicklb.init",
"time.monotonic",
"quicklb.set_weights",
"quicklb.set_partition_algorithm",
"numpy.frombuffer",
"quicklb.partition"
] | [((1778, 1865), 'quicklb.set_partition_algorithm', 'quicklb.set_partition_algorithm', (['self.lb', 'algorithm', 'max_abs_li', 'max_rel_li', 'max_it'], {}), '(self.lb, algorithm, max_abs_li, max_rel_li,\n max_it)\n', (1809, 1865), False, 'import quicklb\n'), ((1866, 1887), 'quicklb.info', 'quicklb.info', (['self.lb'], {}), '(self.lb)\n', (1878, 1887), False, 'import quicklb\n'), ((4548, 4590), 'quicklb.set_weights', 'quicklb.set_weights', (['self.lb', 'self.weights'], {}), '(self.lb, self.weights)\n', (4567, 4590), False, 'import quicklb\n'), ((4594, 4620), 'quicklb.partition', 'quicklb.partition', (['self.lb'], {}), '(self.lb)\n', (4611, 4620), False, 'import quicklb\n'), ((4945, 4989), 'quicklb.partitioning_info', 'quicklb.partitioning_info', (['self.lb', 'detailed'], {}), '(self.lb, detailed)\n', (4970, 4989), False, 'import quicklb\n'), ((1757, 1771), 'quicklb.init', 'quicklb.init', ([], {}), '()\n', (1769, 1771), False, 'import quicklb\n'), ((2603, 2655), 'numpy.frombuffer', 'np.frombuffer', (['self.remote_weights[i]'], {'dtype': 'np.byte'}), '(self.remote_weights[i], dtype=np.byte)\n', (2616, 2655), True, 'import numpy as np\n'), ((2887, 2948), 'numpy.frombuffer', 'np.frombuffer', (['buffer[self.object_size:, i]'], {'dtype': 'np.float32'}), '(buffer[self.object_size:, i], dtype=np.float32)\n', (2900, 2948), True, 'import numpy as np\n'), ((3628, 3644), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3642, 3644), False, 'import time\n'), ((3786, 3802), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3800, 3802), False, 'import time\n'), ((3701, 3717), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3715, 3717), False, 'import time\n'), ((3873, 3889), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3887, 3889), False, 'import time\n')] |
import os
import sys
import logging
import numpy as np
import json
import re
import random
work_dir = os.getcwd() # 当前路径
sys.path.extend([os.path.abspath(".."), work_dir])
from basic.basic_task import Basic_task, Task_Mode
from basic.register import register_task, find_task
from utils.build_vocab import Vocab
from utils.utils import check_dir
import torch
from torch import nn
from transformers import BertPreTrainedModel, BertConfig, BertTokenizer, BertModel
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
"""
成语完型填空式机器阅读理解任务:
模型:bert + linear
数据集:
Due to data copyright issues,please click the official link to download Chid dataset
https://drive.google.com/drive/folders/1qdcMgCuK9d93vLVYJRvaSLunHUsGf50u
数据比较大,这里只拿5000条数据训练来跑通模型。
"""
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
workdir = os.getcwd() # 当前路径
project_dir = os.path.split(workdir)[0]
class Config:
    """Hyper-parameters and file paths for the idiom-cloze task."""
    seed = 42  # random seed for reproducibility
    gpuids = "0,1"  # comma-separated GPU ids; if None, run on CPU
    nlog = 100  # log loss / metrics every `nlog` steps
    early_stop = False
    train_batch_size = 32
    eval_batch_size = 32
    epochs = 5
    lr = 5e-5  # learning rate
    do_train = True
    do_eval = True
    do_infer = False
    # task-specific hyper-parameters
    margin = 1
    max_len = 128  # maximum tokenized sequence length
    task_name = "IdiomCloze"
    # data paths (absolute paths recommended)
    train_data_path = "/workspace/data/idiom_cloze/train_data.txt"  # training set
    dev_data_path = ["/workspace/data/idiom_cloze/dev_data.txt"]  # validation sets
    test_data_path = ["/workspace/data/idiom_cloze/test_data.txt"]  # test sets
    # pretrained transformer (BERT / ALBERT / RoBERTa ...) files
    bert_model_path = "/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/pytorch_model.bin"  # pretrained weights
    model_config_path = "/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/config.json"  # model config (json)
    vocab_path = "/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/vocab.txt"  # vocabulary file (can be the pretrained model's vocab.txt)
    model_save_path = project_dir + f"/model_save/{task_name.lower()}_model"  # where the best/final model is saved
    output_path = project_dir + f"/output/{task_name.lower()}_model"  # where prediction files are written
    # idiom candidate vocabulary
    idiom_list_path = "/workspace/data/idiom_cloze/idiomList_process.txt"
# 构建模型动态计算图
class Model(BertPreTrainedModel):
    """
    Baseline model for cloze-style idiom reading comprehension:
    a BERT encoder, an idiom embedding table and a linear scorer.
    The hidden state at each sample's [MASK] position is matched against
    the embeddings of its candidate idioms.
    """
    def __init__(self, model_config, idiom_num):
        super(Model, self).__init__(model_config)
        # 768 is the dimensionality of bert-base-uncased's hidden representations
        # Load the pretrained BERT model
        self.bert = BertModel(config=model_config)
        # One learnable embedding per idiom in the candidate vocabulary.
        self.idiom_embedding = nn.Embedding(idiom_num, model_config.hidden_size)
        self.dropout = nn.Dropout(0.5)
        # Scores one (blank, candidate) interaction vector.
        self.classifier = nn.Linear(model_config.hidden_size, 1)
        self.init_weights()
    def forward(self, inputs):
        """Score the candidate idioms for the blank in each sample.

        `inputs` keys: input_ids, input_masks, token_type_ids, idiom_ids
        (candidate vocab ids, [batch, 10]), position (index of the [MASK]
        token per sample), and optionally label.
        Returns a dict with "logits" ([batch, 10]) and, when a label is
        given, "loss".
        """
        input_ids = inputs.get("input_ids", None)
        attention_mask = inputs.get("input_masks", None)
        token_type_ids = inputs.get("token_type_ids", None)
        idiom_ids = inputs.get("idiom_ids", None)
        positions = inputs.get("position", None)
        label = inputs.get("label", None)
        # input_ids [batch, max_seq_length] encoded_layer [batch, max_seq_length, hidden_state]
        sequence_outputs, pooled_outputs = self.bert(input_ids, attention_mask, token_type_ids)
        # Gather the hidden state at each sample's blank ([MASK]) position.
        blank_states = sequence_outputs[[i for i in range(len(positions))], positions]  # [batch, hidden_state]
        encoded_idiom = self.idiom_embedding(idiom_ids)  # [batch, 10, hidden_state]
        # Element-wise interaction between the blank state and every candidate.
        multiply_result = torch.einsum('abc,ac->abc', encoded_idiom, blank_states)  # [batch, 10, hidden_state]
        pooled_output = self.dropout(multiply_result)
        logits = self.classifier(pooled_output)  # [batch, 10, 1]
        logits = logits.view(-1, idiom_ids.shape[-1])  # [batch, 10]
        outputs = {
            "logits": logits,
        }
        if label is not None:
            # Batch loss via CrossEntropy over the 10 candidates.
            loss_fn = nn.CrossEntropyLoss()
            loss = loss_fn(logits, label)
            outputs["loss"] = loss
        return outputs
# 编写任务
@ register_task
class IdiomCloze(Basic_task):
    """Cloze-style idiom reading-comprehension task (ChID-like data):
    for every blank in a passage, pick the right idiom out of 10 candidates.
    """
    def __init__(self, task_config):
        super().__init__(task_config)
        self.task_config = task_config
        self.max_len = task_config.max_len
        # Model init: load the pretrained BERT config, tokenizer and weights.
        self.model_config = BertConfig.from_pretrained(self.task_config.model_config_path)
        self.tokenizer = BertTokenizer.from_pretrained(self.task_config.vocab_path, lowercase=True)
        self.idiom_vocab = Vocab(self.task_config.idiom_list_path)
        self.model = Model.from_pretrained(pretrained_model_name_or_path=self.task_config.bert_model_path,
                                           config=self.model_config, idiom_num=self.idiom_vocab.vocab_size)
        if self.task_config.gpuids is not None:
            self.model.to(self.device)
            # Single-machine multi-GPU training.
            if self.n_gpu > 1:
                self.model = nn.DataParallel(self.model)

    def evaluate(self, dataset, mode=Task_Mode.Eval, epoch=None):
        """Run the model over `dataset`.

        Returns accuracy in Eval mode (where gold labels are available),
        otherwise the list of predicted candidate indices.
        """
        data_loader = torch.utils.data.DataLoader(
            dataset,
            shuffle=False,
            batch_size=self.task_config.eval_batch_size,
            num_workers=0
        )
        self.model.eval()
        pred_labels = []
        true_labels = []
        loss_buffer = 0
        for bi, batch in enumerate(data_loader):
            outputs = self.run_one_step(batch, self.model)
            logits = outputs.pop("logits")
            prob_outputs = torch.softmax(logits, dim=1).cpu().detach().numpy()
            pred_label = np.argmax(prob_outputs, axis=1)
            pred_labels.extend(pred_label.tolist())
            if mode == Task_Mode.Eval:
                loss = outputs.pop("loss")
                loss = loss.mean()
                loss_buffer += loss.item()
                label = batch["label"].cpu()
                true_labels.extend(label.numpy().tolist())
        if mode == Task_Mode.Eval:
            total_acc = accuracy_score(true_labels, pred_labels)
            logger.info("Evaluate: epoch={}, step={}, acc = {:0.4f}".format(epoch, self.global_step, total_acc))
            return total_acc
        else:
            return pred_labels

    def train(self, dataset, valid_dataset=None):
        """Fine-tune the model; optionally evaluate (and early-stop) each epoch."""
        data_loader = torch.utils.data.DataLoader(
            dataset,
            shuffle=True,
            batch_size=self.task_config.train_batch_size,
            num_workers=0
        )
        num_train_steps = int(len(dataset) / self.task_config.train_batch_size * self.task_config.epochs)
        optimizer, scheduler = self.create_optimizer(self.model, use_scheduler=True, num_warmup_steps=1000,
                                                     num_train_steps=num_train_steps)
        self.model.train()
        loss_buffer = 0
        for epoch in range(self.task_config.epochs):
            for bi, batch in enumerate(data_loader):
                self.model.zero_grad()
                outputs = self.run_one_step(batch, self.model)
                logits = outputs.pop("logits")
                loss = outputs.pop("loss")
                # Average across GPUs when running under DataParallel.
                loss = loss.mean()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()  # update model parameters
                scheduler.step()  # update learning rate
                self.global_step += 1
                loss_buffer += loss.item()
                if self.global_step % self.task_config.nlog == 0:
                    logger.info("epoch={}, step={}, loss={:.4f}".format(epoch+1, self.global_step, loss_buffer / self.task_config.nlog))
                    loss_buffer = 0
            if valid_dataset is not None:
                eval_acc = self.evaluate(valid_dataset, mode=Task_Mode.Eval, epoch=epoch+1)
                self.model.train()
                if self.task_config.early_stop:
                    self.es(epoch, eval_acc, self.model, model_path=self.task_config.model_save_path)
                    if self.es.early_stop:
                        logger.info("********** Early stopping ********")
                        break

    def read_data(self, file, mode):
        """Convert raw jsonl lines into model-ready features.

        Each blank (`#idiom#`) in a passage becomes one sample: other blanks
        are masked with [UNK], the text is tokenized and truncated to
        `max_len` around the blank, the blank itself becomes [MASK], and the
        blank's 10 candidate idioms are mapped to idiom-vocabulary ids.
        Only the first 5000 lines are read (demo-sized subset).
        """
        dataset = []
        with open(file, "r", encoding="utf-8") as fin:
            data_id = 100000000
            lines = fin.readlines()[:5000]
            tk0 = tqdm(lines, total=len(lines))
            for line in tk0:
                cur_data = json.loads(line)
                groundTruth = cur_data["groundTruth"]
                candidates = cur_data["candidates"]
                content = cur_data["content"]
                realCount = cur_data["realCount"]
                # Number the blanks: "#idiom#" -> "#idiom1#", "#idiom2#", ...
                for i in range(realCount):
                    content = content.replace("#idiom#", f"#idiom{i+1}#", 1)
                tags = re.findall(r"#idiom\d+#", content)
                for tag in tags:
                    # Hide every other blank so only this one is predicted.
                    tmp_context = content
                    for other_tag in tags:
                        if other_tag != tag:
                            tmp_context = tmp_context.replace(other_tag, self.tokenizer.unk_token)
                    feature_id = int(tag[6: -1])  # 1-based blank number from "#idiomN#"
                    # BUGFIX: index candidates/groundTruth by this blank's own
                    # position instead of the stale loop variable `i` (which was
                    # stuck at realCount-1, giving every blank the LAST blank's
                    # candidates and answer in multi-blank passages).
                    blank_idx = feature_id - 1
                    left_part, right_part = re.split(tag, tmp_context)
                    left_ids = self.tokenizer.encode(left_part, add_special_tokens=False)
                    right_ids = self.tokenizer.encode(right_part, add_special_tokens=False)
                    half_length = int(self.max_len / 2)
                    if len(left_ids) < half_length:  # cut at tail
                        st = 0
                        ed = min(len(left_ids) + 1 + len(right_ids), self.max_len - 2)
                    elif len(right_ids) < half_length:  # cut at head
                        ed = len(left_ids) + 1 + len(right_ids)
                        st = max(0, ed - (self.max_len - 2))
                    else:  # cut at both sides
                        st = len(left_ids) + 3 - half_length
                        ed = len(left_ids) + 1 + half_length
                    text_ids = left_ids + [self.tokenizer.mask_token_id] + right_ids
                    input_ids = [self.tokenizer.cls_token_id] + text_ids[st:ed] + [self.tokenizer.sep_token_id]
                    position = input_ids.index(self.tokenizer.mask_token_id)
                    token_type_ids = [0] * len(input_ids) + [0] * (self.max_len - len(input_ids))
                    input_masks = [1] * len(input_ids) + [0] * (self.max_len - len(input_ids))
                    input_ids = input_ids + [0] * (self.max_len - len(input_ids))
                    label = candidates[blank_idx].index(groundTruth[blank_idx])
                    idiom_ids = [self.idiom_vocab.word2id[each] for each in candidates[blank_idx]]
                    assert len(input_ids) == self.max_len
                    assert len(input_masks) == self.max_len
                    assert len(token_type_ids) == self.max_len
                    # Return the processed data where the lists are converted to `torch.tensor`s
                    dataset.append({
                        'data_id': torch.tensor(data_id, dtype=torch.long),
                        'feature_id': torch.tensor(feature_id, dtype=torch.long),
                        'input_ids': torch.tensor(input_ids, dtype=torch.long),
                        'input_masks': torch.tensor(input_masks, dtype=torch.long),
                        'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
                        'idiom_ids': torch.tensor(idiom_ids, dtype=torch.long),
                        'label': torch.tensor(label, dtype=torch.long),
                        'position': torch.tensor(position, dtype=torch.long)
                    })
                data_id += 1
        return dataset
def seed_set(seed):
    """Seed all random number generators for reproducible runs.

    Seeds Python's `random` module, NumPy and PyTorch (CPU and all CUDA
    devices; the CUDA call is a no-op when no GPU is available).

    Parameters
    ----------
    seed : int
        The seed applied to every generator.
    """
    # The `random` module is used elsewhere in this project; previously it
    # was left unseeded, making runs only partially reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def run():
    """Entry point: build the task, then train / evaluate / predict as configured."""
    cfg = Config()
    check_dir([cfg.model_save_path, cfg.output_path])
    seed_set(cfg.seed)
    # Restrict visible GPUs before the task allocates any device.
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpuids
    task = find_task(cfg.task_name)(task_config=cfg)
    if cfg.do_train:
        train_set = task.read_data(cfg.train_data_path, mode=Task_Mode.Train)
        if not cfg.do_eval:
            task.train(train_set)
        else:
            # Train with per-epoch validation on the first dev file.
            dev_set = task.read_data(cfg.dev_data_path[0], mode=Task_Mode.Eval)
            task.train(train_set, valid_dataset=dev_set)
    if cfg.do_eval:
        # Reload the best saved model, then score every dev file.
        task.load_model(cfg.model_save_path)
        for dev_path in cfg.dev_data_path:
            logging.info(f"Evaluating model in {dev_path}")
            eval_set = task.read_data(dev_path, mode=Task_Mode.Eval)
            logging.info(f"dev dataset size = {len(eval_set)}")
            task.evaluate(eval_set, mode=Task_Mode.Eval)
    if cfg.do_infer:
        # Reload the best saved model, then predict on every test file.
        task.load_model(cfg.model_save_path)
        for test_path in cfg.test_data_path:
            logging.info(f"Testing model in {test_path}")
            infer_set = task.read_data(test_path, mode=Task_Mode.Infer)
            logging.info(f"test dataset size = {len(infer_set)}")
            task.evaluate(infer_set, mode=Task_Mode.Infer)
if __name__ == "__main__":
    # Run the full pipeline when invoked as a script.
    run()
"logging.getLogger",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.softmax",
"logging.info",
"re.split",
"transformers.BertModel",
"os.path.split",
"numpy.random.seed",
"torch.nn.Embedding",
"basic.register.find_task",
"json.loads",
"numpy.argmax",
"torch.einsum",
"re.findall",... | [((103, 114), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (112, 114), False, 'import os\n'), ((820, 913), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s:%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s:%(levelname)s: %(message)s', level=\n logging.INFO)\n", (839, 913), False, 'import logging\n'), ((918, 945), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (935, 945), False, 'import logging\n'), ((957, 968), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (966, 968), False, 'import os\n'), ((991, 1013), 'os.path.split', 'os.path.split', (['workdir'], {}), '(workdir)\n', (1004, 1013), False, 'import os\n'), ((12349, 12369), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (12363, 12369), True, 'import numpy as np\n'), ((12374, 12397), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (12391, 12397), False, 'import torch\n'), ((12402, 12434), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (12428, 12434), False, 'import torch\n'), ((12473, 12528), 'utils.utils.check_dir', 'check_dir', (['[config.model_save_path, config.output_path]'], {}), '([config.model_save_path, config.output_path])\n', (12482, 12528), False, 'from utils.utils import check_dir\n'), ((12636, 12663), 'basic.register.find_task', 'find_task', (['config.task_name'], {}), '(config.task_name)\n', (12645, 12663), False, 'from basic.register import register_task, find_task\n'), ((140, 161), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (155, 161), False, 'import os\n'), ((2724, 2754), 'transformers.BertModel', 'BertModel', ([], {'config': 'model_config'}), '(config=model_config)\n', (2733, 2754), False, 'from transformers import BertPreTrainedModel, BertConfig, BertTokenizer, BertModel\n'), ((2786, 2835), 'torch.nn.Embedding', 'nn.Embedding', (['idiom_num', 'model_config.hidden_size'], {}), 
'(idiom_num, model_config.hidden_size)\n', (2798, 2835), False, 'from torch import nn\n'), ((2859, 2874), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2869, 2874), False, 'from torch import nn\n'), ((2901, 2939), 'torch.nn.Linear', 'nn.Linear', (['model_config.hidden_size', '(1)'], {}), '(model_config.hidden_size, 1)\n', (2910, 2939), False, 'from torch import nn\n'), ((3729, 3785), 'torch.einsum', 'torch.einsum', (['"""abc,ac->abc"""', 'encoded_idiom', 'blank_states'], {}), "('abc,ac->abc', encoded_idiom, blank_states)\n", (3741, 3785), False, 'import torch\n'), ((4634, 4696), 'transformers.BertConfig.from_pretrained', 'BertConfig.from_pretrained', (['self.task_config.model_config_path'], {}), '(self.task_config.model_config_path)\n', (4660, 4696), False, 'from transformers import BertPreTrainedModel, BertConfig, BertTokenizer, BertModel\n'), ((4722, 4796), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['self.task_config.vocab_path'], {'lowercase': '(True)'}), '(self.task_config.vocab_path, lowercase=True)\n', (4751, 4796), False, 'from transformers import BertPreTrainedModel, BertConfig, BertTokenizer, BertModel\n'), ((4824, 4863), 'utils.build_vocab.Vocab', 'Vocab', (['self.task_config.idiom_list_path'], {}), '(self.task_config.idiom_list_path)\n', (4829, 4863), False, 'from utils.build_vocab import Vocab\n'), ((5350, 5466), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'self.task_config.eval_batch_size', 'num_workers': '(0)'}), '(dataset, shuffle=False, batch_size=self.\n task_config.eval_batch_size, num_workers=0)\n', (5377, 5466), False, 'import torch\n'), ((6593, 6709), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': 'self.task_config.train_batch_size', 'num_workers': '(0)'}), '(dataset, shuffle=True, batch_size=self.\n task_config.train_batch_size, num_workers=0)\n', (6620, 
6709), False, 'import torch\n'), ((4229, 4250), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4248, 4250), False, 'from torch import nn\n'), ((5233, 5260), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {}), '(self.model)\n', (5248, 5260), False, 'from torch import nn\n'), ((5884, 5915), 'numpy.argmax', 'np.argmax', (['prob_outputs'], {'axis': '(1)'}), '(prob_outputs, axis=1)\n', (5893, 5915), True, 'import numpy as np\n'), ((6292, 6332), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (6306, 6332), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((13159, 13206), 'logging.info', 'logging.info', (['f"""Evaluating model in {dev_path}"""'], {}), "(f'Evaluating model in {dev_path}')\n", (13171, 13206), False, 'import logging\n'), ((13526, 13571), 'logging.info', 'logging.info', (['f"""Testing model in {test_path}"""'], {}), "(f'Testing model in {test_path}')\n", (13538, 13571), False, 'import logging\n'), ((8903, 8919), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (8913, 8919), False, 'import json\n'), ((9265, 9299), 're.findall', 're.findall', (['"""#idiom\\\\d+#"""', 'content'], {}), "('#idiom\\\\d+#', content)\n", (9275, 9299), False, 'import re\n'), ((9655, 9681), 're.split', 're.split', (['tag', 'tmp_context'], {}), '(tag, tmp_context)\n', (9663, 9681), False, 'import re\n'), ((11530, 11569), 'torch.tensor', 'torch.tensor', (['data_id'], {'dtype': 'torch.long'}), '(data_id, dtype=torch.long)\n', (11542, 11569), False, 'import torch\n'), ((11609, 11651), 'torch.tensor', 'torch.tensor', (['feature_id'], {'dtype': 'torch.long'}), '(feature_id, dtype=torch.long)\n', (11621, 11651), False, 'import torch\n'), ((11690, 11731), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (11702, 11731), False, 'import torch\n'), ((11772, 11815), 'torch.tensor', 
'torch.tensor', (['input_masks'], {'dtype': 'torch.long'}), '(input_masks, dtype=torch.long)\n', (11784, 11815), False, 'import torch\n'), ((11859, 11905), 'torch.tensor', 'torch.tensor', (['token_type_ids'], {'dtype': 'torch.long'}), '(token_type_ids, dtype=torch.long)\n', (11871, 11905), False, 'import torch\n'), ((11944, 11985), 'torch.tensor', 'torch.tensor', (['idiom_ids'], {'dtype': 'torch.long'}), '(idiom_ids, dtype=torch.long)\n', (11956, 11985), False, 'import torch\n'), ((12020, 12057), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.long'}), '(label, dtype=torch.long)\n', (12032, 12057), False, 'import torch\n'), ((12095, 12135), 'torch.tensor', 'torch.tensor', (['position'], {'dtype': 'torch.long'}), '(position, dtype=torch.long)\n', (12107, 12135), False, 'import torch\n'), ((5807, 5835), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (5820, 5835), False, 'import torch\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import copy
from dace import properties, symbolic
import dace.library
import dace.sdfg.nodes
from dace.sdfg import SDFG, SDFGState
from dace import memlet as mm, data as dt
from dace.transformation.transformation import ExpandTransformation
from dace.libraries.blas.nodes.matmul import _get_matmul_operands
from dace.libraries.blas import blas_helpers
from dace.frontend.common import op_repository as oprepo
from dace.libraries.blas import environments
import numpy as np
import warnings
@dace.library.expansion
class ExpandGemvPure(ExpandTransformation):
    """Platform-agnostic expansion of GEMV (``y = alpha * op(A) @ x
    [+ beta * y]``) into a nested SDFG of plain maps and tasklets, with no
    external library dependencies."""

    environments = []

    @staticmethod
    def expansion(node, parent_state, parent_sdfg, **kwargs):
        """Expand the Gemv library node into a pure-dataflow nested SDFG.

        :param node: The Gemv library node to expand.
        :param parent_state: State that contains the node.
        :param parent_sdfg: SDFG that contains the node.
        :return: The nested SDFG implementing the matrix-vector product.
        :raises NotImplementedError: If any operand is vectorized.
        :raises SyntaxError: If matrix and vector dimensions do not match.
        :raises ValueError: If A and x reside in different storage types.
        """
        node.validate(parent_sdfg, parent_state)
        sdfg = dace.SDFG(node.label + "_sdfg")
        # Gather descriptors, shapes and strides of A, x and y from the edges
        # connected to the library node in the parent graph.
        ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x,
                                                       shape_x, strides_x),
         (edge_y, outer_array_y, shape_y,
          strides_y)) = _get_matmul_operands(node,
                                              parent_state,
                                              parent_sdfg,
                                              name_lhs="_A",
                                              name_rhs="_x",
                                              name_out="_y")
        dtype_a = outer_array_a.dtype.type
        dtype_x = outer_array_x.dtype.type
        dtype_y = outer_array_y.dtype.type
        if outer_array_a.dtype.veclen > 1 or outer_array_x.dtype.veclen > 1:
            raise NotImplementedError("Vectorization for pure GEMV NYI.")
        # Interpret A's shape as transposed (if requested) before validating
        # the inner-dimension match against x.
        if node.transA:
            trans_shape_a = list(reversed(shape_a))
        else:
            trans_shape_a = shape_a
        if trans_shape_a[1] != shape_x[0]:
            raise SyntaxError(
                "Matrix-vector product size mismatch: {} vs. {}".format(
                    trans_shape_a[1], shape_x[0]))
        N, M = trans_shape_a[0], trans_shape_a[1]
        if outer_array_a.storage != outer_array_x.storage:
            raise ValueError("Input matrices must have same storage")
        storage = outer_array_a.storage
        # Mirror the outer arrays (same shapes/strides) inside the nested SDFG.
        _, array_a = sdfg.add_array("_A",
                                    shape_a,
                                    dtype_a,
                                    strides=strides_a,
                                    storage=storage)
        _, array_x = sdfg.add_array("_x",
                                    shape_x,
                                    dtype_x,
                                    strides=strides_x,
                                    storage=storage)
        _, array_y = sdfg.add_array("_y",
                                    shape_y,
                                    dtype_y,
                                    strides=strides_y,
                                    storage=storage)
        mul_program = "__out = {} * __A * __x".format(node.alpha)
        init_state = sdfg.add_state(node.label + "_initstate")
        state = sdfg.add_state_after(init_state, node.label + "_state")
        # With beta == 0 we accumulate directly into _y; otherwise accumulate
        # into a temporary and add beta * _y in a separate map afterwards.
        if node.beta == 0:
            mul_out, mul_out_array = "_y", array_y
            output_nodes = None
        else:
            mul_out, mul_out_array = tmp, array_tmp = sdfg.add_temp_transient(
                shape_y, dtype_y, storage=storage)
            access_tmp = state.add_read(tmp)
            output_nodes = {mul_out: access_tmp}
        # Initialization map: zero out the accumulation target.
        init_state.add_mapped_tasklet(
            "gemv_init", {
                "_o%d" % i: "0:%s" % symbolic.symstr(d)
                for i, d in enumerate(shape_y)
            }, {},
            "out = 0", {
                "out":
                dace.Memlet("{}[{}]".format(
                    mul_out, ",".join(["_o%d" % i
                                       for i in range(len(shape_y))])))
            },
            external_edges=True)
        # Multiplication map: accumulate alpha * A[i, j] * x[j] into the
        # target through a sum write-conflict resolution.
        state.add_mapped_tasklet("_GEMV_", {
            "__i%d" % i: "0:%s" % s
            for i, s in enumerate([N, M])
        }, {
            "__A":
            dace.Memlet(
                "_A[{}]".format("__i1, __i0" if node.transA else "__i0, __i1")),
            "__x":
            dace.Memlet("_x[__i1]")
        },
                                 mul_program, {
                                     "__out":
                                     dace.Memlet(f"{mul_out}[__i0]",
                                                 wcr="lambda x, y: x + y")
                                 },
                                 external_edges=True,
                                 output_nodes=output_nodes)
        add_program = "__y_out = ({} * __y_in) + __tmp".format(node.beta)
        memlet_idx = "__i"
        # Addition map (only when beta != 0): y = beta * y + tmp.
        if node.beta != 0:
            state.add_mapped_tasklet("_Add_", {"__i": "0:{}".format(N)}, {
                "__y_in": dace.Memlet(f"_y[{memlet_idx}]"),
                "__tmp": dace.Memlet(f"{mul_out}[__i]"),
            },
                                     add_program,
                                     {"__y_out": dace.Memlet("_y[__i]")},
                                     external_edges=True,
                                     input_nodes={mul_out: access_tmp})
        return sdfg
@dace.library.expansion
class ExpandGemvFpgaAccumulate(ExpandTransformation):
    """
    This FPGA-oriented expansion iterates over the input matrix A in simple
    row-major order, with optional tiling in both dimensions, where the tiles
    are also traversed in simple row-major order. This means that y is only
    written once, but x is read for every tile in the y-dimension.

    The implementation requires accumulation on the output, and does NOT assume
    native accumulation for the given data type. Instead it uses multiple
    partial sums to ensure that II=1, and only writes the final accumulated
    value once it has been combined from the partial sums.

    This works for both transposed and non-transposed A, but vectorization is
    only implemented for non-transposed A.
    """

    # The above corresponds to gemv_v1 in FBLAS

    environments = []

    @staticmethod
    def expansion(node,
                  parent_state,
                  parent_sdfg,
                  tile_size_x=None,
                  tile_size_y=None,
                  num_partial_sums=16):
        """
        :param node: Node to expand.
        :param parent_state: State that the node is in.
        :param parent_sdfg: SDFG that the node is in.
        :param tile_size_x: Tile size along the dimension of the vector x. If
                            set to None, no tiling is used, corresponding to
                            setting the tile size equal to the full size of x.
        :param tile_size_y: Tile size along the dimension of the vector y. If
                            set to None, no tiling is used, corresponding to
                            setting the tile size equal to the full size of y.
        :param num_partial_sums: The number of distinct registers to accumulate
                                 contributions to the final sum into. Should be
                                 a power of two, and should be higher than the
                                 latency of adding two numbers of the given
                                 data type.
        :return: Nested SDFG implementing the FPGA GEMV.
        :raises NotImplementedError: For vectorized transposed A, or beta != 0.
        """
        node.validate(parent_sdfg, parent_state)

        # Look up the data descriptors of the operands in the parent SDFG.
        for e in parent_state.in_edges(node):
            if e.dst_conn == "_A":
                desc_a = parent_sdfg.arrays[e.data.data]
            elif e.dst_conn == "_x":
                desc_x = parent_sdfg.arrays[e.data.data]
        for e in parent_state.out_edges(node):
            if e.src_conn == "_y":
                desc_y = parent_sdfg.arrays[e.data.data]

        sdfg = dace.SDFG("gemv")
        state = sdfg.add_state("gemv")

        alpha = node.alpha
        beta = node.beta

        # Create local versions of input data nodes
        desc_a = desc_a.clone()
        desc_a.transient = False
        sdfg.add_datadesc("_A", desc_a)
        desc_x = desc_x.clone()
        desc_x.transient = False
        sdfg.add_datadesc("_x", desc_x)
        desc_y = desc_y.clone()
        desc_y.transient = False
        sdfg.add_datadesc("_y", desc_y)

        if node.transA and desc_a.dtype.veclen > 1:
            raise NotImplementedError(
                "Vectorization not implemented for transposed A.")

        # Create accesses
        read_a = state.add_read("_A")
        read_x = state.add_read("_x")
        if beta != 0:
            read_y = state.add_read("_y")
        write_y = state.add_write("_y")

        # Default to a single tile covering the full extent per dimension.
        size_x = desc_x.shape[0]
        size_y = desc_y.shape[0]
        if tile_size_x is None:
            tile_size_x = size_x
        if tile_size_y is None:
            tile_size_y = size_y
        num_tiles_y = f"{size_y}/{tile_size_y}"
        num_tiles_x = f"{size_x}/{tile_size_x}"

        veclen = desc_a.dtype.veclen

        # Create tile map
        y_tile_entry, y_tile_exit = state.add_map(
            "y_tiles", {"ty": f"0:{num_tiles_y}"},
            schedule=dace.ScheduleType.FPGA_Device)
        x_tile_entry, x_tile_exit = state.add_map(
            "x_tiles", {"tx": f"0:{num_tiles_x}"},
            schedule=dace.ScheduleType.FPGA_Device)

        # Create y map
        y_entry, y_exit = state.add_map("y", {"iy": f"0:{tile_size_y}"},
                                        schedule=dace.ScheduleType.FPGA_Device)

        # Create x map
        x_entry, x_exit = state.add_map("x", {"ix": f"0:{tile_size_x}"},
                                        schedule=dace.ScheduleType.FPGA_Device)

        # Local buffer of x
        sdfg.add_array("x_local", (tile_size_x, ),
                       desc_x.dtype,
                       storage=dace.StorageType.FPGA_Local,
                       transient=True)
        x_local_access = state.add_read("x_local")

        # beta != 0 requires reading y back in, which this variant does not
        # support yet.
        if beta != 0:
            raise NotImplementedError("Not yet implemented.")

        multiply_tasklet = state.add_tasklet("multiply", {"A_in", "x_in"},
                                             {f"product": desc_a.dtype},
                                             "product = A_in * x_in")

        # A is indexed according to transposition (streams read index 0).
        if isinstance(desc_a, dt.Stream):
            subset = "0"
        elif node.transA:
            subset = f"tx * {tile_size_x} + ix, ty * {tile_size_y} + iy"
        else:
            subset = f"ty * {tile_size_y} + iy, tx * {tile_size_x} + ix"
        state.add_memlet_path(read_a,
                              y_tile_entry,
                              x_tile_entry,
                              y_entry,
                              x_entry,
                              multiply_tasklet,
                              dst_conn="A_in",
                              memlet=dace.Memlet(f"_A[{subset}]"))
        # Buffer the current tile of x once per (ty, tx) tile pair.
        read_x_entry, read_x_exit = state.add_map(
            "read_x", {"ix": f"0:{tile_size_x}"},
            schedule=dace.ScheduleType.FPGA_Device)
        subset = ("0" if isinstance(desc_x, dt.Stream) else
                  f"tx*{tile_size_x} + ix")
        read_x_tasklet = state.add_tasklet("read_x", {"x_memory"}, {"x_buffer"},
                                           "x_buffer = x_memory")
        state.add_memlet_path(read_x,
                              y_tile_entry,
                              x_tile_entry,
                              read_x_entry,
                              read_x_tasklet,
                              dst_conn="x_memory",
                              memlet=dace.Memlet(f"_x[{subset}]"))
        state.add_memlet_path(read_x_tasklet,
                              read_x_exit,
                              x_local_access,
                              src_conn="x_buffer",
                              memlet=dace.Memlet(f"x_local[ix]"))
        state.add_memlet_path(x_local_access,
                              y_entry,
                              x_entry,
                              multiply_tasklet,
                              dst_conn="x_in",
                              memlet=dace.Memlet(f"x_local[ix]"))

        # Write to buffer
        sdfg.add_array("product_vector", (1, ),
                       desc_a.dtype,
                       transient=True,
                       storage=dace.StorageType.FPGA_Local)
        product_vector = state.add_access("product_vector")
        state.add_memlet_path(multiply_tasklet,
                              product_vector,
                              src_conn="product",
                              memlet=dace.Memlet(f"product_vector[0]"))

        # Vector length conversion: view the vectorized product as `veclen`
        # scalars so they can be reduced individually.
        sdfg.add_array("product_scalar", (veclen, ),
                       desc_a.dtype.base_type,
                       transient=True,
                       storage=dace.StorageType.FPGA_Local)
        product_scalar = state.add_access("product_scalar")
        state.add_memlet_path(product_vector,
                              product_scalar,
                              memlet=dace.Memlet(f"product_vector[0]",
                                                 other_subset=f"0:{veclen}"))

        # Now we need to collapse this: fully unrolled reduction over the
        # vector lanes.
        reduce_vector_entry, reduce_vector_exit = state.add_map(
            "reduce_vector", {"u": f"0:{veclen}"},
            schedule=dace.ScheduleType.FPGA_Device,
            unroll=True)

        reduce_vector_tasklet = state.add_tasklet(
            "reduce_vector", {"product_in", "acc_in"}, {"acc_out"},
            "acc_out = product_in + acc_in")
        state.add_memlet_path(product_scalar,
                              reduce_vector_entry,
                              reduce_vector_tasklet,
                              dst_conn="product_in",
                              memlet=dace.Memlet(f"{product_scalar}[u]"))

        # Add accumulation register
        sdfg.add_array("accumulate_product", (1, ),
                       desc_a.dtype.base_type,
                       transient=True,
                       storage=dace.StorageType.FPGA_Local)
        accumulate_product_read = state.add_access("accumulate_product")
        accumulate_product_write = state.add_access("accumulate_product")

        # Initialize it to zero
        init_reduce_vector_tasklet = state.add_tasklet("init_reduce_vector", {},
                                                       {"acc_out"},
                                                       "acc_out = 0")
        state.add_memlet_path(x_entry,
                              init_reduce_vector_tasklet,
                              memlet=dace.Memlet())
        state.add_memlet_path(init_reduce_vector_tasklet,
                              accumulate_product_read,
                              src_conn="acc_out",
                              memlet=dace.Memlet(f"accumulate_product[0]"))

        # Connect it to the tasklet
        state.add_memlet_path(accumulate_product_read,
                              reduce_vector_entry,
                              reduce_vector_tasklet,
                              dst_conn="acc_in",
                              memlet=dace.Memlet(f"accumulate_product[0]"))
        state.add_memlet_path(reduce_vector_tasklet,
                              reduce_vector_exit,
                              accumulate_product_write,
                              src_conn="acc_out",
                              memlet=dace.Memlet(f"accumulate_product[0]"))

        # Partial sums: distinct registers rotated via ix % num_partial_sums
        # to break the loop-carried accumulation dependency (keeps II=1).
        sdfg.add_array("partial_sums", (num_partial_sums, ),
                       desc_y.dtype,
                       storage=dace.StorageType.FPGA_Registers,
                       transient=True)
        partial_sum_read = state.add_read("partial_sums")
        partial_sum_write = state.add_access("partial_sums")

        # Output array
        sdfg.add_array("y_local", (tile_size_y, ),
                       desc_y.dtype,
                       storage=dace.StorageType.FPGA_Local,
                       transient=True)

        # Now we need to actually accumulate into a local register of y
        y_local_read = state.add_read("y_local")
        y_local_write = state.add_read("y_local")
        update_y_tasklet = state.add_tasklet(
            "update_y", {"y_in", "acc_in"}, {"acc_out"}, f"""\
prev = acc_in if ix >= {num_partial_sums} else 0
acc_out = prev + y_in""")
        state.add_memlet_path(accumulate_product_write,
                              update_y_tasklet,
                              dst_conn="y_in",
                              memlet=dace.Memlet(f"accumulate_product[0]"))
        state.add_memlet_path(
            partial_sum_read,
            x_entry,
            update_y_tasklet,
            dst_conn="acc_in",
            memlet=dace.Memlet(f"partial_sums[ix%{num_partial_sums}]"))
        state.add_memlet_path(y_tile_entry, y_local_read, memlet=dace.Memlet())
        state.add_memlet_path(y_entry, partial_sum_read, memlet=dace.Memlet())
        state.add_memlet_path(
            update_y_tasklet,
            x_exit,
            partial_sum_write,
            src_conn="acc_out",
            memlet=dace.Memlet(f"partial_sums[ix%{num_partial_sums}]"))

        # Reduce the partial sums (fully unrolled).
        reduce_sums_entry, reduce_sums_exit = state.add_map(
            "reduce_partial_sums", {"u": f"0:{num_partial_sums}"},
            schedule=dace.ScheduleType.FPGA_Device,
            unroll=True)
        reduce_sums_tasklet = state.add_tasklet(
            "reduce_partial_sums", {"sum_in", "val_in"}, {"sum_out"}, """
prev = sum_in if u > 0 else 0
sum_out = prev + val_in""")
        sdfg.add_array("accumulate_sum", (1, ),
                       desc_y.dtype,
                       transient=True,
                       storage=dace.StorageType.FPGA_Local)
        accumulate_sum_read = state.add_access("accumulate_sum")
        accumulate_sum_write = state.add_access("accumulate_sum")
        state.add_memlet_path(y_entry,
                              accumulate_sum_read,
                              memlet=dace.Memlet())
        state.add_memlet_path(accumulate_sum_read,
                              reduce_sums_entry,
                              reduce_sums_tasklet,
                              dst_conn="sum_in",
                              memlet=dace.Memlet("accumulate_sum[0]"))
        state.add_memlet_path(reduce_sums_tasklet,
                              reduce_sums_exit,
                              accumulate_sum_write,
                              src_conn="sum_out",
                              memlet=dace.Memlet("accumulate_sum[0]"))
        state.add_memlet_path(partial_sum_write,
                              reduce_sums_entry,
                              reduce_sums_tasklet,
                              dst_conn="val_in",
                              memlet=dace.Memlet("partial_sums[u]"))

        # Combine with y buffer (first x-tile seeds the buffer with zero).
        combine_tasklet = state.add_tasklet(
            "combine_y", {"val", "buffer_in"}, {"buffer_out"}, """\
prev = buffer_in if tx > 0 else 0
buffer_out = prev + val""")
        state.add_memlet_path(accumulate_sum_write,
                              combine_tasklet,
                              dst_conn="val",
                              memlet=dace.Memlet("accumulate_sum[0]"))
        state.add_memlet_path(y_local_read,
                              x_tile_entry,
                              y_entry,
                              combine_tasklet,
                              dst_conn="buffer_in",
                              memlet=dace.Memlet("y_local[iy]"))

        state.add_memlet_path(combine_tasklet,
                              y_exit,
                              x_tile_exit,
                              y_local_write,
                              src_conn="buffer_out",
                              memlet=dace.Memlet(f"y_local[iy]"))

        # Write the finished y tile back to memory once per y tile.
        subset = ("0" if isinstance(desc_y, dt.Stream) else
                  f"ty*{tile_size_y} + iy")
        write_y_entry, write_y_exit = state.add_map(
            "write_y", {"iy": f"0:{tile_size_y}"},
            schedule=dace.ScheduleType.FPGA_Device)
        write_y_tasklet = state.add_tasklet("write_y", {"y_buffer"},
                                            {"y_memory"}, "y_memory = y_buffer")
        state.add_memlet_path(y_local_write,
                              write_y_entry,
                              write_y_tasklet,
                              dst_conn="y_buffer",
                              memlet=dace.Memlet(f"y_local[iy]"))
        state.add_memlet_path(write_y_tasklet,
                              write_y_exit,
                              y_tile_exit,
                              write_y,
                              src_conn="y_memory",
                              memlet=dace.Memlet(f"_y[{subset}]"))

        return sdfg
@dace.library.expansion
class ExpandGemvFpgaTilesByColumn(ExpandTransformation):
    """
    FPGA-oriented expansion that reads the input matrix A in column-major
    order, such that consecutive values are accumulated into different
    registers, avoiding a loop-carried dependency due to accumulation.

    The matrix can optionally be tiled, where the tiles will be traversed in
    row-major order in order to bound the size of the output buffer to the tile
    size. The tile size on y must be larger than the latency of addition for
    the given data type.

    This expansion supports both transposed A and non-transposed A, but
    vectorization is only implemented for transposed A.
    """

    # This corresponds to gemv_v2 in FBLAS

    environments = []

    @staticmethod
    def expansion(node, state, sdfg, tile_size_x=None, tile_size_y=None):
        """
        :param node: Node to expand.
        :param state: State that the node is in.
        :param sdfg: SDFG that the node is in.
        :param tile_size_x: Tile size along the dimension of the vector x. If
                            set to None, no tiling is used, corresponding to
                            setting the tile size equal to the full size of x.
        :param tile_size_y: Tile size along the dimension of the vector y. If
                            set to None, no tiling is used, corresponding to
                            setting the tile size equal to the full size of y.
        :return: Nested SDFG implementing the FPGA GEMV.
        :raises NotImplementedError: For vectorized non-transposed A.
        """
        node.validate(sdfg, state)

        # Look up operand descriptors in the parent SDFG.
        for e in state.in_edges(node):
            if e.dst_conn == "_A":
                desc_a = sdfg.arrays[e.data.data]
            elif e.dst_conn == "_x":
                desc_x = sdfg.arrays[e.data.data]
        for e in state.out_edges(node):
            if e.src_conn == "_y":
                desc_y = sdfg.arrays[e.data.data]

        # NOTE: From here on, ``sdfg``/``state`` refer to the newly created
        # nested SDFG, shadowing the parent arguments.
        sdfg = dace.SDFG("gemv")
        state = sdfg.add_state("gemv")

        alpha = node.alpha
        beta = node.beta

        # Create local versions of input data nodes
        desc_a = desc_a.clone()
        desc_a.transient = False
        sdfg.add_datadesc("_A", desc_a)
        desc_x = desc_x.clone()
        desc_x.transient = False
        sdfg.add_datadesc("_x", desc_x)
        desc_y = desc_y.clone()
        desc_y.transient = False
        sdfg.add_datadesc("_y", desc_y)

        if not node.transA and desc_a.dtype.veclen > 1:
            raise NotImplementedError(
                "Vectorization not implemented for non-transposed A.")

        # Create accesses
        read_a = state.add_read("_A")
        read_x = state.add_read("_x")
        if beta != 0:
            read_y = state.add_read("_y")
        write_y = state.add_write("_y")

        # Default to a single tile covering the full extent per dimension.
        size_x = desc_x.shape[0]
        size_y = desc_y.shape[0]
        if tile_size_x is None:
            tile_size_x = size_x
        if tile_size_y is None:
            tile_size_y = size_y
        num_tiles_y = f"{size_y}/{tile_size_y}"
        num_tiles_x = f"{size_x}/{tile_size_x}"

        # Create y tile map
        y_tile_entry, y_tile_exit = state.add_map(
            "y_tiles", {"ty": f"0:{num_tiles_y}"},
            schedule=dace.ScheduleType.FPGA_Device)

        # Create buffer
        sdfg.add_array("y_local", (tile_size_y, ),
                       desc_y.dtype,
                       storage=dace.StorageType.FPGA_Local,
                       transient=True)
        y_local = state.add_access("y_local")
        y_local_write = state.add_access("y_local")

        # Initialize buffer: either beta * y (beta != 0) or zeros.
        init_entry, init_exit = state.add_map(
            "init", {"iy": f"0:{tile_size_y}"},
            schedule=dace.ScheduleType.FPGA_Device)
        if beta != 0:
            # Seed the output buffer with beta * y.
            if isinstance(desc_y, dt.Stream):
                subset = "0"
            else:
                subset = f"ty*{tile_size_y}+iy"
            init_tasklet = state.add_tasklet(
                "init", {"y_in"}, {"y_out"},
                f"y_out = {desc_y.dtype.base_type.ctype}({beta}) * y_in")
            state.add_memlet_path(read_y,
                                  y_tile_entry,
                                  init_entry,
                                  init_tasklet,
                                  dst_conn="y_in",
                                  memlet=dace.Memlet(f"_y[{subset}]"))
            state.add_memlet_path(init_tasklet,
                                  init_exit,
                                  y_local,
                                  src_conn="y_out",
                                  memlet=dace.Memlet(f"y_local[iy]"))
        else:
            # Zero-initialize the output buffer.
            state.add_memlet_path(y_tile_entry,
                                  init_entry,
                                  memlet=dace.Memlet())
            init_tasklet = state.add_tasklet("init", {}, {"y_out"}, "y_out = 0")
            state.add_memlet_path(init_entry,
                                  init_tasklet,
                                  memlet=dace.Memlet())
            state.add_memlet_path(init_tasklet,
                                  init_exit,
                                  y_local,
                                  src_conn="y_out",
                                  memlet=dace.Memlet("y_local[iy]"))

        # Create x tile map
        x_tile_entry, x_tile_exit = state.add_map(
            "x_tiles", {"tx": f"0:{num_tiles_x}"},
            schedule=dace.ScheduleType.FPGA_Device)

        # Create loop over tile size in x
        x_entry, x_exit = state.add_map("x", {"ix": f"0:{tile_size_x}"},
                                        schedule=dace.ScheduleType.FPGA_Device)

        # Buffer a scalar value of x
        sdfg.add_array("x_local", (1, ),
                       desc_x.dtype,
                       transient=True,
                       storage=dace.StorageType.FPGA_Local)
        x_local = state.add_access("x_local")
        subset = "0" if isinstance(desc_x,
                                   dt.Stream) else f"tx*{tile_size_x}+ix"
        state.add_memlet_path(read_x,
                              y_tile_entry,
                              x_tile_entry,
                              x_entry,
                              x_local,
                              memlet=dace.Memlet(f"_x[{subset}]"))

        # Create loop over tile size in y
        y_entry, y_exit = state.add_map("y", {"iy": f"0:{tile_size_y}"},
                                        schedule=dace.ScheduleType.FPGA_Device)

        # Do computation: y_local[iy] += alpha * A * x for the current column.
        tasklet = state.add_tasklet("gemv", {"A_in", "x_in", "y_in"}, {"y_out"},
                                    f"y_out = y_in + {alpha} * A_in * x_in")
        state.add_memlet_path(y_local,
                              x_tile_entry,
                              x_entry,
                              y_entry,
                              tasklet,
                              dst_conn="y_in",
                              memlet=dace.Memlet("y_local[iy]"))
        state.add_memlet_path(x_local,
                              y_entry,
                              tasklet,
                              dst_conn="x_in",
                              memlet=dace.Memlet("x_local[0]"))
        state.add_memlet_path(tasklet,
                              y_exit,
                              x_exit,
                              x_tile_exit,
                              y_local_write,
                              src_conn="y_out",
                              memlet=dace.Memlet("y_local[iy]"))
        # A is indexed according to transposition (streams read index 0).
        if isinstance(desc_a, dt.Stream):
            subset = "0"
        elif node.transA:
            subset = f"tx * {tile_size_x} + ix, ty * {tile_size_y} + iy"
        else:
            subset = f"ty * {tile_size_y} + iy, tx * {tile_size_x} + ix"
        state.add_memlet_path(read_a,
                              y_tile_entry,
                              x_tile_entry,
                              x_entry,
                              y_entry,
                              tasklet,
                              dst_conn="A_in",
                              memlet=dace.Memlet(f"_A[{subset}]"))

        # Write out tile of y
        write_y_entry, write_y_exit = state.add_map(
            "write_y", {"iy": f"0:{tile_size_y}"},
            schedule=dace.ScheduleType.FPGA_Device)
        write_y_tasklet = state.add_tasklet("write_y", {"y_in"}, {"y_out"},
                                            "y_out = y_in")
        subset = ("0" if isinstance(desc_y, dt.Stream) else
                  f"ty * {tile_size_y} + iy")
        state.add_memlet_path(y_local_write,
                              write_y_entry,
                              write_y_tasklet,
                              dst_conn="y_in",
                              memlet=dace.Memlet("y_local[iy]"))
        state.add_memlet_path(write_y_tasklet,
                              write_y_exit,
                              y_tile_exit,
                              write_y,
                              src_conn="y_out",
                              memlet=dace.Memlet(f"_y[{subset}]"))

        return sdfg
@dace.library.expansion
class ExpandGemvCuBLAS(ExpandTransformation):
    """cuBLAS-backed expansion of GEMV, emitting a tasklet that calls
    ``cublas<t>gemv``. Falls back to the pure expansion when the operands are
    vectorized or A is not contiguous in either dimension."""

    environments = [environments.cublas.cuBLAS]

    @staticmethod
    def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs):
        """Expand to a CUDA C++ tasklet that invokes cuBLAS GEMV.

        :param node: The Gemv library node to expand.
        :param state: State that contains the node.
        :param sdfg: SDFG that contains the node.
        :param m: Optional override for the size of y (default: from shape).
        :param n: Optional override for the size of x (default: from shape).
        :return: A Tasklet calling cuBLAS, or a pure-expansion SDFG fallback.
        :raises NotImplementedError: If (alpha, beta) != (1, 0).
        """
        node.validate(sdfg, state)

        ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x,
                                                       shape_x, strides_x),
         (edge_y, outer_array_y, shape_y,
          strides_y)) = _get_matmul_operands(node,
                                              state,
                                              sdfg,
                                              name_lhs="_A",
                                              name_rhs="_x",
                                              name_out="_y")
        dtype = outer_array_x.dtype.base_type
        veclen = outer_array_x.dtype.veclen
        m = m or node.m
        n = n or node.n
        if m is None:
            m = shape_y[0]
        if n is None:
            n = shape_x[0]

        transA = node.transA
        # cuBLAS assumes column-major data; a row-major matrix is handled by
        # flipping the transposition flag and using the contiguous stride as
        # the leading dimension.
        if strides_a[0] == 1:
            transA = not transA
            lda = strides_a[1]
        elif strides_a[1] == 1:
            lda = strides_a[0]
        else:
            warnings.warn('Matrix must be contiguous in at least '
                          'one dimension. Falling back to pure expansion.')
            return ExpandGemvPure.expansion(node,
                                            state,
                                            sdfg,
                                            m=m,
                                            n=n,
                                            **kwargs)

        trans = 'CUBLAS_OP_N' if transA else 'CUBLAS_OP_T'
        if not node.transA:
            m, n = n, m

        if veclen != 1:
            warnings.warn('Vector GEMV not supported, falling back to pure')
            return ExpandGemvPure.expansion(node,
                                            state,
                                            sdfg,
                                            m=m,
                                            n=n,
                                            **kwargs)

        func, ctype, runtimetype = blas_helpers.cublas_type_metadata(dtype)
        func += 'gemv'

        # TODO: (alpha,beta) != (1,0)
        if node.alpha != 1.0 or node.beta != 0.0:
            raise NotImplementedError

        # Use the preallocated device-resident constants for alpha and beta.
        alpha = (
            '__state->cublas_handle.Constants(__dace_cuda_device).%sPone()' %
            runtimetype)
        beta = (
            '__state->cublas_handle.Constants(__dace_cuda_device).%sZero()' %
            runtimetype)

        code = (environments.cublas.cuBLAS.handle_setup_code(node) + f"""
cublas{func}(__dace_cublas_handle, {trans}, {m}, {n}, {alpha}, _A, {lda},
            _x, {strides_x[0]}, {beta}, _y, {strides_y[0]});""")

        tasklet = dace.sdfg.nodes.Tasklet(node.name,
                                          node.in_connectors,
                                          node.out_connectors,
                                          code,
                                          language=dace.dtypes.Language.CPP)

        return tasklet
@dace.library.expansion
class ExpandGemvOpenBLAS(ExpandTransformation):
    """CBLAS-backed (OpenBLAS) expansion of GEMV, emitting a tasklet that
    calls ``cblas_<t>gemv``. Falls back to the pure expansion inside GPU
    device code, for vectorized operands, or when A is not contiguous in
    either dimension."""

    environments = [environments.openblas.OpenBLAS]

    @staticmethod
    def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs):
        """Expand to a C++ tasklet that invokes CBLAS GEMV.

        :param node: The Gemv library node to expand.
        :param state: State that contains the node.
        :param sdfg: SDFG that contains the node.
        :param m: Optional override for the size of y (default: from shape).
        :param n: Optional override for the size of x (default: from shape).
        :return: A Tasklet calling CBLAS, or a pure-expansion SDFG fallback.
        """
        from dace.sdfg.scope import is_devicelevel_gpu
        # CBLAS cannot be called from within GPU device code.
        if is_devicelevel_gpu(sdfg, state, node):
            return ExpandGemvPure.expansion(node, state, sdfg)

        node.validate(sdfg, state)

        ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x,
                                                       shape_x, strides_x),
         (edge_y, outer_array_y, shape_y,
          strides_y)) = _get_matmul_operands(node,
                                              state,
                                              sdfg,
                                              name_lhs="_A",
                                              name_rhs="_x",
                                              name_out="_y")
        dtype = outer_array_x.dtype.base_type
        veclen = outer_array_x.dtype.veclen
        m = m or node.m
        n = n or node.n
        if m is None:
            m = shape_y[0]
        if n is None:
            n = shape_x[0]

        transA = node.transA
        # CBLAS is called with column-major layout below; a row-major matrix
        # is handled by flipping the transposition flag and using the
        # contiguous stride as the leading dimension.
        if strides_a[0] == 1:
            transA = not transA
            lda = strides_a[1]
        elif strides_a[1] == 1:
            lda = strides_a[0]
        else:
            warnings.warn('Matrix must be contiguous in at least '
                          'one dimension. Falling back to pure expansion.')
            return ExpandGemvPure.expansion(node,
                                            state,
                                            sdfg,
                                            m=m,
                                            n=n,
                                            **kwargs)

        layout = 'CblasColMajor'
        trans = 'CblasNoTrans' if transA else 'CblasTrans'
        if not node.transA:
            m, n = n, m

        if veclen != 1:
            warnings.warn('Vector GEMV not supported, falling back to pure.')
            return ExpandGemvPure.expansion(node,
                                            state,
                                            sdfg,
                                            m=m,
                                            n=n,
                                            **kwargs)

        # Type metadata (prefix letter) is shared with the cuBLAS helper.
        func, ctype, runtimetype = blas_helpers.cublas_type_metadata(dtype)
        func = func.lower() + 'gemv'
        code = f"""cblas_{func}({layout}, {trans}, {m}, {n}, {node.alpha}, _A, {lda},
                   _x, {strides_x[0]}, {node.beta}, _y, {strides_y[0]});"""

        tasklet = dace.sdfg.nodes.Tasklet(node.name,
                                          node.in_connectors,
                                          node.out_connectors,
                                          code,
                                          language=dace.dtypes.Language.CPP)

        return tasklet
@dace.library.expansion
class ExpandGemvMKL(ExpandTransformation):
    """Intel MKL expansion of GEMV. MKL exposes the standard CBLAS
    interface, so this delegates entirely to the OpenBLAS expansion; only
    the linked build environment differs."""

    environments = [environments.intel_mkl.IntelMKL]

    @staticmethod
    def expansion(*args, **kwargs):
        # Same CBLAS call as OpenBLAS; only headers/libraries differ.
        return ExpandGemvOpenBLAS.expansion(*args, **kwargs)
@dace.library.expansion
class ExpandGemvPBLAS(ExpandTransformation):
    """PBLAS-based expansion for distributed GEMV: block-cyclically scatters
    A and x over a Px-by-Py process grid, performs the distributed product,
    and gathers the result back into y."""

    environments = []

    @staticmethod
    def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs):
        """Expand to an SDFG built from a distributed DaCe program.

        :param node: The Gemv library node to expand.
        :param state: State that contains the node.
        :param sdfg: SDFG that contains the node.
        :param m: Optional override for the size of y (default: from shape).
        :param n: Optional override for the size of x (default: from shape).
        :return: SDFG implementing the distributed matrix-vector product.
        """
        node.validate(sdfg, state)
        ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x,
                                                       shape_x, strides_x),
         (edge_y, outer_array_y, shape_y,
          strides_y)) = _get_matmul_operands(node,
                                              state,
                                              sdfg,
                                              name_lhs="_A",
                                              name_rhs="_x",
                                              name_out="_y")
        dtype_a = outer_array_a.dtype.type
        dtype = outer_array_x.dtype.base_type
        veclen = outer_array_x.dtype.veclen
        m = m or node.m
        n = n or node.n
        if m is None:
            m = shape_y[0]
        if n is None:
            n = shape_x[0]

        transA = node.transA
        # Process-grid dimensions, left symbolic until specialization.
        Px = dace.symbol('Px', dtype=dace.int32, integer=True, positive=True)
        Py = dace.symbol('Py', dtype=dace.int32, integer=True, positive=True)
        try:
            sdfg.add_symbol('Px', dace.int32)
            sdfg.add_symbol('Py', dace.int32)
        except FileExistsError:
            # Symbols were already registered on this SDFG; nothing to do.
            pass

        # NOTE(review): ``distr`` is not among this file's visible imports;
        # presumably it is resolved by the DaCe program frontend — confirm.
        @dace.program
        def _gemNv_pblas(_A: dtype[m, n], _x: dtype[n], _y: dtype[m]):
            lA = np.empty((m // Px, n // Py), dtype=_A.dtype)
            lx = np.empty((n // Px,), dtype=_x.dtype)
            dace.comm.BCScatter(_A, lA, (m//Px, n//Py))
            dace.comm.BCScatter(_x, lx, (n//Px, 1))
            ly = distr.MatMult(_A, _x, lA, lx, (m//Px, n//Py), (n//Px, 1))
            dace.comm.BCGather(ly, _y, (m//Px, 1))

        @dace.program
        def _gemTv_pblas(_A: dtype[m, n], _x: dtype[m], _y: dtype[n]):
            lA = np.empty((m // Px, n // Py), dtype=_A.dtype)
            lx = np.empty((m // Px,), dtype=_x.dtype)
            dace.comm.BCScatter(_A, lA, (m//Px, n//Py))
            dace.comm.BCScatter(_x, lx, (m//Px, 1))
            ly = distr.MatMult(_x, _A, lx, lA, (m//Px, 1), (m//Px, n//Py))
            dace.comm.BCGather(ly, _y, (n//Px, 1))

        # NOTE: The following is done to avoid scalar promotion, which results
        # in ValueError: Node type "BlockCyclicScatter" not supported for
        # promotion
        if transA:
            sdfg = _gemTv_pblas.to_sdfg(strict=False)
        else:
            sdfg = _gemNv_pblas.to_sdfg(strict=False)
        sdfg.apply_strict_transformations()
        return sdfg
@dace.library.node
class Gemv(dace.sdfg.nodes.LibraryNode):
    """Matrix-vector multiplication library node computing
    ``_y = alpha * op(_A) @ _x + beta * _y``, where ``op`` is the identity
    or the transpose depending on ``transA``."""

    # Registered expansions, selectable by name.
    implementations = {
        "pure": ExpandGemvPure,
        "OpenBLAS": ExpandGemvOpenBLAS,
        "MKL": ExpandGemvMKL,
        "cuBLAS": ExpandGemvCuBLAS,
        "FPGA_Accumulate": ExpandGemvFpgaAccumulate,
        "FPGA_TilesByColumn": ExpandGemvFpgaTilesByColumn,
        "PBLAS": ExpandGemvPBLAS
    }
    default_implementation = None

    # Node configuration (symbolic scalars and transposition flag).
    alpha = properties.SymbolicProperty(allow_none=False, default=1)
    beta = properties.SymbolicProperty(allow_none=False, default=0)
    transA = properties.Property(
        dtype=bool, desc="Whether to transpose A before multiplying")
    n = properties.SymbolicProperty(allow_none=True, default=None)
    m = properties.SymbolicProperty(allow_none=True, default=None)

    def __init__(self, name, location=None, transA=False, alpha=1, beta=0):
        # "_y" is an input connector only when beta != 0, since the output
        # is then read as well as written.
        ins = {"_A", "_x", "_y"} if beta != 0 else {"_A", "_x"}
        super().__init__(name, location=location, inputs=ins, outputs={"_y"})
        self.transA = transA
        self.alpha = alpha
        self.beta = beta

    def validate(self, sdfg, state):
        """Check that the connected memlets describe a valid GEMV."""

        def squeezed_size(memlet):
            # Subset size with degenerate (unit) dimensions removed.
            subset = copy.deepcopy(memlet.subset)
            subset.squeeze()
            return subset.size()

        in_edges = state.in_edges(self)
        if len(in_edges) not in [2, 3]:
            raise ValueError("Expected 2 or 3 inputs to GEMV")
        size_y_in = None
        for _, _, _, dst_conn, memlet in in_edges:
            if dst_conn == "_A":
                size_a = squeezed_size(memlet)
            elif dst_conn == "_x":
                size_x = squeezed_size(memlet)
            elif dst_conn == "_y":
                size_y_in = squeezed_size(memlet)

        if len(size_a) != 2 or len(size_x) != 1:
            raise ValueError(
                "Matrix-vector product only supported on matrix-vector input")

        # Logical dimensions of op(A), accounting for transposition.
        a_rows, a_cols = ((size_a[1], size_a[0]) if self.transA else
                          (size_a[0], size_a[1]))
        if a_cols != size_x[0]:
            raise ValueError(f"Columns of A ({a_cols}) don't match "
                             f"size of x ({size_x[0]}).")

        out_edges = state.out_edges(self)
        if len(out_edges) != 1:
            raise ValueError(
                "Expected exactly one output from matrix-vector product")
        size_y_out = squeezed_size(out_edges[0].data)
        if size_y_in is not None and size_y_in != size_y_out:
            raise ValueError("Input y-vector must match output y-vector.")
        if len(size_y_out) != 1 or size_y_out[0] != a_rows:
            raise ValueError("Vector input to GEMV must match matrix rows.")
# Numpy replacement
@oprepo.replaces('dace.libraries.blas.gemv')
@oprepo.replaces('dace.libraries.blas.Gemv')
def gemv_libnode(sdfg: SDFG,
                 state: SDFGState,
                 A,
                 x,
                 y,
                 alpha,
                 beta,
                 trans=None):
    """Insert a Gemv library node computing y = alpha * op(A) @ x + beta * y.

    When ``trans`` is None, transposition is inferred by comparing the
    length of x with the number of rows of A.
    """
    # Infer transposition from the operand shapes when not given explicitly.
    if trans is None:
        trans = (sdfg.arrays[x].shape[0] == sdfg.arrays[A].shape[0])

    # Access nodes for the operands.
    read_a = state.add_read(A)
    read_x = state.add_read(x)
    write_y = state.add_write(y)

    libnode = Gemv('gemv', transA=trans, alpha=alpha, beta=beta)
    state.add_node(libnode)

    # Wire inputs and output.
    state.add_edge(read_a, None, libnode, '_A', mm.Memlet(A))
    state.add_edge(read_x, None, libnode, '_x', mm.Memlet(x))
    state.add_edge(libnode, '_y', write_y, None, mm.Memlet(y))

    # When beta != 0, y is also consumed and must be read as an input.
    if beta != 0:
        read_y = state.add_read(y)
        state.add_edge(read_y, None, libnode, '_y', mm.Memlet(y))

    return []
| [
"dace.libraries.blas.nodes.matmul._get_matmul_operands",
"dace.sdfg.scope.is_devicelevel_gpu",
"dace.libraries.blas.blas_helpers.cublas_type_metadata",
"dace.memlet.Memlet",
"dace.frontend.common.op_repository.replaces",
"dace.properties.SymbolicProperty",
"dace.libraries.blas.environments.cublas.cuBLAS... | [((42195, 42238), 'dace.frontend.common.op_repository.replaces', 'oprepo.replaces', (['"""dace.libraries.blas.gemv"""'], {}), "('dace.libraries.blas.gemv')\n", (42210, 42238), True, 'from dace.frontend.common import op_repository as oprepo\n'), ((42240, 42283), 'dace.frontend.common.op_repository.replaces', 'oprepo.replaces', (['"""dace.libraries.blas.Gemv"""'], {}), "('dace.libraries.blas.Gemv')\n", (42255, 42283), True, 'from dace.frontend.common import op_repository as oprepo\n'), ((39668, 39724), 'dace.properties.SymbolicProperty', 'properties.SymbolicProperty', ([], {'allow_none': '(False)', 'default': '(1)'}), '(allow_none=False, default=1)\n', (39695, 39724), False, 'from dace import properties, symbolic\n'), ((39736, 39792), 'dace.properties.SymbolicProperty', 'properties.SymbolicProperty', ([], {'allow_none': '(False)', 'default': '(0)'}), '(allow_none=False, default=0)\n', (39763, 39792), False, 'from dace import properties, symbolic\n'), ((39807, 39893), 'dace.properties.Property', 'properties.Property', ([], {'dtype': 'bool', 'desc': '"""Whether to transpose A before multiplying"""'}), "(dtype=bool, desc=\n 'Whether to transpose A before multiplying')\n", (39826, 39893), False, 'from dace import properties, symbolic\n'), ((39907, 39965), 'dace.properties.SymbolicProperty', 'properties.SymbolicProperty', ([], {'allow_none': '(True)', 'default': 'None'}), '(allow_none=True, default=None)\n', (39934, 39965), False, 'from dace import properties, symbolic\n'), ((39974, 40032), 'dace.properties.SymbolicProperty', 'properties.SymbolicProperty', ([], {'allow_none': '(True)', 'default': 'None'}), '(allow_none=True, default=None)\n', (40001, 40032), False, 'from dace import properties, symbolic\n'), ((1055, 1157), 'dace.libraries.blas.nodes.matmul._get_matmul_operands', '_get_matmul_operands', (['node', 'parent_state', 'parent_sdfg'], {'name_lhs': '"""_A"""', 'name_rhs': '"""_x"""', 'name_out': '"""_y"""'}), 
"(node, parent_state, parent_sdfg, name_lhs='_A',\n name_rhs='_x', name_out='_y')\n", (1075, 1157), False, 'from dace.libraries.blas.nodes.matmul import _get_matmul_operands\n'), ((30508, 30596), 'dace.libraries.blas.nodes.matmul._get_matmul_operands', '_get_matmul_operands', (['node', 'state', 'sdfg'], {'name_lhs': '"""_A"""', 'name_rhs': '"""_x"""', 'name_out': '"""_y"""'}), "(node, state, sdfg, name_lhs='_A', name_rhs='_x',\n name_out='_y')\n", (30528, 30596), False, 'from dace.libraries.blas.nodes.matmul import _get_matmul_operands\n'), ((32296, 32336), 'dace.libraries.blas.blas_helpers.cublas_type_metadata', 'blas_helpers.cublas_type_metadata', (['dtype'], {}), '(dtype)\n', (32329, 32336), False, 'from dace.libraries.blas import blas_helpers\n'), ((33555, 33592), 'dace.sdfg.scope.is_devicelevel_gpu', 'is_devicelevel_gpu', (['sdfg', 'state', 'node'], {}), '(sdfg, state, node)\n', (33573, 33592), False, 'from dace.sdfg.scope import is_devicelevel_gpu\n'), ((33914, 34002), 'dace.libraries.blas.nodes.matmul._get_matmul_operands', '_get_matmul_operands', (['node', 'state', 'sdfg'], {'name_lhs': '"""_A"""', 'name_rhs': '"""_x"""', 'name_out': '"""_y"""'}), "(node, state, sdfg, name_lhs='_A', name_rhs='_x',\n name_out='_y')\n", (33934, 34002), False, 'from dace.libraries.blas.nodes.matmul import _get_matmul_operands\n'), ((35736, 35776), 'dace.libraries.blas.blas_helpers.cublas_type_metadata', 'blas_helpers.cublas_type_metadata', (['dtype'], {}), '(dtype)\n', (35769, 35776), False, 'from dace.libraries.blas import blas_helpers\n'), ((36996, 37084), 'dace.libraries.blas.nodes.matmul._get_matmul_operands', '_get_matmul_operands', (['node', 'state', 'sdfg'], {'name_lhs': '"""_A"""', 'name_rhs': '"""_x"""', 'name_out': '"""_y"""'}), "(node, state, sdfg, name_lhs='_A', name_rhs='_x',\n name_out='_y')\n", (37016, 37084), False, 'from dace.libraries.blas.nodes.matmul import _get_matmul_operands\n'), ((41795, 41827), 'copy.deepcopy', 'copy.deepcopy', (['out_memlet.subset'], 
{}), '(out_memlet.subset)\n', (41808, 41827), False, 'import copy\n'), ((42865, 42877), 'dace.memlet.Memlet', 'mm.Memlet', (['A'], {}), '(A)\n', (42874, 42877), True, 'from dace import memlet as mm, data as dt\n'), ((42925, 42937), 'dace.memlet.Memlet', 'mm.Memlet', (['x'], {}), '(x)\n', (42934, 42937), True, 'from dace import memlet as mm, data as dt\n'), ((42986, 42998), 'dace.memlet.Memlet', 'mm.Memlet', (['y'], {}), '(y)\n', (42995, 42998), True, 'from dace import memlet as mm, data as dt\n'), ((31892, 31956), 'warnings.warn', 'warnings.warn', (['"""Vector GEMV not supported, falling back to pure"""'], {}), "('Vector GEMV not supported, falling back to pure')\n", (31905, 31956), False, 'import warnings\n'), ((32745, 32795), 'dace.libraries.blas.environments.cublas.cuBLAS.handle_setup_code', 'environments.cublas.cuBLAS.handle_setup_code', (['node'], {}), '(node)\n', (32789, 32795), False, 'from dace.libraries.blas import environments\n'), ((35331, 35396), 'warnings.warn', 'warnings.warn', (['"""Vector GEMV not supported, falling back to pure."""'], {}), "('Vector GEMV not supported, falling back to pure.')\n", (35344, 35396), False, 'import warnings\n'), ((38037, 38081), 'numpy.empty', 'np.empty', (['(m // Px, n // Py)'], {'dtype': '_A.dtype'}), '((m // Px, n // Py), dtype=_A.dtype)\n', (38045, 38081), True, 'import numpy as np\n'), ((38099, 38135), 'numpy.empty', 'np.empty', (['(n // Px,)'], {'dtype': '_x.dtype'}), '((n // Px,), dtype=_x.dtype)\n', (38107, 38135), True, 'import numpy as np\n'), ((38489, 38533), 'numpy.empty', 'np.empty', (['(m // Px, n // Py)'], {'dtype': '_A.dtype'}), '((m // Px, n // Py), dtype=_A.dtype)\n', (38497, 38533), True, 'import numpy as np\n'), ((38551, 38587), 'numpy.empty', 'np.empty', (['(m // Px,)'], {'dtype': '_x.dtype'}), '((m // Px,), dtype=_x.dtype)\n', (38559, 38587), True, 'import numpy as np\n'), ((43102, 43114), 'dace.memlet.Memlet', 'mm.Memlet', (['y'], {}), '(y)\n', (43111, 43114), True, 'from dace import memlet as mm, 
data as dt\n'), ((31309, 31420), 'warnings.warn', 'warnings.warn', (['"""Matrix must be contiguous in at least one dimension. Falling back to pure expansion."""'], {}), "(\n 'Matrix must be contiguous in at least one dimension. Falling back to pure expansion.'\n )\n", (31322, 31420), False, 'import warnings\n'), ((34715, 34826), 'warnings.warn', 'warnings.warn', (['"""Matrix must be contiguous in at least one dimension. Falling back to pure expansion."""'], {}), "(\n 'Matrix must be contiguous in at least one dimension. Falling back to pure expansion.'\n )\n", (34728, 34826), False, 'import warnings\n'), ((40691, 40719), 'copy.deepcopy', 'copy.deepcopy', (['memlet.subset'], {}), '(memlet.subset)\n', (40704, 40719), False, 'import copy\n'), ((40850, 40878), 'copy.deepcopy', 'copy.deepcopy', (['memlet.subset'], {}), '(memlet.subset)\n', (40863, 40878), False, 'import copy\n'), ((41009, 41037), 'copy.deepcopy', 'copy.deepcopy', (['memlet.subset'], {}), '(memlet.subset)\n', (41022, 41037), False, 'import copy\n'), ((3614, 3632), 'dace.symbolic.symstr', 'symbolic.symstr', (['d'], {}), '(d)\n', (3629, 3632), False, 'from dace import properties, symbolic\n')] |
# You are at the top. If you attempt to go any higher
# you will go beyond the known limits of the code
# universe where there are most certainly monsters
# might be able to get a speedup where I'm appending move and -move
# to do:
# use point raycaster to make a cloth_wrap option
#  self collisions
# maybe do dynamic margins for when cloth is moving fast
# object collisions
# collisions need to properly exclude pinned and vertex pinned
# add bending springs
# add curl by shortening bending springs on one axis or diagonal
#  independently scale bending springs and structural to create buckling
# option to cache animation?
# Custom Source shape option for animated shapes
# collisions:
# Only need to check one of the edges for groups connected to a vertex
# for edge to face intersections...
# figure out where the edge hit the face
# figure out which end of the edge is inside the face
# move along the face normal to the surface for the point inside.
# if I reflect by flipping the vel around the face normal
# if it collides on the bounce it will get caught on the next iteration
# Sewing
# Could create super sewing that doesn't use edges but uses scalars along the edge to place virtual points
# sort of a barycentric virtual spring. Could even use it to sew to faces if I can think of a ui for where on the face.
# On an all triangle mesh, where sew edges come together there are long straight lines. This probably causes those edges to fold.
# in other words... creating diagonal springs between these edges will not solve the fold problem. Bend spring could do this.
# Bend springs:
# need to speed things up
# When faces have various sizes, the forces don't add up
# self collision
# where points are pinned, stuff is all jittery
'''??? Would it make sense to do self collisions with virtual edges ???'''
'''??? Could do dynamic collision margins for stuff moving fast ???'''
# Blender add-on registration metadata (read by Blender's add-on loader).
bl_info = {
    "name": "Modeling Cloth",
    "author": "<NAME> (<EMAIL>.com), <NAME> (@ucupumar)",
    "version": (1, 0),
    "blender": (2, 79, 0),
    "location": "View3D > Extended Tools > Modeling Cloth",
    "description": "Maintains the surface area of an object so it behaves like cloth",
    "warning": "There might be an angry rhinoceros behind you",
    "wiki_url": "",
    "category": '3D View'}
import bpy
import bmesh
import numpy as np
from numpy import newaxis as nax
from bpy_extras import view3d_utils
from bpy.props import *
from bpy.app.handlers import persistent
from mathutils import *
import time, sys
#enable_numexpr = True
enable_numexpr = False
if enable_numexpr:
import numexpr as ne
you_have_a_sense_of_humor = False
#you_have_a_sense_of_humor = True
if you_have_a_sense_of_humor:
import antigravity
def get_co(ob, arr=None, key=None): # key
    """Return the vertex coordinates of *ob* as an (N, 3) float32 array.

    Coords are read from shape key *key* when given, otherwise from the
    base mesh.  A pre-allocated buffer may be passed in as *arr*.
    """
    v_count = len(ob.data.vertices)
    if arr is None:
        arr = np.zeros(v_count * 3, dtype=np.float32)
    source = ob.data.vertices if key is None else ob.data.shape_keys.key_blocks[key].data
    source.foreach_get('co', arr.ravel())
    arr.shape = (v_count, 3)
    return arr
def get_proxy_co(ob, arr, me):
    """Return the modifier-evaluated vertex coords of proxy mesh *me* as (N, 3).

    *arr* may supply a pre-allocated (N, 3) buffer; a new float32 buffer
    is created when it is None.  (*ob* is unused; kept for a uniform
    call signature.)
    """
    if arr is None:
        arr = np.zeros((len(me.vertices), 3), dtype=np.float32)
    v_count = arr.shape[0]
    me.vertices.foreach_get('co', arr.ravel())
    arr.shape = (v_count, 3)
    return arr
def triangulate(me, ob=None):
    """Triangulate mesh *me* via a temporary bmesh (*me* itself is not
    modified) and return an (n_tris, 3) vertex index array.

    When *ob* is given, bend-spring data is also stored on it:
    ob.bend_eidx — edges with exactly two linked triangles, and
    ob.bend_tips — the opposite tip vert of each of those two triangles.
    """
    obm = bmesh.new()
    obm.from_mesh(me)
    bmesh.ops.triangulate(obm, faces=obm.faces)
    #obm.to_mesh(me)
    count = len(obm.faces)
    #tri_idx = np.zeros(count * 3, dtype=np.int32)
    #me.polygons.foreach_get('vertices', tri_idx)
    tri_idx = np.array([[v.index for v in f.verts] for f in obm.faces])
    # Identify bend spring groups. Each edge gets paired with two points on tips of tris around edge
    # Restricted to edges with two linked faces on a triangulated version of the mesh
    if ob is not None:
        link_ed = [e for e in obm.edges if len(e.link_faces) == 2]
        ob.bend_eidx = np.array([[e.verts[0].index, e.verts[1].index] for e in link_ed])
        # fv: the six verts of the two tris around each bend edge
        fv = np.array([[[v.index for v in f.verts] for f in e.link_faces] for e in link_ed])
        fv.shape = (fv.shape[0],6)
        # tips are the two verts of fv that are not on the edge itself
        ob.bend_tips = np.array([[idx for idx in fvidx if idx not in e] for e, fvidx in zip(ob.bend_eidx, fv)])
    obm.free()
    return tri_idx#.reshape(count, 3)
def tri_normals_in_place(col, tri_co):
    """Store triangle origins, edge vectors and unit normals on *col*.

    *tri_co* is an (N, 3, 3) array of triangles.  Sets col.origins,
    col.cross_vecs, col.nor_dots (squared lengths of the raw cross
    products) and col.normals (unit length).
    """
    first_points = tri_co[:, 0]
    col.origins = first_points
    col.cross_vecs = tri_co[:, 1:] - first_points[:, nax]
    raw_nor = np.cross(col.cross_vecs[:, 0], col.cross_vecs[:, 1])
    col.nor_dots = np.einsum("ij, ij->i", raw_nor, raw_nor)
    raw_nor /= np.sqrt(col.nor_dots)[:, nax]
    col.normals = raw_nor
def get_tri_normals(tr_co):
    """Return (cross_vecs, non-unit normals, origins) for an (N, 3, 3)
    array of triangles."""
    firsts = tr_co[:, 0]
    edge_pairs = tr_co[:, 1:] - firsts[:, nax]
    nors = np.cross(edge_pairs[:, 0], edge_pairs[:, 1])
    return edge_pairs, nors, firsts
def closest_points_edge(vec, origin, p):
    """Project points *p* onto the line through *origin* along *vec*.

    Returns (cp, d): cp are the offsets from *origin* to each projected
    point, d the scalar parameters along *vec*.
    """
    rel = p - origin
    d = (rel @ vec) / (vec @ vec)
    return vec * d[:, nax], d
def proxy_in_place(col, me):
    """Overwrite col.co with *me*'s modifier-evaluated vertex coords
    moved into world space."""
    me.vertices.foreach_get('co', col.co.ravel())
    col.co = apply_transforms(col.ob, col.co)
def apply_rotation(col):
    """Rotate col.v_normals into world orientation in place.

    Normals are direction vectors, so only the rotation part of the
    matrix is applied — no translation.
    """
    rot = np.array(col.ob.matrix_world)[:3, :3].T
    col.v_normals = col.v_normals @ rot
def proxy_v_normals_in_place(col, world=True, me=None):
    """Overwrite col.v_normals with *me*'s vertex normals, rotated into
    world orientation when *world* is True."""
    me.vertices.foreach_get('normal', col.v_normals.ravel())
    if world:
        # normals are direction vectors: rotation only, no translation
        apply_rotation(col)
def proxy_v_normals(ob, me):
    """Return *me*'s vertex normals rotated into *ob*'s world orientation
    as an (N, 3) array."""
    n = len(me.vertices)
    nors = np.zeros(n * 3, dtype=np.float32)
    me.vertices.foreach_get('normal', nors)
    nors.shape = (n, 3)
    # rotation part only; transposed so row vectors rotate forwards
    rot = np.array(ob.matrix_world, dtype=np.float32)[:3, :3].T
    return nors @ rot
def apply_transforms(ob, co):
    """Return *co* transformed from *ob*'s object space into world space."""
    m = np.array(ob.matrix_world, dtype=np.float32)
    # row-vector convention: transpose the 3x3 so rotation goes forwards
    return co @ m[:3, :3].T + m[:3, 3]
def apply_in_place(ob, arr, cloth):
    """Transform *arr* from object space to world space, writing in place.

    *cloth* is unused; kept for call-site compatibility.
    """
    m = np.array(ob.matrix_world, dtype=np.float32)
    arr[:] = arr @ m[:3, :3].T + m[:3, 3]
def applied_key_co(ob, arr=None, key=None):
    """Return shape-key *key* coords of *ob* transformed into world space.

    *arr* may supply a pre-allocated flat float32 buffer of
    len(vertices) * 3.
    """
    c = len(ob.data.vertices)
    if arr is None:
        arr = np.zeros(c * 3, dtype=np.float32)
    ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr)
    arr.shape = (c, 3)
    m = np.array(ob.matrix_world)
    mat = m[:3, :3].T # rotates backwards without T
    loc = m[:3, 3]
    # bug fix: the original returned `co @ mat + loc` — `co` was never
    # defined in this function (NameError); the buffer is `arr`
    return arr @ mat + loc
def revert_transforms(ob, co):
    """Return world-space *co* converted back into *ob*'s object space.

    Inverse of apply_transforms(); run before writing coords back to an
    object whose coords were read with apply_transforms().
    """
    inv = np.linalg.inv(ob.matrix_world)
    return co @ inv[:3, :3].T + inv[:3, 3]
def revert_in_place(ob, co):
    """Convert world-space *co* back to *ob*'s object space, in place."""
    inv = np.linalg.inv(ob.matrix_world)
    co[:] = co @ inv[:3, :3].T + inv[:3, 3]
def revert_rotation(ob, co):
    """Rotate *co* from world orientation back to object orientation.

    Rotation only (suitable for normals/vectors).  For a pure rotation
    the matrix transpose is its inverse, so multiplying row vectors by
    the untransposed 3x3 undoes the world rotation.
    """
    m = np.array(ob.matrix_world)
    return co @ m[:3, :3]
def get_last_object():
    """Return (cloth_objects, active_cloth) so the settings panel can stay
    live while other objects (e.g. pin empties) are selected.

    Falls back to scene.mclo.last_object when the active object is not a
    cloth; returns (None, None) when no cloth objects exist.
    """
    cloths = [i for i in bpy.data.objects if i.mclo.enable] # so we can select an empty and keep the settings menu up
    if bpy.context.object.mclo.enable:
        return cloths, bpy.context.object
    if len(cloths) > 0:
        ob = bpy.context.scene.mclo.last_object
        return cloths, ob
    return None, None
def get_poly_centers(ob, type=np.float32, mesh=None):
    """Return an (n_polys, 3) array of face centers read from *mesh*.

    Modifier render visibility is switched off while reading and restored
    afterwards.  *type* sets the dtype of the returned array.
    """
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        # save current render visibility; ren_set stays all False because
        # it is copied before the read, so rendering is switched off
        show = np.zeros(m_count, dtype=bool)  # bug fix: np.bool alias was removed in numpy 1.24
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    p_count = len(mesh.polygons)
    center = np.zeros(p_count * 3, dtype=type)
    mesh.polygons.foreach_get('center', center)
    center.shape = (p_count, 3)
    if mod:
        # restore the saved render visibility
        ob.modifiers.foreach_set('show_render', show)
    return center
def simple_poly_centers(ob, key=None):
    """Return the mean vertex location of each polygon of *ob*.

    When *key* names a shape key, coords are read from that key.  (Bug
    fix: the original fetched the shape-key data into a local and then
    ignored it, always reading the base mesh.)
    """
    if key is not None:
        s_key = ob.data.shape_keys.key_blocks[key].data
        return np.squeeze([[np.mean([s_key[i].co for i in p.vertices], axis=0)] for p in ob.data.polygons])
    return np.squeeze([[np.mean([ob.data.vertices[i].co for i in p.vertices], axis=0)] for p in ob.data.polygons])
def get_poly_normals(ob, type=np.float32, mesh=None):
    """Return an (n_polys, 3) array of face normals read from *mesh*.

    Modifier render visibility is switched off while reading and restored
    afterwards.  *type* sets the dtype of the returned array.
    """
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        # save render visibility, switch all modifiers off for the read
        show = np.zeros(m_count, dtype=bool)  # bug fix: np.bool alias was removed in numpy 1.24
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    p_count = len(mesh.polygons)
    normal = np.zeros(p_count * 3, dtype=type)
    mesh.polygons.foreach_get('normal', normal)
    normal.shape = (p_count, 3)
    if mod:
        # restore the saved render visibility
        ob.modifiers.foreach_set('show_render', show)
    return normal
def get_v_normals(ob, arr, mesh):
    """Read vertex normals from *mesh* into *arr* in place.

    *mesh* is a proxy mesh because the sim reads from a shape key.
    Modifier render visibility is switched off while reading and restored
    afterwards.
    """
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        # save render visibility, switch all modifiers off for the read
        show = np.zeros(m_count, dtype=bool)  # bug fix: np.bool alias was removed in numpy 1.24
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh.vertices.foreach_get('normal', arr.ravel())
    if mod:
        # restore the saved render visibility
        ob.modifiers.foreach_set('show_render', show)
def get_v_nor(ob, nor_arr):
    """Fill *nor_arr* with *ob*'s vertex normals (in place) and return it."""
    ob.data.vertices.foreach_get('normal', nor_arr.ravel())
    return nor_arr
def closest_point_edge(e1, e2, p):
    """Return the point on the infinite line through *e1*->*e2* closest
    to point *p*."""
    edge = e2 - e1
    t = np.dot(p - e1, edge) / np.dot(edge, edge)
    return e1 + edge * t
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
    '''Creates vertex groups and sets weights. "groups" is a list of strings
    for the names of the groups. "weights" is a list of weights corresponding
    to the strings. Each vertex is assigned a weight for each vertex group to
    avoid calling vertex weights that are not assigned. If the groups are
    already present, the previous weights will be preserved. To reset weights
    delete the created groups'''
    if ob is None:
        # fall back to the active object
        ob = bpy.context.object
    vg = ob.vertex_groups
    for g in range(0, len(groups)):
        if groups[g] not in vg.keys(): # Don't create groups if there are already there
            vg.new(groups[g])
            vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
        else:
            vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups.
def get_bmesh(obj=None):
    """Return a bmesh for the last cloth object (or *obj* as fallback).

    Built from the mesh data in object mode, or wrapping the live edit
    mesh in edit mode.
    """
    ob = get_last_object()[1]
    if ob is None:
        ob = obj
    obm = bmesh.new()
    if ob.mode == 'OBJECT':
        obm.from_mesh(ob.data)
    elif ob.mode == 'EDIT':
        obm = bmesh.from_edit_mesh(ob.data)
    return obm
def get_minimal_edges(ob):
    """Classify *ob*'s edges for the spring setup.

    Returns (lin_eidx, diag_eidx, sew_eidx, multi_groups):
      lin_eidx     edges bordering at least one face (structural springs)
      diag_eidx    unique cross-face diagonals of ngons (shear springs)
      sew_eidx     faceless (wire) edges, treated as sewing springs
      multi_groups per-vertex index groups for sew verts joined by more
                   than one sew edge, or None when there are none
    """
    obm = get_bmesh(ob)
    obm.edges.ensure_lookup_table()
    obm.verts.ensure_lookup_table()
    obm.faces.ensure_lookup_table()
    # get sew edges: wire edges with no linked faces
    sew = [i.index for i in obm.edges if len(i.link_faces)==0]
    # so if I have a vertex with one or more sew edges attached
    # I need to get the mean location of all verts shared by those edges
    # every one of those verts needs to move towards the total mean
    # get linear edges
    e_count = len(obm.edges)
    eidx = np.zeros(e_count * 2, dtype=np.int32)
    e_bool = np.zeros(e_count, dtype=bool)  # bug fix: np.bool alias removed in numpy 1.24
    e_bool[sew] = True
    ob.data.edges.foreach_get('vertices', eidx)
    eidx.shape = (e_count, 2)
    # get diagonal edges:
    diag_eidx = []
    start = 0
    stop = 0
    step_size = [len(i.verts) for i in obm.faces]
    p_v_count = np.sum(step_size)
    p_verts = np.ones(p_v_count, dtype=np.int32)
    ob.data.polygons.foreach_get('vertices', p_verts)
    # can only be understood on a good day when the coffee flows (uses rolling and slicing)
    # creates uniqe diagonal edge sets
    for f in obm.faces:
        fv_count = len(f.verts)
        stop += fv_count
        if fv_count > 3: # triangles are already connected by linear springs
            skip = 2
            f_verts = p_verts[start:stop]
            for fv in range(len(f_verts)):
                if fv > 1: # as we go around the loop of verts in face we start overlapping
                    skip = fv + 1 # this lets us skip the overlap so we don't have mirror duplicates
                roller = np.roll(f_verts, fv)
                for r in roller[skip:-1]:
                    diag_eidx.append([roller[0], r])
        start += fv_count
    # eidx groups
    sew_eidx = eidx[e_bool]
    lin_eidx = eidx[~e_bool]
    diag_eidx = np.array(diag_eidx)
    # deal with sew verts connected to more than one edge
    s_t_rav = sew_eidx.T.ravel()
    s_uni, s_inv, s_counts = np.unique(s_t_rav,return_inverse=True, return_counts=True)
    s_multi = s_counts > 1
    multi_groups = None
    # bug fix: was `np.any(s_counts)`, which is True whenever ANY sew
    # edge exists; only verts appearing in more than one sew edge matter
    if np.any(s_multi):
        multi_groups = []
        ls = sew_eidx[:,0]
        rs = sew_eidx[:,1]
        for i in s_uni[s_multi]:
            # the vert itself plus every sew partner on either side
            gr = np.array([i])
            gr = np.append(gr, ls[rs==i])
            gr = np.append(gr, rs[ls==i])
            multi_groups.append(gr)
    return lin_eidx, diag_eidx, sew_eidx, multi_groups
def add_remove_virtual_springs(remove=False):
    """Add (or with remove=True, delete) virtual springs between all
    selected vertices of the active cloth object.

    Springs are stored in ob.mclo.virtual_springs as vertex id pairs;
    duplicates of existing springs (including the cloth's edge springs)
    are skipped.
    """
    ob = get_last_object()[1]
    cloth = get_cloth_data(ob)
    obm = get_bmesh()
    obm.verts.ensure_lookup_table()
    count = len(obm.verts)
    idxer = np.arange(count, dtype=np.int32)
    sel = np.array([v.select for v in obm.verts])
    selected = idxer[sel]
    virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
    if virtual_springs.shape[0] == 0:
        virtual_springs.shape = (0, 2)
    if remove:
        # delete stored springs whose left vert is in the selection
        ls = virtual_springs[:, 0]
        in_sel = np.in1d(ls, idxer[sel])
        deleter = np.arange(ls.shape[0], dtype=np.int32)[in_sel]
        for i in reversed(deleter):
            ob.mclo.virtual_springs.remove(i)
        return
    # include both orientations so duplicate checks catch either order
    existing = np.append(cloth.eidx, virtual_springs, axis=0)
    flip = existing[:, ::-1]
    existing = np.append(existing, flip, axis=0)
    ls = existing[:,0]
    #springs = []
    for i in idxer[sel]:
        # to avoid duplicates:
        # where this vert occurs on the left side of the existing spring list
        v_in = existing[i == ls]
        v_in_r = v_in[:,1]
        not_in = selected[~np.in1d(selected, v_in_r)]
        idx_set = not_in[not_in != i]
        for sv in idx_set:
            #springs.append([i, sv])
            new_vs = ob.mclo.virtual_springs.add()
            new_vs.vertex_id_1 = i
            new_vs.vertex_id_2 = sv
    # gets appended to eidx in the cloth_init function after calling get connected polys in case geometry changes
def generate_guide_mesh():
    """Makes the arrow that appears when creating pins.

    Reuses the existing 'ModelingClothPinGuide' object when present,
    otherwise builds the arrow mesh from hard-coded geometry and links
    it to the scene with x-ray display enabled.
    """
    verts = [[0.0, 0.0, 0.0], [-0.01, -0.01, 0.1], [-0.01, 0.01, 0.1], [0.01, -0.01, 0.1], [0.01, 0.01, 0.1], [-0.03, -0.03, 0.1], [-0.03, 0.03, 0.1], [0.03, 0.03, 0.1], [0.03, -0.03, 0.1], [-0.01, -0.01, 0.2], [-0.01, 0.01, 0.2], [0.01, -0.01, 0.2], [0.01, 0.01, 0.2]]
    edges = [[0, 5], [5, 6], [6, 7], [7, 8], [8, 5], [1, 2], [2, 4], [4, 3], [3, 1], [5, 1], [2, 6], [4, 7], [3, 8], [9, 10], [10, 12], [12, 11], [11, 9], [3, 11], [9, 1], [2, 10], [12, 4], [6, 0], [7, 0], [8, 0]]
    faces = [[0, 5, 6], [0, 6, 7], [0, 7, 8], [0, 8, 5], [1, 3, 11, 9], [1, 2, 6, 5], [2, 4, 7, 6], [4, 3, 8, 7], [3, 1, 5, 8], [12, 10, 9, 11], [4, 2, 10, 12], [3, 4, 12, 11], [2, 1, 9, 10]]
    name = 'ModelingClothPinGuide'
    if 'ModelingClothPinGuide' in bpy.data.objects:
        mesh_ob = bpy.data.objects['ModelingClothPinGuide']
    else:
        mesh = bpy.data.meshes.new('ModelingClothPinGuide')
        mesh.from_pydata(verts, edges, faces)
        mesh.update()
        mesh_ob = bpy.data.objects.new(name, mesh)
        bpy.context.scene.objects.link(mesh_ob)
        mesh_ob.show_x_ray = True
    return mesh_ob
def create_guide():
    """Spawns the pin guide arrow, creating its translucent yellow
    material the first time, and returns the guide object."""
    if 'ModelingClothPinGuide' in bpy.data.objects:
        mesh_ob = bpy.data.objects['ModelingClothPinGuide']
        return mesh_ob
    mesh_ob = generate_guide_mesh()
    bpy.context.scene.objects.active = mesh_ob
    bpy.ops.object.material_slot_add()
    if 'ModelingClothPinGuide' in bpy.data.materials:
        mat = bpy.data.materials['ModelingClothPinGuide']
    else:
        # semi-transparent, emissive yellow so the arrow reads in the viewport
        mat = bpy.data.materials.new(name='ModelingClothPinGuide')
        mat.use_transparency = True
        mat.alpha = 0.35
        mat.emit = 2
        mat.game_settings.alpha_blend = 'ALPHA_ANTIALIASING'
        mat.diffuse_color = (1, 1, 0)
    mesh_ob.material_slots[0].material = mat
    return mesh_ob
def delete_guide():
    """Deletes the pin guide arrow object and its mesh datablock."""
    if 'ModelingClothPinGuide' in bpy.data.objects:
        bpy.data.objects.remove(bpy.data.objects['ModelingClothPinGuide'])
    if 'ModelingClothPinGuide' in bpy.data.meshes:
        guide_mesh = bpy.data.meshes['ModelingClothPinGuide']
        # clear users first so the datablock can actually be removed
        guide_mesh.user_clear()
        bpy.data.meshes.remove(guide_mesh)
def scale_source(multiplier):
    """Grow or shrink the rest (source) shape of the active cloth by
    scaling the 'modeling cloth source key' coords around their mean."""
    ob = get_last_object()[1]
    if ob is not None:
        if ob.mclo.enable:
            count = len(ob.data.vertices)
            co = np.zeros(count*3, dtype=np.float32)
            ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', co)
            co.shape = (count, 3)
            # scale about the centroid so the object doesn't drift
            mean = np.mean(co, axis=0)
            co -= mean
            co *= multiplier
            co += mean
            ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_set('co', co.ravel())
            cloth = get_cloth_data(ob)
            if hasattr(cloth, 'cy_dists'):
                # keep cached distances in sync with the new scale
                cloth.cy_dists *= multiplier
def reset_shapes(ob=None):
    """Sets the modeling cloth key to match the source key.
    Will regenerate shape keys if they are missing.

    Also zeroes the stored cloth velocity and replaces the cached coords
    on the cloth data object.
    """
    if ob is None:
        if bpy.context.object.mclo.enable:
            ob = bpy.context.object
        else:
            ob = bpy.context.scene.mclo.last_object
    # rebuild any missing shape keys
    if ob.data.shape_keys == None:
        ob.shape_key_add('Basis')
    if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
        ob.shape_key_add('modeling cloth source key')
    if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
        ob.shape_key_add('modeling cloth key')
        ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
    keys = ob.data.shape_keys.key_blocks
    count = len(ob.data.vertices)
    co = np.zeros(count * 3, dtype=np.float32)
    keys['Basis'].data.foreach_get('co', co)
    #co = applied_key_co(ob, None, 'modeling cloth source key')
    #keys['modeling cloth source key'].data.foreach_set('co', co)
    keys['modeling cloth key'].data.foreach_set('co', co)
    # reset the data stored in the class
    cloth = get_cloth_data(ob)
    cloth.vel[:] = 0
    co.shape = (co.shape[0]//3, 3)
    cloth.co = co
    # toggling mute appears to force a viewport refresh of the key — verify
    keys['modeling cloth key'].mute = True
    keys['modeling cloth key'].mute = False
def get_spring_mix(ob, eidx):
    """Per-spring weights: 1 / min(left valence, right valence) ** 1.2.

    Returns a (2 * n_springs,) float32 array — one weight per spring end,
    right ends first then left ends (both halves are identical).
    (*ob* is unused; kept for call-site compatibility.)
    """
    mins = []
    for pair in eidx:
        left_count = eidx[eidx == pair[0]].shape[0]
        right_count = eidx[eidx == pair[1]].shape[0]
        mins.append(min(right_count, left_count))
    doubled = np.array(mins + mins, dtype=np.float32)
    return 1 / doubled ** 1.2
def collision_data_update(self, context):
    """Property-update callback: rebuild the cloth data when self
    collision is switched on."""
    ob = self.id_data
    if ob.mclo.self_collision:
        create_cloth_data(ob)
def refresh_noise(self, context):
    """Property-update callback: rebuild the per-vertex noise column
    whenever the noise amount changes."""
    ob = self.id_data
    cloth = get_cloth_data(ob)
    if cloth:
        zeros = np.zeros(cloth.count, dtype=np.float32)
        random = np.random.random(cloth.count)
        zeros[:] = random
        # center around zero and scale by the UI noise setting
        cloth.noise = ((zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
def generate_wind(wind_vec, ob, cloth):
    """Maintains a wind array and adds it to the cloth vel.

    The wind vector is rotated into object orientation, optionally
    perturbed by turbulence, scaled per triangle by how much each
    triangle faces the wind, and accumulated into cloth.vel per vertex.
    """
    tri_nor = cloth.normals # non-unit calculated by tri_normals_in_place() per each triangle
    w_vec = revert_rotation(ob, wind_vec)
    turb = ob.mclo.turbulence
    if turb != 0:
        # random jitter proportional to turbulence and wind strength
        w_vec += np.random.random(3).astype(np.float32) * turb * np.mean(w_vec) * 4
    # only blow on verts facing the wind
    perp = np.abs(tri_nor @ w_vec)
    cloth.wind += w_vec
    cloth.wind *= perp[:, nax][:, nax]
    # reshape for add.at
    shape = cloth.wind.shape
    cloth.wind.shape = (shape[0] * 3, 3)
    cloth.wind *= cloth.tri_mix
    np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.wind)
    cloth.wind.shape = shape
def generate_inflate(ob, cloth):
    """Push each triangle's verts along its (non-unit) normal by the
    inflate amount, accumulating the result into cloth.vel."""
    tri_nor = cloth.normals #* ob.mclo.inflate # non-unit calculated by tri_normals_in_place() per each triangle
    #tri_nor /= np.einsum("ij, ij->i", tri_nor, tri_nor)[:, nax]
    # reshape for add.at
    shape = cloth.inflate.shape
    cloth.inflate += tri_nor[:, nax] * ob.mclo.inflate# * cloth.tri_mix
    cloth.inflate.shape = (shape[0] * 3, 3)
    cloth.inflate *= cloth.tri_mix
    np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.inflate)
    cloth.inflate.shape = shape
    # clear the accumulator for the next frame
    cloth.inflate *= 0
def get_quat(rad, axis):
    """Return quaternion components (w, xyz) for rotations of *rad*
    radians about the given unit *axis* vectors."""
    half = rad * 0.5
    return np.cos(half), axis * np.sin(half)[:, nax]
def q_rotate(co, w, axis):
    """Rotate each row of *co* by the quaternion (w, axis).

    The standard quaternion sandwich product expanded into two cross
    products: co + 2 * (w * (axis x co) + axis x (axis x co)).
    """
    t = np.cross(axis, co)
    return co + (t * w[:, nax] + np.cross(axis, t)) * 2
def bend_springs(cloth, co, measure=None):
    """Bend springs built from pairs of triangles sharing an edge.

    With measure=None, return the current signed angle (radians) between
    the two triangle tips around each bend edge.  Otherwise *measure* is
    the rest-angle array: the tips are rotated toward it and the
    resulting forces are applied to cloth.co (tips pulled, edge verts
    pushed by half of each tip force).

    Fixes vs. the original: the measuring pass clips the dot products
    before arccos (float error could produce NaN), and the unreachable
    code after the final return was removed.
    """
    bend_eidx, tips = cloth.bend_eidx, cloth.bend_tips
    tips_co = co[tips]
    bls, brs = bend_eidx[:, 0], bend_eidx[:, 1]
    b_oris = co[bls]
    be_vecs = co[brs] - b_oris          # bend edge vectors
    te_vecs = tips_co - b_oris[:, nax]  # edge origin -> tip vectors
    # project each tip onto its edge
    bcp_dots = np.einsum('ij,ikj->ik', be_vecs, te_vecs)
    be_dots = np.einsum('ij,ij->i', be_vecs, be_vecs)
    b_div = np.nan_to_num(bcp_dots / be_dots[:, nax])
    tcp = be_vecs[:, nax] * b_div[:, :, nax]
    # unit vectors from the edge closest points out to the tips
    tcp_vecs = te_vecs - tcp
    tcp_dots = np.einsum('ijk,ijk->ij', tcp_vecs, tcp_vecs)
    u_tcp_vecs = tcp_vecs / np.sqrt(tcp_dots)[:, :, nax]
    u_tcp_ls = u_tcp_vecs[:, 0]
    u_tcp_rs = u_tcp_vecs[:, 1]
    # signed angle between the two tips around the edge axis
    angle_dot = np.einsum('ij,ij->i', u_tcp_ls, u_tcp_rs)
    angle = np.arccos(np.clip(angle_dot, -1, 1)) # clip: float error can push dots outside [-1, 1]
    tcp_cross = np.cross(u_tcp_ls, u_tcp_rs)
    sign = np.sign(np.einsum('ij,ij->i', be_vecs, tcp_cross))
    if measure is None:
        # measuring pass: return signed rest angles
        s = np.arccos(np.clip(angle_dot, -1, 1)) # bug fix: was unclipped and could return NaN
        s *= sign
        s[angle_dot < -.9999999] = np.pi # fully flat: sign is degenerate, force pi
        return s
    angle *= sign
    # rotate the tips back toward the rest angle with quaternions
    u_be_vecs = be_vecs / np.sqrt(be_dots)[:, nax]
    b_dif = angle - measure
    l_ws, l_axes = get_quat(b_dif, u_be_vecs)
    r_ws, r_axes = l_ws, -l_axes
    stiff = cloth.ob.mclo.bend_stiff * 0.0057
    rot_ls = q_rotate(u_tcp_ls, l_ws, l_axes)
    l_force = (rot_ls - u_tcp_ls) * stiff
    rot_rs = q_rotate(u_tcp_rs, r_ws, r_axes)
    r_force = (rot_rs - u_tcp_rs) * stiff
    # pull the tips; push each edge vert by half of each tip force
    np.add.at(cloth.co, tips[:, 0], l_force)
    np.add.at(cloth.co, tips[:, 1], r_force)
    np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(r_force * .5, 2).reshape(r_force.shape[0] * 2, 3))
    np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(l_force * .5, 2).reshape(l_force.shape[0] * 2, 3))
    return
# will need to read bend springs continuously when using
# a dynamic source shape. Guess I should do that now...
# need the angle at each edge
# need to get the tips of each tri around each edge
# should be a pair everywhere there is a link face in
# the tri bmesh
"""
With no sign I just get the dot in radians.
Rotation should move towards the shortest distance
to the same dot in radians.
Without getting the sign at all, it will always rotate
in the same direction to go back to the target.
By multiplying the dif by the sign, I can make it spin
the other way to go back to the target dot in rads
"""
# sewing functions ---------------->>>
def create_sew_edges():
    """Bridge the selected edge loops, then delete the new faces so only
    the connecting faceless (sew) edges remain."""
    bpy.ops.mesh.bridge_edge_loops()
    bpy.ops.mesh.delete(type='ONLY_FACE')
    return
#highlight a sew edge
#compare vertex counts
#subdivide to match counts
#distribute and smooth back into mesh
#create sew lines
# sewing functions ---------------->>>
def check_and_get_pins_and_hooks(ob):
    """Return parallel lists (pinned vertex ids, hook objects) for *ob*,
    dropping any pins whose hook object no longer exists in the scene."""
    scene = bpy.context.scene
    pins = []
    hooks = []
    cull_ids = []
    for i, pin in enumerate(ob.mclo.pins):
        # Check if hook object still exists
        if not pin.hook or (pin.hook and not scene.objects.get(pin.hook.name)):
            cull_ids.append(i)
        else:
            #vert = ob.data.vertices[pin.vertex_id]
            pins.append(pin.vertex_id)
            hooks.append(pin.hook)
    # Delete missing hooks pointers (reversed so indices stay valid)
    for i in reversed(cull_ids):
        pin = ob.mclo.pins[i]
        if pin.hook:
            bpy.data.objects.remove(pin.hook)
        ob.mclo.pins.remove(i)
    return pins, hooks
class ClothData:
    """Plain attribute container for per-object cloth simulation state."""
    pass
def create_cloth_data(ob):
    """Creates instance of cloth object with attributes needed for engine.

    Builds (or re-keys after an object rename) the ClothData entry for
    ``ob`` in ``scene.modeling_cloth_data_set``: ensures the three shape
    keys exist, reads source/display coordinates, builds spring edge
    indices, pin masks, triangle indices and the per-vertex work arrays
    used by ``run_handler``. Returns the ClothData instance.
    """
    scene = bpy.context.scene
    data = scene.modeling_cloth_data_set
    # Try to get the cloth data first
    try:
        cloth = data[ob.name]
    except:
        # Search for possible name changes
        cloth = None
        for ob_name, c in data.items():
            if c.ob == ob:
                # Rename the key
                data[ob.name] = data.pop(ob_name)
                cloth = data[ob.name]
                break
        # If cloth still not found
        if not cloth:
            cloth = ClothData()
            data[ob.name] = cloth
    cloth.ob = ob
    # get proxy object
    #proxy = ob.to_mesh(bpy.context.scene, False, 'PREVIEW')
    # ----------------
    scene.objects.active = ob
    cloth.idxer = np.arange(len(ob.data.vertices), dtype=np.int32)
    # data only accessible through object mode
    mode = ob.mode
    if mode == 'EDIT':
        bpy.ops.object.mode_set(mode='OBJECT')
    # data is read from a source shape and written to the display shape so we can change the target springs by changing the source shape
    #cloth.name = ob.name
    if ob.data.shape_keys == None:
        ob.shape_key_add('Basis')
    if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
        ob.shape_key_add('modeling cloth source key')
    if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
        ob.shape_key_add('modeling cloth key')
    ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
    cloth.count = len(ob.data.vertices)
    # we can set a large group's pin state using the vertex group. No hooks are used here
    if 'modeling_cloth_pin' not in ob.vertex_groups:
        cloth.pin_group = create_vertex_groups(groups=['modeling_cloth_pin'], weights=[0.0], ob=None)
    for i in range(cloth.count):
        try:
            ob.vertex_groups['modeling_cloth_pin'].weight(i)
        except RuntimeError:
            # assign a weight of zero
            ob.vertex_groups['modeling_cloth_pin'].add(range(0,len(ob.data.vertices)), 0.0, 'REPLACE')
    # True where the vertex is NOT pinned by the vertex group (weight 0)
    cloth.pin_bool = ~np.array([ob.vertex_groups['modeling_cloth_pin'].weight(i) for i in range(cloth.count)], dtype=np.bool)
    # unique edges------------>>>
    uni_edges = get_minimal_edges(ob)
    if len(uni_edges[1]) > 0:
        cloth.eidx = np.append(uni_edges[0], uni_edges[1], axis=0)
    else:
        cloth.eidx = uni_edges[0]
    #cloth.eidx = uni_edges[0][0]
    if len(ob.mclo.virtual_springs) > 0:
        virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
        cloth.eidx = np.append(cloth.eidx, virtual_springs, axis=0)
    cloth.eidx_tiler = cloth.eidx.T.ravel()
    mixology = get_spring_mix(ob, cloth.eidx)
    #eidx1 = np.copy(cloth.eidx)
    cloth.pindexer = np.arange(cloth.count, dtype=np.int32)[cloth.pin_bool]
    # mask of tiled edge endpoints that are free to move
    cloth.unpinned = np.in1d(cloth.eidx_tiler, cloth.pindexer)
    cloth.eidx_tiler = cloth.eidx_tiler[cloth.unpinned]
    cloth.sew_edges = uni_edges[2]
    cloth.multi_sew = uni_edges[3]
    # unique edges------------>>>
    #cloth.pcount = pindexer.shape[0]
    # source-key coordinates (spring rest state)
    cloth.sco = np.zeros(cloth.count * 3, dtype=np.float32)
    ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', cloth.sco)
    cloth.sco.shape = (cloth.count, 3)
    # display-key coordinates (simulated state)
    cloth.co = np.zeros(cloth.count * 3, dtype=np.float32)
    ob.data.shape_keys.key_blocks['modeling cloth key'].data.foreach_get('co', cloth.co)
    cloth.co.shape = (cloth.count, 3)
    co = cloth.co
    cloth.vel = np.zeros(cloth.count * 3, dtype=np.float32)
    cloth.vel.shape = (cloth.count, 3)
    cloth.vel_start = np.zeros(cloth.count * 3, dtype=np.float32)
    cloth.vel_start.shape = (cloth.count, 3)
    cloth.self_col_vel = np.copy(co)
    cloth.v_normals = np.zeros(co.shape, dtype=np.float32)
    #get_v_normals(ob, cloth.v_normals, proxy)
    #noise---
    noise_zeros = np.zeros(cloth.count, dtype=np.float32)
    random = np.random.random(cloth.count).astype(np.float32)
    noise_zeros[:] = random
    # random per-vertex offset in [-0.05, 0.05] scaled by the noise setting
    cloth.noise = ((noise_zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
    #cloth.waiting = False
    #cloth.clicked = False # for the grab tool
    # this helps with extra springs behaving as if they had more mass---->>>
    cloth.mix = mixology[cloth.unpinned][:, nax]
    # -------------->>>
    # new self collisions:
    cloth.tridex = triangulate(ob.data, cloth)
    cloth.tridexer = np.arange(cloth.tridex.shape[0], dtype=np.int32)
    cloth.tri_co = cloth.co[cloth.tridex]
    tri_normals_in_place(cloth, cloth.tri_co) # non-unit normals
    # -------------->>>
    # 1/N weight per triangle corner, N = number of tris sharing the vert
    tri_uni, tri_inv, tri_counts = np.unique(cloth.tridex, return_inverse=True, return_counts=True)
    cloth.tri_mix = (1 / tri_counts[tri_inv])[:, nax]
    cloth.wind = np.zeros(cloth.tri_co.shape, dtype=np.float32)
    cloth.inflate = np.zeros(cloth.tri_co.shape, dtype=np.float32)
    bpy.ops.object.mode_set(mode=mode)
    # for use with a static source shape:
    cloth.source_angles = bend_springs(cloth, cloth.sco, None)
    # squared rest lengths of the linear springs
    svecs = cloth.sco[cloth.eidx[:, 1]] - cloth.sco[cloth.eidx[:, 0]]
    cloth.sdots = np.einsum('ij,ij->i', svecs, svecs)
    # for doing static cling
    # cloth.col_idx = np.array([], dtype=np.int32)
    # cloth.re_col = np.empty((0,3), dtype=np.float32)
    print('INFO: Cloth data for', ob.name, 'is created!')
    return cloth
def run_handler(ob, cloth):
    """Run one simulation step for ``ob`` using its ClothData ``cloth``.

    Per step: apply noise and previous velocity, iterate linear/bend
    spring solves, then apply gravity, drag, wind, inflate, sewing,
    floor and object collisions, and finally write the result back to
    the 'modeling cloth key' shape key. Mutates ``cloth`` arrays in place.
    """
    T = time.time()
    scene = bpy.context.scene
    extra_data = scene.modeling_cloth_data_set_extra
    col_data = scene.modeling_cloth_data_set_colliders
    # Defer simulation while the object is being edited; rebuild data
    # once it returns to object mode.
    if not ob.mclo.waiting and ob.mode != 'OBJECT':
        ob.mclo.waiting = True
    if ob.mclo.waiting:
        if ob.mode == 'OBJECT':
            create_cloth_data(ob)
            ob.mclo.waiting = False
    if not ob.mclo.waiting:
        eidx = cloth.eidx # world's most important variable
        ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', cloth.sco.ravel())
        sco = cloth.sco
        co = cloth.co
        co[cloth.pindexer] += cloth.noise[cloth.pindexer]
        #co += cloth.noise
        cloth.noise *= ob.mclo.noise_decay
        # mix in vel before collisions and sewing
        co[cloth.pindexer] += cloth.vel[cloth.pindexer]
        cloth.vel_start[:] = co
        # measure source -------------------------->>>
        dynamic = True # can store for speedup if source shape is static
        # bend spring calculations:
        if ob.mclo.bend_stiff != 0:
            # measure bend source if using dynamic source:
            source_angles = cloth.source_angles
            if dynamic:
                source_angles = bend_springs(cloth, sco, None)
        # linear spring measure
        sdots = cloth.sdots
        if dynamic:
            # re-measure squared rest lengths from the (possibly edited) source key
            ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', sco.ravel())
            svecs = sco[eidx[:, 1]] - sco[eidx[:, 0]]
            sdots = np.einsum('ij,ij->i', svecs, svecs)
        # ----------------------------------------->>>
        force = ob.mclo.spring_force
        mix = cloth.mix * force
        pin_list = []
        if len(ob.mclo.pins) > 0:
            pin_list, hook_list = check_and_get_pins_and_hooks(ob)
            # hook targets expressed in the cloth object's local space
            hook_co = np.array([ob.matrix_world.inverted() * hook.matrix_world.to_translation()
                                for hook in hook_list])
        ers = eidx[:, 1]
        els = eidx[:, 0]
        for x in range(ob.mclo.iterations):
            # bend spring calculations:
            if ob.mclo.bend_stiff != 0:
                bend_springs(cloth, co, source_angles)
            # add pull
            vecs = co[eidx[:, 1]] - co[eidx[:, 0]]
            dots = np.einsum('ij,ij->i', vecs, vecs)
            div = np.nan_to_num(sdots / dots)
            swap = vecs * np.sqrt(div)[:, nax]
            move = vecs - swap
            # pull separate test--->>>
            push = ob.mclo.push_springs
            if push == 0:
                # pull-only: ignore springs that are shorter than rest
                move[div > 1] = 0
            else:
                move[div > 1] *= push
            # pull only test--->>>
            tiled_move = np.append(move, -move, axis=0)[cloth.unpinned] * mix # * mix for stability: force multiplied by 1/number of springs
            np.add.at(cloth.co, cloth.eidx_tiler, tiled_move)
            # for doing static cling
            # cloth.co[cloth.col_idx] = cloth.re_col
            # restore pinned verts and hooked verts every iteration
            cloth.co[~cloth.pin_bool] = cloth.vel_start[~cloth.pin_bool]
            if pin_list:
                cloth.co[pin_list] = hook_co
            # grab inside spring iterations
            if ob.mclo.clicked: # for the grab tool
                cloth.co[extra_data['vidx']] = np.array(extra_data['stored_vidx']) + np.array(+ extra_data['move'])
        # refresh normals for inflate wind and self collisions
        cloth.tri_co = cloth.co[cloth.tridex]
        tri_normals_in_place(cloth, cloth.tri_co) # unit normals
        # add effects of velocity and Gravity to the vel array for later
        spring_dif = cloth.co - cloth.vel_start
        #if ob.mclo.bend_stiff > 0:
            #for
        # non-unit normals might be better for inflate and wind because
        #   their strength is affected by the area as it is should be
        #place after wind and inflate unless those are added to vel after collisions
        # get proxy object
        #proxy = ob.to_mesh(bpy.context.scene, False, 'PREVIEW')
        #proxy = ob.data
        #get_v_normals(ob, cloth.v_normals, proxy)
        # gravity
        grav = ob.mclo.gravity * 0.01# / ob.mclo.iterations)
        if grav != 0:
            # gravity in object space, compensated for object scale
            cloth.vel += revert_rotation(ob, np.array([0, 0, grav])) / np.array(ob.scale)
        # can cheat here:
        #spring_mean = np.mean(spring_dif, axis=0)
        #cloth.vel += spring_mean * 20
        # inextensible calc:
        cloth.vel += spring_dif * 2
        # The amount of drag increases with speed.
        # have to convert to to a range between 0 and 1
        #squared_move_dist = np.sqrt(np.einsum("ij, ij->i", cloth.vel, cloth.vel))
        squared_move_dist = np.einsum("ij, ij->i", cloth.vel, cloth.vel)
        squared_move_dist += 1
        cloth.vel *= (1 / (squared_move_dist / ob.mclo.velocity))[:, nax]
        #cloth.vel *= ob.mclo.velocity
        # wind:
        x = ob.mclo.wind_x
        y = ob.mclo.wind_y
        z = ob.mclo.wind_z
        wind_vec = np.array([x,y,z])
        check_wind = wind_vec != 0
        if np.any(check_wind):
            generate_wind(wind_vec, ob, cloth)
        # inflate
        inflate = ob.mclo.inflate
        if inflate != 0:
            generate_inflate(ob, cloth)
            #cloth.v_normals *= inflate
            #cloth.vel += cloth.v_normals
        # sewing: pull paired sew-edge verts toward each other
        if ob.mclo.sew != 0:
            if len(cloth.sew_edges) > 0:
                sew_edges = cloth.sew_edges
                rs = co[sew_edges[:,1]]
                ls = co[sew_edges[:,0]]
                sew_vecs = (rs - ls) * 0.5 * ob.mclo.sew
                co[sew_edges[:,1]] -= sew_vecs
                co[sew_edges[:,0]] += sew_vecs
                # for sew verts with more than one sew edge
                if cloth.multi_sew is not None:
                    for sg in cloth.multi_sew:
                        cosg = co[sg]
                        meanie = np.mean(cosg, axis=0)
                        sg_vecs = meanie - cosg
                        co[sg] += sg_vecs * ob.mclo.sew
        # !!!!! need to try adding in the velocity before doing the collision stuff
        # !!!!! so vel would be added here after wind and inflate but before collision
        # floor ---
        if ob.mclo.floor:
            floored = cloth.co[:,2] < 0
            cloth.vel[:,2][floored] *= -1
            cloth.vel[floored] *= .1
            cloth.co[:, 2][floored] = 0
        # floor ---
        # objects ---
        #T = time.time()
        if ob.mclo.object_collision_detect:
            if ob.mclo.self_collision:
                self_collide(ob)
            cull_ids = []
            for i, cp in enumerate(scene.mclo.collider_pointers):
                # Check if object still exists
                if not cp.ob or (cp.ob and not scene.objects.get(cp.ob.name)):
                    cull_ids.append(i)
                    continue
                #if cp.ob == ob:
                #    self_collide(ob)
                if cp.ob != ob:
                    object_collide(ob, cp.ob)
            # Remove collider missing object from pointer list
            for i in reversed(cull_ids):
                o = scene.mclo.collider_pointers[i].ob
                if o:
                    o.mclo.object_collision = False
                else:
                    scene.mclo.collider_pointers.remove(i)
            #print(time.time()-T, "the whole enchalada")
        # objects ---
        # re-assert pins/hooks/grab after collision responses
        cloth.co[~cloth.pin_bool] = cloth.vel_start[~cloth.pin_bool]
        if pin_list:
            cloth.co[pin_list] = hook_co
            cloth.vel[pin_list] = 0
        if ob.mclo.clicked: # for the grab tool
            cloth.co[extra_data['vidx']] = np.array(extra_data['stored_vidx']) + np.array(+ extra_data['move'])
        ob.data.shape_keys.key_blocks['modeling cloth key'].data.foreach_set('co', cloth.co.ravel())
        # mute toggle forces a viewport refresh of the shape key
        ob.data.shape_keys.key_blocks['modeling cloth key'].mute = True
        ob.data.shape_keys.key_blocks['modeling cloth key'].mute = False
    # remove proxy
    #proxy.user_clear()
    #bpy.data.meshes.remove(proxy)
    #del(proxy)
    #print(time.time()-T, "the entire handler time")
# +++++++++++++ object collisions ++++++++++++++
def bounds_check(co1, co2, fudge):
    """Return (intersects, co1_min, co1_max) for two point clouds.

    Tests whether the axis-aligned bounding boxes of ``co1`` and ``co2``
    overlap, expanded by ``fudge`` for collision margins. ``co1_max`` is
    only computed (non-None) when the first half-test passes.
    """
    lo1 = np.min(co1, axis=0)
    hi2 = np.max(co2, axis=0)
    overlap = False
    hi1 = None  # stays None only when the cheap first test already fails
    if np.all(hi2 + fudge > lo1):
        hi1 = np.max(co1, axis=0)
        lo2 = np.min(co2, axis=0)
        overlap = bool(np.all(hi1 > lo2 - fudge))
    # return the min/max so callers can reuse them
    return overlap, lo1, hi1
def triangle_bounds_check(tri_co, co_min, co_max, idxer, fudge):
    """Return a bool array flagging triangles whose fudged AABB intersects
    the object bounds (co_min, co_max), plus the surviving triangles'
    mins and maxes for reuse by later checks.
    """
    # pass 1: triangle minimums must sit below the object's maximum
    mins = np.min(tri_co, axis=1) - fudge
    keep = np.all(co_max > mins, axis=1)
    kept_idx = idxer[keep]
    # pass 2: triangle maximums (of survivors) must sit above the minimum
    maxes = np.max(tri_co[keep], axis=1) + fudge
    pass2 = np.all(maxes > co_min, axis=1)
    # drop pass-1 survivors that failed pass 2
    keep[kept_idx[~pass2]] = False
    return keep, mins[keep], maxes[pass2]  # reuse the min and max
def tri_back_check(co, tri_min, tri_max, idxer, fudge):
    """Return a bool array flagging the vertices of ``co`` that fall inside
    the fudged bounding box enclosing all of the culled triangles.
    """
    # pass 1: vertices above the combined triangle minimum
    lower = np.min(tri_min, axis=0) - fudge
    keep = np.all(co > lower, axis=1)
    kept_idx = idxer[keep]
    # pass 2: of those, keep vertices below the combined triangle maximum
    upper = np.max(tri_max, axis=0) + fudge
    pass2 = np.all(co[keep] < upper, axis=1)
    keep[kept_idx[~pass2]] = False
    return keep
# -------------------------------------------------------
# -------------------------------------------------------
def zxy_grid(co_y, tymin, tymax, subs, c, t, c_peat, t_peat):
    """Final (y-axis) pass of the 3-level grid partition.

    Slices the y range into ``subs - 1`` sections; for every section that
    holds both points and triangles, appends the full cross product of
    point indices and triangle indices to ``c_peat`` / ``t_peat``.
    """
    lo = np.min(tymin)
    hi = np.max(tymax)
    sections = np.linspace(lo, hi, num=subs, dtype=np.float32)
    # which points and which triangle extents land in each y section
    point_in = (co_y > sections[:-1][:, nax]) & (co_y < sections[1:][:, nax])
    tri_in = (tymin < sections[1:][:, nax]) & (tymax > sections[:-1][:, nax])
    for p_mask, t_mask in zip(point_in, tri_in):
        if p_mask.any() and t_mask.any():
            pts = c[p_mask]
            tris = t[t_mask]
            # every point paired with every triangle in this cell
            c_peat.append(np.repeat(pts, tris.shape[0]))
            t_peat.append(np.tile(tris, pts.shape[0]))
def zx_grid(co_x, txmin, txmax, subs, c, t, c_peat, t_peat, co_y, tymin, tymax):
    """Middle (x-axis) pass of the 3-level grid partition.

    Slices the x range into sections and recurses into ``zxy_grid`` for
    every section containing both points and triangles.
    """
    lo = np.min(txmin)
    hi = np.max(txmax)
    sections = np.linspace(lo, hi, num=subs, dtype=np.float32)
    # which points and which triangle extents land in each x section
    point_in = (co_x > sections[:-1][:, nax]) & (co_x < sections[1:][:, nax])
    tri_in = (txmin < sections[1:][:, nax]) & (txmax > sections[:-1][:, nax])
    for p_mask, t_mask in zip(point_in, tri_in):
        if p_mask.any() and t_mask.any():
            pts = c[p_mask]
            tris = t[t_mask]
            zxy_grid(co_y[p_mask], tymin[t_mask], tymax[t_mask], subs,
                     pts, tris, c_peat, t_peat)
def z_grid(co_z, tzmin, tzmax, subs, co_x, txmin, txmax, co_y, tymin, tymax):
    """Top (z-axis) pass of the 3-level grid partition.

    Returns a pair of flat index arrays (point indices, triangle indices)
    describing every candidate point/triangle pairing, or (None, None)
    when nothing landed in a common cell.
    """
    lo = np.min(tzmin)
    hi = np.max(tzmax)
    sections = np.linspace(lo, hi, num=subs, dtype=np.float32)
    # which points and which triangle extents land in each z section
    point_in = (co_z > sections[:-1][:, nax]) & (co_z < sections[1:][:, nax])
    tri_in = (tzmin < sections[1:][:, nax]) & (tzmax > sections[:-1][:, nax])
    point_range = np.arange(point_in.shape[1])
    tri_range = np.arange(tri_in.shape[1])
    c_peat = []
    t_peat = []
    for p_mask, t_mask in zip(point_in, tri_in):
        if p_mask.any() and t_mask.any():
            zx_grid(co_x[p_mask], txmin[t_mask], txmax[t_mask], subs,
                    point_range[p_mask], tri_range[t_mask], c_peat, t_peat,
                    co_y[p_mask], tymin[t_mask], tymax[t_mask])
    if (len(c_peat) == 0) | (len(t_peat) == 0):
        return None, None
    return np.hstack(c_peat), np.hstack(t_peat)
# -------------------------------------------------------
# -------------------------------------------------------
"""Combined with numexpr the first check min and max is faster
Combined without numexpr is slower. It's better to separate min and max"""
def v_per_tri(co, tri_min, tri_max, idxer, tridexer, c_peat=None, t_peat=None):
    """Checks each point against the bounding box of each triangle.

    First narrows candidate (point, triangle) pairs with the z_grid
    spatial partition, then filters them one axis-bound at a time.
    Returns (point indices mapped through idxer, triangle indices), or
    None when no pair survives.
    """
    subs = 7
    #subs = bpy.data.objects['Plane.002'].mclo.grid_size
    c_peat, t_peat = z_grid(co[:, 2], tri_min[:, 2], tri_max[:, 2], subs,
                            co[:, 0], tri_min[:, 0], tri_max[:, 0],
                            co[:, 1], tri_min[:, 1], tri_max[:, 1])
    if c_peat is None:
        return
    # Because the pair list is N-squared-ish, shrink it after every single
    # comparison: per axis (x, y, z) test the lower bound then the upper.
    for axis in range(3):
        vals = co[:, axis]
        above_min = vals[c_peat] > tri_min[:, axis][t_peat]
        c_peat = c_peat[above_min]
        if c_peat.shape[0] == 0:
            return
        t_peat = t_peat[above_min]
        below_max = vals[c_peat] < tri_max[:, axis][t_peat]
        c_peat = c_peat[below_max]
        if c_peat.shape[0] == 0:
            return
        t_peat = t_peat[below_max]
    return idxer[c_peat], t_peat
def inside_triangles(tri_vecs, v2, co, tri_co_2, cidx, tidx, nor, ori, in_margin, offset=None):
    """Barycentric point-in-triangle test, written back into ``in_margin``.

    For each currently-True entry of ``in_margin`` there is one row in
    ``tri_vecs`` (the triangle's two edge vectors from its origin) and one
    row in ``v2`` (the point relative to that origin). Computes the
    barycentric coordinates (u, v) and sets ``in_margin`` False where the
    point falls outside the triangle. ``offset`` widens (or, at 0, leaves
    unchanged) the accepted region.

    ``co``, ``tri_co_2``, ``nor`` and ``ori`` are unused; they are kept
    only so existing call sites keep working. (The original also built
    ``r_co``/``r_tri`` locals from them that were never read — removed.)
    Mutates ``in_margin`` in place; returns None.
    """
    # indices (into in_margin) of the entries we are about to overwrite
    idxer = np.arange(in_margin.shape[0], dtype=np.int32)[in_margin]
    v0 = tri_vecs[:, 0]
    v1 = tri_vecs[:, 1]
    # dot products for the standard barycentric solve
    d00_d11 = np.einsum('ijk,ijk->ij', tri_vecs, tri_vecs)
    d00 = d00_d11[:, 0]
    d11 = d00_d11[:, 1]
    d01 = np.einsum('ij,ij->i', v0, v1)
    d02 = np.einsum('ij,ij->i', v0, v2)
    d12 = np.einsum('ij,ij->i', v1, v2)
    div = 1 / (d00 * d11 - d01 * d01)
    u = (d11 * d02 - d01 * d12) * div
    v = (d00 * d12 - d01 * d02) * div
    # !!! Watch out for this number. It could affect speed !!!
    # NOTE: offset == 0.0 deliberately takes the else branch; both give
    # identical results at zero since u > -0.0 is u > 0.
    if offset:
        check = (u > -offset) & (v > -offset) & (u + v < offset + 1)
    else:
        check = (u > 0) & (v > 0) & (u + v < 1)
    in_margin[idxer] = check
def object_collide(cloth_ob, col_ob):
    """Resolve collisions of cloth ``cloth_ob`` against collider ``col_ob``.

    Works through a chain of broad-phase culls (object AABB, triangle
    AABB, per-vertex back-check, barycentric test), then pushes colliding
    cloth verts out along the collider triangle normals and gives them
    the triangle's velocity. Mutates cloth.co / cloth.vel in place.
    """
    cloth = get_cloth_data(cloth_ob)
    col = get_collider_data(col_ob)
    # for doing static cling
    # cloth.col_idx = np.array([], dtype=np.int32)
    # cloth.re_col = np.empty((0,3), dtype=np.float32)
    proxy = col_ob.to_mesh(bpy.context.scene, True, 'PREVIEW')
    # Recreate collider data if number of vertices is changing
    if col.co.shape[0] != len(proxy.vertices):
        col = create_collider_data(col_ob)
    proxy_in_place(col, proxy)
    # move the cloth into the collider's space for the tests
    apply_in_place(cloth_ob, cloth.co, cloth)
    inner_margin = col_ob.mclo.object_collision_inner_margin
    outer_margin = col_ob.mclo.object_collision_outer_margin
    fudge = max(inner_margin, outer_margin)
    # check object bounds: (need inner and out margins to adjust box size)
    box_check, co1_min, co1_max = bounds_check(cloth.co, col.co, fudge)
    # check for triangles inside the cloth bounds
    #anim = col_ob.mclo.collision_animated
    if box_check:
        proxy_v_normals_in_place(col, True, proxy)
        tri_co = col.co[col.tridex]
        tri_vo = col.vel[col.tridex]
        tris_in, tri_min, tri_max = triangle_bounds_check(tri_co, co1_min, co1_max, col.tridexer, fudge)#, object.ob.dimensions)
        # check for verts in the bounds around the culled triangles
        if np.any(tris_in):
            tri_co_2 = tri_co[tris_in]
            back_check = tri_back_check(cloth.co, tri_min, tri_max, cloth.idxer, fudge)
            # begin every vertex co against every tri
            if np.any(back_check):
                v_tris = v_per_tri(cloth.co[back_check], tri_min, tri_max, cloth.idxer[back_check], col.tridexer[tris_in])
                if v_tris is not None:
                    # update the normals. cross_vecs used by barycentric tri check
                    # move the surface along the vertex normals by the outer margin distance
                    marginalized = (col.co + col.v_normals * outer_margin)[col.tridex]
                    tri_normals_in_place(col, marginalized)
                    # add normals to make extruded tris
                    u_norms = col.normals[tris_in]
                    #u_norms = norms_2 / np.sqrt(np.einsum('ij, ij->i', norms_2, norms_2))[:, nax]
                    cidx, tidx = v_tris
                    ori = col.origins[tris_in][tidx]
                    nor = u_norms[tidx]
                    vec2 = cloth.co[cidx] - ori
                    # signed distance of each vert from the offset surface
                    d = np.einsum('ij, ij->i', nor, vec2) # nor is unit norms
                    in_margin = (d > -(inner_margin + outer_margin)) & (d < 0)#outer_margin) (we have offset outer margin)
                    # <<<--- Inside triangle check --->>>
                    # will overwrite in_margin:
                    cross_2 = col.cross_vecs[tris_in][tidx][in_margin]
                    inside_triangles(cross_2, vec2[in_margin], cloth.co, marginalized[tris_in], cidx, tidx, nor, ori, in_margin)
                    if np.any(in_margin):
                        # collision response --------------------------->>>
                        #if anim:
                        t_in = tidx[in_margin]
                        tri_vo = tri_vo[tris_in]
                        # triangle velocity = movement of tri center since last frame
                        tri_vel1 = np.mean(tri_co_2[t_in], axis=1)
                        tri_vel2 = np.mean(tri_vo[t_in], axis=1)
                        tvel = tri_vel1 - tri_vel2
                        col_idx = cidx[in_margin]
                        # push the vert back onto the offset surface
                        cloth.co[col_idx] -= nor[in_margin] * (d[in_margin])[:, nax]
                        cloth.vel[col_idx] = tvel
                        # for doing static cling
                        # cloth.re_col = np.copy(cloth.co[col_idx])
                        # cloth.col_idx = col_idx
    # remember collider positions for next frame's triangle velocity
    col.vel[:] = col.co
    # move the cloth back out of the collider's space
    revert_in_place(cloth_ob, cloth.co)
    #temp_ob = bpy.data.objects.new('__TEMP', proxy)
    #for key in proxy.shape_keys.key_blocks:
    #    temp_ob.shape_key_remove(key)
    #bpy.data.objects.remove(temp_ob)
    bpy.data.meshes.remove(proxy)
# self collider =============================================
def self_collide(ob):
    """Resolve cloth self-collisions for ``ob``.

    Finds cloth verts that sit within ``object_collision_outer_margin``
    of a cloth triangle they do not belong to, pushes them back to the
    margin surface on whichever side they are on, and gives them the
    triangle's mean velocity. Mutates cloth.co / cloth.vel in place.
    """
    cloth = get_cloth_data(ob)
    margin = ob.mclo.object_collision_outer_margin
    tri_co = cloth.tri_co
    tri_min = np.min(tri_co, axis=1) - margin
    tri_max = np.max(tri_co, axis=1) + margin
    # begin every vertex co against every tri
    v_tris = v_per_tri(cloth.co, tri_min, tri_max, cloth.idxer, cloth.tridexer)
    if v_tris is not None:
        cidx, tidx = v_tris
        u_norms = cloth.normals
        # don't check faces the verts are part of
        check_neighbors = cidx[:, nax] == cloth.tridex[tidx]
        cull = np.any(check_neighbors, axis=1)
        cidx, tidx = cidx[~cull], tidx[~cull]
        ori = cloth.origins[tidx]
        nor = u_norms[tidx]
        vec2 = cloth.co[cidx] - ori
        # signed distance of each vert from its candidate triangle's plane
        d = np.einsum('ij, ij->i', nor, vec2) # nor is unit norms
        in_margin = (d > -margin) & (d < margin)
        # <<<--- Inside triangle check --->>>
        # will overwrite in_margin:
        cross_2 = cloth.cross_vecs[tidx][in_margin]
        inside_triangles(cross_2, vec2[in_margin], cloth.co, tri_co, cidx, tidx, nor, ori, in_margin, offset=0.0)
        if np.any(in_margin):
            # collision response --------------------------->>>
            t_in = tidx[in_margin]
            #tri_vel1 = np.mean(tri_co[t_in], axis=1)
            #tvel = np.mean(tri_vo[t_in], axis=1)
            #tvel = tri_vel1 - tri_vel2
            t_vel = np.mean(cloth.vel[cloth.tridex][t_in], axis=1)
            col_idx = cidx[in_margin]
            d_in = d[in_margin]
            # keep the vert on the same side of the face it came from
            sign_margin = margin * np.sign(d_in) # which side of the face
            c_move = ((nor[in_margin] * d_in[:, nax]) - (nor[in_margin] * sign_margin[:, nax]))#) * -np.sign(d[in_margin])[:, nax]
            #c_move *= 1 / cloth.ob.modeling_cloth_grid_size
            #cloth.co[col_idx] -= ((nor[in_margin] * d_in[:, nax]) - (nor[in_margin] * sign_margin[:, nax]))#) * -np.sign(d[in_margin])[:, nax]
            cloth.co[col_idx] -= c_move #* .7
            #cloth.vel[col_idx] = 0
            cloth.vel[col_idx] = t_vel
    #col.vel[:] = col.co
# self collider =============================================
# update functions --------------------->>>
def tile_and_remove_neighbors(vidx, tidx, c_peat, t_peat):
    """Filter candidate (vert, tri) pairs, dropping pairs where the vert
    is one of the triangle's own corners.

    ``c_peat``/``t_peat`` are the flattened vert-per-tri cross product;
    returns the filtered (c_peat, t_peat) pair.
    """
    n_tris = tidx.shape[0]
    n_verts = vidx.shape[0]
    # view the flat pair lists as a (verts x tris) table
    # check the speed difference of doing a reshape with ravel at the end
    vert_table = c_peat.reshape(n_verts, n_tris)
    tri_table = tidx[t_peat.reshape(n_verts, n_tris)]
    # True where the vert id appears among the triangle's three corners
    is_corner = tri_table == vidx[vert_table][:, :, nax]
    keep = ~np.any(is_corner, axis=2)
    flat_keep = keep.ravel()
    return c_peat[flat_keep], t_peat[flat_keep]
class ColliderData:
    """Plain attribute container for one collider object's runtime state.

    Populated by ``create_collider_data`` with coordinate/velocity
    arrays, triangle indices and normals used during object collision.
    """
    pass
class SelfColliderData:
    """Plain attribute container reserved for self-collision state.

    NOTE(review): not instantiated anywhere in this chunk —
    ``create_self_collider`` builds a ColliderData instead; verify intent.
    """
    pass
def get_collider_data(ob):
    """Return the cached collider data for ``ob``, creating it on first use.

    Scans the scene's collider cache by object identity (the cache is
    keyed by name, which may be stale after a rename).
    """
    col_data = bpy.context.scene.modeling_cloth_data_set_colliders
    found = None
    # full scan; like the original, a later duplicate entry wins
    for cached in col_data.values():
        if cached.ob == ob:
            found = cached
    if not found:
        found = create_collider_data(ob)
    return found
def create_collider_data(ob):
    """Build and cache a ColliderData record for collider object ``ob``.

    Evaluates the object to a temporary proxy mesh, stores coordinates,
    vertex normals, triangle indices and the barycentric helper arrays,
    then frees the proxy. Returns the new ColliderData.
    """
    col_data = bpy.context.scene.modeling_cloth_data_set_colliders
    col = ColliderData()
    col_data[ob.name] = col
    col.ob = ob
    # get proxy
    proxy = ob.to_mesh(bpy.context.scene, True, 'PREVIEW')
    col.co = get_proxy_co(ob, None, proxy)
    col.idxer = np.arange(col.co.shape[0], dtype=np.int32)
    proxy_in_place(col, proxy)
    col.v_normals = proxy_v_normals(col.ob, proxy)
    # previous-frame positions, used for triangle velocity
    col.vel = np.copy(col.co)
    col.tridex = triangulate(proxy)
    col.tridexer = np.arange(col.tridex.shape[0], dtype=np.int32)
    # cross_vecs used later by barycentric tri check
    proxy_v_normals_in_place(col, True, proxy)
    # surface pushed out along vertex normals by the outer margin
    marginalized = np.array(col.co + col.v_normals * ob.mclo.object_collision_outer_margin, dtype=np.float32)
    col.cross_vecs, col.origins, col.normals = get_tri_normals(marginalized[col.tridex])
    col.cross_vecs.dtype = np.float32
    col.origins.dtype = np.float32
    #col.normals.dtype = np.float32
    # remove proxy
    bpy.data.meshes.remove(proxy)
    print('INFO: Collider data for', ob.name, 'is created!')
    return col
# Self collision object
def create_self_collider(ob):
    """Build a ColliderData record from the cloth object itself.

    Mirrors ``create_collider_data`` but reads the object directly
    instead of an evaluated proxy mesh.

    FIX(review): the previous version had ``col.tridex = triangulate(...)``
    commented out, yet the very next line read ``col.tridex.shape`` — an
    AttributeError on every call. The assignment is restored below.
    TODO confirm the argument: other call sites pass a mesh datablock
    (``triangulate(proxy)`` / ``triangulate(ob.data, cloth)``), so
    ``ob.data`` is used here rather than the object.
    """
    # maybe fixed? !!! bug where first frame of collide uses empty data. Stuff goes flying.
    col = ColliderData()
    col.ob = ob
    col.co = get_co(ob, None)
    proxy_in_place(col)
    col.v_normals = proxy_v_normals(ob)
    # previous-frame positions, used for triangle velocity
    col.vel = np.copy(col.co)
    col.tridex = triangulate(ob.data)
    col.tridexer = np.arange(col.tridex.shape[0], dtype=np.int32)
    # cross_vecs used later by barycentric tri check
    proxy_v_normals_in_place(col)
    # surface pushed out along vertex normals by the outer margin
    marginalized = np.array(col.co + col.v_normals * ob.mclo.object_collision_outer_margin, dtype=np.float32)
    col.cross_vecs, col.origins, col.normals = get_tri_normals(marginalized[col.tridex])
    col.cross_vecs.dtype = np.float32
    col.origins.dtype = np.float32
    #col.normals.dtype = np.float32
    return col
# collide object updater
# collide object updater
def collision_object_update(self, context):
    """Toggle callback for the per-object collision flag.

    Enabling registers a collider pointer for the object; disabling
    removes the pointer entry and drops any cached collider data.
    """
    scene = context.scene
    col_data = scene.modeling_cloth_data_set_colliders
    ob = self.id_data
    if self.object_collision:
        # register: track this object as a collider
        pointer = scene.mclo.collider_pointers.add()
        pointer.ob = ob
        return
    # unregister: find the pointer entry, purge its cached data, remove it
    for i, pointer in enumerate(scene.mclo.collider_pointers):
        if pointer.ob != ob:
            continue
        stale = [key for key, col in col_data.items() if col.ob == pointer.ob]
        for key in stale:
            del col_data[key]
        scene.mclo.collider_pointers.remove(i)
        break
# cloth object detect updater:
# cloth object detect updater:
def cloth_object_update(self, context):
    """Update callback for cloth-object detection. Currently a stub."""
    # Intentionally a no-op apart from the trace message.
    print("ran the detect updater. It did nothing.")
def manage_animation_handler(self, context):
    """Keep the run modes mutually exclusive: turning on the per-frame
    handler switches the continuous scene-update handler off."""
    owner = self.id_data
    if owner.mclo.frame_update:
        owner.mclo.scene_update = False
def manage_continuous_handler(self, context):
    """Keep the run modes mutually exclusive: turning on the continuous
    scene-update handler switches the per-frame handler off."""
    owner = self.id_data
    if owner.mclo.scene_update:
        owner.mclo.frame_update = False
# ================= Handler ======================
@persistent
def handler_frame(scene):
    """Frame-change handler entry point; defers to handler_unified."""
    handler_unified(scene, True)
@persistent
def handler_scene(scene):
    """Scene-update handler entry point; defers to handler_unified."""
    handler_unified(scene, False)
def handler_unified(scene, frame_update=False):
    """Shared body of the frame-change and scene-update handlers.

    Steps every registered cloth object whose run mode matches
    ``frame_update``, and prunes pointers whose objects left the scene.
    """
    data = bpy.context.scene.modeling_cloth_data_set
    cull_ids = []
    for i, cp in enumerate(scene.mclo.cloth_pointers):
        ob = cp.ob
        # Check if object still exists
        if not ob or (ob and not scene.objects.get(ob.name)):
            if scene.mclo.last_object == ob:
                scene.mclo.last_object = None
            cull_ids.append(i)
        else:
            cloth = get_cloth_data(ob)
            # Frame update
            if frame_update and ob.mclo.frame_update:
                run_handler(ob, cloth)
                if ob.mclo.auto_reset:
                    # rewind to the start shape at frame 1 (or earlier)
                    if scene.frame_current <= 1:
                        reset_shapes(ob)
            # Scene update
            elif not frame_update and ob.mclo.scene_update:
                run_handler(ob, cloth)
    # Remove missing object from cloth pointer
    # (reversed so earlier removals don't shift pending indices)
    for i in reversed(cull_ids):
        ob = scene.mclo.cloth_pointers[i].ob
        if ob:
            ob.mclo.enable = False
        else:
            scene.mclo.cloth_pointers.remove(i)
def get_cloth_data(ob):
    """Fetch the engine-side cloth data for ``ob``, healing renames.

    Looks the entry up by object name; if that fails (the object was
    renamed), falls back to an identity scan and re-keys the entry.
    Builds fresh data as a last resort.
    """
    data = bpy.context.scene.modeling_cloth_data_set
    try:
        return data[ob.name]
    except KeyError:  # was a bare except: only a missing key is expected here
        # Name lookup failed -- the object may have been renamed.
        # Iterate over a snapshot because we re-key the dict on a hit.
        for ob_name, c in list(data.items()):
            if c.ob == ob:
                # Rename the key
                data[ob.name] = data.pop(ob_name)
                return data[ob.name]
        # If cloth still not found
        return create_cloth_data(ob)
def enable_cloth(self, context):
    """Toggle callback for ``ob.mclo.enable``.

    Enabling registers the object and builds its engine data; disabling
    purges its engine data entries and its scene pointer.
    """
    ob = self.id_data
    scene = context.scene
    data = scene.modeling_cloth_data_set
    extra_data = scene.modeling_cloth_data_set_extra
    scene.mclo.last_object = ob
    if ob.mclo.enable:
        # Register: track the object and build its simulation data.
        pointer = scene.mclo.cloth_pointers.add()
        pointer.ob = ob
        create_cloth_data(ob)
        return
    # Unregister: drop cached data keyed to this object, then its pointer.
    for i, pointer in enumerate(scene.mclo.cloth_pointers):
        if pointer.ob == ob:
            stale = [key for key, cloth in data.items() if ob == cloth.ob]
            for key in stale:
                del data[key]
            # Remove pointers
            scene.mclo.cloth_pointers.remove(i)
def visible_objects_and_duplis(context):
    """Loop over (object, matrix) pairs (mesh only).

    Yields every visible mesh object with cloth enabled, paired with a
    copy of its world matrix.
    """
    for candidate in context.visible_objects:
        if candidate.type == 'MESH' and candidate.mclo.enable:
            yield candidate, candidate.matrix_world.copy()
def obj_ray_cast(obj, matrix, ray_origin, ray_target):
    """Wrapper for ray casting that moves the ray into object space.

    Returns (location, normal, face_index) in object space on a valid
    hit, or (None, None, None) on a miss or out-of-range face index.
    """
    # get the ray relative to the object
    matrix_inv = matrix.inverted()
    ray_origin_obj = matrix_inv * ray_origin
    ray_target_obj = matrix_inv * ray_target
    ray_direction_obj = ray_target_obj - ray_origin_obj
    # cast the ray
    success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)
    # Guard against a stale/out-of-range face index. Valid indices are
    # 0..len-1, so the original ``>`` comparison let an index equal to
    # len(polygons) slip through; ``>=`` closes that off-by-one.
    # (A miss reports face_index == -1 and falls through to the miss return.)
    if face_index >= len(obj.data.polygons):
        return None, None, None
    elif success:
        return location, normal, face_index
    else:
        return None, None, None
# sewing --------->>>
class ModelingClothSew(bpy.types.Operator):
    """For connected two edges with sew lines"""
    bl_idname = "object.modeling_cloth_create_sew_lines"
    bl_label = "Modeling Cloth Create Sew Lines"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        # Operate on the active object; sew lines are created in edit mode.
        target = bpy.context.object
        if target.mode != "EDIT":
            bpy.ops.object.mode_set(mode="EDIT")
        create_sew_edges()
        # NOTE(review): always leaves the object in edit mode, even when
        # it started elsewhere — preserved as-is; confirm intended.
        bpy.ops.object.mode_set(mode="EDIT")
        return {'FINISHED'}
# sewing --------->>>
class ModelingClothPin(bpy.types.Operator):
    """Modal ray cast for placing pins"""
    bl_idname = "view3d.modeling_cloth_pin"
    bl_label = "Modeling Cloth Pin"
    bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only usable in a 3D viewport while at least one cloth is registered.
        return context.space_data.type == 'VIEW_3D' and any(context.scene.mclo.cloth_pointers)
    def __init__(self):
        # hovered cloth object, its hit location, and the closest vertex id
        self.obj = None
        self.latest_hit = None
        self.closest = None
    def invoke(self, context, event):
        """Arm the modal pin tool and flag the UI alert state."""
        #bpy.ops.object.select_all(action='DESELECT')
        context.scene.mclo.pin_alert = True
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
    def raycast(self, context, event):
        """Cast a ray under the mouse and snap the guide to the closest
        vertex of the nearest cloth face hit."""
        # get the context arguments
        scene = context.scene
        region = context.region
        rv3d = context.region_data
        coord = event.mouse_region_x, event.mouse_region_y
        # get the ray from the viewport and mouse
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + view_vector
        guide = create_guide()
        # cast rays and find the closest object
        best_length_squared = -1.0
        best_obj = None
        best_matrix = None
        best_face_index = -1
        for obj, matrix in visible_objects_and_duplis(context):
            hit, normal, face_index = obj_ray_cast(obj, matrix, ray_origin, ray_target)
            if hit:
                hit_world = matrix * hit
                length_squared = (hit_world - ray_origin).length_squared
                if not best_obj or length_squared < best_length_squared:
                    best_length_squared = length_squared
                    best_obj = obj
                    best_face_index = face_index
                    best_matrix = matrix
        if best_obj:
            # world-space positions of the hit face's verts (display shape key)
            verts = np.array([best_matrix * best_obj.data.shape_keys.key_blocks['modeling cloth key'].data[v].co
                              for v in best_obj.data.polygons[best_face_index].vertices])
            vecs = verts - np.array(hit_world)
            vidx = [v for v in best_obj.data.polygons[best_face_index].vertices]
            # pick the face vert closest (squared distance) to the hit point
            self.closest = vidx[np.argmin(np.einsum('ij,ij->i', vecs, vecs))]
            loc = best_matrix * best_obj.data.shape_keys.key_blocks['modeling cloth key'].data[self.closest].co
            self.latest_hit = guide.location = loc
            self.obj = best_obj
    def modal(self, context, event):
        """Modal loop: hover to aim, left-click to drop a pin empty,
        right-click/Esc to finish."""
        bpy.context.window.cursor_set("CROSSHAIR")
        if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE', 'NUMPAD_0',
                          'NUMPAD_PERIOD','NUMPAD_1', 'NUMPAD_2', 'NUMPAD_3', 'NUMPAD_4',
                          'NUMPAD_5', 'NUMPAD_6', 'NUMPAD_7', 'NUMPAD_8', 'NUMPAD_9'}:
            # allow navigation
            return {'PASS_THROUGH'}
        elif event.type == 'MOUSEMOVE':
            self.raycast(context, event)
        elif event.type == 'LEFTMOUSE' and event.value == 'PRESS':
            if self.latest_hit and self.obj:
                # create an empty at the hit and register it as the pin's hook
                e = bpy.data.objects.new('modeling_cloth_pin', None)
                bpy.context.scene.objects.link(e)
                e.location = self.latest_hit
                e.show_x_ray = True
                e.select = True
                e.empty_draw_size = .1
                pin = self.obj.mclo.pins.add()
                pin.vertex_id = self.closest
                pin.hook = e
                self.latest_hit = None
                self.obj = None
        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            delete_guide()
            cloths = [i for i in bpy.data.objects if i.mclo.enable] # so we can select an empty and keep the settings menu up
            context.scene.mclo.pin_alert = False
            if len(cloths) > 0: #
                ob = context.scene.mclo.last_object
                bpy.context.scene.objects.active = ob
            bpy.context.window.cursor_set("DEFAULT")
            return {'FINISHED'}
        return {'RUNNING_MODAL'}
# drag===================================
# drag===================================
#['DEFAULT', 'NONE', 'WAIT', 'CROSSHAIR', 'MOVE_X', 'MOVE_Y', 'KNIFE', 'TEXT', 'PAINT_BRUSH', 'HAND', 'SCROLL_X', 'SCROLL_Y', 'SCROLL_XY', 'EYEDROPPER']
# dragger===
class ModelingClothDrag(bpy.types.Operator):
    """Modal ray cast for dragging"""
    bl_idname = "view3d.modeling_cloth_drag"
    bl_label = "Modeling Cloth Drag"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Only in the 3D view and only when at least one cloth pointer exists.
        return context.space_data.type == 'VIEW_3D' and any(context.scene.mclo.cloth_pointers)

    def __init__(self):
        self.clicked = False       # set on LMB press; consumed by main_drag()
        self.stored_mouse = None   # world-space ray target captured at pick time
        self.matrix = None         # inverted world matrix of the picked object

    def invoke(self, context, event):
        """Initialize shared drag state and start the modal loop."""
        scene = context.scene
        extra_data = scene.modeling_cloth_data_set_extra
        scene.mclo.drag_alert = True
        #bpy.ops.object.select_all(action='DESELECT')
        extra_data['vidx'] = None         # Vertex ids of dragged face
        extra_data['stored_vidx'] = None  # Vertex coordinates
        extra_data['move'] = None         # Direction of drag
        for cp in scene.mclo.cloth_pointers:
            if cp.ob:
                cp.ob.mclo.clicked = False
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def main_drag(self, context, event):
        """Per-mousemove work: on a fresh click, ray cast and pick the closest
        face; afterwards publish the drag vector in object-local space via
        the shared extra_data dict (consumed by the sim elsewhere)."""
        # get the context arguments
        scene = context.scene
        extra_data = scene.modeling_cloth_data_set_extra
        region = context.region
        rv3d = context.region_data
        coord = event.mouse_region_x, event.mouse_region_y

        # get the ray from the viewport and mouse
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + view_vector

        if self.clicked:
            # cast rays and find the closest object
            best_length_squared = -1.0
            best_obj = None
            best_face_index = -1
            best_matrix = None
            for obj, matrix in visible_objects_and_duplis(context):
                hit, normal, face_index = obj_ray_cast(obj, matrix, ray_origin, ray_target)
                if hit:
                    hit_world = matrix * hit
                    length_squared = (hit_world - ray_origin).length_squared
                    # best_length_squared starts at -1.0, so the first hit
                    # always wins via the `not best_obj` clause.
                    if not best_obj or length_squared < best_length_squared:
                        best_length_squared = length_squared
                        best_obj = obj
                        best_face_index = face_index
                        best_matrix = matrix
            if best_obj:
                best_obj.mclo.clicked = True
                vidx = [v for v in best_obj.data.polygons[best_face_index].vertices]
                vert = best_obj.data.shape_keys.key_blocks['modeling cloth key'].data
                extra_data['vidx'] = vidx
                extra_data['stored_vidx'] = np.array([vert[v].co for v in extra_data['vidx']])
                self.stored_mouse = np.copy(ray_target)
                self.matrix = best_matrix.inverted()
            self.clicked = False

        if self.stored_mouse is not None:
            move = np.array(ray_target) - self.stored_mouse
            # Rotate the world-space drag into the picked object's local
            # space (self.matrix is the inverted world matrix).
            extra_data['move'] = (move @ np.array(self.matrix)[:3, :3].T)

    def modal(self, context, event):
        scene = context.scene
        #data = scene.modeling_cloth_data_set
        extra_data = scene.modeling_cloth_data_set_extra
        bpy.context.window.cursor_set("HAND")
        if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
            # allow navigation
            return {'PASS_THROUGH'}
        elif event.type == 'MOUSEMOVE':
            #pos = queryMousePosition()
            self.main_drag(context, event)
        elif event.type == 'LEFTMOUSE' and event.value == 'PRESS':
            # when I click, If I have a hit, store the hit on press
            self.clicked = True
            extra_data['vidx'] = []
        elif event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
            # Drop the drag: clear pick state and every cloth's clicked flag.
            self.clicked = False
            self.stored_mouse = None
            extra_data['vidx'] = None
            #for key, cloth in data.items():
            #    cloth.clicked = False
            for cp in scene.mclo.cloth_pointers:
                if cp.ob:
                    cp.ob.mclo.clicked = False
        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            self.clicked = False
            self.stored_mouse = None
            bpy.context.window.cursor_set("DEFAULT")
            scene.mclo.drag_alert = False
            return {'FINISHED'}
        return {'RUNNING_MODAL'}
# drag===================================End
# drag===================================End
class DeletePins(bpy.types.Operator):
    """Delete modeling cloth pins and clear pin list for current object"""
    bl_idname = "object.delete_modeling_cloth_pins"
    bl_label = "Delete Modeling Cloth Pins"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        ob = get_last_object()  # returns tuple with list and last cloth objects or None
        if not ob:
            return {'CANCELLED'}
        # Iterate in reverse so collection indices stay valid while removing.
        for i, pin in reversed(list(enumerate(ob[1].mclo.pins))):
            if pin.hook:
                # do_unlink=True: the hook empty is linked to the scene, so a
                # plain remove() would raise because the object still has users.
                bpy.data.objects.remove(pin.hook, do_unlink=True)
            ob[1].mclo.pins.remove(i)
        bpy.context.scene.objects.active = ob[1]
        return {'FINISHED'}
class SelectPins(bpy.types.Operator):
    """Select modeling cloth pins for current object"""
    bl_idname = "object.select_modeling_cloth_pins"
    bl_label = "Select Modeling Cloth Pins"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # get_last_object() yields (cloth list, last cloth object) or None.
        last = get_last_object()
        if not last:
            return {'CANCELLED'}
        cloth_ob = last[1]
        # Flag every pin's hook empty as selected; existing selection is kept.
        for pin in cloth_ob.mclo.pins:
            pin.hook.select = True
        return {'FINISHED'}
class PinSelected(bpy.types.Operator):
    """Add pins to verts selected in edit mode"""
    bl_idname = "object.modeling_cloth_pin_selected"
    bl_label = "Modeling Cloth Pin Selected"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        ob = bpy.context.object
        # Drop to object mode first so the edit-mode selection flags and
        # vertex coordinates are flushed to the mesh data.
        bpy.ops.object.mode_set(mode='OBJECT')
        sel = [i.index for i in ob.data.vertices if i.select]
        matrix = ob.matrix_world.copy()
        for v in sel:
            # One x-ray hook empty per selected vertex.
            e = bpy.data.objects.new('modeling_cloth_pin', None)
            bpy.context.scene.objects.link(e)
            if ob.active_shape_key is None:
                closest = matrix * ob.data.vertices[v].co# * matrix
            else:
                # Pin to the active shape key's coordinates when one exists.
                closest = matrix * ob.active_shape_key.data[v].co# * matrix
            e.location = closest #* matrix
            e.show_x_ray = True
            e.select = True
            e.empty_draw_size = .1
            pin = ob.mclo.pins.add()
            pin.vertex_id = v
            pin.hook = e
        ob.select = False
        bpy.ops.object.mode_set(mode='EDIT')
        return {'FINISHED'}
class GrowSource(bpy.types.Operator):
    """Grow Source Shape"""
    bl_idname = "object.modeling_cloth_grow"
    bl_label = "Modeling Cloth Grow"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Enlarge the rest/source shape by 2% per invocation.
        scale_source(1.02)
        return {'FINISHED'}
class ShrinkSource(bpy.types.Operator):
    """Shrink Source Shape"""
    bl_idname = "object.modeling_cloth_shrink"
    bl_label = "Modeling Cloth Shrink"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Reduce the rest/source shape by 2% per invocation.
        scale_source(0.98)
        return {'FINISHED'}
class ResetShapes(bpy.types.Operator):
    """Reset Shapes"""
    bl_idname = "object.modeling_cloth_reset"
    bl_label = "Modeling Cloth Reset"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Restore the cloth shape keys to their initial state.
        reset_shapes()
        return {'FINISHED'}
class AddVirtualSprings(bpy.types.Operator):
    """Add Virtual Springs Between All Selected Vertices"""
    bl_idname = "object.modeling_cloth_add_virtual_spring"
    bl_label = "Modeling Cloth Add Virtual Spring"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Default call adds springs between the current vertex selection.
        add_remove_virtual_springs()
        return {'FINISHED'}
class RemoveVirtualSprings(bpy.types.Operator):
    """Remove Virtual Springs Between All Selected Vertices"""
    bl_idname = "object.modeling_cloth_remove_virtual_spring"
    bl_label = "Modeling Cloth Remove Virtual Spring"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # remove=True drops the springs between the current vertex selection.
        add_remove_virtual_springs(remove=True)
        return {'FINISHED'}
class ModelingClothObject(bpy.types.PropertyGroup):
    """Collection item holding a pointer to one cloth object."""
    ob = PointerProperty(type=bpy.types.Object)
class ModelingClothCollider(bpy.types.PropertyGroup):
    """Collection item holding a pointer to one collider object."""
    ob = PointerProperty(type=bpy.types.Object)
class ModelingClothGlobals(bpy.types.PropertyGroup):
    """Scene-level modeling cloth state; attached as Scene.mclo in create_properties()."""
    cloth_pointers = CollectionProperty(
        name="Modeling Cloth Objects",
        description = 'List of cloth objects for quick pointers',
        type=ModelingClothObject)
    collider_pointers = CollectionProperty(
        name="Modeling Cloth Colliders",
        description = 'List of collider objects for quick pointers',
        type=ModelingClothCollider)
    # UI alert flags toggled while the modal drag / pin operators run.
    drag_alert = BoolProperty(default=False)
    pin_alert = BoolProperty(default=False)
    # Most recently active cloth object (used by the panel when a non-mesh
    # such as a pin empty is selected).
    last_object = PointerProperty(type=bpy.types.Object)
class ModelingClothPinObject(bpy.types.PropertyGroup):
    """One pin: a cloth vertex index and the hook empty that drags it."""
    # Index into the cloth mesh's vertices; -1 means unset.
    vertex_id = IntProperty(default=-1)
    hook = PointerProperty(type=bpy.types.Object)
class ApplyClothToMesh(bpy.types.Operator):
    """Apply cloth effects to mesh for export."""
    bl_idname = "object.modeling_cloth_apply_cloth_to_mesh"
    # Fixed: bl_label was a copy-paste of RemoveVirtualSprings' label
    # ("Modeling Cloth Remove Virtual Spring").
    bl_label = "Modeling Cloth Apply Cloth To Mesh"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Copy the simulated 'modeling cloth key' coordinates onto both the
        # Basis shape key and the raw mesh vertices so exports see the result.
        ob = get_last_object()[1]
        v_count = len(ob.data.vertices)
        co = np.zeros(v_count * 3, dtype=np.float32)
        key_blocks = ob.data.shape_keys.key_blocks
        key_blocks['modeling cloth key'].data.foreach_get('co', co)
        key_blocks['Basis'].data.foreach_set('co', co)
        # Toggle mute to force Blender to refresh the shape key display.
        key_blocks['Basis'].mute = True
        key_blocks['Basis'].mute = False
        ob.data.vertices.foreach_set('co', co)
        ob.data.update()
        return {'FINISHED'}
class ModelingClothVirtualSpring(bpy.types.PropertyGroup):
    """One extra spring connecting two cloth vertices; -1 means unset."""
    vertex_id_1 = IntProperty(default=-1)
    vertex_id_2 = IntProperty(default=-1)
class ModelingClothObjectProps(bpy.types.PropertyGroup):
    """Per-object modeling cloth settings; attached as Object.mclo in create_properties()."""
    enable = BoolProperty(name="Enable Modeling Cloth",
        description="For toggling modeling cloth",
        default=False, update=enable_cloth)

    floor = BoolProperty(name="Modeling Cloth Floor",
        description="Stop at floor",
        default=False)

    # handler type ----->>>
    scene_update = BoolProperty(name="Modeling Cloth Continuous Update",
        description="Choose continuous update",
        default=False, update=manage_continuous_handler)

    frame_update = BoolProperty(name="Modeling Cloth Handler Animation Update",
        description="Choose animation update",
        default=False, update=manage_animation_handler)

    auto_reset = BoolProperty(name="Modeling Cloth Reset at Frame 1",
        description="Automatically reset if the current frame number is 1 or less",
        default=False)#, update=manage_handlers)
    # ------------------>>>

    noise = FloatProperty(name="Modeling Cloth Noise",
        description="Set the noise strength",
        default=0.001, precision=4, min=0, max=1, update=refresh_noise)

    noise_decay = FloatProperty(name="Modeling Cloth Noise Decay",
        description="Multiply the noise by this value each iteration",
        default=0.99, precision=4, min=0, max=1)#, update=refresh_noise_decay)

    # spring forces ------------>>>
    spring_force = FloatProperty(name="Modeling Cloth Spring Force",
        description="Set the spring force",
        default=1.0, precision=4, min=0, max=2.5)#, update=refresh_noise)

    push_springs = FloatProperty(name="Modeling Cloth Push Spring Force",
        description="Set the push spring force",
        default=1.0, precision=4, min=0, max=2.5)#, update=refresh_noise)

    bend_stiff = FloatProperty(name="Modeling Cloth Bend Spring Force",
        description="Set the bend spring force",
        default=0.0, precision=4, min=0, max=10, soft_max=1)#, update=refresh_noise)
    # -------------------------->>>

    gravity = FloatProperty(name="Modeling Cloth Gravity",
        description="Modeling cloth gravity",
        default=0.0, precision=4, soft_min=-10, soft_max=10, min=-1000, max=1000)

    iterations = IntProperty(name="Iterations",
        description="How stiff the cloth is",
        default=2, min=1, max=500)#, update=refresh_noise_decay)

    velocity = FloatProperty(name="Velocity",
        description="Cloth keeps moving",
        default=.98, min= -200, max=200, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)

    # Wind. Note, wind should be measured agains normal and be at zero when normals are at zero. Squared should work
    wind_x = FloatProperty(name="Wind X",
        description="Not the window cleaner",
        default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)

    wind_y = FloatProperty(name="Wind Y",
        description="Y? Because wind is cool",
        default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)

    wind_z = FloatProperty(name="Wind Z",
        description="It's windzee outzide",
        default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)

    turbulence = FloatProperty(name="Wind Turbulence",
        description="Add Randomness to wind",
        default=0, min=0, max=10, soft_min= 0, soft_max=1)#, update=refresh_noise_decay)

    # self collision ----->>>
    self_collision = BoolProperty(name="Modeling Cloth Self Collsion",
        description="Toggle self collision",
        default=False)#, update=collision_data_update)

    # self_collision_force = FloatProperty(name="recovery force",
        # description="Self colide faces repel",
        # default=.17, precision=4, min= -1.1, max=1.1, soft_min= 0, soft_max=1)

    self_collision_margin = FloatProperty(name="Margin",
        description="Self colide faces margin",
        default=.08, precision=4, min= -1, max=1, soft_min= 0, soft_max=1)

    # self_collision_cy_size = FloatProperty(name="Cylinder size",
        # description="Self colide faces cylinder size",
        # default=1, precision=4, min= 0, max=4, soft_min= 0, soft_max=1.5)
    # ---------------------->>>

    # extras ------->>>
    inflate = FloatProperty(name="inflate",
        description="add force to vertex normals",
        default=0, precision=4, min= -10, max=10, soft_min= -1, soft_max=1)

    sew = FloatProperty(name="sew",
        description="add force to vertex normals",
        default=0, precision=4, min= -10, max=10, soft_min= -1, soft_max=1)
    # -------------->>>

    # external collisions ------->>>
    object_collision = BoolProperty(name="Modeling Cloth Self Collsion",
        description="Detect and collide with this object",
        default=False, update=collision_object_update)

    #collision_animated = bpy.props.BoolProperty(name="Modeling Cloth Collsion Animated",
        #description="Treat collide object as animated. (turn off for speed on static objects)",
        #default=True)#, update=collision_object_update)

    object_collision_detect = BoolProperty(name="Modeling Cloth Self Collsion",
        description="Detect collision objects",
        default=True, update=cloth_object_update)

    object_collision_outer_margin = FloatProperty(name="Modeling Cloth Outer Margin",
        description="Collision margin on positive normal side of face",
        default=0.04, precision=4, min=0, max=100, soft_min=0, soft_max=1000)

    object_collision_inner_margin = FloatProperty(name="Modeling Cloth Inner Margin",
        description="Collision margin on negative normal side of face",
        default=0.08, precision=4, min=0, max=100, soft_min=0, soft_max=1000)
    # ---------------------------->>>

    # more collision stuff ------->>>
    grid_size = IntProperty(name="Modeling Cloth Grid Size",
        description="Max subdivisions for the dynamic broad phase grid",
        default=10, min=0, max=1000, soft_min=0, soft_max=1000)

    # Not for manual editing ----->>>
    waiting = BoolProperty(name='Pause Cloth Update',
        default=False)

    clicked = BoolProperty(name='Click for drag event',
        default=False)

    pins = CollectionProperty(name="Modeling Cloth Pins",
        type=ModelingClothPinObject)

    virtual_springs = CollectionProperty(name="Modeling Cloth Virtual Springs",
        type=ModelingClothVirtualSpring)
def create_properties():
    """Attach the addon's property groups and per-session data dicts.

    Called once from register(), after the PropertyGroup classes are
    registered.
    """
    bpy.types.Scene.mclo = PointerProperty(type=ModelingClothGlobals)
    bpy.types.Object.mclo = PointerProperty(type=ModelingClothObjectProps)

    # property dictionaries
    # Plain (non-persistent) dicts used as runtime caches by the simulation.
    bpy.types.Scene.modeling_cloth_data_set = {}
    bpy.types.Scene.modeling_cloth_data_set_colliders = {}
    bpy.types.Scene.modeling_cloth_data_set_extra = {}
def remove_properties():
    """Placeholder teardown for the properties added by create_properties().

    Intentionally a no-op: the pointer properties and session dictionaries
    are left in place on unregister. Kept so unregister() has a symmetric
    hook if cleanup is ever needed.
    """
    pass
@persistent
def refresh_cloth_data(scene):
    """load_post handler: rebuild runtime data for all cloths and colliders.

    The `scene` argument supplied by the handler is ignored; the current
    bpy.context.scene is used instead.
    """
    # Create new data based on available clothes and colliders
    scene = bpy.context.scene
    for cp in scene.mclo.cloth_pointers:
        if cp.ob:
            create_cloth_data(cp.ob)
    for cp in scene.mclo.collider_pointers:
        if cp.ob:
            create_collider_data(cp.ob)
class ModelingClothPanel(bpy.types.Panel):
    """Modeling Cloth Panel"""
    bl_label = "Modeling Cloth Panel"
    bl_idname = "Modeling Cloth"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_category = "Extended Tools"
    #gt_show = True

    def draw(self, context):
        """Build the tool-shelf UI: tools, per-object cloth settings, pins,
        and the promo buttons."""
        scene = context.scene
        status = False
        layout = self.layout

        # tools
        col = layout.column(align=True)
        col.label(text="Tools")
        col.operator("object.modeling_cloth_create_sew_lines", text="Sew Lines", icon="MOD_UVPROJECT")
        col.operator("object.modeling_cloth_apply_cloth_to_mesh", text="Apply to Mesh", icon="FILE_TICK")

        # modeling cloth
        col = layout.column(align=True)
        col.label(text="Modeling Cloth")
        ob = bpy.context.object
        cloths = [i for i in bpy.data.objects if i.mclo.enable]  # so we can select an empty and keep the settings menu up
        if len(cloths) > 0:
            status = scene.mclo.pin_alert
            # While pinning (or when a non-mesh is active) fall back to the
            # last cloth object so the settings stay visible.
            if ob is not None:
                if ob.type != 'MESH' or status:
                    ob = scene.mclo.last_object
        if ob is not None:
            if ob.type == 'MESH':
                col.prop(ob.mclo ,"enable", text="Modeling Cloth", icon='SURFACE_DATA')
                if ob.mclo.enable:
                    col.prop(ob.mclo ,"self_collision", text="Self Collision", icon='PHYSICS')
                    if ob.mclo.self_collision:
                        col.prop(ob.mclo ,"self_collision_margin", text="Self Margin")#, icon='PLAY')
                    #pause = 'PAUSE'
                    #if ob.mclo.pause:
                    #    pause = 'PLAY'
                    col.prop(ob.mclo ,"object_collision", text="Collider", icon="STYLUS_PRESSURE")
                    #if ob.mclo.object_collision:
                        #col.prop(ob.mclo ,"collision_animated", text="Animated", icon="POSE_DATA")
                    if ob.mclo.object_collision:
                        col.prop(ob.mclo ,"object_collision_outer_margin", text="Outer Margin", icon="FORCE_FORCE")
                        col.prop(ob.mclo ,"object_collision_inner_margin", text="Inner Margin", icon="STICKY_UVS_LOC")
                col = layout.column(align=True)
                col.label("Collide List:")
                for cp in scene.mclo.collider_pointers:
                    if cp.ob:
                        col.label(cp.ob.name)
                if ob.mclo.enable:
                    #col.label('Active: ' + ob.name)
                    # object collisions
                    col = layout.column(align=True)
                    col.label("Collisions")
                    col.prop(ob.mclo ,"object_collision_detect", text="Object Collisions", icon="PHYSICS")
                    col = layout.column(align=True)
                    col.scale_y = 2.0
                    col = layout.column(align=True)
                    col.scale_y = 1.4
                    col.prop(ob.mclo, "grid_size", text="Grid Boxes", icon="MESH_GRID")
                    col.prop(ob.mclo, "frame_update", text="Animation Update", icon="TRIA_RIGHT")
                    if ob.mclo.frame_update:
                        col.prop(ob.mclo, "auto_reset", text="Frame 1 Reset")
                    col.prop(ob.mclo, "scene_update", text="Continuous Update", icon="TIME")
                    col = layout.column(align=True)
                    col.scale_y = 2.0
                    col.operator("object.modeling_cloth_reset", text="Reset")
                    col.alert = scene.mclo.drag_alert
                    col.operator("view3d.modeling_cloth_drag", text="Grab")
                    col = layout.column(align=True)
                    col.prop(ob.mclo ,"iterations", text="Iterations")#, icon='OUTLINER_OB_LATTICE')
                    col.prop(ob.mclo ,"spring_force", text="Stiffness")#, icon='OUTLINER_OB_LATTICE')
                    col.prop(ob.mclo ,"push_springs", text="Push Springs")#, icon='OUTLINER_OB_LATTICE')
                    col.prop(ob.mclo ,"bend_stiff", text="Bend Springs")#, icon='CURVE_NCURVE')
                    col.prop(ob.mclo ,"noise", text="Noise")#, icon='PLAY')
                    col.prop(ob.mclo ,"noise_decay", text="Decay Noise")#, icon='PLAY')
                    col.prop(ob.mclo ,"gravity", text="Gravity")#, icon='PLAY')
                    col.prop(ob.mclo ,"inflate", text="Inflate")#, icon='PLAY')
                    col.prop(ob.mclo ,"sew", text="Sew Force")#, icon='PLAY')
                    col.prop(ob.mclo ,"velocity", text="Velocity")#, icon='PLAY')
                    col = layout.column(align=True)
                    col.label("Wind")
                    col.prop(ob.mclo ,"wind_x", text="Wind X")#, icon='PLAY')
                    col.prop(ob.mclo ,"wind_y", text="Wind Y")#, icon='PLAY')
                    col.prop(ob.mclo ,"wind_z", text="Wind Z")#, icon='PLAY')
                    col.prop(ob.mclo ,"turbulence", text="Turbulence")#, icon='PLAY')
                    col.prop(ob.mclo ,"floor", text="Floor")#, icon='PLAY')
                col = layout.column(align=True)
                col.scale_y = 1.5
                col.alert = status
                if ob.mclo.enable:
                    if ob.mode == 'EDIT':
                        col.operator("object.modeling_cloth_pin_selected", text="Pin Selected")
                        col = layout.column(align=True)
                        col.operator("object.modeling_cloth_add_virtual_spring", text="Add Virtual Springs")
                        col.operator("object.modeling_cloth_remove_virtual_spring", text="Remove Selected")
                    else:
                        col.operator("view3d.modeling_cloth_pin", text="Create Pins")
                    col = layout.column(align=True)
                    col.operator("object.select_modeling_cloth_pins", text="Select Pins")
                    col.operator("object.delete_modeling_cloth_pins", text="Delete Pins")
                    col.operator("object.modeling_cloth_grow", text="Grow Source")
                    col.operator("object.modeling_cloth_shrink", text="Shrink Source")
                    col = layout.column(align=True)
                    #col.prop(ob.mclo ,"self_collision", text="Self Collision")#, icon='PLAY')
                    #col.prop(ob.mclo ,"self_collision_force", text="Repel")#, icon='PLAY')
                    #col.prop(ob.mclo ,"self_collision_margin", text="Margin")#, icon='PLAY')
                    #col.prop(ob.mclo ,"self_collision_cy_size", text="Cylinder Size")#, icon='PLAY')

        # =============================
        col = layout.column(align=True)
        col.label('Collision Series')
        col.operator("object.modeling_cloth_collision_series", text="Paperback")
        col.operator("object.modeling_cloth_collision_series_kindle", text="Kindle")
        col.operator("object.modeling_cloth_donate", text="Donate")
class CollisionSeries(bpy.types.Operator):
    """Support my addons by checking out my awesome sci fi books"""
    bl_idname = "object.modeling_cloth_collision_series"
    bl_label = "Modeling Cloth Collision Series"

    def execute(self, context):
        # Default arguments open the paperback store pages.
        collision_series()
        return {'FINISHED'}
class CollisionSeriesKindle(bpy.types.Operator):
    """Support my addons by checking out my awesome sci fi books"""
    bl_idname = "object.modeling_cloth_collision_series_kindle"
    bl_label = "Modeling Cloth Collision Series Kindle"

    def execute(self, context):
        # paperback=False routes to the kindle store pages.
        collision_series(False)
        return {'FINISHED'}
class Donate(bpy.types.Operator):
    """Support my addons by donating"""
    bl_idname = "object.modeling_cloth_donate"
    bl_label = "Modeling Cloth Donate"

    def execute(self, context):
        # Skip both book branches and open only the donation page.
        collision_series(False, False)
        self.report({'INFO'}, 'Paypal, <EMAIL>')
        return {'FINISHED'}
def collision_series(paperback=True, kindle=True):
    """Open the author's book-store / donation pages in the web browser.

    Call patterns used by the operators above:
    (True, ...)   -> paperback pages only,
    (False, True) -> kindle pages only,
    (False, False)-> donation page only.
    """
    import webbrowser
    import importlib
    if paperback:
        webbrowser.open("https://www.createspace.com/6043857")
        # NOTE(review): the original reloaded the module between the two
        # open() calls (via the deprecated `imp`); importlib.reload keeps
        # that behavior on the maintained API.
        importlib.reload(webbrowser)
        webbrowser.open("https://www.createspace.com/7164863")
        return
    if kindle:
        webbrowser.open("https://www.amazon.com/Resolve-Immortal-Flesh-Collision-Book-ebook/dp/B01CO3MBVQ")
        importlib.reload(webbrowser)
        webbrowser.open("https://www.amazon.com/Formulacrum-Collision-Book-Rich-Colburn-ebook/dp/B0711P744G")
        return
    webbrowser.open("https://www.paypal.com/donate/?token=G1UymFn4CP8lSFn1r63jf_XOHAuSBfQJWFj9xjW9kWCScqkfYUCdTzP-ywiHIxHxYe7uJW&country.x=US&locale.x=US")
# ============================================================================================
def register():
    """Register operators, property groups and the panel; install handlers."""
    # Operator classes, registered in the original order.
    operator_classes = (
        ModelingClothSew, ModelingClothPin, ModelingClothDrag,
        DeletePins, SelectPins, PinSelected, GrowSource, ShrinkSource,
        ResetShapes, AddVirtualSprings, RemoveVirtualSprings,
        ApplyClothToMesh, CollisionSeries, CollisionSeriesKindle, Donate,
    )
    # PropertyGroup classes must exist before create_properties() uses them.
    property_classes = (
        ModelingClothObject, ModelingClothCollider, ModelingClothGlobals,
        ModelingClothPinObject, ModelingClothVirtualSpring,
        ModelingClothObjectProps,
    )
    for cls in operator_classes + property_classes:
        bpy.utils.register_class(cls)
    create_properties()

    # Panels
    bpy.utils.register_class(ModelingClothPanel)

    # Main handlers
    bpy.app.handlers.frame_change_post.append(handler_frame)
    bpy.app.handlers.scene_update_post.append(handler_scene)

    # Add load handlers
    bpy.app.handlers.load_post.append(refresh_cloth_data)
def unregister():
    """Remove handlers, then unregister all classes (mirror of register)."""
    # Handlers come off first so nothing fires mid-teardown.
    bpy.app.handlers.load_post.remove(refresh_cloth_data)
    bpy.app.handlers.frame_change_post.remove(handler_frame)
    bpy.app.handlers.scene_update_post.remove(handler_scene)

    # Operators
    for cls in (ModelingClothSew, ModelingClothPin, ModelingClothDrag,
                DeletePins, SelectPins, PinSelected, GrowSource, ShrinkSource,
                ResetShapes, AddVirtualSprings, RemoveVirtualSprings,
                ApplyClothToMesh, CollisionSeries, CollisionSeriesKindle, Donate):
        bpy.utils.unregister_class(cls)

    # Props
    remove_properties()
    for cls in (ModelingClothObject, ModelingClothCollider, ModelingClothGlobals,
                ModelingClothPinObject, ModelingClothVirtualSpring,
                ModelingClothObjectProps):
        bpy.utils.unregister_class(cls)

    # Panels
    bpy.utils.unregister_class(ModelingClothPanel)
# Allows running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
| [
"numpy.clip",
"bpy.context.scene.objects.link",
"numpy.sqrt",
"numpy.arccos",
"numpy.hstack",
"bpy.data.objects.new",
"webbrowser.open",
"bmesh.new",
"numpy.array",
"bpy.app.handlers.load_post.append",
"numpy.einsum",
"sys.exc_info",
"numpy.add.at",
"numpy.sin",
"bpy.ops.object.material_... | [((3656, 3667), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (3665, 3667), False, 'import bmesh\n'), ((3702, 3745), 'bmesh.ops.triangulate', 'bmesh.ops.triangulate', (['obm'], {'faces': 'obm.faces'}), '(obm, faces=obm.faces)\n', (3723, 3745), False, 'import bmesh\n'), ((3929, 3986), 'numpy.array', 'np.array', (['[[v.index for v in f.verts] for f in obm.faces]'], {}), '([[v.index for v in f.verts] for f in obm.faces])\n', (3937, 3986), True, 'import numpy as np\n'), ((4901, 4953), 'numpy.cross', 'np.cross', (['col.cross_vecs[:, 0]', 'col.cross_vecs[:, 1]'], {}), '(col.cross_vecs[:, 0], col.cross_vecs[:, 1])\n', (4909, 4953), True, 'import numpy as np\n'), ((4971, 5019), 'numpy.einsum', 'np.einsum', (['"""ij, ij->i"""', 'col.normals', 'col.normals'], {}), "('ij, ij->i', col.normals, col.normals)\n", (4980, 5019), True, 'import numpy as np\n'), ((5835, 5864), 'numpy.array', 'np.array', (['col.ob.matrix_world'], {}), '(col.ob.matrix_world)\n', (5843, 5864), True, 'import numpy as np\n'), ((6399, 6442), 'numpy.array', 'np.array', (['ob.matrix_world'], {'dtype': 'np.float32'}), '(ob.matrix_world, dtype=np.float32)\n', (6407, 6442), True, 'import numpy as np\n'), ((6601, 6644), 'numpy.array', 'np.array', (['ob.matrix_world'], {'dtype': 'np.float32'}), '(ob.matrix_world, dtype=np.float32)\n', (6609, 6644), True, 'import numpy as np\n'), ((6839, 6882), 'numpy.array', 'np.array', (['ob.matrix_world'], {'dtype': 'np.float32'}), '(ob.matrix_world, dtype=np.float32)\n', (6847, 6882), True, 'import numpy as np\n'), ((7307, 7332), 'numpy.array', 'np.array', (['ob.matrix_world'], {}), '(ob.matrix_world)\n', (7315, 7332), True, 'import numpy as np\n'), ((7607, 7637), 'numpy.linalg.inv', 'np.linalg.inv', (['ob.matrix_world'], {}), '(ob.matrix_world)\n', (7620, 7637), True, 'import numpy as np\n'), ((7837, 7867), 'numpy.linalg.inv', 'np.linalg.inv', (['ob.matrix_world'], {}), '(ob.matrix_world)\n', (7850, 7867), True, 'import numpy as np\n'), ((8129, 
8154), 'numpy.array', 'np.array', (['ob.matrix_world'], {}), '(ob.matrix_world)\n', (8137, 8154), True, 'import numpy as np\n'), ((9056, 9089), 'numpy.zeros', 'np.zeros', (['(p_count * 3)'], {'dtype': 'type'}), '(p_count * 3, dtype=type)\n', (9064, 9089), True, 'import numpy as np\n'), ((9875, 9908), 'numpy.zeros', 'np.zeros', (['(p_count * 3)'], {'dtype': 'type'}), '(p_count * 3, dtype=type)\n', (9883, 9908), True, 'import numpy as np\n'), ((12104, 12115), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (12113, 12115), False, 'import bmesh\n'), ((12805, 12842), 'numpy.zeros', 'np.zeros', (['(e_count * 2)'], {'dtype': 'np.int32'}), '(e_count * 2, dtype=np.int32)\n', (12813, 12842), True, 'import numpy as np\n'), ((12856, 12888), 'numpy.zeros', 'np.zeros', (['e_count'], {'dtype': 'np.bool'}), '(e_count, dtype=np.bool)\n', (12864, 12888), True, 'import numpy as np\n'), ((13129, 13146), 'numpy.sum', 'np.sum', (['step_size'], {}), '(step_size)\n', (13135, 13146), True, 'import numpy as np\n'), ((13161, 13195), 'numpy.ones', 'np.ones', (['p_v_count'], {'dtype': 'np.int32'}), '(p_v_count, dtype=np.int32)\n', (13168, 13195), True, 'import numpy as np\n'), ((14113, 14132), 'numpy.array', 'np.array', (['diag_eidx'], {}), '(diag_eidx)\n', (14121, 14132), True, 'import numpy as np\n'), ((14258, 14317), 'numpy.unique', 'np.unique', (['s_t_rav'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(s_t_rav, return_inverse=True, return_counts=True)\n', (14267, 14317), True, 'import numpy as np\n'), ((14380, 14396), 'numpy.any', 'np.any', (['s_counts'], {}), '(s_counts)\n', (14386, 14396), True, 'import numpy as np\n'), ((14941, 14973), 'numpy.arange', 'np.arange', (['count'], {'dtype': 'np.int32'}), '(count, dtype=np.int32)\n', (14950, 14973), True, 'import numpy as np\n'), ((14984, 15023), 'numpy.array', 'np.array', (['[v.select for v in obm.verts]'], {}), '([v.select for v in obm.verts])\n', (14992, 15023), True, 'import numpy as np\n'), ((15077, 15155), 'numpy.array', 
'np.array', (['[[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs]'], {}), '([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])\n', (15085, 15155), True, 'import numpy as np\n'), ((15515, 15561), 'numpy.append', 'np.append', (['cloth.eidx', 'virtual_springs'], {'axis': '(0)'}), '(cloth.eidx, virtual_springs, axis=0)\n', (15524, 15561), True, 'import numpy as np\n'), ((15606, 15639), 'numpy.append', 'np.append', (['existing', 'flip'], {'axis': '(0)'}), '(existing, flip, axis=0)\n', (15615, 15639), True, 'import numpy as np\n'), ((17754, 17788), 'bpy.ops.object.material_slot_add', 'bpy.ops.object.material_slot_add', ([], {}), '()\n', (17786, 17788), False, 'import bpy\n'), ((20159, 20196), 'numpy.zeros', 'np.zeros', (['(count * 3)'], {'dtype': 'np.float32'}), '(count * 3, dtype=np.float32)\n', (20167, 20196), True, 'import numpy as np\n'), ((21854, 21877), 'numpy.abs', 'np.abs', (['(tri_nor @ w_vec)'], {}), '(tri_nor @ w_vec)\n', (21860, 21877), True, 'import numpy as np\n'), ((22816, 22829), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (22822, 22829), True, 'import numpy as np\n'), ((23075, 23093), 'numpy.cross', 'np.cross', (['axis', 'co'], {}), '(axis, co)\n', (23083, 23093), True, 'import numpy as np\n'), ((23106, 23127), 'numpy.cross', 'np.cross', (['axis', 'move1'], {}), '(axis, move1)\n', (23114, 23127), True, 'import numpy as np\n'), ((23470, 23511), 'numpy.einsum', 'np.einsum', (['"""ij,ikj->ik"""', 'be_vecs', 'te_vecs'], {}), "('ij,ikj->ik', be_vecs, te_vecs)\n", (23479, 23511), True, 'import numpy as np\n'), ((23526, 23565), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'be_vecs', 'be_vecs'], {}), "('ij,ij->i', be_vecs, be_vecs)\n", (23535, 23565), True, 'import numpy as np\n'), ((23578, 23619), 'numpy.nan_to_num', 'np.nan_to_num', (['(bcp_dots / be_dots[:, nax])'], {}), '(bcp_dots / be_dots[:, nax])\n', (23591, 23619), True, 'import numpy as np\n'), ((23742, 23786), 'numpy.einsum', 'np.einsum', 
(['"""ijk,ijk->ij"""', 'tcp_vecs', 'tcp_vecs'], {}), "('ijk,ijk->ij', tcp_vecs, tcp_vecs)\n", (23751, 23786), True, 'import numpy as np\n'), ((23978, 24019), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'u_tcp_ls', 'u_tcp_rs'], {}), "('ij,ij->i', u_tcp_ls, u_tcp_rs)\n", (23987, 24019), True, 'import numpy as np\n'), ((24270, 24314), 'numpy.cross', 'np.cross', (['u_tcp_vecs[:, 0]', 'u_tcp_vecs[:, 1]'], {}), '(u_tcp_vecs[:, 0], u_tcp_vecs[:, 1])\n', (24278, 24314), True, 'import numpy as np\n'), ((25222, 25262), 'numpy.add.at', 'np.add.at', (['cloth.co', 'tips[:, 0]', 'l_force'], {}), '(cloth.co, tips[:, 0], l_force)\n', (25231, 25262), True, 'import numpy as np\n'), ((25267, 25307), 'numpy.add.at', 'np.add.at', (['cloth.co', 'tips[:, 1]', 'r_force'], {}), '(cloth.co, tips[:, 1], r_force)\n', (25276, 25307), True, 'import numpy as np\n'), ((26560, 26592), 'bpy.ops.mesh.bridge_edge_loops', 'bpy.ops.mesh.bridge_edge_loops', ([], {}), '()\n', (26590, 26592), False, 'import bpy\n'), ((26597, 26634), 'bpy.ops.mesh.delete', 'bpy.ops.mesh.delete', ([], {'type': '"""ONLY_FACE"""'}), "(type='ONLY_FACE')\n", (26616, 26634), False, 'import bpy\n'), ((30551, 30592), 'numpy.in1d', 'np.in1d', (['cloth.eidx_tiler', 'cloth.pindexer'], {}), '(cloth.eidx_tiler, cloth.pindexer)\n', (30558, 30592), True, 'import numpy as np\n'), ((30819, 30862), 'numpy.zeros', 'np.zeros', (['(cloth.count * 3)'], {'dtype': 'np.float32'}), '(cloth.count * 3, dtype=np.float32)\n', (30827, 30862), True, 'import numpy as np\n'), ((31014, 31057), 'numpy.zeros', 'np.zeros', (['(cloth.count * 3)'], {'dtype': 'np.float32'}), '(cloth.count * 3, dtype=np.float32)\n', (31022, 31057), True, 'import numpy as np\n'), ((31223, 31266), 'numpy.zeros', 'np.zeros', (['(cloth.count * 3)'], {'dtype': 'np.float32'}), '(cloth.count * 3, dtype=np.float32)\n', (31231, 31266), True, 'import numpy as np\n'), ((31328, 31371), 'numpy.zeros', 'np.zeros', (['(cloth.count * 3)'], {'dtype': 'np.float32'}), '(cloth.count * 3, 
dtype=np.float32)\n', (31336, 31371), True, 'import numpy as np\n'), ((31442, 31453), 'numpy.copy', 'np.copy', (['co'], {}), '(co)\n', (31449, 31453), True, 'import numpy as np\n'), ((31481, 31517), 'numpy.zeros', 'np.zeros', (['co.shape'], {'dtype': 'np.float32'}), '(co.shape, dtype=np.float32)\n', (31489, 31517), True, 'import numpy as np\n'), ((31598, 31637), 'numpy.zeros', 'np.zeros', (['cloth.count'], {'dtype': 'np.float32'}), '(cloth.count, dtype=np.float32)\n', (31606, 31637), True, 'import numpy as np\n'), ((32129, 32177), 'numpy.arange', 'np.arange', (['cloth.tridex.shape[0]'], {'dtype': 'np.int32'}), '(cloth.tridex.shape[0], dtype=np.int32)\n', (32138, 32177), True, 'import numpy as np\n'), ((32349, 32413), 'numpy.unique', 'np.unique', (['cloth.tridex'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(cloth.tridex, return_inverse=True, return_counts=True)\n', (32358, 32413), True, 'import numpy as np\n'), ((32490, 32536), 'numpy.zeros', 'np.zeros', (['cloth.tri_co.shape'], {'dtype': 'np.float32'}), '(cloth.tri_co.shape, dtype=np.float32)\n', (32498, 32536), True, 'import numpy as np\n'), ((32557, 32603), 'numpy.zeros', 'np.zeros', (['cloth.tri_co.shape'], {'dtype': 'np.float32'}), '(cloth.tri_co.shape, dtype=np.float32)\n', (32565, 32603), True, 'import numpy as np\n'), ((32609, 32643), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': 'mode'}), '(mode=mode)\n', (32632, 32643), False, 'import bpy\n'), ((32847, 32882), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'svecs', 'svecs'], {}), "('ij,ij->i', svecs, svecs)\n", (32856, 32882), True, 'import numpy as np\n'), ((33146, 33157), 'time.time', 'time.time', ([], {}), '()\n', (33155, 33157), False, 'import time, sys\n'), ((41732, 41751), 'numpy.min', 'np.min', (['co1'], {'axis': '(0)'}), '(co1, axis=0)\n', (41738, 41751), True, 'import numpy as np\n'), ((41766, 41785), 'numpy.max', 'np.max', (['co2'], {'axis': '(0)'}), '(co2, axis=0)\n', (41772, 41785), True, 'import numpy 
as np\n'), ((41794, 41827), 'numpy.all', 'np.all', (['(co2_max + fudge > co1_min)'], {}), '(co2_max + fudge > co1_min)\n', (41800, 41827), True, 'import numpy as np\n'), ((42346, 42371), 'numpy.all', 'np.all', (['check_min'], {'axis': '(1)'}), '(check_min, axis=1)\n', (42352, 42371), True, 'import numpy as np\n'), ((42528, 42553), 'numpy.all', 'np.all', (['check_max'], {'axis': '(1)'}), '(check_max, axis=1)\n', (42534, 42553), True, 'import numpy as np\n'), ((42951, 42976), 'numpy.all', 'np.all', (['check_min'], {'axis': '(1)'}), '(check_min, axis=1)\n', (42957, 42976), True, 'import numpy as np\n'), ((43128, 43153), 'numpy.all', 'np.all', (['check_max'], {'axis': '(1)'}), '(check_max, axis=1)\n', (43134, 43153), True, 'import numpy as np\n'), ((43489, 43502), 'numpy.min', 'np.min', (['tymin'], {}), '(tymin)\n', (43495, 43502), True, 'import numpy as np\n'), ((43515, 43528), 'numpy.max', 'np.max', (['tymax'], {}), '(tymax)\n', (43521, 43528), True, 'import numpy as np\n'), ((43540, 43593), 'numpy.linspace', 'np.linspace', (['t_min', 't_max'], {'num': 'subs', 'dtype': 'np.float32'}), '(t_min, t_max, num=subs, dtype=np.float32)\n', (43551, 43593), True, 'import numpy as np\n'), ((44237, 44250), 'numpy.min', 'np.min', (['txmin'], {}), '(txmin)\n', (44243, 44250), True, 'import numpy as np\n'), ((44263, 44276), 'numpy.max', 'np.max', (['txmax'], {}), '(txmax)\n', (44269, 44276), True, 'import numpy as np\n'), ((44288, 44341), 'numpy.linspace', 'np.linspace', (['t_min', 't_max'], {'num': 'subs', 'dtype': 'np.float32'}), '(t_min, t_max, num=subs, dtype=np.float32)\n', (44299, 44341), True, 'import numpy as np\n'), ((44960, 44973), 'numpy.min', 'np.min', (['tzmin'], {}), '(tzmin)\n', (44966, 44973), True, 'import numpy as np\n'), ((44986, 44999), 'numpy.max', 'np.max', (['tzmax'], {}), '(tzmax)\n', (44992, 44999), True, 'import numpy as np\n'), ((45011, 45064), 'numpy.linspace', 'np.linspace', (['t_min', 't_max'], {'num': 'subs', 'dtype': 'np.float32'}), '(t_min, t_max, 
num=subs, dtype=np.float32)\n', (45022, 45064), True, 'import numpy as np\n'), ((45303, 45331), 'numpy.arange', 'np.arange', (['co_bools.shape[1]'], {}), '(co_bools.shape[1])\n', (45312, 45331), True, 'import numpy as np\n'), ((45347, 45376), 'numpy.arange', 'np.arange', (['tri_bools.shape[1]'], {}), '(tri_bools.shape[1])\n', (45356, 45376), True, 'import numpy as np\n'), ((48140, 48184), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', 'tri_vecs', 'tri_vecs'], {}), "('ijk,ijk->ij', tri_vecs, tri_vecs)\n", (48149, 48184), True, 'import numpy as np\n'), ((48241, 48270), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'v0', 'v1'], {}), "('ij,ij->i', v0, v1)\n", (48250, 48270), True, 'import numpy as np\n'), ((48281, 48310), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'v0', 'v2'], {}), "('ij,ij->i', v0, v2)\n", (48290, 48310), True, 'import numpy as np\n'), ((48321, 48350), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'v1', 'v2'], {}), "('ij,ij->i', v1, v2)\n", (48330, 48350), True, 'import numpy as np\n'), ((52873, 52902), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['proxy'], {}), '(proxy)\n', (52895, 52902), False, 'import bpy\n'), ((56432, 56474), 'numpy.arange', 'np.arange', (['col.co.shape[0]'], {'dtype': 'np.int32'}), '(col.co.shape[0], dtype=np.int32)\n', (56441, 56474), True, 'import numpy as np\n'), ((56571, 56586), 'numpy.copy', 'np.copy', (['col.co'], {}), '(col.co)\n', (56578, 56586), True, 'import numpy as np\n'), ((56642, 56688), 'numpy.arange', 'np.arange', (['col.tridex.shape[0]'], {'dtype': 'np.int32'}), '(col.tridex.shape[0], dtype=np.int32)\n', (56651, 56688), True, 'import numpy as np\n'), ((56808, 56902), 'numpy.array', 'np.array', (['(col.co + col.v_normals * ob.mclo.object_collision_outer_margin)'], {'dtype': 'np.float32'}), '(col.co + col.v_normals * ob.mclo.object_collision_outer_margin,\n dtype=np.float32)\n', (56816, 56902), True, 'import numpy as np\n'), ((57126, 57155), 'bpy.data.meshes.remove', 
'bpy.data.meshes.remove', (['proxy'], {}), '(proxy)\n', (57148, 57155), False, 'import bpy\n'), ((57530, 57545), 'numpy.copy', 'np.copy', (['col.co'], {}), '(col.co)\n', (57537, 57545), True, 'import numpy as np\n'), ((57599, 57645), 'numpy.arange', 'np.arange', (['col.tridex.shape[0]'], {'dtype': 'np.int32'}), '(col.tridex.shape[0], dtype=np.int32)\n', (57608, 57645), True, 'import numpy as np\n'), ((57752, 57846), 'numpy.array', 'np.array', (['(col.co + col.v_normals * ob.mclo.object_collision_outer_margin)'], {'dtype': 'np.float32'}), '(col.co + col.v_normals * ob.mclo.object_collision_outer_margin,\n dtype=np.float32)\n', (57760, 57846), True, 'import numpy as np\n'), ((94810, 94971), 'webbrowser.open', 'webbrowser.open', (['"""https://www.paypal.com/donate/?token=G1UymFn4CP8lSFn1r63jf_XOHAuSBfQJWFj9xjW9kWCScqkfYUCdTzP-ywiHIxHxYe7uJW&country.x=US&locale.x=US"""'], {}), "(\n 'https://www.paypal.com/donate/?token=G1UymFn4CP8lSFn1r63jf_XOHAuSBfQJWFj9xjW9kWCScqkfYUCdTzP-ywiHIxHxYe7uJW&country.x=US&locale.x=US'\n )\n", (94825, 94971), False, 'import webbrowser\n'), ((95101, 95143), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothSew'], {}), '(ModelingClothSew)\n', (95125, 95143), False, 'import bpy\n'), ((95148, 95190), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothPin'], {}), '(ModelingClothPin)\n', (95172, 95190), False, 'import bpy\n'), ((95195, 95238), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothDrag'], {}), '(ModelingClothDrag)\n', (95219, 95238), False, 'import bpy\n'), ((95243, 95279), 'bpy.utils.register_class', 'bpy.utils.register_class', (['DeletePins'], {}), '(DeletePins)\n', (95267, 95279), False, 'import bpy\n'), ((95284, 95320), 'bpy.utils.register_class', 'bpy.utils.register_class', (['SelectPins'], {}), '(SelectPins)\n', (95308, 95320), False, 'import bpy\n'), ((95325, 95362), 'bpy.utils.register_class', 'bpy.utils.register_class', (['PinSelected'], {}), '(PinSelected)\n', 
(95349, 95362), False, 'import bpy\n'), ((95367, 95403), 'bpy.utils.register_class', 'bpy.utils.register_class', (['GrowSource'], {}), '(GrowSource)\n', (95391, 95403), False, 'import bpy\n'), ((95408, 95446), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ShrinkSource'], {}), '(ShrinkSource)\n', (95432, 95446), False, 'import bpy\n'), ((95451, 95488), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ResetShapes'], {}), '(ResetShapes)\n', (95475, 95488), False, 'import bpy\n'), ((95493, 95536), 'bpy.utils.register_class', 'bpy.utils.register_class', (['AddVirtualSprings'], {}), '(AddVirtualSprings)\n', (95517, 95536), False, 'import bpy\n'), ((95541, 95587), 'bpy.utils.register_class', 'bpy.utils.register_class', (['RemoveVirtualSprings'], {}), '(RemoveVirtualSprings)\n', (95565, 95587), False, 'import bpy\n'), ((95592, 95634), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ApplyClothToMesh'], {}), '(ApplyClothToMesh)\n', (95616, 95634), False, 'import bpy\n'), ((95639, 95680), 'bpy.utils.register_class', 'bpy.utils.register_class', (['CollisionSeries'], {}), '(CollisionSeries)\n', (95663, 95680), False, 'import bpy\n'), ((95685, 95732), 'bpy.utils.register_class', 'bpy.utils.register_class', (['CollisionSeriesKindle'], {}), '(CollisionSeriesKindle)\n', (95709, 95732), False, 'import bpy\n'), ((95737, 95769), 'bpy.utils.register_class', 'bpy.utils.register_class', (['Donate'], {}), '(Donate)\n', (95761, 95769), False, 'import bpy\n'), ((95787, 95832), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothObject'], {}), '(ModelingClothObject)\n', (95811, 95832), False, 'import bpy\n'), ((95837, 95884), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothCollider'], {}), '(ModelingClothCollider)\n', (95861, 95884), False, 'import bpy\n'), ((95889, 95935), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothGlobals'], {}), '(ModelingClothGlobals)\n', (95913, 95935), False, 
'import bpy\n'), ((95940, 95988), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothPinObject'], {}), '(ModelingClothPinObject)\n', (95964, 95988), False, 'import bpy\n'), ((95993, 96045), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothVirtualSpring'], {}), '(ModelingClothVirtualSpring)\n', (96017, 96045), False, 'import bpy\n'), ((96050, 96100), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothObjectProps'], {}), '(ModelingClothObjectProps)\n', (96074, 96100), False, 'import bpy\n'), ((96143, 96187), 'bpy.utils.register_class', 'bpy.utils.register_class', (['ModelingClothPanel'], {}), '(ModelingClothPanel)\n', (96167, 96187), False, 'import bpy\n'), ((96213, 96269), 'bpy.app.handlers.frame_change_post.append', 'bpy.app.handlers.frame_change_post.append', (['handler_frame'], {}), '(handler_frame)\n', (96254, 96269), False, 'import bpy\n'), ((96274, 96330), 'bpy.app.handlers.scene_update_post.append', 'bpy.app.handlers.scene_update_post.append', (['handler_scene'], {}), '(handler_scene)\n', (96315, 96330), False, 'import bpy\n'), ((96360, 96413), 'bpy.app.handlers.load_post.append', 'bpy.app.handlers.load_post.append', (['refresh_cloth_data'], {}), '(refresh_cloth_data)\n', (96393, 96413), False, 'import bpy\n'), ((96465, 96518), 'bpy.app.handlers.load_post.remove', 'bpy.app.handlers.load_post.remove', (['refresh_cloth_data'], {}), '(refresh_cloth_data)\n', (96498, 96518), False, 'import bpy\n'), ((96551, 96607), 'bpy.app.handlers.frame_change_post.remove', 'bpy.app.handlers.frame_change_post.remove', (['handler_frame'], {}), '(handler_frame)\n', (96592, 96607), False, 'import bpy\n'), ((96612, 96668), 'bpy.app.handlers.scene_update_post.remove', 'bpy.app.handlers.scene_update_post.remove', (['handler_scene'], {}), '(handler_scene)\n', (96653, 96668), False, 'import bpy\n'), ((96690, 96734), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothSew'], {}), 
'(ModelingClothSew)\n', (96716, 96734), False, 'import bpy\n'), ((96739, 96783), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothPin'], {}), '(ModelingClothPin)\n', (96765, 96783), False, 'import bpy\n'), ((96788, 96833), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothDrag'], {}), '(ModelingClothDrag)\n', (96814, 96833), False, 'import bpy\n'), ((96838, 96876), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['DeletePins'], {}), '(DeletePins)\n', (96864, 96876), False, 'import bpy\n'), ((96881, 96919), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['SelectPins'], {}), '(SelectPins)\n', (96907, 96919), False, 'import bpy\n'), ((96924, 96963), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['PinSelected'], {}), '(PinSelected)\n', (96950, 96963), False, 'import bpy\n'), ((96968, 97006), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['GrowSource'], {}), '(GrowSource)\n', (96994, 97006), False, 'import bpy\n'), ((97011, 97051), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ShrinkSource'], {}), '(ShrinkSource)\n', (97037, 97051), False, 'import bpy\n'), ((97056, 97095), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ResetShapes'], {}), '(ResetShapes)\n', (97082, 97095), False, 'import bpy\n'), ((97100, 97145), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['AddVirtualSprings'], {}), '(AddVirtualSprings)\n', (97126, 97145), False, 'import bpy\n'), ((97150, 97198), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['RemoveVirtualSprings'], {}), '(RemoveVirtualSprings)\n', (97176, 97198), False, 'import bpy\n'), ((97203, 97247), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ApplyClothToMesh'], {}), '(ApplyClothToMesh)\n', (97229, 97247), False, 'import bpy\n'), ((97252, 97295), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['CollisionSeries'], {}), '(CollisionSeries)\n', 
(97278, 97295), False, 'import bpy\n'), ((97300, 97349), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['CollisionSeriesKindle'], {}), '(CollisionSeriesKindle)\n', (97326, 97349), False, 'import bpy\n'), ((97354, 97388), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['Donate'], {}), '(Donate)\n', (97380, 97388), False, 'import bpy\n'), ((97430, 97477), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothObject'], {}), '(ModelingClothObject)\n', (97456, 97477), False, 'import bpy\n'), ((97482, 97531), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothCollider'], {}), '(ModelingClothCollider)\n', (97508, 97531), False, 'import bpy\n'), ((97536, 97584), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothGlobals'], {}), '(ModelingClothGlobals)\n', (97562, 97584), False, 'import bpy\n'), ((97589, 97639), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothPinObject'], {}), '(ModelingClothPinObject)\n', (97615, 97639), False, 'import bpy\n'), ((97644, 97698), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothVirtualSpring'], {}), '(ModelingClothVirtualSpring)\n', (97670, 97698), False, 'import bpy\n'), ((97703, 97755), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothObjectProps'], {}), '(ModelingClothObjectProps)\n', (97729, 97755), False, 'import bpy\n'), ((97774, 97820), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['ModelingClothPanel'], {}), '(ModelingClothPanel)\n', (97800, 97820), False, 'import bpy\n'), ((2924, 2957), 'numpy.zeros', 'np.zeros', (['(c * 3)'], {'dtype': 'np.float32'}), '(c * 3, dtype=np.float32)\n', (2932, 2957), True, 'import numpy as np\n'), ((4296, 4361), 'numpy.array', 'np.array', (['[[e.verts[0].index, e.verts[1].index] for e in link_ed]'], {}), '([[e.verts[0].index, e.verts[1].index] for e in link_ed])\n', (4304, 4361), True, 'import numpy as 
np\n'), ((4375, 4454), 'numpy.array', 'np.array', (['[[[v.index for v in f.verts] for f in e.link_faces] for e in link_ed]'], {}), '([[[v.index for v in f.verts] for f in e.link_faces] for e in link_ed])\n', (4383, 4454), True, 'import numpy as np\n'), ((5039, 5060), 'numpy.sqrt', 'np.sqrt', (['col.nor_dots'], {}), '(col.nor_dots)\n', (5046, 5060), True, 'import numpy as np\n'), ((5286, 5330), 'numpy.cross', 'np.cross', (['cross_vecs[:, 0]', 'cross_vecs[:, 1]'], {}), '(cross_vecs[:, 0], cross_vecs[:, 1])\n', (5294, 5330), True, 'import numpy as np\n'), ((7175, 7208), 'numpy.zeros', 'np.zeros', (['(c * 3)'], {'dtype': 'np.float32'}), '(c * 3, dtype=np.float32)\n', (7183, 7208), True, 'import numpy as np\n'), ((8815, 8847), 'numpy.zeros', 'np.zeros', (['m_count'], {'dtype': 'np.bool'}), '(m_count, dtype=np.bool)\n', (8823, 8847), True, 'import numpy as np\n'), ((8866, 8879), 'numpy.copy', 'np.copy', (['show'], {}), '(show)\n', (8873, 8879), True, 'import numpy as np\n'), ((9634, 9666), 'numpy.zeros', 'np.zeros', (['m_count'], {'dtype': 'np.bool'}), '(m_count, dtype=np.bool)\n', (9642, 9666), True, 'import numpy as np\n'), ((9685, 9698), 'numpy.copy', 'np.copy', (['show'], {}), '(show)\n', (9692, 9698), True, 'import numpy as np\n'), ((10273, 10305), 'numpy.zeros', 'np.zeros', (['m_count'], {'dtype': 'np.bool'}), '(m_count, dtype=np.bool)\n', (10281, 10305), True, 'import numpy as np\n'), ((10324, 10337), 'numpy.copy', 'np.copy', (['show'], {}), '(show)\n', (10331, 10337), True, 'import numpy as np\n'), ((10951, 10969), 'numpy.dot', 'np.dot', (['vec2', 'vec1'], {}), '(vec2, vec1)\n', (10957, 10969), True, 'import numpy as np\n'), ((10972, 10990), 'numpy.dot', 'np.dot', (['vec1', 'vec1'], {}), '(vec1, vec1)\n', (10978, 10990), True, 'import numpy as np\n'), ((15310, 15333), 'numpy.in1d', 'np.in1d', (['ls', 'idxer[sel]'], {}), '(ls, idxer[sel])\n', (15317, 15333), True, 'import numpy as np\n'), ((17216, 17260), 'bpy.data.meshes.new', 'bpy.data.meshes.new', 
(['"""ModelingClothPinGuide"""'], {}), "('ModelingClothPinGuide')\n", (17235, 17260), False, 'import bpy\n'), ((17349, 17381), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'mesh'], {}), '(name, mesh)\n', (17369, 17381), False, 'import bpy\n'), ((17390, 17429), 'bpy.context.scene.objects.link', 'bpy.context.scene.objects.link', (['mesh_ob'], {}), '(mesh_ob)\n', (17420, 17429), False, 'import bpy\n'), ((17929, 17981), 'bpy.data.materials.new', 'bpy.data.materials.new', ([], {'name': '"""ModelingClothPinGuide"""'}), "(name='ModelingClothPinGuide')\n", (17951, 17981), False, 'import bpy\n'), ((18334, 18400), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (["bpy.data.objects['ModelingClothPinGuide']"], {}), "(bpy.data.objects['ModelingClothPinGuide'])\n", (18357, 18400), False, 'import bpy\n'), ((18562, 18596), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['guide_mesh'], {}), '(guide_mesh)\n', (18584, 18596), False, 'import bpy\n'), ((21236, 21275), 'numpy.zeros', 'np.zeros', (['cloth.count'], {'dtype': 'np.float32'}), '(cloth.count, dtype=np.float32)\n', (21244, 21275), True, 'import numpy as np\n'), ((21293, 21322), 'numpy.random.random', 'np.random.random', (['cloth.count'], {}), '(cloth.count)\n', (21309, 21322), True, 'import numpy as np\n'), ((24090, 24115), 'numpy.clip', 'np.clip', (['angle_dot', '(-1)', '(1)'], {}), '(angle_dot, -1, 1)\n', (24097, 24115), True, 'import numpy as np\n'), ((24334, 24375), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'be_vecs', 'tcp_cross'], {}), "('ij,ij->i', be_vecs, tcp_cross)\n", (24343, 24375), True, 'import numpy as np\n'), ((24418, 24438), 'numpy.arccos', 'np.arccos', (['angle_dot'], {}), '(angle_dot)\n', (24427, 24438), True, 'import numpy as np\n'), ((28561, 28599), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (28584, 28599), False, 'import bpy\n'), ((29985, 30030), 'numpy.append', 'np.append', (['uni_edges[0]', 'uni_edges[1]'], 
{'axis': '(0)'}), '(uni_edges[0], uni_edges[1], axis=0)\n', (29994, 30030), True, 'import numpy as np\n'), ((30177, 30255), 'numpy.array', 'np.array', (['[[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs]'], {}), '([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])\n', (30185, 30255), True, 'import numpy as np\n'), ((30277, 30323), 'numpy.append', 'np.append', (['cloth.eidx', 'virtual_springs'], {'axis': '(0)'}), '(cloth.eidx, virtual_springs, axis=0)\n', (30286, 30323), True, 'import numpy as np\n'), ((30475, 30513), 'numpy.arange', 'np.arange', (['cloth.count'], {'dtype': 'np.int32'}), '(cloth.count, dtype=np.int32)\n', (30484, 30513), True, 'import numpy as np\n'), ((37892, 37936), 'numpy.einsum', 'np.einsum', (['"""ij, ij->i"""', 'cloth.vel', 'cloth.vel'], {}), "('ij, ij->i', cloth.vel, cloth.vel)\n", (37901, 37936), True, 'import numpy as np\n'), ((38198, 38217), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (38206, 38217), True, 'import numpy as np\n'), ((38262, 38280), 'numpy.any', 'np.any', (['check_wind'], {}), '(check_wind)\n', (38268, 38280), True, 'import numpy as np\n'), ((41847, 41866), 'numpy.max', 'np.max', (['co1'], {'axis': '(0)'}), '(co1, axis=0)\n', (41853, 41866), True, 'import numpy as np\n'), ((41885, 41904), 'numpy.min', 'np.min', (['co2'], {'axis': '(0)'}), '(co2, axis=0)\n', (41891, 41904), True, 'import numpy as np\n'), ((41933, 41966), 'numpy.all', 'np.all', (['(co1_max > co2_min - fudge)'], {}), '(co1_max > co2_min - fudge)\n', (41939, 41966), True, 'import numpy as np\n'), ((42269, 42291), 'numpy.min', 'np.min', (['tri_co'], {'axis': '(1)'}), '(tri_co, axis=1)\n', (42275, 42291), True, 'import numpy as np\n'), ((42443, 42473), 'numpy.max', 'np.max', (['tri_co[in_min]'], {'axis': '(1)'}), '(tri_co[in_min], axis=1)\n', (42449, 42473), True, 'import numpy as np\n'), ((42878, 42901), 'numpy.min', 'np.min', (['tri_min'], {'axis': '(0)'}), '(tri_min, axis=0)\n', (42884, 42901), True, 
'import numpy as np\n'), ((43047, 43070), 'numpy.max', 'np.max', (['tri_max'], {'axis': '(0)'}), '(tri_max, axis=0)\n', (43053, 43070), True, 'import numpy as np\n'), ((45757, 45774), 'numpy.hstack', 'np.hstack', (['c_peat'], {}), '(c_peat)\n', (45766, 45774), True, 'import numpy as np\n'), ((45776, 45793), 'numpy.hstack', 'np.hstack', (['t_peat'], {}), '(t_peat)\n', (45785, 45793), True, 'import numpy as np\n'), ((47935, 47980), 'numpy.arange', 'np.arange', (['in_margin.shape[0]'], {'dtype': 'np.int32'}), '(in_margin.shape[0], dtype=np.int32)\n', (47944, 47980), True, 'import numpy as np\n'), ((50028, 50043), 'numpy.any', 'np.any', (['tris_in'], {}), '(tris_in)\n', (50034, 50043), True, 'import numpy as np\n'), ((53114, 53136), 'numpy.min', 'np.min', (['tri_co'], {'axis': '(1)'}), '(tri_co, axis=1)\n', (53120, 53136), True, 'import numpy as np\n'), ((53160, 53182), 'numpy.max', 'np.max', (['tri_co'], {'axis': '(1)'}), '(tri_co, axis=1)\n', (53166, 53182), True, 'import numpy as np\n'), ((53545, 53576), 'numpy.any', 'np.any', (['check_neighbors'], {'axis': '(1)'}), '(check_neighbors, axis=1)\n', (53551, 53576), True, 'import numpy as np\n'), ((53751, 53784), 'numpy.einsum', 'np.einsum', (['"""ij, ij->i"""', 'nor', 'vec2'], {}), "('ij, ij->i', nor, vec2)\n", (53760, 53784), True, 'import numpy as np\n'), ((54122, 54139), 'numpy.any', 'np.any', (['in_margin'], {}), '(in_margin)\n', (54128, 54139), True, 'import numpy as np\n'), ((55608, 55629), 'numpy.any', 'np.any', (['check'], {'axis': '(2)'}), '(check, axis=2)\n', (55614, 55629), True, 'import numpy as np\n'), ((63487, 63523), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (63510, 63523), False, 'import bpy\n'), ((64581, 64637), 'bpy_extras.view3d_utils.region_2d_to_vector_3d', 'view3d_utils.region_2d_to_vector_3d', (['region', 'rv3d', 'coord'], {}), '(region, rv3d, coord)\n', (64616, 64637), False, 'from bpy_extras import view3d_utils\n'), ((64659, 64715), 
'bpy_extras.view3d_utils.region_2d_to_origin_3d', 'view3d_utils.region_2d_to_origin_3d', (['region', 'rv3d', 'coord'], {}), '(region, rv3d, coord)\n', (64694, 64715), False, 'from bpy_extras import view3d_utils\n'), ((66183, 66225), 'bpy.context.window.cursor_set', 'bpy.context.window.cursor_set', (['"""CROSSHAIR"""'], {}), "('CROSSHAIR')\n", (66212, 66225), False, 'import bpy\n'), ((69444, 69500), 'bpy_extras.view3d_utils.region_2d_to_vector_3d', 'view3d_utils.region_2d_to_vector_3d', (['region', 'rv3d', 'coord'], {}), '(region, rv3d, coord)\n', (69479, 69500), False, 'from bpy_extras import view3d_utils\n'), ((69522, 69578), 'bpy_extras.view3d_utils.region_2d_to_origin_3d', 'view3d_utils.region_2d_to_origin_3d', (['region', 'rv3d', 'coord'], {}), '(region, rv3d, coord)\n', (69557, 69578), False, 'from bpy_extras import view3d_utils\n'), ((71359, 71396), 'bpy.context.window.cursor_set', 'bpy.context.window.cursor_set', (['"""HAND"""'], {}), "('HAND')\n", (71388, 71396), False, 'import bpy\n'), ((74147, 74185), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (74170, 74185), False, 'import bpy\n'), ((74920, 74956), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (74943, 74956), False, 'import bpy\n'), ((77918, 77957), 'numpy.zeros', 'np.zeros', (['(v_count * 3)'], {'dtype': 'np.float32'}), '(v_count * 3, dtype=np.float32)\n', (77926, 77957), True, 'import numpy as np\n'), ((94363, 94417), 'webbrowser.open', 'webbrowser.open', (['"""https://www.createspace.com/6043857"""'], {}), "('https://www.createspace.com/6043857')\n", (94378, 94417), False, 'import webbrowser\n'), ((94426, 94448), 'imp.reload', 'imp.reload', (['webbrowser'], {}), '(webbrowser)\n', (94436, 94448), False, 'import imp\n'), ((94457, 94511), 'webbrowser.open', 'webbrowser.open', (['"""https://www.createspace.com/7164863"""'], {}), "('https://www.createspace.com/7164863')\n", (94472, 
94511), False, 'import webbrowser\n'), ((94550, 94659), 'webbrowser.open', 'webbrowser.open', (['"""https://www.amazon.com/Resolve-Immortal-Flesh-Collision-Book-ebook/dp/B01CO3MBVQ"""'], {}), "(\n 'https://www.amazon.com/Resolve-Immortal-Flesh-Collision-Book-ebook/dp/B01CO3MBVQ'\n )\n", (94565, 94659), False, 'import webbrowser\n'), ((94658, 94680), 'imp.reload', 'imp.reload', (['webbrowser'], {}), '(webbrowser)\n', (94668, 94680), False, 'import imp\n'), ((94689, 94800), 'webbrowser.open', 'webbrowser.open', (['"""https://www.amazon.com/Formulacrum-Collision-Book-Rich-Colburn-ebook/dp/B0711P744G"""'], {}), "(\n 'https://www.amazon.com/Formulacrum-Collision-Book-Rich-Colburn-ebook/dp/B0711P744G'\n )\n", (94704, 94800), False, 'import webbrowser\n'), ((12217, 12246), 'bmesh.from_edit_mesh', 'bmesh.from_edit_mesh', (['ob.data'], {}), '(ob.data)\n', (12237, 12246), False, 'import bmesh\n'), ((14537, 14550), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (14545, 14550), True, 'import numpy as np\n'), ((14568, 14594), 'numpy.append', 'np.append', (['gr', 'ls[rs == i]'], {}), '(gr, ls[rs == i])\n', (14577, 14594), True, 'import numpy as np\n'), ((14610, 14636), 'numpy.append', 'np.append', (['gr', 'rs[ls == i]'], {}), '(gr, rs[ls == i])\n', (14619, 14636), True, 'import numpy as np\n'), ((15353, 15391), 'numpy.arange', 'np.arange', (['ls.shape[0]'], {'dtype': 'np.int32'}), '(ls.shape[0], dtype=np.int32)\n', (15362, 15391), True, 'import numpy as np\n'), ((18814, 18851), 'numpy.zeros', 'np.zeros', (['(count * 3)'], {'dtype': 'np.float32'}), '(count * 3, dtype=np.float32)\n', (18822, 18851), True, 'import numpy as np\n'), ((19001, 19020), 'numpy.mean', 'np.mean', (['co'], {'axis': '(0)'}), '(co, axis=0)\n', (19008, 19020), True, 'import numpy as np\n'), ((20915, 20950), 'numpy.array', 'np.array', (['(rs + ls)'], {'dtype': 'np.float32'}), '(rs + ls, dtype=np.float32)\n', (20923, 20950), True, 'import numpy as np\n'), ((22850, 22863), 'numpy.sin', 'np.sin', (['theta'], 
{}), '(theta)\n', (22856, 22863), True, 'import numpy as np\n'), ((23820, 23837), 'numpy.sqrt', 'np.sqrt', (['tcp_dots'], {}), '(tcp_dots)\n', (23827, 23837), True, 'import numpy as np\n'), ((24598, 24614), 'numpy.sqrt', 'np.sqrt', (['be_dots'], {}), '(be_dots)\n', (24605, 24614), True, 'import numpy as np\n'), ((27441, 27474), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (['pin.hook'], {}), '(pin.hook)\n', (27464, 27474), False, 'import bpy\n'), ((31651, 31680), 'numpy.random.random', 'np.random.random', (['cloth.count'], {}), '(cloth.count)\n', (31667, 31680), True, 'import numpy as np\n'), ((34706, 34741), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'svecs', 'svecs'], {}), "('ij,ij->i', svecs, svecs)\n", (34715, 34741), True, 'import numpy as np\n'), ((35481, 35514), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'vecs', 'vecs'], {}), "('ij,ij->i', vecs, vecs)\n", (35490, 35514), True, 'import numpy as np\n'), ((35533, 35560), 'numpy.nan_to_num', 'np.nan_to_num', (['(sdots / dots)'], {}), '(sdots / dots)\n', (35546, 35560), True, 'import numpy as np\n'), ((36025, 36074), 'numpy.add.at', 'np.add.at', (['cloth.co', 'cloth.eidx_tiler', 'tiled_move'], {}), '(cloth.co, cloth.eidx_tiler, tiled_move)\n', (36034, 36074), True, 'import numpy as np\n'), ((50246, 50264), 'numpy.any', 'np.any', (['back_check'], {}), '(back_check)\n', (50252, 50264), True, 'import numpy as np\n'), ((54404, 54450), 'numpy.mean', 'np.mean', (['cloth.vel[cloth.tridex][t_in]'], {'axis': '(1)'}), '(cloth.vel[cloth.tridex][t_in], axis=1)\n', (54411, 54450), True, 'import numpy as np\n'), ((63406, 63442), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (63429, 63442), False, 'import bpy\n'), ((65558, 65722), 'numpy.array', 'np.array', (["[(best_matrix * best_obj.data.shape_keys.key_blocks['modeling cloth key'].\n data[v].co) for v in best_obj.data.polygons[best_face_index].vertices]"], {}), "([(best_matrix * 
best_obj.data.shape_keys.key_blocks[\n 'modeling cloth key'].data[v].co) for v in best_obj.data.polygons[\n best_face_index].vertices])\n", (65566, 65722), True, 'import numpy as np\n'), ((73157, 73190), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (['pin.hook'], {}), '(pin.hook)\n', (73180, 73190), False, 'import bpy\n'), ((74347, 74395), 'bpy.data.objects.new', 'bpy.data.objects.new', (['"""modeling_cloth_pin"""', 'None'], {}), "('modeling_cloth_pin', None)\n", (74367, 74395), False, 'import bpy\n'), ((74408, 74441), 'bpy.context.scene.objects.link', 'bpy.context.scene.objects.link', (['e'], {}), '(e)\n', (74438, 74441), False, 'import bpy\n'), ((13870, 13890), 'numpy.roll', 'np.roll', (['f_verts', 'fv'], {}), '(f_verts, fv)\n', (13877, 13890), True, 'import numpy as np\n'), ((15912, 15937), 'numpy.in1d', 'np.in1d', (['selected', 'v_in_r'], {}), '(selected, v_in_r)\n', (15919, 15937), True, 'import numpy as np\n'), ((21782, 21796), 'numpy.mean', 'np.mean', (['w_vec'], {}), '(w_vec)\n', (21789, 21796), True, 'import numpy as np\n'), ((25361, 25386), 'numpy.tile', 'np.tile', (['(r_force * 0.5)', '(2)'], {}), '(r_force * 0.5, 2)\n', (25368, 25386), True, 'import numpy as np\n'), ((25468, 25493), 'numpy.tile', 'np.tile', (['(l_force * 0.5)', '(2)'], {}), '(l_force * 0.5, 2)\n', (25475, 25493), True, 'import numpy as np\n'), ((37468, 37486), 'numpy.array', 'np.array', (['ob.scale'], {}), '(ob.scale)\n', (37476, 37486), True, 'import numpy as np\n'), ((40958, 40993), 'numpy.array', 'np.array', (["extra_data['stored_vidx']"], {}), "(extra_data['stored_vidx'])\n", (40966, 40993), True, 'import numpy as np\n'), ((40996, 41025), 'numpy.array', 'np.array', (["(+extra_data['move'])"], {}), "(+extra_data['move'])\n", (41004, 41025), True, 'import numpy as np\n'), ((43875, 43884), 'numpy.sum', 'np.sum', (['i'], {}), '(i)\n', (43881, 43884), True, 'import numpy as np\n'), ((43893, 43902), 'numpy.sum', 'np.sum', (['j'], {}), '(j)\n', (43899, 43902), True, 'import numpy 
as np\n'), ((43988, 44014), 'numpy.repeat', 'np.repeat', (['c3', 't3.shape[0]'], {}), '(c3, t3.shape[0])\n', (43997, 44014), True, 'import numpy as np\n'), ((44042, 44066), 'numpy.tile', 'np.tile', (['t3', 'c3.shape[0]'], {}), '(t3, c3.shape[0])\n', (44049, 44066), True, 'import numpy as np\n'), ((44623, 44632), 'numpy.sum', 'np.sum', (['i'], {}), '(i)\n', (44629, 44632), True, 'import numpy as np\n'), ((44641, 44650), 'numpy.sum', 'np.sum', (['j'], {}), '(j)\n', (44647, 44650), True, 'import numpy as np\n'), ((45465, 45474), 'numpy.sum', 'np.sum', (['i'], {}), '(i)\n', (45471, 45474), True, 'import numpy as np\n'), ((45483, 45492), 'numpy.sum', 'np.sum', (['j'], {}), '(j)\n', (45489, 45492), True, 'import numpy as np\n'), ((54575, 54588), 'numpy.sign', 'np.sign', (['d_in'], {}), '(d_in)\n', (54582, 54588), True, 'import numpy as np\n'), ((60770, 60784), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (60782, 60784), False, 'import time, sys\n'), ((65755, 65774), 'numpy.array', 'np.array', (['hit_world'], {}), '(hit_world)\n', (65763, 65774), True, 'import numpy as np\n'), ((70763, 70813), 'numpy.array', 'np.array', (["[vert[v].co for v in extra_data['vidx']]"], {}), "([vert[v].co for v in extra_data['vidx']])\n", (70771, 70813), True, 'import numpy as np\n'), ((70850, 70869), 'numpy.copy', 'np.copy', (['ray_target'], {}), '(ray_target)\n', (70857, 70869), True, 'import numpy as np\n'), ((71046, 71066), 'numpy.array', 'np.array', (['ray_target'], {}), '(ray_target)\n', (71054, 71066), True, 'import numpy as np\n'), ((9404, 9465), 'numpy.mean', 'np.mean', (['[ob.data.vertices[i].co for i in p.vertices]'], {'axis': '(0)'}), '([ob.data.vertices[i].co for i in p.vertices], axis=0)\n', (9411, 9465), True, 'import numpy as np\n'), ((35587, 35599), 'numpy.sqrt', 'np.sqrt', (['div'], {}), '(div)\n', (35594, 35599), True, 'import numpy as np\n'), ((35896, 35926), 'numpy.append', 'np.append', (['move', '(-move)'], {'axis': '(0)'}), '(move, -move, axis=0)\n', (35905, 
35926), True, 'import numpy as np\n'), ((36469, 36504), 'numpy.array', 'np.array', (["extra_data['stored_vidx']"], {}), "(extra_data['stored_vidx'])\n", (36477, 36504), True, 'import numpy as np\n'), ((36507, 36536), 'numpy.array', 'np.array', (["(+extra_data['move'])"], {}), "(+extra_data['move'])\n", (36515, 36536), True, 'import numpy as np\n'), ((37442, 37464), 'numpy.array', 'np.array', (['[0, 0, grav]'], {}), '([0, 0, grav])\n', (37450, 37464), True, 'import numpy as np\n'), ((51243, 51276), 'numpy.einsum', 'np.einsum', (['"""ij, ij->i"""', 'nor', 'vec2'], {}), "('ij, ij->i', nor, vec2)\n", (51252, 51276), True, 'import numpy as np\n'), ((51791, 51808), 'numpy.any', 'np.any', (['in_margin'], {}), '(in_margin)\n', (51797, 51808), True, 'import numpy as np\n'), ((65898, 65931), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'vecs', 'vecs'], {}), "('ij,ij->i', vecs, vecs)\n", (65907, 65931), True, 'import numpy as np\n'), ((39124, 39145), 'numpy.mean', 'np.mean', (['cosg'], {'axis': '(0)'}), '(cosg, axis=0)\n', (39131, 39145), True, 'import numpy as np\n'), ((52080, 52111), 'numpy.mean', 'np.mean', (['tri_co_2[t_in]'], {'axis': '(1)'}), '(tri_co_2[t_in], axis=1)\n', (52087, 52111), True, 'import numpy as np\n'), ((52147, 52176), 'numpy.mean', 'np.mean', (['tri_vo[t_in]'], {'axis': '(1)'}), '(tri_vo[t_in], axis=1)\n', (52154, 52176), True, 'import numpy as np\n'), ((66738, 66786), 'bpy.data.objects.new', 'bpy.data.objects.new', (['"""modeling_cloth_pin"""', 'None'], {}), "('modeling_cloth_pin', None)\n", (66758, 66786), False, 'import bpy\n'), ((66803, 66836), 'bpy.context.scene.objects.link', 'bpy.context.scene.objects.link', (['e'], {}), '(e)\n', (66833, 66836), False, 'import bpy\n'), ((67633, 67673), 'bpy.context.window.cursor_set', 'bpy.context.window.cursor_set', (['"""DEFAULT"""'], {}), "('DEFAULT')\n", (67662, 67673), False, 'import bpy\n'), ((71128, 71149), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (71136, 71149), True, 
'import numpy as np\n'), ((21734, 21753), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (21750, 21753), True, 'import numpy as np\n'), ((72422, 72462), 'bpy.context.window.cursor_set', 'bpy.context.window.cursor_set', (['"""DEFAULT"""'], {}), "('DEFAULT')\n", (72451, 72462), False, 'import bpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 12:04:32 2016
@author: Charles
"""
from __future__ import division
from past.utils import old_div
import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mu, sig):
return np.exp(old_div(-np.power(x - mu, 2.), (2 * np.power(sig, 2.))))
t = np.arange(256)
noise = 5e-1 * (0.5-np.random.rand(len(t)))
#noise = 0
#func = 10*np.sin(np.pi*t/350.) + noise + 5*gaussian(t, 300, 5)
func = noise + 10*np.sin(2*np.pi*t/250.) + 5*gaussian(t, 150, 10)
#func = 1*np.sin(2*np.pi*t/5) + 1*np.sin(2*np.pi*t/20)
sp = 1*np.fft.fft(func)
#sp = np.zeros(len(t), dtype=complex)
#for k in range(len(func)):
# exp = func*np.exp(-2j*np.pi*t*k/len(t))
# sp[k] = np.sum(exp)
#freq = np.fft.fftfreq(t.shape[-1])
freq = np.hstack((np.linspace(0,0.5,old_div(len(t),2)),np.linspace(-0.5,0,old_div(len(t),2))))
#freq = 1.*np.arange(0, len(t))/len(t)
#freq = np.linspace(0,len(t),len(t))/len(t)
#freq = t / max(t)
sp_cut = sp.copy()
fig1 = plt.figure()
plt.semilogy(np.sort(freq), abs(sp[freq.argsort()]),lw=1)
plt.fill_between(np.sort(freq), 1e-3, abs(sp[freq.argsort()]), alpha=0.1)
plt.xlabel(u"Fréquence ($m^{-1}$)")
plt.ylabel("Amplitude (mGal)")
plt.xlim([min(freq), max(freq)])
fig1.savefig("Amplitude.png", dpi=200)
fig2 = plt.figure()
plt.plot(freq, np.angle(sp),lw=1)
plt.xlabel(u"Fréquence ($m^{-1}$)")
plt.ylabel("Phase")
fig2.savefig("Phase.png", dpi=200)
#sp_cut[(freq>0.004)&(freq<0.996)] = 0
#sp_cut[(freq>-0.496)&(freq<0.496)] = 0
sp_cut[abs(freq)>0.004] = 0
inv_fft = np.fft.ifft(sp_cut)
#inv_fft = np.zeros(len(t))
#for m in range(len(sp)):
# exp = sp*np.exp(2j*np.pi*t*m/len(t))
# inv_fft[m] = (1./len(t)) * np.sum(exp)
fig = plt.figure()
plt.plot(t, func, "k", lw=1, label=u"Profil brut")
plt.plot(t, inv_fft, "b", lw=1.5, label=u"Prolongement vers le haut")
plt.plot(t, func-inv_fft, "r", lw=2, label=u"Anomalie résiduelle")
leg = plt.legend()
for legobj in leg.legendHandles:
legobj.set_linewidth(3)
plt.ylabel(u"Gravité (mGal)")
plt.xlabel(u"Distance (m)")
plt.show()
fig.savefig("Fourier.png", dpi=200)
data = np.vstack((t, func)).T
np.savetxt("profilgravi.dat", data, delimiter=",") | [
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"numpy.sort",
"numpy.angle",
"matplotlib.pyplot.figure",
"numpy.vstack",
"numpy.savetxt",
"numpy.sin",
"numpy.fft.ifft",
"numpy.arange",
"matplotl... | [((310, 324), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (319, 324), True, 'import numpy as np\n'), ((998, 1010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1178), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Fréquence ($m^{-1}$)"""'], {}), "(u'Fréquence ($m^{-1}$)')\n", (1153, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude (mGal)"""'], {}), "('Amplitude (mGal)')\n", (1189, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1291, 1303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1301, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1373), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Fréquence ($m^{-1}$)"""'], {}), "(u'Fréquence ($m^{-1}$)')\n", (1348, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1374, 1393), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase"""'], {}), "('Phase')\n", (1384, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1568), 'numpy.fft.ifft', 'np.fft.ifft', (['sp_cut'], {}), '(sp_cut)\n', (1560, 1568), True, 'import numpy as np\n'), ((1718, 1730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1728, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1781), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'func', '"""k"""'], {'lw': '(1)', 'label': 'u"""Profil brut"""'}), "(t, func, 'k', lw=1, label=u'Profil brut')\n", (1739, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1851), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'inv_fft', '"""b"""'], {'lw': '(1.5)', 'label': 'u"""Prolongement vers le haut"""'}), "(t, inv_fft, 'b', lw=1.5, label=u'Prolongement vers le haut')\n", (1790, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1920), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(func - inv_fft)', '"""r"""'], {'lw': '(2)', 'label': 'u"""Anomalie 
résiduelle"""'}), "(t, func - inv_fft, 'r', lw=2, label=u'Anomalie résiduelle')\n", (1860, 1920), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1937), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1935, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2028), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['u"""Gravité (mGal)"""'], {}), "(u'Gravité (mGal)')\n", (2009, 2028), True, 'import matplotlib.pyplot as plt\n'), ((2029, 2056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Distance (m)"""'], {}), "(u'Distance (m)')\n", (2039, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2065, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2185), 'numpy.savetxt', 'np.savetxt', (['"""profilgravi.dat"""', 'data'], {'delimiter': '""","""'}), "('profilgravi.dat', data, delimiter=',')\n", (2145, 2185), True, 'import numpy as np\n'), ((577, 593), 'numpy.fft.fft', 'np.fft.fft', (['func'], {}), '(func)\n', (587, 593), True, 'import numpy as np\n'), ((1024, 1037), 'numpy.sort', 'np.sort', (['freq'], {}), '(freq)\n', (1031, 1037), True, 'import numpy as np\n'), ((1086, 1099), 'numpy.sort', 'np.sort', (['freq'], {}), '(freq)\n', (1093, 1099), True, 'import numpy as np\n'), ((1319, 1331), 'numpy.angle', 'np.angle', (['sp'], {}), '(sp)\n', (1327, 1331), True, 'import numpy as np\n'), ((2112, 2132), 'numpy.vstack', 'np.vstack', (['(t, func)'], {}), '((t, func))\n', (2121, 2132), True, 'import numpy as np\n'), ((465, 494), 'numpy.sin', 'np.sin', (['(2 * np.pi * t / 250.0)'], {}), '(2 * np.pi * t / 250.0)\n', (471, 494), True, 'import numpy as np\n'), ((257, 278), 'numpy.power', 'np.power', (['(x - mu)', '(2.0)'], {}), '(x - mu, 2.0)\n', (265, 278), True, 'import numpy as np\n'), ((284, 302), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (292, 302), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#ATTENTION IF PYTHON OR PYTHON 3
# coding: utf-8
#/******************************************
#*MIT License
#*
#*Copyright (c) [2020] [<NAME>, <NAME>, <NAME>]
#*
#*Permission is hereby granted, free of charge, to any person obtaining a copy
#*of this software and associated documentation files (the "Software"), to deal
#*in the Software without restriction, including without limitation the rights
#*to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#*copies of the Software, and to permit persons to whom the Software is
#*furnished to do so, subject to the following conditions:
#*
#*The above copyright notice and this permission notice shall be included in all
#*copies or substantial portions of the Software.
#*
#*THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#*IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#*FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#*AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#*LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#*OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#*SOFTWARE.
#******************************************/
import os
import cv2
import numpy as np
import math
import glob
import time
import pandas as pd
import pynq
from pynq import Overlay
from pynq import allocate
import struct
import statistics
import argparse
from ddrbenchmark_handler import my_accel_map
def main():
parser = argparse.ArgumentParser(description='Python-based host')
parser.add_argument("-ol", "--overlay", nargs='?', help='Path and filename of the target overlay', default='./drambenchmark_top_wrapper.bit')
parser.add_argument("-clk", "--clock", nargs='?', help='Target clock frequency of the PL fclk0_mhz', default=100, type=int)
parser.add_argument("-t", "--core_number", nargs='?', help='Number of // threads', default=1, type=int)
parser.add_argument("-p", "--platform", nargs='?', help='platform to target.\
\'Alveo\' is used for PCIe based,\n while others will setup for a Zynq-based environment', default='Alveo')
parser.add_argument("-id", "--input_dimension", nargs='?', help='Target input dimensions', default=1000, type=int)
parser.add_argument("-ib", "--input_bitwidth", nargs='?', help='Target input bitwidth', default=512, type=int)
parser.add_argument("-rp", "--res_path", nargs='?', help='Path of the Results', default='./')
t=0
args = parser.parse_args()
accel_number=args.core_number
myoverlay = Overlay(args.overlay)
if args.platform=='Zynq':
from pynq.ps import Clocks;
print("Previous Frequency "+str(Clocks.fclk0_mhz))
Clocks.fclk0_mhz = args.clock;
print("New frequency "+str(Clocks.fclk0_mhz))
host_dt=np.uint32
host_dt_size = np.dtype(host_dt).itemsize * 8
packet_factor = math.ceil(args.input_bitwidth / host_dt_size)
packet_number = args.input_dimension * packet_factor
accel_list=my_accel_map(myoverlay, args.platform, accel_number, \
packet_number, host_dt, packet_number, host_dt)
#test
iterations=10
t_tot = 0
times=[]
dim=args.input_dimension
diffs=[]
start_tot = time.time()
for i in range(iterations):
input_vector = np.random.randint(low=0, high=255, size=(packet_number,), dtype=host_dt)
sw_data_out=accel_list[0].my_func_sw(input_vector, packet_number)
start_single = time.time()
accel_list[0].prepare_buff_one(input_vector)
fpga_data_out = accel_list[0].exec_and_wait()
end_single = time.time()
print("Hw result: ")
print(fpga_data_out)
print("Sw result: ")
print(sw_data_out)
t = end_single - start_single
times.append(t)
diff=np.all(fpga_data_out == sw_data_out)
diffs.append(diff)
t_tot = t_tot + t
end_tot = time.time()
accel_list[0].reset_cma_buff()
print("Mean value of hw vs sw difference" +str(np.mean(diffs)))
df = pd.DataFrame([\
["total_time_hw ",t_tot],\
["mean_time_hw",np.mean(times)],\
["std_time_hw",np.std(times)],\
["mean_diff",np.mean(diffs)],\
["std_diffs",np.std(diffs)]],\
columns=['Label','Test'+str(args.overlay)])
df_path = os.path.join(args.res_path,'Time_%02d.csv' % (args.clock))
df.to_csv(df_path, index=False)
data = {'time'+str(args.overlay):times,\
'error'+str(args.overlay):diffs}
df_breakdown = pd.DataFrame(data,\
columns=['time'+str(args.overlay),'error'+str(args.overlay)])
df_path_breakdown = os.path.join(args.res_path,'Breakdown_%02d.csv' % (args.clock))
df_breakdown.to_csv(df_path_breakdown, index=False)
if args.platform =='Alveo':
myoverlay.free()
print("The host code is at the end :)")
if __name__== "__main__":
main()
| [
"numpy.dtype",
"numpy.mean",
"math.ceil",
"argparse.ArgumentParser",
"os.path.join",
"numpy.random.randint",
"numpy.std",
"numpy.all",
"time.time",
"ddrbenchmark_handler.my_accel_map",
"pynq.Overlay"
] | [((1572, 1628), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Python-based host"""'}), "(description='Python-based host')\n", (1595, 1628), False, 'import argparse\n'), ((2629, 2650), 'pynq.Overlay', 'Overlay', (['args.overlay'], {}), '(args.overlay)\n', (2636, 2650), False, 'from pynq import Overlay\n'), ((2963, 3008), 'math.ceil', 'math.ceil', (['(args.input_bitwidth / host_dt_size)'], {}), '(args.input_bitwidth / host_dt_size)\n', (2972, 3008), False, 'import math\n'), ((3082, 3186), 'ddrbenchmark_handler.my_accel_map', 'my_accel_map', (['myoverlay', 'args.platform', 'accel_number', 'packet_number', 'host_dt', 'packet_number', 'host_dt'], {}), '(myoverlay, args.platform, accel_number, packet_number, host_dt,\n packet_number, host_dt)\n', (3094, 3186), False, 'from ddrbenchmark_handler import my_accel_map\n'), ((3307, 3318), 'time.time', 'time.time', ([], {}), '()\n', (3316, 3318), False, 'import time\n'), ((3991, 4002), 'time.time', 'time.time', ([], {}), '()\n', (4000, 4002), False, 'import time\n'), ((4406, 4463), 'os.path.join', 'os.path.join', (['args.res_path', "('Time_%02d.csv' % args.clock)"], {}), "(args.res_path, 'Time_%02d.csv' % args.clock)\n", (4418, 4463), False, 'import os\n'), ((4729, 4791), 'os.path.join', 'os.path.join', (['args.res_path', "('Breakdown_%02d.csv' % args.clock)"], {}), "(args.res_path, 'Breakdown_%02d.csv' % args.clock)\n", (4741, 4791), False, 'import os\n'), ((3375, 3447), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(255)', 'size': '(packet_number,)', 'dtype': 'host_dt'}), '(low=0, high=255, size=(packet_number,), dtype=host_dt)\n', (3392, 3447), True, 'import numpy as np\n'), ((3545, 3556), 'time.time', 'time.time', ([], {}), '()\n', (3554, 3556), False, 'import time\n'), ((3685, 3696), 'time.time', 'time.time', ([], {}), '()\n', (3694, 3696), False, 'import time\n'), ((3886, 3922), 'numpy.all', 'np.all', (['(fpga_data_out == sw_data_out)'], {}), '(fpga_data_out 
== sw_data_out)\n', (3892, 3922), True, 'import numpy as np\n'), ((2912, 2929), 'numpy.dtype', 'np.dtype', (['host_dt'], {}), '(host_dt)\n', (2920, 2929), True, 'import numpy as np\n'), ((4090, 4104), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (4097, 4104), True, 'import numpy as np\n'), ((4192, 4206), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (4199, 4206), True, 'import numpy as np\n'), ((4233, 4246), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (4239, 4246), True, 'import numpy as np\n'), ((4271, 4285), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (4278, 4285), True, 'import numpy as np\n'), ((4310, 4323), 'numpy.std', 'np.std', (['diffs'], {}), '(diffs)\n', (4316, 4323), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import statsmodels as sm
import statsmodels.api as smapi
import math
from pyqstrat.pq_utils import monotonically_increasing, infer_frequency
from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot
import matplotlib as mpl
import matplotlib.figure as mpl_fig
from typing import Tuple, Sequence, Mapping, MutableMapping, Optional, Any, Callable, Dict
def compute_periods_per_year(timestamps: np.ndarray) -> float:
    """
    Computes trading periods per year for an array of numpy datetime64's.
    e.g. if most of the timestamps are separated by 1 day, will return 252.
    Args:
        timestamps: a numpy array of datetime64's
    >>> compute_periods_per_year(np.array(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-09'], dtype='M8[D]'))
    252.0
    >>> round(compute_periods_per_year(np.array(['2018-01-01 10:00', '2018-01-01 10:05', '2018-01-01 10:10'], dtype='M8[m]')), 2)
    72576.05
    """
    if not len(timestamps): return np.nan
    # infer_frequency (pyqstrat.pq_utils) returns the typical spacing between timestamps,
    # presumably expressed in fractional trading days -- TODO confirm against pq_utils
    freq = infer_frequency(timestamps)
    # 252 trading days per year divided by the spacing; a 0 frequency means it could not be inferred
    return 252. / freq if freq != 0 else np.nan
def compute_amean(returns: np.ndarray, periods_per_year: int) -> float:
    '''
    Computes arithmetic mean of a return array, ignoring NaNs
    Args:
        returns: Represents returns at any frequency
        periods_per_year: Frequency of the returns, e.g. 252 for daily returns
    >>> compute_amean(np.array([0.003, 0.004, np.nan]), 252)
    0.882
    '''
    if len(returns) == 0:
        return np.nan
    # Annualize the per-period NaN-ignoring mean
    per_period_mean = np.nanmean(returns)
    return per_period_mean * periods_per_year
def compute_num_periods(timestamps: np.ndarray, periods_per_year: float) -> float:
    '''
    Given an array of timestamps, we compute how many periods there are between the first and last element, where the length
    of a period is defined by periods_per_year. For example, if there are 6 periods per year, 
    then each period would be approx. 2 months long.
    Args:
        timestamps (np.ndarray of np.datetime64): a numpy array of returns, can contain nans
        periods_per_year: number of periods between first and last return
    >>> compute_num_periods(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), 6)
    2.0
    '''
    if not len(timestamps): return np.nan
    # The first/last span below is only meaningful if the timestamps are sorted
    assert(monotonically_increasing(timestamps))
    # Dividing a timedelta64 by np.timedelta64(1, 's') yields seconds; the remaining factors
    # convert seconds to a fraction of a 365-day year
    fraction_of_year = (timestamps[-1] - timestamps[0]) / (np.timedelta64(1, 's') * 365 * 24 * 60 * 60)
    return round(fraction_of_year * periods_per_year)
def compute_gmean(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> float:
    """
    Compute geometric mean of an array of returns
    Args:
        returns: a numpy array of returns, can contain nans
        periods_per_year: Used for annualizing returns
    >>> round(compute_gmean(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), np.array([0.001, 0.002, 0.003]), 252.), 6)
    0.018362
    """
    if not len(returns): return np.nan
    assert(len(returns) == len(timestamps))
    assert(isinstance(timestamps, np.ndarray) and isinstance(returns, np.ndarray))
    # Drop NaN / inf returns along with their timestamps before compounding
    finite = np.isfinite(returns)
    clean_timestamps = timestamps[finite]
    clean_returns = returns[finite]
    num_periods = compute_num_periods(clean_timestamps, periods_per_year)
    # Per-period geometric growth rate from the cumulative product of gross returns
    per_period_growth = ((1.0 + clean_returns).prod()) ** (1.0 / num_periods)
    # Annualize and convert back to a net return
    return np.power(per_period_growth, periods_per_year) - 1.0
def compute_std(returns: np.ndarray) -> float:
    """ Computes standard deviation of an array of returns, ignoring nans """
    return np.nan if len(returns) == 0 else np.nanstd(returns)
def compute_sortino(returns: np.ndarray, amean: float, periods_per_year: float) -> float:
    '''
    Note that this assumes target return is 0.
    Args:
        returns: a numpy array of returns
        amean: arithmetic mean of returns
        periods_per_year: number of trading periods per year
    >>> print(round(compute_sortino(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6))
    0.133631
    '''
    bad_inputs = not len(returns) or not np.isfinite(amean) or periods_per_year <= 0
    if bad_inputs:
        return np.nan
    # Replace non-finite returns with 0, then keep only the downside (<= 0) portion
    clean = np.where(np.isfinite(returns), returns, 0.0)
    downside = np.minimum(clean, 0.0)
    downside_dev = np.std(downside)
    if downside_dev == 0:
        return np.nan
    return amean / (downside_dev * np.sqrt(periods_per_year))
def compute_sharpe(returns: np.ndarray, amean: float, periods_per_year: float) -> float:
    '''
    Note that this does not take into risk free returns so it's really a sharpe0, i.e. assumes risk free returns are 0
    Args:
        returns: a numpy array of returns
        amean: arithmetic mean of returns
        periods_per_year: number of trading periods per year
    >>> round(compute_sharpe(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6)
    0.050508
    '''
    bad_inputs = not len(returns) or not np.isfinite(amean) or periods_per_year <= 0
    if bad_inputs:
        return np.nan
    # Treat non-finite returns as 0 before measuring volatility
    clean_rets = np.where(np.isfinite(returns), returns, 0.0)
    vol = np.std(clean_rets)
    if vol == 0:
        return np.nan
    return amean / (vol * np.sqrt(periods_per_year))
def compute_k_ratio(equity: np.ndarray, periods_per_year: int, halflife_years: float = None) -> float:
    '''
    Compute k-ratio (2013 or original versions by <NAME>). See https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2230949
    We also implement a modification that allows higher weighting for more recent returns.
    Args:
        equity: a numpy array of the equity in your account
        periods_per_year: 252 for daily values
        halflife_years: If set, we use weighted linear regression to give less weight to older returns.
            In this case, we compute the original k-ratio which does not use periods per year or number of observations
            If not set, we compute the 2013 version of the k-ratio which weights k-ratio by sqrt(periods_per_year) / nobs
    Returns:
        weighted or unweighted k-ratio
    >>> np.random.seed(0)
    >>> t = np.arange(1000)
    >>> ret = np.random.normal(loc = 0.0025, scale = 0.01, size = len(t))
    >>> equity = (1 + ret).cumprod()
    >>> assert(math.isclose(compute_k_ratio(equity, 252, None), 3.888, abs_tol=0.001))
    >>> assert(math.isclose(compute_k_ratio(equity, 252, 0.5), 602.140, abs_tol=0.001))
    '''
    # Regress log equity against time; the k-ratio is slope / stderr of the slope
    equity = equity[np.isfinite(equity)]
    equity = np.log(equity)
    t = np.arange(len(equity))
    if halflife_years:
        halflife = halflife_years * periods_per_year
        # Exponential decay constant so that a weight halves every `halflife` periods
        k = math.log(0.5) / halflife
        # Bug fix: removed dead assignment `w = np.empty(len(equity), dtype=np.float)` --
        # its value was immediately overwritten and the np.float alias was removed in
        # NumPy >= 1.24, raising AttributeError.
        w = np.exp(k * t)
        w = w ** 2  # Statsmodels requires square of weights
        w = w[::-1]  # Reverse so the newest observations carry the largest weights
        fit = sm.regression.linear_model.WLS(endog=equity, exog=t, weights=w, hasconst=False).fit()
        k_ratio = fit.params[0] / fit.bse[0]
    else:
        # 2013 version: scale slope / stderr by sqrt(periods_per_year) / nobs
        fit = smapi.OLS(endog=equity, exog=np.arange(len(equity)), hasconst=False).fit()
        k_ratio = fit.params[0] * math.sqrt(periods_per_year) / (fit.bse[0] * len(equity))
    return k_ratio
def compute_equity(timestamps: np.ndarray, starting_equity: float, returns: np.ndarray) -> np.ndarray:
    '''Given starting equity, timestamps and returns, create a numpy array of equity at each date'''
    # Compound the gross returns forward from the starting equity
    growth_factors = np.cumprod(1. + returns)
    return starting_equity * growth_factors
def compute_rolling_dd(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    '''
    Compute numpy array of rolling drawdown percentage
    Args:
        timestamps: numpy array of datetime64
        equity: numpy array of equity
    Returns:
        A tuple of the input timestamps and the corresponding drawdown fractions
        (positive numbers; 0 wherever equity is at a new high water mark)
    '''
    assert(len(timestamps) == len(equity))
    # Bug fix: np.float was a deprecated alias of the builtin float, removed in NumPy >= 1.24
    if not len(timestamps): return np.array([], dtype='M8[ns]'), np.array([], dtype=float)
    s = pd.Series(equity, index=timestamps)
    # Running high water mark of the equity curve
    rolling_max = s.expanding(min_periods=1).max()
    # Drawdown expressed as a positive fraction below the high water mark
    dd = np.where(s >= rolling_max, 0.0, -(s - rolling_max) / rolling_max)
    return timestamps, dd
def compute_maxdd_pct(rolling_dd: np.ndarray) -> float:
    '''Return the worst (largest) drawdown fraction in a rolling drawdown series, skipping NaNs'''
    if len(rolling_dd) == 0:
        return np.nan
    return np.nanmax(rolling_dd)
def compute_maxdd_date(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray) -> float:
    '''Return the timestamp at which the rolling drawdown series reaches its worst value (NaT if empty)'''
    if len(rolling_dd_dates) == 0:
        return pd.NaT
    assert(len(rolling_dd_dates) == len(rolling_dd))
    worst_idx = np.argmax(rolling_dd)
    return rolling_dd_dates[worst_idx]
def compute_maxdd_start(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray, mdd_date: np.datetime64) -> np.datetime64:
    '''Compute date when max drawdown starts, given numpy array of timestamps corresponding rolling dd
    percentages and date of the max draw down.
    Returns pd.NaT when the inputs are empty, mdd_date is null, or no high-water-mark
    date precedes mdd_date.'''
    if not len(rolling_dd_dates) or pd.isnull(mdd_date): return pd.NaT
    assert(len(rolling_dd_dates) == len(rolling_dd))
    # Last date before mdd_date where equity was at its high water mark (drawdown <= 0)
    candidates = rolling_dd_dates[(rolling_dd <= 0) & (rolling_dd_dates < mdd_date)]
    # Bug fix: previously indexed [-1] unconditionally, which raised IndexError when no
    # high-water-mark date precedes mdd_date (e.g. the max drawdown is on the first bar)
    if not len(candidates): return pd.NaT
    return candidates[-1]
def compute_mar(returns: np.ndarray, periods_per_year: float, mdd_pct: float) -> float:
    '''Compute MAR ratio, which is annualized return divided by biggest drawdown since inception.'''
    no_data = len(returns) == 0
    if no_data or np.isnan(mdd_pct) or mdd_pct == 0:
        return np.nan
    annualized_return = np.mean(returns) * periods_per_year
    return annualized_return / mdd_pct
def compute_dates_3yr(timestamps: np.ndarray) -> np.ndarray:
    ''' Given an array of numpy datetimes, return those that are within 3 years of the last date in the array'''
    if not len(timestamps): return np.array([], dtype='M8[D]')
    last_date = timestamps[-1]
    d = pd.to_datetime(last_date)
    try:
        start_3yr = np.datetime64(d.replace(year=d.year - 3))
    except ValueError:
        # Bug fix: Feb 29 has no counterpart 3 years earlier; fall back to Feb 28
        start_3yr = np.datetime64(d.replace(year=d.year - 3, day=28))
    return timestamps[timestamps > start_3yr]
def compute_returns_3yr(timestamps: np.ndarray, returns: np.ndarray) -> np.ndarray:
    '''Given an array of numpy datetimes and an array of returns, return those that are within 3 years
    of the last date in the datetime array '''
    # Bug fix: np.float was a deprecated alias of the builtin float, removed in NumPy >= 1.24
    if not len(timestamps): return np.array([], dtype=float)
    assert(len(timestamps) == len(returns))
    timestamps_3yr = compute_dates_3yr(timestamps)
    # Keep returns from the first in-window timestamp onwards
    return returns[timestamps >= timestamps_3yr[0]]
def compute_rolling_dd_3yr(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    '''Compute rolling drawdowns over the last 3 years'''
    # Bug fix: the empty-input path previously returned a single array, not the declared
    # (timestamps, drawdowns) tuple, so callers that unpack the result would fail
    if not len(timestamps): return np.array([], dtype='M8[D]'), np.array([], dtype=float)
    last_date = timestamps[-1]
    d = pd.to_datetime(last_date)
    try:
        start_3yr = np.datetime64(d.replace(year=d.year - 3))
    except ValueError:
        # Bug fix: Feb 29 has no counterpart 3 years earlier; fall back to Feb 28
        start_3yr = np.datetime64(d.replace(year=d.year - 3, day=28))
    equity = equity[timestamps >= start_3yr]
    timestamps = timestamps[timestamps >= start_3yr]
    return compute_rolling_dd(timestamps, equity)
def compute_maxdd_pct_3yr(rolling_dd_3yr: np.ndarray) -> float:
    '''Compute max drawdown percentage over the last 3 years'''
    # Same contract as compute_maxdd_pct: worst drawdown fraction, NaN-safe, NaN if empty
    if len(rolling_dd_3yr) == 0:
        return np.nan
    return np.nanmax(rolling_dd_3yr)
def compute_maxdd_date_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray) -> np.datetime64:
    '''Compute max drawdown date over the last 3 years'''
    # Same contract as compute_maxdd_date: timestamp of the worst drawdown, NaT if empty
    if not len(rolling_dd_3yr_timestamps): return pd.NaT
    assert(len(rolling_dd_3yr_timestamps) == len(rolling_dd_3yr))
    return rolling_dd_3yr_timestamps[np.argmax(rolling_dd_3yr)]
def compute_maxdd_start_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray, mdd_date_3yr: np.datetime64) -> np.datetime64:
    '''Compute max drawdown start date over the last 3 years'''
    # Same contract as compute_maxdd_start: last high-water-mark date before the max drawdown
    if not len(rolling_dd_3yr_timestamps) or pd.isnull(mdd_date_3yr): return pd.NaT
    assert(len(rolling_dd_3yr_timestamps) == len(rolling_dd_3yr))
    recovered = (rolling_dd_3yr <= 0) & (rolling_dd_3yr_timestamps < mdd_date_3yr)
    return rolling_dd_3yr_timestamps[recovered][-1]
def compute_calmar(returns_3yr: np.ndarray, periods_per_year: float, mdd_pct_3yr: float) -> float:
    '''Compute Calmar ratio, which is the annualized return divided by max drawdown over the last 3 years'''
    # Same contract as compute_mar, restricted to the trailing 3-year window
    if not len(returns_3yr) or np.isnan(mdd_pct_3yr) or mdd_pct_3yr == 0:
        return np.nan
    return np.mean(returns_3yr) * periods_per_year / mdd_pct_3yr
def compute_bucketed_returns(timestamps: np.ndarray, returns: np.ndarray) -> Tuple[Sequence[int], Sequence[np.ndarray]]:
    '''
    Bucket returns by year
    Returns:
        A tuple with the first element being a list of years and the second a list of 
        numpy arrays containing returns for each corresponding year
    '''
    assert(len(timestamps) == len(returns))
    # Bug fix: np.str / np.float were deprecated aliases of the builtins, removed in NumPy >= 1.24
    if not len(timestamps): return np.array([], dtype=str), np.array([], dtype=float)
    s = pd.Series(returns, index=timestamps)
    years_list = []
    rets_list = []
    # groupby on the calendar year extracted from the datetime index
    for year, rets in s.groupby(s.index.map(lambda x: x.year)):
        years_list.append(year)
        rets_list.append(rets.values)
    
    return years_list, rets_list
def compute_annual_returns(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> Tuple[np.ndarray, np.ndarray]:
    '''Takes the output of compute_bucketed_returns and returns geometric mean of returns by year
    Returns:
        A tuple with the first element being an array of years (integer) and the second element 
        an array of annualized returns for those years
    '''
    assert(len(timestamps) == len(returns) and periods_per_year > 0)
    # Bug fix: np.str / np.float were deprecated aliases of the builtins, removed in NumPy >= 1.24
    if not len(timestamps): return np.array([], dtype=str), np.array([], dtype=float)
    df = pd.DataFrame({'ret': returns, 'timestamp': timestamps})
    years = []
    gmeans = []
    # One geometric mean per calendar year
    for k, g in df.groupby(df.timestamp.map(lambda x: x.year)):
        years.append(k)
        gmeans.append(compute_gmean(g.timestamp.values, g.ret.values, periods_per_year))
    return np.array(years), np.array(gmeans)
class Evaluator:
    """Computes a set of named metrics respecting their dependencies.

    You register functions whose keyword arguments are the names of other metrics;
    the evaluator resolves the dependency graph so prerequisites are computed first.
    Retrieve any computed value with :meth:`metric`.

    >>> evaluator = Evaluator(initial_metrics={'x': np.array([1, 2, 3]), 'y': np.array([3, 4, 5])})
    >>> evaluator.add_metric('z', lambda x, y: sum(x, y), dependencies=['x', 'y'])
    >>> evaluator.compute()
    >>> evaluator.metric('z')
    array([ 9, 10, 11])
    """
    def __init__(self, initial_metrics: Dict[str, Any]) -> None:
        """Inits Evaluator with a dictionary of initial metrics that are used to compute subsequent metrics

        Args:
            initial_metrics: a dictionary of string name -> metric. metric can be any object including a scalar,
              an array or a tuple
        """
        assert(type(initial_metrics) == dict)
        # Copy so later computations never mutate the caller's dictionary
        self.metric_values: Dict[str, Any] = initial_metrics.copy()
        # name -> (callable, list of metric names the callable needs as keyword args)
        self._metrics: MutableMapping[str, Tuple[Callable, Sequence[str]]] = {}

    def add_metric(self, name: str, func: Callable, dependencies: Sequence[str]) -> None:
        """Register a metric computed by func from the named dependency metrics."""
        self._metrics[name] = (func, dependencies)

    def compute(self, metric_names: Sequence[str] = None) -> None:
        '''Compute metrics using the internal dependency graph

        Args:
            metric_names: an array of metric names. If not passed in, evaluator will compute and store all metrics
        '''
        names = list(self._metrics.keys()) if metric_names is None else metric_names
        for name in names:
            self.compute_metric(name)

    def compute_metric(self, metric_name: str) -> None:
        '''
        Compute and store a single metric:

        Args:
            metric_name: string representing the metric to compute
        '''
        func, deps = self._metrics[metric_name]
        # Recursively materialize any dependency that has not been computed yet
        for dep in deps:
            if dep not in self.metric_values:
                self.compute_metric(dep)
        kwargs = {dep: self.metric_values[dep] for dep in deps}
        self.metric_values[metric_name] = func(**kwargs)

    def metric(self, metric_name: str) -> Any:
        '''Return the value of a single metric given its name'''
        return self.metric_values[metric_name]

    def metrics(self) -> Mapping[str, Any]:
        '''Return a dictionary of metric name -> metric value'''
        return self.metric_values
def handle_non_finite_returns(timestamps: np.ndarray,
rets: np.ndarray,
leading_non_finite_to_zeros: bool,
subsequent_non_finite_to_zeros: bool) -> Tuple[np.ndarray, np.ndarray]:
'''
>>> np.set_printoptions(formatter={'float': '{: .6g}'.format})
>>> timestamps = np.arange(np.datetime64('2019-01-01'), np.datetime64('2019-01-07'))
>>> rets = np.array([np.nan, np.nan, 3, 4, np.nan, 5])
>>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = True)
(array(['2019-01-03', '2019-01-04', '2019-01-05', '2019-01-06'], dtype='datetime64[D]'), array([ 3, 4, 0, 5]))
>>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = True, subsequent_non_finite_to_zeros = False)
(array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-06'], dtype='datetime64[D]'), array([ 0, 0, 3, 4, 5]))
>>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = False)
(array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-06'], dtype='datetime64[D]'), array([ 0, 0, 3, 4, 5]))
>>> rets = np.array([1, 2, 3, 4, 4.5, 5])
>>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = True)
(array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-05', '2019-01-06'],
dtype='datetime64[D]'), array([ 1, 2, 3, 4, 4.5, 5]))
'''
first_non_nan_index = np.ravel(np.nonzero(~np.isnan(rets)))
if len(first_non_nan_index):
first_non_nan_index = first_non_nan_index[0]
else:
first_non_nan_index = -1
if first_non_nan_index > 0 and first_non_nan_index < len(rets):
if leading_non_finite_to_zeros:
rets[:first_non_nan_index] = np.nan_to_num(rets[:first_non_nan_index])
else:
timestamps = timestamps[first_non_nan_index:]
rets = rets[first_non_nan_index:]
if subsequent_non_finite_to_zeros:
rets = np.nan_to_num(rets)
else:
timestamps = timestamps[np.isfinite(rets)]
rets = rets[np.isfinite(rets)]
return timestamps, rets
def compute_return_metrics(timestamps: np.ndarray,
rets: np.ndarray,
starting_equity: float,
leading_non_finite_to_zeros: bool = False,
subsequent_non_finite_to_zeros: bool = True) -> Evaluator:
'''
Compute a set of common metrics using returns (for example, of an instrument or a portfolio)
Args:
timestamps (np.array of datetime64): Timestamps for the returns
rets (nd.array of float): The returns, use 0.01 for 1%
starting_equity (float): Starting equity value in your portfolio
leading_non_finite_to_zeros (bool, optional): If set, we replace leading nan, inf, -inf returns with zeros.
For example, you may need a warmup period for moving averages. Default False
subsequent_non_finite_to_zeros (bool, optional): If set, we replace any nans that follow the first non nan value with zeros.
There may be periods where you have no prices but removing these returns would result in incorrect annualization.
Default True
Returns:
An Evaluator object containing computed metrics off the returns passed in.
If needed, you can add your own metrics to this object based on the values of existing metrics and recompute the Evaluator.
Otherwise, you can just use the output of the evaluator using the metrics function.
>>> timestamps = np.array(['2015-01-01', '2015-03-01', '2015-05-01', '2015-09-01'], dtype='M8[D]')
>>> rets = np.array([0.01, 0.02, np.nan, -0.015])
>>> starting_equity = 1.e6
>>> ev = compute_return_metrics(timestamps, rets, starting_equity)
>>> metrics = ev.metrics()
>>> assert(round(metrics['gmean'], 6) == 0.021061)
>>> assert(round(metrics['sharpe'], 6) == 0.599382)
>>> assert(all(metrics['returns_3yr'] == np.array([0.01, 0.02, 0, -0.015])))
'''
assert(starting_equity > 0.)
assert(type(rets) == np.ndarray and rets.dtype == np.float64)
assert(type(timestamps) == np.ndarray and np.issubdtype(timestamps.dtype, np.datetime64) and monotonically_increasing(timestamps))
timestamps, rets = handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros, subsequent_non_finite_to_zeros)
ev = Evaluator({'timestamps': timestamps, 'returns': rets, 'starting_equity': starting_equity})
ev.add_metric('periods_per_year', compute_periods_per_year, dependencies=['timestamps'])
ev.add_metric('amean', compute_amean, dependencies=['returns', 'periods_per_year'])
ev.add_metric('std', compute_std, dependencies=['returns'])
ev.add_metric('up_periods', lambda returns: len(returns[returns > 0]), dependencies=['returns'])
ev.add_metric('down_periods', lambda returns: len(returns[returns < 0]), dependencies=['returns'])
ev.add_metric('up_pct',
lambda up_periods, down_periods: up_periods * 1.0 / (up_periods + down_periods) if (up_periods + down_periods) != 0 else np.nan,
dependencies=['up_periods', 'down_periods'])
ev.add_metric('gmean', compute_gmean, dependencies=['timestamps', 'returns', 'periods_per_year'])
ev.add_metric('sharpe', compute_sharpe, dependencies=['returns', 'periods_per_year', 'amean'])
ev.add_metric('sortino', compute_sortino, dependencies=['returns', 'periods_per_year', 'amean'])
ev.add_metric('equity', compute_equity, dependencies=['timestamps', 'starting_equity', 'returns'])
ev.add_metric('k_ratio', compute_k_ratio, dependencies=['equity', 'periods_per_year'])
ev.add_metric('k_ratio_weighted', lambda equity, periods_per_year: compute_k_ratio(equity, periods_per_year, 3),
dependencies=['equity', 'periods_per_year'])
# Drawdowns
ev.add_metric('rolling_dd', compute_rolling_dd, dependencies=['timestamps', 'equity'])
ev.add_metric('mdd_pct', lambda rolling_dd: compute_maxdd_pct(rolling_dd[1]), dependencies=['rolling_dd'])
ev.add_metric('mdd_date', lambda rolling_dd: compute_maxdd_date(rolling_dd[0], rolling_dd[1]), dependencies=['rolling_dd'])
ev.add_metric('mdd_start', lambda rolling_dd, mdd_date: compute_maxdd_start(rolling_dd[0], rolling_dd[1], mdd_date),
dependencies=['rolling_dd', 'mdd_date'])
ev.add_metric('mar', compute_mar, dependencies=['returns', 'periods_per_year', 'mdd_pct'])
ev.add_metric('timestamps_3yr', compute_dates_3yr, dependencies=['timestamps'])
ev.add_metric('returns_3yr', compute_returns_3yr, dependencies=['timestamps', 'returns'])
ev.add_metric('rolling_dd_3yr', compute_rolling_dd_3yr, dependencies=['timestamps', 'equity'])
ev.add_metric('mdd_pct_3yr', lambda rolling_dd_3yr: compute_maxdd_pct_3yr(rolling_dd_3yr[1]), dependencies=['rolling_dd_3yr'])
ev.add_metric('mdd_date_3yr', lambda rolling_dd_3yr: compute_maxdd_date_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1]),
dependencies=['rolling_dd_3yr'])
ev.add_metric('mdd_start_3yr', lambda rolling_dd_3yr, mdd_date_3yr:
compute_maxdd_start_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1], mdd_date_3yr),
dependencies=['rolling_dd_3yr', 'mdd_date_3yr'])
ev.add_metric('calmar', compute_calmar, dependencies=['returns_3yr', 'periods_per_year', 'mdd_pct_3yr'])
ev.add_metric('annual_returns', compute_annual_returns, dependencies=['timestamps', 'returns', 'periods_per_year'])
ev.add_metric('bucketed_returns', compute_bucketed_returns, dependencies=['timestamps', 'returns'])
ev.compute()
return ev
def display_return_metrics(metrics: Mapping[str, Any], float_precision: int = 3) -> pd.DataFrame:
'''
Creates a dataframe making it convenient to view the output of the metrics obtained using the compute_return_metrics function.
Args:
float_precision: Change if you want to display floats with more or less significant figures than the default,
3 significant figures.
Returns:
A one row dataframe with formatted metrics.
'''
from IPython.core.display import display
_metrics = {}
cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_start', 'mdd_date', 'dd_3y_pct',
'up_periods', 'down_periods', 'up_pct', 'mdd_start_3yr', 'mdd_date_3yr']
translate = {'shrp': 'sharpe', 'srt': 'sortino', 'dd_3y_pct': 'mdd_pct_3yr', 'k': 'k_ratio'}
for col in cols:
key = col
if col in translate: key = translate[col]
_metrics[col] = metrics[key]
_metrics['mdd_dates'] = f'{str(metrics["mdd_start"])[:10]}/{str(metrics["mdd_date"])[:10]}'
_metrics['up_dwn'] = f'{metrics["up_periods"]}/{metrics["down_periods"]}/{metrics["up_pct"]:.3g}'
_metrics['dd_3y_timestamps'] = f'{str(metrics["mdd_start_3yr"])[:10]}/{str(metrics["mdd_date_3yr"])[:10]}'
years = metrics['annual_returns'][0]
ann_rets = metrics['annual_returns'][1]
for i, year in enumerate(years):
_metrics[str(year)] = ann_rets[i]
format_str = '{:.' + str(float_precision) + 'g}'
for k, v in _metrics.items():
if isinstance(v, np.float) or isinstance(v, float):
_metrics[k] = format_str.format(v)
cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_dates', 'dd_3y_pct', 'dd_3y_timestamps', 'up_dwn'] + [
str(year) for year in sorted(years)]
df = pd.DataFrame(index=[''])
for metric_name, metric_value in _metrics.items():
df.insert(0, metric_name, metric_value)
df = df[cols]
display(df)
return df
def plot_return_metrics(metrics: Mapping[str, Any], title: str = None) -> Optional[Tuple[mpl_fig.Figure, mpl.axes.Axes]]:
'''
Plot equity, rolling drawdowns and and a boxplot of annual returns given the output of compute_return_metrics.
'''
timestamps = metrics['timestamps']
equity = metrics['equity']
equity = TimeSeries('equity', timestamps=timestamps, values=equity)
mdd_date, mdd_start = metrics['mdd_start'], metrics['mdd_date']
mdd_date_3yr, mdd_start_3yr = metrics['mdd_start_3yr'], metrics['mdd_date_3yr']
drawdown_lines = [DateLine(name='max dd', date=mdd_start, color='red'),
DateLine(date=mdd_date, color='red'),
DateLine(name='3y dd', date=mdd_start_3yr, color='orange'),
DateLine(date=mdd_date_3yr, color='orange')]
equity_subplot = Subplot(equity, ylabel='Equity', height_ratio=0.6, log_y=True, y_tick_format='${x:,.0f}',
date_lines=drawdown_lines, horizontal_lines=[HorizontalLine(metrics['starting_equity'], color='black')])
rolling_dd = TimeSeries('drawdowns', timestamps=metrics['rolling_dd'][0], values=metrics['rolling_dd'][1])
zero_line = HorizontalLine(y=0, color='black')
dd_subplot = Subplot(rolling_dd, ylabel='Drawdowns', height_ratio=0.2, date_lines=drawdown_lines, horizontal_lines=[zero_line])
years = metrics['bucketed_returns'][0]
ann_rets = metrics['bucketed_returns'][1]
ann_ret = BucketedValues('annual returns', bucket_names=years, bucket_values=ann_rets)
ann_ret_subplot = Subplot(ann_ret, ylabel='Annual Returns', height_ratio=0.2, horizontal_lines=[zero_line])
plt = Plot([equity_subplot, dd_subplot, ann_ret_subplot], title=title)
return plt.draw()
def test_evaluator() -> None:
from datetime import datetime, timedelta
np.random.seed(10)
timestamps = np.arange(datetime(2018, 1, 1), datetime(2018, 3, 1), timedelta(days=1))
rets = np.random.normal(size=len(timestamps)) / 1000
starting_equity = 1.e6
ev = compute_return_metrics(timestamps, rets, starting_equity)
display_return_metrics(ev.metrics())
plot_return_metrics(ev.metrics())
assert(round(ev.metric('sharpe'), 6) == 2.932954)
assert(round(ev.metric('sortino'), 6) == 5.690878)
assert(ev.metric('annual_returns')[0] == [2018])
assert(round(ev.metric('annual_returns')[1][0], 6) == [0.063530])
assert(ev.metric('mdd_start') == np.datetime64('2018-01-19'))
assert(ev.metric('mdd_date') == np.datetime64('2018-01-22'))
if __name__ == "__main__":
test_evaluator()
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| [
"numpy.sqrt",
"IPython.core.display.display",
"numpy.log",
"math.sqrt",
"math.log",
"numpy.nanmean",
"numpy.array",
"numpy.isfinite",
"datetime.timedelta",
"pandas.to_datetime",
"datetime.datetime",
"numpy.mean",
"numpy.where",
"pyqstrat.plot.Subplot",
"numpy.exp",
"numpy.issubdtype",
... | [((1059, 1086), 'pyqstrat.pq_utils.infer_frequency', 'infer_frequency', (['timestamps'], {}), '(timestamps)\n', (1074, 1086), False, 'from pyqstrat.pq_utils import monotonically_increasing, infer_frequency\n'), ((2343, 2379), 'pyqstrat.pq_utils.monotonically_increasing', 'monotonically_increasing', (['timestamps'], {}), '(timestamps)\n', (2367, 2379), False, 'from pyqstrat.pq_utils import monotonically_increasing, infer_frequency\n'), ((3178, 3198), 'numpy.isfinite', 'np.isfinite', (['returns'], {}), '(returns)\n', (3189, 3198), True, 'import numpy as np\n'), ((3637, 3655), 'numpy.nanstd', 'np.nanstd', (['returns'], {}), '(returns)\n', (3646, 3655), True, 'import numpy as np\n'), ((4254, 4291), 'numpy.where', 'np.where', (['(returns > 0.0)', '(0.0)', 'returns'], {}), '(returns > 0.0, 0.0, returns)\n', (4262, 4291), True, 'import numpy as np\n'), ((4312, 4335), 'numpy.std', 'np.std', (['normalized_rets'], {}), '(normalized_rets)\n', (4318, 4335), True, 'import numpy as np\n'), ((5102, 5117), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (5108, 5117), True, 'import numpy as np\n'), ((6478, 6492), 'numpy.log', 'np.log', (['equity'], {}), '(equity)\n', (6484, 6492), True, 'import numpy as np\n'), ((7828, 7863), 'pandas.Series', 'pd.Series', (['equity'], {'index': 'timestamps'}), '(equity, index=timestamps)\n', (7837, 7863), True, 'import pandas as pd\n'), ((7924, 7989), 'numpy.where', 'np.where', (['(s >= rolling_max)', '(0.0)', '(-(s - rolling_max) / rolling_max)'], {}), '(s >= rolling_max, 0.0, -(s - rolling_max) / rolling_max)\n', (7932, 7989), True, 'import numpy as np\n'), ((8225, 8246), 'numpy.nanmax', 'np.nanmax', (['rolling_dd'], {}), '(rolling_dd)\n', (8234, 8246), True, 'import numpy as np\n'), ((9691, 9716), 'pandas.to_datetime', 'pd.to_datetime', (['last_date'], {}), '(last_date)\n', (9705, 9716), True, 'import pandas as pd\n'), ((10540, 10565), 'pandas.to_datetime', 'pd.to_datetime', (['last_date'], {}), '(last_date)\n', (10554, 10565), 
True, 'import pandas as pd\n'), ((12251, 12287), 'pandas.Series', 'pd.Series', (['returns'], {'index': 'timestamps'}), '(returns, index=timestamps)\n', (12260, 12287), True, 'import pandas as pd\n'), ((13087, 13142), 'pandas.DataFrame', 'pd.DataFrame', (["{'ret': returns, 'timestamp': timestamps}"], {}), "({'ret': returns, 'timestamp': timestamps})\n", (13099, 13142), True, 'import pandas as pd\n'), ((25970, 25994), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['']"}), "(index=[''])\n", (25982, 25994), True, 'import pandas as pd\n'), ((26129, 26140), 'IPython.core.display.display', 'display', (['df'], {}), '(df)\n', (26136, 26140), False, 'from IPython.core.display import display\n'), ((26493, 26551), 'pyqstrat.plot.TimeSeries', 'TimeSeries', (['"""equity"""'], {'timestamps': 'timestamps', 'values': 'equity'}), "('equity', timestamps=timestamps, values=equity)\n", (26503, 26551), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27258, 27356), 'pyqstrat.plot.TimeSeries', 'TimeSeries', (['"""drawdowns"""'], {'timestamps': "metrics['rolling_dd'][0]", 'values': "metrics['rolling_dd'][1]"}), "('drawdowns', timestamps=metrics['rolling_dd'][0], values=metrics\n ['rolling_dd'][1])\n", (27268, 27356), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27368, 27402), 'pyqstrat.plot.HorizontalLine', 'HorizontalLine', ([], {'y': '(0)', 'color': '"""black"""'}), "(y=0, color='black')\n", (27382, 27402), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27420, 27539), 'pyqstrat.plot.Subplot', 'Subplot', (['rolling_dd'], {'ylabel': '"""Drawdowns"""', 'height_ratio': '(0.2)', 'date_lines': 'drawdown_lines', 'horizontal_lines': '[zero_line]'}), "(rolling_dd, ylabel='Drawdowns', height_ratio=0.2, date_lines=\n drawdown_lines, horizontal_lines=[zero_line])\n", (27427, 27539), False, 'from pyqstrat.plot 
import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27643, 27719), 'pyqstrat.plot.BucketedValues', 'BucketedValues', (['"""annual returns"""'], {'bucket_names': 'years', 'bucket_values': 'ann_rets'}), "('annual returns', bucket_names=years, bucket_values=ann_rets)\n", (27657, 27719), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27742, 27835), 'pyqstrat.plot.Subplot', 'Subplot', (['ann_ret'], {'ylabel': '"""Annual Returns"""', 'height_ratio': '(0.2)', 'horizontal_lines': '[zero_line]'}), "(ann_ret, ylabel='Annual Returns', height_ratio=0.2,\n horizontal_lines=[zero_line])\n", (27749, 27835), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((27847, 27911), 'pyqstrat.plot.Plot', 'Plot', (['[equity_subplot, dd_subplot, ann_ret_subplot]'], {'title': 'title'}), '([equity_subplot, dd_subplot, ann_ret_subplot], title=title)\n', (27851, 27911), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((28016, 28034), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (28030, 28034), True, 'import numpy as np\n'), ((28805, 28862), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (28820, 28862), False, 'import doctest\n'), ((1564, 1583), 'numpy.nanmean', 'np.nanmean', (['returns'], {}), '(returns)\n', (1574, 1583), True, 'import numpy as np\n'), ((3401, 3435), 'numpy.power', 'np.power', (['g_mean', 'periods_per_year'], {}), '(g_mean, periods_per_year)\n', (3409, 3435), True, 'import numpy as np\n'), ((6444, 6463), 'numpy.isfinite', 'np.isfinite', (['equity'], {}), '(equity)\n', (6455, 6463), True, 'import numpy as np\n'), ((6699, 6712), 'numpy.exp', 'np.exp', (['(k * t)'], {}), '(k * t)\n', (6705, 6712), True, 'import numpy as np\n'), ((7385, 7410), 'numpy.cumprod', 
'np.cumprod', (['(1.0 + returns)'], {}), '(1.0 + returns)\n', (7395, 7410), True, 'import numpy as np\n'), ((8579, 8600), 'numpy.argmax', 'np.argmax', (['rolling_dd'], {}), '(rolling_dd)\n', (8588, 8600), True, 'import numpy as np\n'), ((8918, 8937), 'pandas.isnull', 'pd.isnull', (['mdd_date'], {}), '(mdd_date)\n', (8927, 8937), True, 'import pandas as pd\n'), ((9307, 9324), 'numpy.isnan', 'np.isnan', (['mdd_pct'], {}), '(mdd_pct)\n', (9315, 9324), True, 'import numpy as np\n'), ((9624, 9651), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""M8[D]"""'}), "([], dtype='M8[D]')\n", (9632, 9651), True, 'import numpy as np\n'), ((10097, 10125), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (10105, 10125), True, 'import numpy as np\n'), ((10473, 10500), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""M8[D]"""'}), "([], dtype='M8[D]')\n", (10481, 10500), True, 'import numpy as np\n'), ((13362, 13377), 'numpy.array', 'np.array', (['years'], {}), '(years)\n', (13370, 13377), True, 'import numpy as np\n'), ((13379, 13395), 'numpy.array', 'np.array', (['gmeans'], {}), '(gmeans)\n', (13387, 13395), True, 'import numpy as np\n'), ((18299, 18318), 'numpy.nan_to_num', 'np.nan_to_num', (['rets'], {}), '(rets)\n', (18312, 18318), True, 'import numpy as np\n'), ((20564, 20610), 'numpy.issubdtype', 'np.issubdtype', (['timestamps.dtype', 'np.datetime64'], {}), '(timestamps.dtype, np.datetime64)\n', (20577, 20610), True, 'import numpy as np\n'), ((20615, 20651), 'pyqstrat.pq_utils.monotonically_increasing', 'monotonically_increasing', (['timestamps'], {}), '(timestamps)\n', (20639, 20651), False, 'from pyqstrat.pq_utils import monotonically_increasing, infer_frequency\n'), ((26726, 26778), 'pyqstrat.plot.DateLine', 'DateLine', ([], {'name': '"""max dd"""', 'date': 'mdd_start', 'color': '"""red"""'}), "(name='max dd', date=mdd_start, color='red')\n", (26734, 26778), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, 
HorizontalLine, BucketedValues, Plot\n'), ((26802, 26838), 'pyqstrat.plot.DateLine', 'DateLine', ([], {'date': 'mdd_date', 'color': '"""red"""'}), "(date=mdd_date, color='red')\n", (26810, 26838), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((26862, 26920), 'pyqstrat.plot.DateLine', 'DateLine', ([], {'name': '"""3y dd"""', 'date': 'mdd_start_3yr', 'color': '"""orange"""'}), "(name='3y dd', date=mdd_start_3yr, color='orange')\n", (26870, 26920), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((26944, 26987), 'pyqstrat.plot.DateLine', 'DateLine', ([], {'date': 'mdd_date_3yr', 'color': '"""orange"""'}), "(date=mdd_date_3yr, color='orange')\n", (26952, 26987), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((28062, 28082), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (28070, 28082), False, 'from datetime import datetime, timedelta\n'), ((28084, 28104), 'datetime.datetime', 'datetime', (['(2018)', '(3)', '(1)'], {}), '(2018, 3, 1)\n', (28092, 28104), False, 'from datetime import datetime, timedelta\n'), ((28106, 28123), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (28115, 28123), False, 'from datetime import datetime, timedelta\n'), ((28634, 28661), 'numpy.datetime64', 'np.datetime64', (['"""2018-01-19"""'], {}), "('2018-01-19')\n", (28647, 28661), True, 'import numpy as np\n'), ((28699, 28726), 'numpy.datetime64', 'np.datetime64', (['"""2018-01-22"""'], {}), "('2018-01-22')\n", (28712, 28726), True, 'import numpy as np\n'), ((4111, 4129), 'numpy.isfinite', 'np.isfinite', (['amean'], {}), '(amean)\n', (4122, 4129), True, 'import numpy as np\n'), ((4195, 4215), 'numpy.isfinite', 'np.isfinite', (['returns'], {}), '(returns)\n', (4206, 4215), True, 'import numpy as np\n'), ((4973, 4991), 'numpy.isfinite', 'np.isfinite', 
(['amean'], {}), '(amean)\n', (4984, 4991), True, 'import numpy as np\n'), ((5057, 5077), 'numpy.isfinite', 'np.isfinite', (['returns'], {}), '(returns)\n', (5068, 5077), True, 'import numpy as np\n'), ((6612, 6625), 'math.log', 'math.log', (['(0.5)'], {}), '(0.5)\n', (6620, 6625), False, 'import math\n'), ((7761, 7789), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""M8[ns]"""'}), "([], dtype='M8[ns]')\n", (7769, 7789), True, 'import numpy as np\n'), ((7791, 7819), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (7799, 7819), True, 'import numpy as np\n'), ((9367, 9383), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (9374, 9383), True, 'import numpy as np\n'), ((12186, 12212), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.str'}), '([], dtype=np.str)\n', (12194, 12212), True, 'import numpy as np\n'), ((12214, 12242), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (12222, 12242), True, 'import numpy as np\n'), ((13021, 13047), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.str'}), '([], dtype=np.str)\n', (13029, 13047), True, 'import numpy as np\n'), ((13049, 13077), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (13057, 13077), True, 'import numpy as np\n'), ((18080, 18121), 'numpy.nan_to_num', 'np.nan_to_num', (['rets[:first_non_nan_index]'], {}), '(rets[:first_non_nan_index])\n', (18093, 18121), True, 'import numpy as np\n'), ((18361, 18378), 'numpy.isfinite', 'np.isfinite', (['rets'], {}), '(rets)\n', (18372, 18378), True, 'import numpy as np\n'), ((18400, 18417), 'numpy.isfinite', 'np.isfinite', (['rets'], {}), '(rets)\n', (18411, 18417), True, 'import numpy as np\n'), ((4409, 4434), 'numpy.sqrt', 'np.sqrt', (['periods_per_year'], {}), '(periods_per_year)\n', (4416, 4434), True, 'import numpy as np\n'), ((5166, 5191), 'numpy.sqrt', 'np.sqrt', (['periods_per_year'], {}), '(periods_per_year)\n', (5173, 5191), True, 'import 
numpy as np\n'), ((6808, 6887), 'statsmodels.regression.linear_model.WLS', 'sm.regression.linear_model.WLS', ([], {'endog': 'equity', 'exog': 't', 'weights': 'w', 'hasconst': '(False)'}), '(endog=equity, exog=t, weights=w, hasconst=False)\n', (6838, 6887), True, 'import statsmodels as sm\n'), ((7072, 7099), 'math.sqrt', 'math.sqrt', (['periods_per_year'], {}), '(periods_per_year)\n', (7081, 7099), False, 'import math\n'), ((17780, 17794), 'numpy.isnan', 'np.isnan', (['rets'], {}), '(rets)\n', (17788, 17794), True, 'import numpy as np\n'), ((27175, 27232), 'pyqstrat.plot.HorizontalLine', 'HorizontalLine', (["metrics['starting_equity']"], {'color': '"""black"""'}), "(metrics['starting_equity'], color='black')\n", (27189, 27232), False, 'from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot\n'), ((2440, 2462), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (2454, 2462), True, 'import numpy as np\n')] |
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Provides some tools related to PDEs.
Currently includes:
- Projector - to project a discontinuous function onto a continuous function
- Locator - to trace values in data objects at a certain location
- TimeIntegrationManager - to handle extrapolation in time
  - SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
from . import escriptcpp as escore
from . import linearPDEs
from . import util
import math
import numpy
class TimeIntegrationManager(object):
    """
    A simple mechanism to manage time dependent values.

    Typical usage is::

       dt=0.1 # time increment
       tm=TimeIntegrationManager(inital_value,p=1)
       while t<1.
           v_guess=tm.extrapolate(dt) # extrapolate to t+dt
           v=...
           tm.checkin(dt,v)
           t+=dt

    :note: currently only p=1 is supported.
    """
    def __init__(self, *initial_values, **kwargs):
        """
        Sets up the value manager where ``initial_values`` are the initial
        values and p is the order used for extrapolation.

        :keyword p: extrapolation order, defaults to 1 (the only supported
                    order)
        :keyword time: initial time, defaults to 0.
        """
        self.__p = kwargs.get("p", 1)        # extrapolation order
        self.__t = kwargs.get("time", 0.)    # current time
        # history of checked-in value tuples, most recent first
        self.__v_mem = [initial_values]
        # number of time steps checked in so far, capped at p
        self.__order = 0
        # history of time step sizes matching __v_mem
        self.__dt_mem = []
        self.__num_val = len(initial_values)

    def getTime(self):
        """
        Returns the current time.
        """
        return self.__t

    def getValue(self):
        """
        Returns the most recently checked-in values (or the initial values if
        nothing has been checked in yet). A single value is returned
        unwrapped, several values as a sequence.
        """
        out = self.__v_mem[0]
        if len(out) == 1:
            return out[0]
        else:
            return out

    def checkin(self, dt, *values):
        """
        Adds new values to the manager. The p+1 last values are kept, older
        ones are lost.

        :param dt: size of the time step that produced ``values``
        :param values: new values at the advanced time
        """
        # new history depth; was computed (and assigned) twice in the
        # original code - once is enough
        o = min(self.__order + 1, self.__p)
        v_mem_new = [values]
        dt_mem_new = [dt]
        for i in range(o - 1):
            v_mem_new.append(self.__v_mem[i])
            dt_mem_new.append(self.__dt_mem[i])
        v_mem_new.append(self.__v_mem[o - 1])
        self.__order = o
        self.__v_mem = v_mem_new
        self.__dt_mem = dt_mem_new
        self.__t += dt

    def extrapolate(self, dt):
        """
        Extrapolates to ``dt`` forward in time.

        Before any value has been checked in the stored values are returned
        unchanged; afterwards a first-order (linear) extrapolation from the
        two most recent values is used.

        :param dt: time increment to extrapolate over
        :return: the extrapolated value(s), or ``None`` if the manager holds
                 no values
        """
        if self.__order == 0:
            out = self.__v_mem[0]
        else:
            # linear extrapolation: v(t+dt) = v0 + dt/dt0 * (v0 - v1)
            out = []
            for i in range(self.__num_val):
                out.append((1. + dt / self.__dt_mem[0]) * self.__v_mem[0][i]
                           - dt / self.__dt_mem[0] * self.__v_mem[1][i])
        if len(out) == 0:
            return None
        elif len(out) == 1:
            return out[0]
        else:
            return out
class Projector(object):
    """
    The Projector is a factory which projects a discontinuous function onto a
    continuous function on a given domain.
    """
    def __init__(self, domain, reduce=True, fast=True):
        """
        Creates a continuous function space projector for a domain.

        :param domain: Domain of the projection.
        :param reduce: Flag to reduce projection order
        :param fast: Flag to use a fast method based on matrix lumping
        """
        self.__pde = linearPDEs.LinearPDE(domain)
        if fast:
            # lumping turns the mass-matrix solve into a cheap diagonal scaling
            self.__pde.getSolverOptions().setSolverMethod(linearPDEs.SolverOptions.LUMPING)
        self.__pde.setSymmetryOn()
        self.__pde.setReducedOrderTo(reduce)
        self.__pde.setValue(D=1.)

    def getSolverOptions(self):
        """
        Returns the solver options of the PDE solver.

        :rtype: `linearPDEs.SolverOptions`
        """
        return self.__pde.getSolverOptions()

    def getValue(self, input_data):
        """
        Projects ``input_data`` onto a continuous function.

        :param input_data: the data to be projected
        """
        return self(input_data)

    def __call__(self, input_data):
        """
        Projects ``input_data`` onto a continuous function.

        The projection is solved component by component: each scalar
        component of ``input_data`` becomes the right hand side Y of the
        PDE D*u=Y with D=1, and the solution is copied into the
        corresponding component of the result.

        :param input_data: the data to be projected
        """
        shape = input_data.getShape()
        rank = input_data.getRank()
        out = escore.Data(0., shape, self.__pde.getFunctionSpaceForSolution())
        # clear any right hand side left over from a previous call
        self.__pde.setValue(Y=escore.Data(), Y_reduced=escore.Data())
        if rank == 0:
            self.__pde.setValue(Y=input_data)
            out = self.__pde.getSolution()
        elif rank == 1:
            for k0 in range(shape[0]):
                self.__pde.setValue(Y=input_data[k0])
                out[k0] = self.__pde.getSolution()
        elif rank == 2:
            for k0 in range(shape[0]):
                for k1 in range(shape[1]):
                    self.__pde.setValue(Y=input_data[k0, k1])
                    out[k0, k1] = self.__pde.getSolution()
        elif rank == 3:
            for k0 in range(shape[0]):
                for k1 in range(shape[1]):
                    for k2 in range(shape[2]):
                        self.__pde.setValue(Y=input_data[k0, k1, k2])
                        out[k0, k1, k2] = self.__pde.getSolution()
        else:
            for k0 in range(shape[0]):
                for k1 in range(shape[1]):
                    for k2 in range(shape[2]):
                        for k3 in range(shape[3]):
                            self.__pde.setValue(Y=input_data[k0, k1, k2, k3])
                            out[k0, k1, k2, k3] = self.__pde.getSolution()
        return out
class NoPDE(object):
    """
    Solves the following problem for u:

    *kronecker[i,j]*D[j]*u[j]=Y[i]*

    with constraint

    *u[j]=r[j]* where *q[j]>0*

    where *D*, *Y*, *r* and *q* are given functions of rank 1.

    In the case of scalars this takes the form

    *D*u=Y*

    with constraint

    *u=r* where *q>0*

    where *D*, *Y*, *r* and *q* are given scalar functions.

    The constraint overwrites any other condition.

    :note: This class is similar to the `linearPDEs.LinearPDE` class with
           A=B=C=X=0 but has the intention that all input parameters are given
           in `Solution` or `ReducedSolution`.
    """
    def __init__(self, domain, D=None, Y=None, q=None, r=None):
        """
        Initializes the problem.

        :param domain: domain of the PDE
        :type domain: `Domain`
        :param D: coefficient of the solution
        :type D: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param Y: right hand side
        :type Y: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param q: location of constraints
        :type q: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param r: value of solution at locations of constraints
        :type r: ``float``, ``int``, ``numpy.ndarray``, `Data`
        """
        self.__domain = domain
        self.__D = D
        self.__Y = Y
        self.__q = q
        self.__r = r
        # cached solution; reset to None whenever an input changes
        self.__u = None
        self.__function_space = escore.Solution(self.__domain)

    def setReducedOn(self):
        """
        Sets the `FunctionSpace` of the solution to `ReducedSolution`.
        """
        self.__function_space = escore.ReducedSolution(self.__domain)
        self.__u = None

    def setReducedOff(self):
        """
        Sets the `FunctionSpace` of the solution to `Solution`.
        """
        self.__function_space = escore.Solution(self.__domain)
        self.__u = None

    def setValue(self, D=None, Y=None, q=None, r=None):
        """
        Assigns values to the parameters. Any parameter left as ``None``
        keeps its current value; each assignment invalidates the cached
        solution.

        :param D: coefficient of the solution
        :type D: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param Y: right hand side
        :type Y: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param q: location of constraints
        :type q: ``float``, ``int``, ``numpy.ndarray``, `Data`
        :param r: value of solution at locations of constraints
        :type r: ``float``, ``int``, ``numpy.ndarray``, `Data`
        """
        if D is not None:
            self.__D = D
            self.__u = None
        if Y is not None:
            self.__Y = Y
            self.__u = None
        if q is not None:
            self.__q = q
            self.__u = None
        if r is not None:
            self.__r = r
            self.__u = None

    def getSolution(self):
        """
        Returns the solution.

        :return: the solution of the problem
        :rtype: `Data` object in the `FunctionSpace` `Solution` or
                `ReducedSolution`
        """
        if self.__u is None:
            if self.__D is None:
                raise ValueError("coefficient D is undefined")
            coeff = escore.Data(self.__D, self.__function_space)
            if coeff.getRank() > 1:
                raise ValueError("coefficient D must have rank 0 or 1")
            if self.__Y is None:
                self.__u = escore.Data(0., coeff.getShape(), self.__function_space)
            else:
                self.__u = (1. / coeff) * self.__Y
            if self.__q is not None:
                # mask is 1 where a constraint applies, 0 elsewhere
                mask = util.wherePositive(escore.Data(self.__q, self.__function_space))
                self.__u *= 1. - mask
                if self.__r is not None:
                    self.__u += mask * self.__r
        return self.__u
class Locator(object):
    """
    Locator provides access to the values of data objects at a given spatial
    coordinate x.

    In fact, a Locator object finds the sample in the set of samples of a
    given function space or domain which is closest to the given point x.
    """
    def __init__(self, where, x=numpy.zeros((3,))):
        """
        Initializes a Locator to access values in Data objects on the Domain
        or FunctionSpace for the sample point which is closest to the given
        point x.

        :param where: function space
        :type where: `escript.FunctionSpace`
        :param x: location(s) of the Locator
        :type x: ``numpy.ndarray`` or ``list`` of ``numpy.ndarray``

        :note: the mutable default array is only read, never written, so
               sharing it between calls is safe.
        """
        if isinstance(where, escore.FunctionSpace):
            self.__function_space = where
        else:
            self.__function_space = escore.ContinuousFunction(where)
        # decide whether x is one point or a list of points: a list whose
        # first entry is iterable is treated as a list of points
        iterative = False
        if isinstance(x, list):
            if len(x) == 0:
                raise ValueError("At least one point must be given.")
            try:
                iter(x[0])
                iterative = True
            except TypeError:
                iterative = False
        xxx = self.__function_space.getX()
        if iterative:
            self.__id = []
            for p in x:
                self.__id.append(util.length(xxx - p[:self.__function_space.getDim()]).internal_minGlobalDataPoint())
        else:
            self.__id = util.length(xxx - x[:self.__function_space.getDim()]).internal_minGlobalDataPoint()

    def __str__(self):
        """
        Returns the coordinates of the Locator as a string.
        """
        x = self.getX()
        if isinstance(x, list):
            out = "["
            first = True
            for xx in x:
                if not first:
                    out += ","
                else:
                    first = False
                out += str(xx)
            out += "]>"
        else:
            out = str(x)
        return out

    def getX(self):
        """
        Returns the exact coordinates of the Locator.
        """
        return self(self.getFunctionSpace().getX())

    def getFunctionSpace(self):
        """
        Returns the function space of the Locator.
        """
        return self.__function_space

    def getId(self, item=None):
        """
        Returns the identifier of the location.

        :param item: for a list-of-points Locator, index of the point whose
                     identifier is returned; ignored for a single point
        """
        if item is None:
            return self.__id
        else:
            if isinstance(self.__id, list):
                return self.__id[item]
            else:
                return self.__id

    def __call__(self, data):
        """
        Returns the value of data at the Locator of a Data object.
        """
        return self.getValue(data)

    def getValue(self, data):
        """
        Returns the value of ``data`` at the Locator if ``data`` is a `Data`
        object otherwise the object is returned.
        """
        if isinstance(data, escore.Data):
            dat = util.interpolate(data, self.getFunctionSpace())
            ii = self.getId()
            r = data.getRank()
            if isinstance(ii, list):
                out = []
                for i in ii:
                    o = numpy.array(dat.getTupleForGlobalDataPoint(*i))
                    # scalars are unwrapped from their 1-element tuple
                    if r == 0:
                        out.append(o[0])
                    else:
                        out.append(o)
                return out
            else:
                out = numpy.array(dat.getTupleForGlobalDataPoint(*ii))
                if r == 0:
                    return out[0]
                else:
                    return out
        else:
            return data

    def setValue(self, data, v):
        """
        Sets the value of the ``data`` at the Locator.
        """
        if isinstance(data, escore.Data):
            if data.getFunctionSpace() != self.getFunctionSpace():
                raise TypeError("setValue: FunctionSpace of Locator and Data object must match.")
            data.expand()
            ii = self.getId()
            if isinstance(ii, list):
                # BUG FIX: this loop previously iterated over the builtin
                # ``id`` instead of the local ``ii``, which raised a
                # TypeError as soon as a multi-point Locator was updated
                for i in ii:
                    data._setTupleForGlobalDataPoint(i[1], i[0], v)
            else:
                data._setTupleForGlobalDataPoint(ii[1], ii[0], v)
        else:
            raise TypeError("setValue: Invalid argument type.")
def getInfLocator(arg):
    """
    Return a `Locator` pointing at a sample where ``arg`` attains its
    global minimum.
    """
    if not isinstance(arg, escore.Data):
        raise TypeError("getInfLocator: Unknown argument type.")
    minimum = util.inf(arg)
    # the closest sample is where |arg - inf(arg)| vanishes; this yields the
    # location id but not the coordinates
    point_id = util.length(arg - minimum).internal_minGlobalDataPoint()
    fs = arg.getFunctionSpace()
    coords = fs.getX().getTupleForGlobalDataPoint(*point_id)
    return Locator(fs, coords)
def getSupLocator(arg):
    """
    Return a `Locator` pointing at a sample where ``arg`` attains its
    global maximum.
    """
    if not isinstance(arg, escore.Data):
        raise TypeError("getSupLocator: Unknown argument type.")
    maximum = util.sup(arg)
    # the closest sample is where |arg - sup(arg)| vanishes; this yields the
    # location id but not the coordinates
    point_id = util.length(arg - maximum).internal_minGlobalDataPoint()
    fs = arg.getFunctionSpace()
    coords = fs.getX().getTupleForGlobalDataPoint(*point_id)
    return Locator(fs, coords)
class SolverSchemeException(Exception):
    """
    Generic base class for exceptions thrown by the iterative solvers in
    this module.
    """
class IndefinitePreconditioner(SolverSchemeException):
    """
    Raised when the preconditioner turns out not to be positive definite.
    """
class MaxIterReached(SolverSchemeException):
    """
    Raised when the maximum number of iteration steps is reached.
    """
class CorrectionFailed(SolverSchemeException):
    """
    Raised when the solution correction scheme fails to converge.
    """
class IterationBreakDown(SolverSchemeException):
    """
    Raised when the iteration scheme encounters an incurable breakdown.
    """
class NegativeNorm(SolverSchemeException):
    """
    Raised when a norm calculation returns a negative value.
    """
def PCG(r, Aprod, x, Msolve, bilinearform, atol=0, rtol=1.e-8, iter_max=100, initial_guess=True, verbose=False):
    """
    Solver for

    *Ax=b*

    with a symmetric and positive definite operator A (more details required!).
    It uses the conjugate gradient method with preconditioner M providing an
    approximation of A.

    The iteration is terminated if

    *|r| <= atol+rtol*|r0|*

    where *r0* is the initial residual and *|.|* is the energy norm. In fact

    *|r| = sqrt( bilinearform(Msolve(r),r))*

    For details on the preconditioned conjugate gradient method see the book:
    "Templates for the Solution of Linear Systems by <NAME>, <NAME>,
    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
    <NAME>, and <NAME>".

    :param r: initial residual *r=b-Ax*. ``r`` is altered.
    :type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param x: an initial guess for the solution
    :type x: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param Aprod: returns the value Ax
    :type Aprod: function ``Aprod(x)`` where ``x`` is of the same object like
                 argument ``x``. The returned object needs to be of the same
                 type like argument ``r``.
    :param Msolve: solves Mx=r
    :type Msolve: function ``Msolve(r)`` where ``r`` is of the same type like
                  argument ``r``. The returned object needs to be of the same
                  type like argument ``x``.
    :param bilinearform: inner product ``<x,r>``
    :type bilinearform: function ``bilinearform(x,r)`` where ``x`` is of the same
                        type like argument ``x`` and ``r`` is. The returned value
                        is a ``float``.
    :param atol: absolute tolerance
    :type atol: non-negative ``float``
    :param rtol: relative tolerance
    :type rtol: non-negative ``float``
    :param iter_max: maximum number of iteration steps
    :type iter_max: ``int``
    :return: the solution approximation and the corresponding residual
    :rtype: ``tuple``
    :warning: ``r`` and ``x`` are altered.
    """
    # NOTE(review): the initial_guess parameter is accepted but never used
    iter=0
    rhat=Msolve(r)                        # preconditioned residual
    d = rhat                              # initial search direction
    rhat_dot_r = bilinearform(rhat, r)    # squared energy norm of the residual
    if rhat_dot_r<0: raise NegativeNorm("negative norm.")
    norm_r0=math.sqrt(rhat_dot_r)
    atol2=atol+rtol*norm_r0
    if atol2<=0:
        raise ValueError("Non-positive tolerance.")
    # never demand more than what is resolvable in floating point
    atol2=max(atol2, 100. * util.EPSILON * norm_r0)
    if verbose: print(("PCG: initial residual norm = %e (absolute tolerance = %e)"%(norm_r0, atol2)))
    while not math.sqrt(rhat_dot_r) <= atol2:
        iter+=1
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        q=Aprod(d)
        alpha = rhat_dot_r / bilinearform(d, q)   # optimal step length
        x += alpha * d
        if isinstance(q,ArithmeticTuple):
            r += q * (-alpha)      # Doing it the other way calls the float64.__mul__ not AT.__rmul__
        else:
            r += (-alpha) * q
        rhat=Msolve(r)
        rhat_dot_r_new = bilinearform(rhat, r)
        beta = rhat_dot_r_new / rhat_dot_r        # Fletcher-Reeves update factor
        # new direction d = rhat + beta*d (rhat is rebound at the top of the
        # next iteration, so aliasing d to it here is safe)
        rhat+=beta * d
        d=rhat
        rhat_dot_r = rhat_dot_r_new
        if rhat_dot_r<0: raise NegativeNorm("negative norm.")
        if verbose: print(("PCG: iteration step %s: residual norm = %e"%(iter, math.sqrt(rhat_dot_r))))
    if verbose: print(("PCG: tolerance reached after %s steps."%iter))
    return x,r,math.sqrt(rhat_dot_r)
class Defect(object):
    """
    Abstract representation of a non-linear defect F(x) of a variable x.

    Subclasses must override `bilinearform` and `eval` before the object can
    be used; `derivative` then provides a finite-difference directional
    derivative based on those two methods.
    """
    def __init__(self):
        """
        Initializes the defect with the default derivative increment.
        """
        self.setDerivativeIncrementLength()

    def bilinearform(self, x0, x1):
        """
        Returns the inner product of x0 and x1.

        NOTE: MUST BE OVERRIDDEN BY A SUBCLASS

        :param x0: value for x0
        :param x1: value for x1
        :return: the inner product of x0 and x1
        :rtype: ``float``
        """
        raise NotImplementedError("Defect bilinearform method not overridden")

    def norm(self, x):
        """
        Returns the norm of argument ``x``.

        :param x: a value
        :return: norm of argument x
        :rtype: ``float``
        :note: by default ``sqrt(self.bilinearform(x,x)`` is returned.
        """
        inner = self.bilinearform(x, x)
        if inner < 0:
            raise NegativeNorm("negative norm.")
        return math.sqrt(inner)

    def eval(self, x):
        """
        Returns the value F of a given ``x``.

        NOTE: MUST BE OVERRIDDEN BY A SUBCLASS

        :param x: value for which the defect ``F`` is evaluated
        :return: value of the defect at ``x``
        """
        raise NotImplementedError("Defect eval() method not overridden")

    def __call__(self, x):
        return self.eval(x)

    def setDerivativeIncrementLength(self,inc=1000.*math.sqrt(util.EPSILON)):
        """
        Sets the relative length of the increment used to approximate the
        derivative of the defect. The increment is inc*norm(x)/norm(v)*v in
        the direction of v with x as a starting point.

        :param inc: relative increment length
        :type inc: positive ``float``
        """
        if inc <= 0:
            raise ValueError("positive increment required.")
        self.__inc = inc

    def getDerivativeIncrementLength(self):
        """
        Returns the relative increment length used to approximate the
        derivative of the defect.

        :return: relative increment length
        :rtype: positive ``float``
        """
        return self.__inc

    def derivative(self, F0, x0, v, v_is_normalised=True):
        """
        Returns the directional derivative at ``x0`` in the direction of
        ``v``.

        :param F0: value of this defect at x0
        :param x0: value at which derivative is calculated
        :param v: direction
        :param v_is_normalised: True to indicate that ``v`` is nomalized
                                (self.norm(v)=0)
        :return: derivative of this defect at x0 in the direction of ``v``
        :note: by default numerical evaluation (self.eval(x0+eps*v)-F0)/eps is
               used but this method maybe overwritten to use exact evaluation.
        """
        eps = self.getDerivativeIncrementLength()
        x0_norm = self.norm(x0)
        if x0_norm > 0:
            eps *= x0_norm                 # scale increment to the size of x0
        if not v_is_normalised:
            v_norm = self.norm(v)
            if v_norm <= 0:
                return F0*0                # zero direction -> zero derivative
            eps /= v_norm
        F1 = self.eval(x0 + eps * v)
        return (F1 - F0) / eps
######################################
def NewtonGMRES(defect, x, iter_max=100, sub_iter_max=20, atol=0,rtol=1.e-4, subtol_max=0.5, gamma=0.9, verbose=False):
    """
    Solves a non-linear problem *F(x)=0* for unknown *x* using the stopping
    criterion:

    *norm(F(x) <= atol + rtol * norm(F(x0)*

    where *x0* is the initial guess.

    :param defect: object defining the function *F*. ``defect.norm`` defines the
                   *norm* used in the stopping criterion.
    :type defect: `Defect`
    :param x: initial guess for the solution, ``x`` is altered.
    :type x: any object type allowing basic operations such as
             ``numpy.ndarray``, `Data`
    :param iter_max: maximum number of iteration steps
    :type iter_max: positive ``int``
    :param sub_iter_max: maximum number of inner iteration steps
    :type sub_iter_max: positive ``int``
    :param atol: absolute tolerance for the solution
    :type atol: positive ``float``
    :param rtol: relative tolerance for the solution
    :type rtol: positive ``float``
    :param gamma: tolerance safety factor for inner iteration
    :type gamma: positive ``float``, less than 1
    :param subtol_max: upper bound for inner tolerance
    :type subtol_max: positive ``float``, less than 1
    :return: an approximation of the solution with the desired accuracy
    :rtype: same type as the initial guess
    """
    # NOTE(review): lmaxit is assigned but never used
    lmaxit=iter_max
    if atol<0: raise ValueError("atol needs to be non-negative.")
    if rtol<0: raise ValueError("rtol needs to be non-negative.")
    if rtol+atol<=0: raise ValueError("rtol or atol needs to be non-negative.")
    if gamma<=0 or gamma>=1: raise ValueError("tolerance safety factor for inner iteration (gamma =%s) needs to be positive and less than 1."%gamma)
    if subtol_max<=0 or subtol_max>=1: raise ValueError("upper bound for inner tolerance for inner iteration (subtol_max =%s) needs to be positive and less than 1."%subtol_max)
    F=defect(x)
    fnrm=defect.norm(F)
    stop_tol=atol + rtol*fnrm
    subtol=subtol_max
    if verbose: print(("NewtonGMRES: initial residual = %e."%fnrm))
    if verbose: print((" tolerance = %e."%subtol))
    iter=1
    #
    # main iteration loop
    #
    while not fnrm<=stop_tol:
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        #
        # adjust subtol_: Eisenstat-Walker style forcing term based on the
        # ratio of consecutive residual norms, clipped to [.5*stop_tol/fnrm,
        # subtol_max] to avoid oversolving
        #
        if iter > 1:
            rat=fnrm/fnrmo
            subtol_old=subtol
            subtol=gamma*rat**2
            if gamma*subtol_old**2 > .1: subtol=max(subtol,gamma*subtol_old**2)
            subtol=max(min(subtol,subtol_max), .5*stop_tol/fnrm)
        #
        # calculate newton increment xc
        #    if iter_max in __FDGMRES is reached MaxIterReached is thrown
        #    if iter_restart -1 is returned as sub_iter
        #    if  atol is reached sub_iter returns the numer of steps performed to get there
        #
        if verbose: print((" subiteration (GMRES) is called with relative tolerance %e."%subtol))
        try:
            xc, sub_iter=__FDGMRES(F, defect, x, subtol*fnrm, iter_max=iter_max-iter, iter_restart=sub_iter_max)
        except MaxIterReached:
            raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        if sub_iter<0:
            iter+=sub_iter_max
        else:
            iter+=sub_iter
        # apply the Newton correction and re-evaluate the defect
        x+=xc
        F=defect(x)
        iter+=1
        fnrmo, fnrm=fnrm, defect.norm(F)
        if verbose: print((" step %s: residual %e."%(iter,fnrm)))
    if verbose: print(("NewtonGMRES: completed after %s steps."%iter))
    return x
def __givapp(c, s, vin):
    """
    Applies a sequence of Givens rotations (c,s) recursively to the vector
    ``vin``.

    A single rotation is applied when ``c`` is a plain ``float``; otherwise
    ``c`` and ``s`` are treated as parallel sequences of rotation
    coefficients applied to consecutive component pairs.

    :warning: ``vin`` is altered.
    """
    rotated = vin
    if isinstance(c, float):
        rotated = [c*rotated[0] - s*rotated[1], s*rotated[0] + c*rotated[1]]
    else:
        for k in range(len(c)):
            upper = c[k]*rotated[k] - s[k]*rotated[k+1]
            lower = s[k]*rotated[k] + c[k]*rotated[k+1]
            rotated[k] = upper
            rotated[k+1] = lower
    return rotated
def __FDGMRES(F0, defect, x0, atol, iter_max=100, iter_restart=20):
    """
    Finite-difference GMRES inner solver for `NewtonGMRES`.

    Approximately solves the Newton correction equation F'(x0)*xhat = -F0
    using GMRES, where the Jacobian-vector products are obtained from
    ``defect.derivative`` (finite differences).

    :param F0: defect value at ``x0``
    :param defect: `Defect` instance providing norm, bilinearform and
                   derivative
    :param x0: current Newton iterate
    :param atol: absolute tolerance on the inner residual
    :param iter_max: maximum number of iteration steps
    :param iter_restart: maximal Krylov subspace dimension
    :return: tuple ``(xhat, stopped)`` where ``stopped`` is the number of
             steps used if ``atol`` was reached early, and -1 otherwise
    """
    h=numpy.zeros((iter_restart,iter_restart),numpy.float64)   # Hessenberg matrix
    c=numpy.zeros(iter_restart,numpy.float64)                  # Givens cosines
    s=numpy.zeros(iter_restart,numpy.float64)                  # Givens sines
    g=numpy.zeros(iter_restart,numpy.float64)                  # least-squares rhs
    v=[]                                                       # Krylov basis

    rho=defect.norm(F0)
    if rho<=0.: return x0*0           # already converged: zero correction

    v.append(-F0/rho)                 # rhs of the Newton equation is -F0
    g[0]=rho
    iter=0
    while rho > atol and iter<iter_restart-1:
        if iter >= iter_max:
            raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        # Jacobian-vector product by finite differences
        p=defect.derivative(F0,x0,v[iter], v_is_normalised=True)
        v.append(p)
        v_norm1=defect.norm(v[iter+1])
        # Modified Gram-Schmidt
        for j in range(iter+1):
            h[j,iter]=defect.bilinearform(v[j],v[iter+1])
            v[iter+1]-=h[j,iter]*v[j]
        h[iter+1,iter]=defect.norm(v[iter+1])
        v_norm2=h[iter+1,iter]
        # Reorthogonalize if needed
        if v_norm1 + 0.001*v_norm2 == v_norm1:   #Brown/Hindmarsh condition (default)
            for j in range(iter+1):
                hr=defect.bilinearform(v[j],v[iter+1])
                h[j,iter]=h[j,iter]+hr
                v[iter+1] -= hr*v[j]
            v_norm2=defect.norm(v[iter+1])
            h[iter+1,iter]=v_norm2
        # watch out for happy breakdown
        if not v_norm2 == 0:
            v[iter+1]=v[iter+1]/h[iter+1,iter]
        # Form and store the information for the new Givens rotation:
        # first apply all previous rotations to the new Hessenberg column
        if iter > 0 :
            hhat=numpy.zeros(iter+1,numpy.float64)
            for i in range(iter+1) : hhat[i]=h[i,iter]
            hhat=__givapp(c[0:iter],s[0:iter],hhat);
            for i in range(iter+1) : h[i,iter]=hhat[i]
        mu=math.sqrt(h[iter,iter]*h[iter,iter]+h[iter+1,iter]*h[iter+1,iter])
        if mu!=0 :
            c[iter]=h[iter,iter]/mu
            s[iter]=-h[iter+1,iter]/mu
            h[iter,iter]=c[iter]*h[iter,iter]-s[iter]*h[iter+1,iter]
            h[iter+1,iter]=0.0
            gg=__givapp(c[iter],s[iter],[g[iter],g[iter+1]])
            g[iter]=gg[0]
            g[iter+1]=gg[1]
        # Update the residual norm
        rho=abs(g[iter+1])
        iter+=1
    # At this point either iter > iter_max or rho < tol.
    # It's time to compute x and leave: back-substitution on the triangular
    # system, then assemble the correction from the Krylov basis.
    if iter > 0 :
        y=numpy.zeros(iter,numpy.float64)
        y[iter-1] = g[iter-1] / h[iter-1,iter-1]
        if iter > 1 :
            i=iter-2
            while i>=0 :
                y[i] = ( g[i] - numpy.dot(h[i,i+1:iter], y[i+1:iter])) / h[i,i]
                i=i-1
        xhat=v[iter-1]*y[iter-1]
        for i in range(iter-1):
            xhat += v[i]*y[i]
    else :
        xhat=v[0] * 0
    if iter<iter_restart-1:
        stopped=iter
    else:
        stopped=-1
    return xhat,stopped
def GMRES(r, Aprod, x, bilinearform, atol=0, rtol=1.e-8, iter_max=100, iter_restart=20, verbose=False,P_R=None):
    """
    Solver for

    *Ax=b*

    with a general operator A (more details required!).
    It uses the generalized minimum residual method (GMRES).

    The iteration is terminated if

    *|r| <= atol+rtol*|r0|*

    where *r0* is the initial residual and *|.|* is the energy norm. In fact

    *|r| = sqrt( bilinearform(r,r))*

    :param r: initial residual *r=b-Ax*. ``r`` is altered.
    :type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param x: an initial guess for the solution
    :type x: same like ``r``
    :param Aprod: returns the value Ax
    :type Aprod: function ``Aprod(x)`` where ``x`` is of the same object like
                 argument ``x``. The returned object needs to be of the same
                 type like argument ``r``.
    :param bilinearform: inner product ``<x,r>``
    :type bilinearform: function ``bilinearform(x,r)`` where ``x`` is of the same
                        type like argument ``x`` and ``r``. The returned value is
                        a ``float``.
    :param atol: absolute tolerance
    :type atol: non-negative ``float``
    :param rtol: relative tolerance
    :type rtol: non-negative ``float``
    :param iter_max: maximum number of iteration steps
    :type iter_max: ``int``
    :param iter_restart: in order to save memory the orthogonalization process
                         is terminated after ``iter_restart`` steps and the
                         iteration is restarted.
    :type iter_restart: ``int``
    :return: the solution approximation and the corresponding residual
    :rtype: ``tuple``
    :warning: ``r`` and ``x`` are altered.
    """
    m=iter_restart
    restarted=False
    iter=0
    if rtol>0:
        r_dot_r = bilinearform(r, r)
        if r_dot_r<0: raise NegativeNorm("negative norm.")
        atol2=atol+rtol*math.sqrt(r_dot_r)
        if verbose: print(("GMRES: norm of right hand side = %e (absolute tolerance = %e)"%(math.sqrt(r_dot_r), atol2)))
    else:
        atol2=atol
        if verbose: print(("GMRES: absolute tolerance = %e"%atol2))
    if atol2<=0:
        # NOTE(review): "tolarance" typo kept intact — callers may match on
        # the message text
        raise ValueError("Non-positive tolarance.")
    # restart loop: each pass runs one restart cycle of at most m steps
    while True:
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached"%iter_max)
        if restarted:
            # recompute the residual for the progress made since the last
            # restart (x2 is the iterate saved below)
            r2 = r-Aprod(x-x2)
        else:
            r2=1*r          # work on a copy so r keeps the initial residual
        x2=x*1.             # snapshot of x for the next restart residual
        x,stopped=_GMRESm(r2, Aprod, x, bilinearform, atol2, iter_max=iter_max-iter, iter_restart=m, verbose=verbose,P_R=P_R)
        iter+=iter_restart
        if stopped: break
        if verbose: print("GMRES: restart.")
        restarted=True
    if verbose: print("GMRES: tolerance has been reached.")
    return x
def _GMRESm(r, Aprod, x, bilinearform, atol, iter_max=100, iter_restart=20, verbose=False, P_R=None):
    """
    Internal single-restart-cycle worker for `GMRES`.

    Runs up to ``iter_restart`` Arnoldi steps (modified Gram-Schmidt with
    optional reorthogonalization and Givens-rotation updates of the
    least-squares problem) on the residual ``r`` and adds the resulting
    correction to ``x``.

    :param r: current residual
    :param Aprod: returns the value Ax
    :param x: current solution approximation; updated in place and returned
    :param bilinearform: inner product ``<x,r>``
    :param atol: absolute tolerance on the residual norm
    :param iter_max: maximum number of iteration steps
    :param iter_restart: Krylov subspace size before restarting
    :param verbose: print progress information
    :param P_R: optional right preconditioner applied to the Krylov vectors
    :return: tuple ``(x, stopped)``; ``stopped`` is True when ``atol`` was
             reached before the restart length was exhausted
    """
    iter=0
    h=numpy.zeros((iter_restart+1,iter_restart),numpy.float64)   # Hessenberg matrix
    c=numpy.zeros(iter_restart,numpy.float64)                    # Givens cosines
    s=numpy.zeros(iter_restart,numpy.float64)                    # Givens sines
    g=numpy.zeros(iter_restart+1,numpy.float64)                  # least-squares rhs
    v=[]                                                         # Krylov basis

    r_dot_r = bilinearform(r, r)
    if r_dot_r<0: raise NegativeNorm("negative norm.")
    rho=math.sqrt(r_dot_r)

    v.append(r/rho)
    g[0]=rho

    if verbose: print(("GMRES: initial residual %e (absolute tolerance = %e)"%(rho,atol)))
    while not (rho<=atol or iter==iter_restart):
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached."%iter_max)

        # IDIOM FIX: compare against None with "is not" rather than "!="
        if P_R is not None:
            p=Aprod(P_R(v[iter]))
        else:
            p=Aprod(v[iter])
        v.append(p)

        v_norm1=math.sqrt(bilinearform(v[iter+1], v[iter+1]))

        # Modified Gram-Schmidt
        for j in range(iter+1):
            h[j,iter]=bilinearform(v[j],v[iter+1])
            v[iter+1]-=h[j,iter]*v[j]

        h[iter+1,iter]=math.sqrt(bilinearform(v[iter+1],v[iter+1]))
        v_norm2=h[iter+1,iter]

        # Reorthogonalize if needed
        if v_norm1 + 0.001*v_norm2 == v_norm1:   #Brown/Hindmarsh condition (default)
            for j in range(iter+1):
                hr=bilinearform(v[j],v[iter+1])
                h[j,iter]=h[j,iter]+hr
                v[iter+1] -= hr*v[j]
            v_norm2=math.sqrt(bilinearform(v[iter+1], v[iter+1]))
            h[iter+1,iter]=v_norm2

        # watch out for happy breakdown
        if not v_norm2 == 0:
            v[iter+1]=v[iter+1]/h[iter+1,iter]

        # Form and store the information for the new Givens rotation
        if iter > 0: h[:iter+1,iter]=__givapp(c[:iter],s[:iter],h[:iter+1,iter])
        mu=math.sqrt(h[iter,iter]*h[iter,iter]+h[iter+1,iter]*h[iter+1,iter])

        if mu!=0 :
            c[iter]=h[iter,iter]/mu
            s[iter]=-h[iter+1,iter]/mu
        h[iter,iter]=c[iter]*h[iter,iter]-s[iter]*h[iter+1,iter]
        h[iter+1,iter]=0.0

        gg=__givapp(c[iter],s[iter],[g[iter],g[iter+1]])
        g[iter]=gg[0]
        g[iter+1]=gg[1]

        # Update the residual norm
        rho=abs(g[iter+1])
        if verbose: print(("GMRES: iteration step %s: residual %e"%(iter,rho)))
        iter+=1

    # At this point either iter > iter_max or rho < tol.
    # It's time to compute x and leave: back-substitution followed by
    # assembling the correction from the Krylov basis.
    if verbose: print(("GMRES: iteration stopped after %s step."%iter))
    if iter > 0 :
        y=numpy.zeros(iter,numpy.float64)
        y[iter-1] = g[iter-1] / h[iter-1,iter-1]
        if iter > 1 :
            i=iter-2
            while i>=0 :
                y[i] = ( g[i] - numpy.dot(h[i,i+1:iter], y[i+1:iter])) / h[i,i]
                i=i-1
        xhat=v[iter-1]*y[iter-1]
        for i in range(iter-1):
            xhat += v[i]*y[i]
    else:
        xhat=v[0] * 0
    if P_R is not None:
        x += P_R(xhat)
    else:
        x += xhat
    if iter<iter_restart-1:
        stopped=True
    else:
        stopped=False
    return x,stopped
def MINRES(r, Aprod, x, Msolve, bilinearform, atol=0, rtol=1.e-8, iter_max=100):
    """
    Solver for

    *Ax=b*

    with a symmetric and positive definite operator A (more details required!).
    It uses the minimum residual method (MINRES) with preconditioner M
    providing an approximation of A.

    The iteration is terminated if

    *|r| <= atol+rtol*|r0|*

    where *r0* is the initial residual and *|.|* is the energy norm. In fact

    *|r| = sqrt( bilinearform(Msolve(r),r))*

    For details on the preconditioned conjugate gradient method see the book:
    "Templates for the Solution of Linear Systems by <NAME>, <NAME>,
    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
    <NAME>, and <NAME>".

    :param r: initial residual *r=b-Ax*. ``r`` is altered.
    :type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param x: an initial guess for the solution
    :type x: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param Aprod: returns the value Ax
    :type Aprod: function ``Aprod(x)`` where ``x`` is of the same object like
                 argument ``x``. The returned object needs to be of the same
                 type like argument ``r``.
    :param Msolve: solves Mx=r
    :type Msolve: function ``Msolve(r)`` where ``r`` is of the same type like
                  argument ``r``. The returned object needs to be of the same
                  type like argument ``x``.
    :param bilinearform: inner product ``<x,r>``
    :type bilinearform: function ``bilinearform(x,r)`` where ``x`` is of the same
                        type like argument ``x`` and ``r`` is. The returned value
                        is a ``float``.
    :param atol: absolute tolerance
    :type atol: non-negative ``float``
    :param rtol: relative tolerance
    :type rtol: non-negative ``float``
    :param iter_max: maximum number of iteration steps
    :type iter_max: ``int``
    :return: the solution approximation and the corresponding residual
    :rtype: ``tuple``
    :warning: ``r`` and ``x`` are altered.
    """
    #------------------------------------------------------------------
    # Set up y and v for the first Lanczos vector v1.
    # y  =  beta1 P' v1,  where  P = C**(-1).
    # v is really P' v1.
    #------------------------------------------------------------------
    r1 = r
    y = Msolve(r)
    beta1 = bilinearform(y,r)
    if beta1< 0: raise NegativeNorm("negative norm.")
    #  If r = 0 exactly, stop with x
    if beta1==0: return x
    if beta1> 0: beta1 = math.sqrt(beta1)
    #------------------------------------------------------------------
    # Initialize quantities.
    # ------------------------------------------------------------------
    iter   = 0
    Anorm = 0
    ynorm = 0
    oldb   = 0
    beta   = beta1
    dbar   = 0
    epsln  = 0
    phibar = beta1
    rhs1   = beta1
    rhs2   = 0
    rnorm  = phibar
    tnorm2 = 0
    ynorm2 = 0
    cs     = -1
    sn     = 0
    w      = r*0.
    w2     = r*0.
    r2     = r1
    eps    = 0.0001
    #---------------------------------------------------------------------
    # Main iteration loop.
    # --------------------------------------------------------------------
    while not rnorm<=atol+rtol*Anorm*ynorm:    # checks ||r|| < (||A|| ||x||) * TOL
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        iter = iter + 1
        #-----------------------------------------------------------------
        # Obtain quantities for the next Lanczos vector vk+1, k = 1, 2,...
        # The general iteration is similar to the case k = 1 with v0 = 0:
        #
        #   p1      = Operator * v1  -  beta1 * v0,
        #   alpha1  = v1'p1,
        #   q2      = p2  -  alpha1 * v1,
        #   beta2^2 = q2'q2,
        #   v2      = (1/beta2) q2.
        #
        # Again, y = betak P vk,  where  P = C**(-1).
        #-----------------------------------------------------------------
        s = 1/beta                 # Normalize previous vector (in y).
        v = s*y                    # v = vk if P = I
        y = Aprod(v)
        if iter >= 2:
            y = y - (beta/oldb)*r1   # Lanczos three-term recurrence
        alfa = bilinearform(v,y)              # alphak
        y += (- alfa/beta)*r2
        r1 = r2
        r2 = y
        y = Msolve(r2)
        oldb = beta                           # oldb = betak
        beta = bilinearform(y,r2)             # beta = betak+1^2
        if beta < 0: raise NegativeNorm("negative norm.")
        beta = math.sqrt( beta )
        tnorm2 = tnorm2 + alfa*alfa + oldb*oldb + beta*beta
        if iter==1:                  # Initialize a few things.
            gmax = abs( alfa )      # alpha1
            gmin = gmax             # alpha1
        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].
        oldeps = epsln
        delta = cs * dbar + sn * alfa    # delta1 = 0         deltak
        gbar = sn * dbar - cs * alfa     # gbar 1 = alfa1     gbar k
        epsln = sn * beta                # epsln2 = 0         epslnk+1
        dbar = - cs * beta               # dbar 2 = beta2     dbar k+1
        # Compute the next plane rotation Qk
        gamma = math.sqrt(gbar*gbar+beta*beta)    # gammak
        gamma = max(gamma,eps)           # guard against breakdown
        cs = gbar / gamma                # ck
        sn = beta / gamma                # sk
        phi = cs * phibar                # phik
        phibar = sn * phibar             # phibark+1
        # Update x.
        denom = 1/gamma
        w1 = w2
        w2 = w
        w = (v - oldeps*w1 - delta*w2) * denom
        x += phi*w
        # Go round again.
        gmax = max(gmax,gamma)
        gmin = min(gmin,gamma)
        z = rhs1 / gamma
        ynorm2 = z*z + ynorm2
        rhs1 = rhs2 - delta*z
        rhs2 = - epsln*z
        # Estimate various norms and test for convergence.
        Anorm = math.sqrt( tnorm2 )
        ynorm = math.sqrt( ynorm2 )
        rnorm = phibar
    return x
def TFQMR(r, Aprod, x, bilinearform, atol=0, rtol=1.e-8, iter_max=100):
    """
    Solver for

    *Ax=b*

    with a general operator A (more details required!).
    It uses the Transpose-Free Quasi-Minimal Residual method (TFQMR).

    The iteration is terminated if

    *|r| <= atol+rtol*|r0|*

    where *r0* is the initial residual and *|.|* is the energy norm. In fact

    *|r| = sqrt( bilinearform(r,r))*

    :param r: initial residual *r=b-Ax*. ``r`` is altered.
    :type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
    :param x: an initial guess for the solution
    :type x: same like ``r``
    :param Aprod: returns the value Ax
    :type Aprod: function ``Aprod(x)`` where ``x`` is of the same object like
                 argument ``x``. The returned object needs to be of the same type
                 like argument ``r``.
    :param bilinearform: inner product ``<x,r>``
    :type bilinearform: function ``bilinearform(x,r)`` where ``x`` is of the same
                        type like argument ``x`` and ``r``. The returned value is
                        a ``float``.
    :param atol: absolute tolerance
    :type atol: non-negative ``float``
    :param rtol: relative tolerance
    :type rtol: non-negative ``float``
    :param iter_max: maximum number of iteration steps
    :type iter_max: ``int``
    :rtype: ``tuple``
    :warning: ``r`` and ``x`` are altered.
    """
    u1=0
    u2=0
    y1=0
    y2=0
    w = r
    y1 = r
    iter = 0
    d = 0
    v = Aprod(y1)
    u1 = v
    theta = 0.0;
    eta = 0.0;
    rho=bilinearform(r,r)
    if rho < 0: raise NegativeNorm("negative norm.")
    tau = math.sqrt(rho)
    norm_r0=tau
    while tau>atol+rtol*norm_r0:
        if iter >= iter_max: raise MaxIterReached("maximum number of %s steps reached."%iter_max)
        sigma = bilinearform(r,v)
        if sigma == 0.0: raise IterationBreakDown('TFQMR breakdown, sigma=0')
        alpha = rho / sigma
        # each outer step consists of two half-steps (j=0 and j=1)
        for j in range(2):
            #
            #   Compute y2 and u2 only if you have to
            #
            if ( j == 1 ):
                y2 = y1 - alpha * v
                u2 = Aprod(y2)
            # NOTE(review): m is computed but never used
            m = 2 * (iter+1) - 2 + (j+1)
            if j==0:
                w = w - alpha * u1
                d = y1 + ( theta * theta * eta / alpha ) * d
            if j==1:
                w = w - alpha * u2
                d = y2 + ( theta * theta * eta / alpha ) * d
            # quasi-minimization update of the iterate
            theta = math.sqrt(bilinearform(w,w))/ tau
            c = 1.0 / math.sqrt ( 1.0 + theta * theta )
            tau = tau * theta * c
            eta = c * c * alpha
            x = x + eta * d
        #
        #  Try to terminate the iteration at each pass through the loop
        #
        if rho == 0.0: raise IterationBreakDown('TFQMR breakdown, rho=0')
        rhon = bilinearform(r,w)
        beta = rhon / rho;
        rho = rhon;
        y1 = w + beta * y2;
        u1 = Aprod(y1)
        v = u1 + beta * ( u2 + beta * v )
        iter += 1
    return x
#############################################
class ArithmeticTuple(object):
"""
Tuple supporting inplace update x+=y and scaling x=a*y where ``x,y`` is an
ArithmeticTuple and ``a`` is a float.
Example of usage::
from esys.escript import Data
from numpy import array
a=eData(...)
b=array([1.,4.])
x=ArithmeticTuple(a,b)
y=5.*x
"""
def __init__(self,*args):
"""
Initializes object with elements ``args``.
:param args: tuple of objects that support inplace add (x+=y) and
scaling (x=a*y)
"""
self.__items=list(args)
def __len__(self):
"""
Returns the number of items.
:return: number of items
:rtype: ``int``
"""
return len(self.__items)
def __getitem__(self,index):
"""
Returns item at specified position.
:param index: index of item to be returned
:type index: ``int``
:return: item with index ``index``
"""
return self.__items.__getitem__(index)
def __mul__(self,other):
    """
    Scales by ``other`` from the right.

    :param other: scaling factor
    :type other: ``float``
    :return: itemwise self*other
    :rtype: `ArithmeticTuple`
    """
    out=[]
    # EAFP dispatch: len(other) raises TypeError for scalars, which routes
    # execution to the scalar branch below
    try:
        l=len(other)
        if l!=len(self):
            raise ValueError("length of arguments don't match.")
        for i in range(l):
            # an empty component on either side yields an empty result
            if self.__isEmpty(self[i]) or self.__isEmpty(other[i]):
                out.append(escore.Data())
            else:
                out.append(self[i]*other[i])
    except TypeError:
        # ``other`` is a scalar applied to every component
        for i in range(len(self)):
            if self.__isEmpty(self[i]) or self.__isEmpty(other):
                out.append(escore.Data())
            else:
                out.append(self[i]*other)
    return ArithmeticTuple(*tuple(out))
def __rmul__(self,other):
    """
    Scales by ``other`` from the left.

    :param other: scaling factor
    :type other: ``float``
    :return: itemwise other*self
    :rtype: `ArithmeticTuple`
    """
    out=[]
    # EAFP dispatch: len(other) raises TypeError for scalars, which routes
    # execution to the scalar branch below
    try:
        l=len(other)
        if l!=len(self):
            raise ValueError("length of arguments don't match.")
        for i in range(l):
            # an empty component on either side yields an empty result
            if self.__isEmpty(self[i]) or self.__isEmpty(other[i]):
                out.append(escore.Data())
            else:
                out.append(other[i]*self[i])
    except TypeError:
        # ``other`` is a scalar applied from the left to every component
        for i in range(len(self)):
            if self.__isEmpty(self[i]) or self.__isEmpty(other):
                out.append(escore.Data())
            else:
                out.append(other*self[i])
    return ArithmeticTuple(*tuple(out))
def __div__(self, other):
    """
    Scales by (1/``other``) from the right.

    :param other: scaling factor
    :type other: ``float``
    :return: itemwise self/other
    :rtype: `ArithmeticTuple`
    """
    # delegate to __mul__ with the reciprocal factor
    factor = 1/other
    return self*factor
def __rdiv__(self,other):
    """
    Scales by (1/``other``) from the left.

    :param other: scaling factor
    :type other: ``float``
    :return: itemwise other/self
    :rtype: `ArithmeticTuple`
    """
    out=[]
    # EAFP dispatch: len(other) raises TypeError for scalars, which routes
    # execution to the scalar branch below
    try:
        l=len(other)
        if l!=len(self):
            raise ValueError("length of arguments don't match.")
        for i in range(l):
            # dividing by an empty component is treated as division by zero
            if self.__isEmpty(self[i]):
                raise ZeroDivisionError("in component %s"%i)
            else:
                if self.__isEmpty(other[i]):
                    out.append(escore.Data())
                else:
                    out.append(other[i]/self[i])
    except TypeError:
        # ``other`` is a scalar numerator for every component
        for i in range(len(self)):
            if self.__isEmpty(self[i]):
                raise ZeroDivisionError("in component %s"%i)
            else:
                if self.__isEmpty(other):
                    out.append(escore.Data())
                else:
                    out.append(other/self[i])
    return ArithmeticTuple(*tuple(out))
def __iadd__(self, other):
    """
    Inplace addition of ``other`` to self.

    :param other: increment
    :type other: ``ArithmeticTuple``
    """
    if len(self) != len(other):
        raise ValueError("tuple lengths must match.")
    for k in range(len(self)):
        if self.__isEmpty(self.__items[k]):
            # nothing stored yet -> simply adopt the increment
            self.__items[k] = other[k]
        else:
            self.__items[k] += other[k]
    return self
def __add__(self, other):
    """
    Adds ``other`` to self.

    :param other: increment (sequence of matching length, or a scalar
                  applied to every component)
    :type other: ``ArithmeticTuple``
    :return: itemwise self+other
    :rtype: `ArithmeticTuple`
    """
    sums = []
    try:
        n = len(other)
        if n != len(self):
            raise ValueError("length of arguments don't match.")
        for k in range(n):
            a, b = self[k], other[k]
            if self.__isEmpty(a):
                sums.append(b)
            elif self.__isEmpty(b):
                sums.append(a)
            else:
                sums.append(a + b)
    except TypeError:
        # ``other`` has no length -> scalar increment for every component:
        for k in range(len(self)):
            a = self[k]
            if self.__isEmpty(a):
                sums.append(other)
            elif self.__isEmpty(other):
                sums.append(a)
            else:
                sums.append(a + other)
    return ArithmeticTuple(*tuple(sums))
def __sub__(self, other):
    """
    Subtracts ``other`` from self.

    :param other: decrement (sequence of matching length, or a scalar
                  applied to every component)
    :type other: ``ArithmeticTuple``
    :return: itemwise self-other
    :rtype: `ArithmeticTuple`
    """
    diffs = []
    try:
        n = len(other)
        if n != len(self):
            raise ValueError("length of arguments don't match.")
        for k in range(n):
            b = other[k]
            if self.__isEmpty(b):
                diffs.append(self[k])
            elif self.__isEmpty(self[k]):
                diffs.append(-b)
            else:
                diffs.append(self[k] - b)
    except TypeError:
        # ``other`` has no length -> scalar decrement for every component:
        for k in range(len(self)):
            if self.__isEmpty(other):
                diffs.append(self[k])
            elif self.__isEmpty(self[k]):
                diffs.append(-other)
            else:
                diffs.append(self[k] - other)
    return ArithmeticTuple(*tuple(diffs))
def __isub__(self, other):
    """
    Inplace subtraction of ``other`` from self.

    :param other: decrement
    :type other: ``ArithmeticTuple``
    """
    if len(self) != len(other):
        raise ValueError("tuple length must match.")
    for i in range(len(self)):
        if not self.__isEmpty(other[i]):
            if self.__isEmpty(self.__items[i]):
                self.__items[i] = -other[i]
            else:
                # BUG FIX: the decrement must be subtracted, not assigned
                # (mirrors ``+=`` in ``__iadd__``); the old code replaced
                # the component with ``other[i]``.
                self.__items[i] -= other[i]
    return self
def __neg__(self):
    """
    Negates values.

    Empty components stay empty; every other component is negated.

    :return: itemwise -self
    :rtype: `ArithmeticTuple`
    """
    negated = [escore.Data() if self.__isEmpty(self[k]) else -self[k]
               for k in range(len(self))]
    return ArithmeticTuple(*tuple(negated))
def __isEmpty(self, d):
    # Only escript ``Data`` objects can be "empty"; any other value
    # (floats, arrays, ...) counts as a regular, non-empty component.
    return d.isEmpty() if isinstance(d, escore.Data) else False
def __str__(self):
    """
    Returns the representation "(item0, item1, ..., )".
    """
    # note: the historic format keeps a trailing ", " before the bracket
    return "(" + "".join(str(item) + ", " for item in self) + ")"
class HomogeneousSaddlePointProblem(object):
    """
    This class provides a framework for solving linear homogeneous saddle
    point problems of the form::

        *Av+B^*p=f*
        *Bv     =0*

    for the unknowns *v* and *p* and given operators *A* and *B* and
    given right hand side *f*. *B^** is the adjoint operator of *B*.
    *A* may depend weakly on *v* and *p*.

    Subclasses must implement the abstract operator methods (``getDV``,
    ``Bv``, ``norm_Bv``, ``norm_v``, ``inner_p``, ``inner_pBv``,
    ``solve_AinvBt``, ``solve_prec``); ``solve`` then drives the outer
    correction iteration.
    """
    def __init__(self, **kwargs):
        """
        initializes the saddle point problem
        """
        self.resetControlParameters()
        self.setTolerance()
        self.setAbsoluteTolerance()

    def resetControlParameters(self, K_p=1., K_v=1., rtol_max=0.01, rtol_min = 1.e-7, chi_max=0.5, reduction_factor=0.3, theta = 0.1):
        """
        sets the control parameters back to their defaults

        :param K_p: initial value for constant to adjust pressure tolerance
        :type K_p: ``float``
        :param K_v: initial value for constant to adjust velocity tolerance
        :type K_v: ``float``
        :param rtol_max: maximum relative tolerance used to calculate pressure and velocity increment.
        :type rtol_max: ``float``
        :param rtol_min: minimum relative tolerance used to calculate pressure and velocity increment.
        :type rtol_min: ``float``
        :param chi_max: maximum tolerable convergence rate.
        :type chi_max: ``float``
        :param reduction_factor: reduction factor for adjustment factors.
        :type reduction_factor: ``float``
        :param theta: weighting factor deciding when a pressure correction is needed.
        :type theta: ``float``
        """
        self.setControlParameter(K_p, K_v, rtol_max, rtol_min, chi_max, reduction_factor, theta)

    def setControlParameter(self,K_p=None, K_v=None, rtol_max=None, rtol_min=None, chi_max=None, reduction_factor=None, theta=None):
        """
        sets a control parameter; any argument left as ``None`` keeps its
        current value.

        :param K_p: initial value for constant to adjust pressure tolerance
        :type K_p: ``float``
        :param K_v: initial value for constant to adjust velocity tolerance
        :type K_v: ``float``
        :param rtol_max: maximum relative tolerance used to calculate pressure and velocity increment.
        :type rtol_max: ``float``
        :param rtol_min: minimum relative tolerance used to calculate pressure and velocity increment.
        :type rtol_min: ``float``
        :param chi_max: maximum tolerable convergence rate.
        :type chi_max: ``float``
        :param reduction_factor: reduction factor for adjustment factors.
        :type reduction_factor: ``float``
        :param theta: weighting factor deciding when a pressure correction is needed.
        :type theta: ``float``
        """
        # Validate each argument; ``None`` means "keep the current setting":
        if not K_p is None:
            if K_p<1:
                raise ValueError("K_p need to be greater or equal to 1.")
        else:
            K_p=self.__K_p
        if not K_v is None:
            if K_v<1:
                raise ValueError("K_v need to be greater or equal to 1.")
        else:
            K_v=self.__K_v
        if not rtol_max is None:
            if rtol_max<=0 or rtol_max>=1:
                raise ValueError("rtol_max needs to be positive and less than 1.")
        else:
            rtol_max=self.__rtol_max
        if not rtol_min is None:
            if rtol_min<=0 or rtol_min>=1:
                raise ValueError("rtol_min needs to be positive and less than 1.")
        else:
            rtol_min=self.__rtol_min
        if not chi_max is None:
            if chi_max<=0 or chi_max>=1:
                raise ValueError("chi_max needs to be positive and less than 1.")
        else:
            chi_max = self.__chi_max
        if not reduction_factor is None:
            if reduction_factor<=0 or reduction_factor>1:
                raise ValueError("reduction_factor need to be between zero and one.")
        else:
            reduction_factor=self.__reduction_factor
        if not theta is None:
            if theta<=0 or theta>1:
                raise ValueError("theta need to be between zero and one.")
        else:
            theta=self.__theta
        # the two tolerance bounds must form a non-empty interval:
        if rtol_min>=rtol_max:
            raise ValueError("rtol_max = %e needs to be greater than rtol_min = %e"%(rtol_max,rtol_min))
        self.__chi_max = chi_max
        self.__rtol_max = rtol_max
        self.__K_p = K_p
        self.__K_v = K_v
        self.__reduction_factor = reduction_factor
        self.__theta = theta
        self.__rtol_min=rtol_min

    #=============================================================
    def inner_pBv(self,p,Bv):
        """
        Returns inner product of element p and Bv (overwrite).

        :param p: a pressure increment
        :param Bv: a residual
        :return: inner product of element p and Bv
        :rtype: ``float``
        :note: used if PCG is applied.
        """
        raise NotImplementedError("no inner product for p and Bv implemented.")

    def inner_p(self,p0,p1):
        """
        Returns inner product of p0 and p1 (overwrite).

        :param p0: a pressure
        :param p1: a pressure
        :return: inner product of p0 and p1
        :rtype: ``float``
        """
        raise NotImplementedError("no inner product for p implemented.")

    def norm_v(self,v):
        """
        Returns the norm of v (overwrite).

        :param v: a velocity
        :return: norm of v
        :rtype: non-negative ``float``
        """
        raise NotImplementedError("no norm of v implemented.")

    def getDV(self, p, v, tol):
        """
        return a correction to the value for a given v and a given p with accuracy `tol` (overwrite)

        :param p: pressure
        :param v: velocity
        :return: dv given as *dv= A^{-1} (f-A v-B^*p)*
        :note: Only *A* may depend on *v* and *p*
        """
        raise NotImplementedError("no dv calculation implemented.")

    def Bv(self,v, tol):
        """
        Returns Bv with accuracy `tol` (overwrite)

        :rtype: equal to the type of p
        :note: boundary conditions on p should be zero!
        """
        raise NotImplementedError("no operator B implemented.")

    def norm_Bv(self,Bv):
        """
        Returns the norm of Bv (overwrite).

        :rtype: equal to the type of p
        :note: boundary conditions on p should be zero!
        """
        raise NotImplementedError("no norm of Bv implemented.")

    def solve_AinvBt(self,dp, tol):
        """
        Solves *A dv=B^*dp* with accuracy `tol`

        :param dp: a pressure increment
        :return: the solution of *A dv=B^*dp*
        :note: boundary conditions on dv should be zero! *A* is the operator used in ``getDV`` and must not be altered.
        """
        raise NotImplementedError("no operator A implemented.")

    def solve_prec(self,Bv, tol):
        """
        Provides a preconditioner for *(BA^{-1}B^ * )* applied to Bv with accuracy `tol`

        :rtype: equal to the type of p
        :note: boundary conditions on p should be zero!
        """
        raise NotImplementedError("no preconditioner for Schur complement implemented.")

    #=============================================================
    # internal wrappers used as callbacks by the PCG solver:
    def __Aprod_PCG(self,dp):
        # operator application for PCG: dv = A^{-1}B^*dp, returned
        # together with B dv so the residual can be tracked
        dv=self.solve_AinvBt(dp, self.__subtol)
        return ArithmeticTuple(dv,self.Bv(dv, self.__subtol))

    def __inner_PCG(self,p,r):
        # PCG inner product uses the Bv part (second component) of the residual
        return self.inner_pBv(p,r[1])

    def __Msolve_PCG(self,r):
        # preconditioner application on the Bv part of the residual
        return self.solve_prec(r[1], self.__subtol)

    #=============================================================
    # internal wrappers used as callbacks by the GMRES solver:
    def __Aprod_GMRES(self,p):
        return self.solve_prec(self.Bv(self.solve_AinvBt(p, self.__subtol), self.__subtol), self.__subtol)

    def __inner_GMRES(self,p0,p1):
        return self.inner_p(p0,p1)

    #=============================================================
    def norm_p(self,p):
        """
        calculates the norm of ``p``

        :param p: a pressure
        :return: the norm of ``p`` using the inner product for pressure
        :rtype: ``float``
        :raise ValueError: if the inner product is negative
        """
        f=self.inner_p(p,p)
        if f<0: raise ValueError("negative pressure norm.")
        return math.sqrt(f)

    def solve(self,v,p,max_iter=20, verbose=False, usePCG=True, iter_restart=20, max_correction_steps=10):
        """
        Solves the saddle point problem using initial guesses v and p.

        :param v: initial guess for velocity
        :param p: initial guess for pressure
        :type v: `Data`
        :type p: `Data`
        :param usePCG: indicates the usage of the PCG rather than GMRES scheme.
        :param max_iter: maximum number of iteration steps per correction
                         attempt
        :param verbose: if True, shows information on the progress of the
                        saddlepoint problem solver.
        :param iter_restart: restart the iteration after ``iter_restart`` steps
                             (only used if usePCG=False)
        :type usePCG: ``bool``
        :type max_iter: ``int``
        :type verbose: ``bool``
        :type iter_restart: ``int``
        :rtype: ``tuple`` of `Data` objects
        :note: typically this method is overwritten by a subclass. It provides a wrapper for the ``_solve`` method.
        """
        return self._solve(v=v,p=p,max_iter=max_iter,verbose=verbose, usePCG=usePCG, iter_restart=iter_restart, max_correction_steps=max_correction_steps)

    def _solve(self,v,p,max_iter=20, verbose=False, usePCG=True, iter_restart=20, max_correction_steps=10):
        """
        see `solve` method.
        """
        self.verbose=verbose
        rtol=self.getTolerance()
        atol=self.getAbsoluteTolerance()
        K_p=self.__K_p
        K_v=self.__K_v
        correction_step=0
        converged=False
        chi=None   # estimated convergence rate (None until two steps are done)
        eps=None   # size of the latest correction (None before the first step)
        if self.verbose: print(("HomogeneousSaddlePointProblem: start iteration: rtol= %e, atol=%e"%(rtol, atol)))
        while not converged:
            # get tolerance for velocity increment:
            if chi is None:
                rtol_v=self.__rtol_max
            else:
                rtol_v=min(chi/K_v,self.__rtol_max)
            rtol_v=max(rtol_v, self.__rtol_min)
            if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: rtol_v= %e"%(correction_step,rtol_v)))
            # get velocity increment:
            dv1=self.getDV(p,v,rtol_v)
            v1=v+dv1
            Bv1=self.Bv(v1, rtol_v)
            norm_Bv1=self.norm_Bv(Bv1)
            norm_dv1=self.norm_v(dv1)
            if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: norm_Bv1 = %e, norm_dv1 = %e"%(correction_step, norm_Bv1, norm_dv1)))
            # a pressure correction is only worthwhile if the divergence
            # residual dominates the velocity change:
            if norm_dv1*self.__theta < norm_Bv1:
                # get tolerance for pressure increment:
                large_Bv1=True
                if chi is None or eps is None:
                    rtol_p=self.__rtol_max
                else:
                    rtol_p=min(chi**2*eps/K_p/norm_Bv1, self.__rtol_max)
                self.__subtol=max(rtol_p**2, self.__rtol_min)
                if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: rtol_p= %e"%(correction_step,rtol_p)))
                # now we solve for the pressure increment dp from B*A^{-1}B^* dp = Bv1
                if usePCG:
                    dp,r,a_norm=PCG(ArithmeticTuple(v1,Bv1),self.__Aprod_PCG,0*p,self.__Msolve_PCG,self.__inner_PCG,atol=0, rtol=rtol_p,iter_max=max_iter, verbose=self.verbose)
                    v2=r[0]
                    Bv2=r[1]
                else:
                    # don't use!!!!
                    dp=GMRES(self.solve_prec(Bv1,self.__subtol),self.__Aprod_GMRES, 0*p, self.__inner_GMRES,atol=0, rtol=rtol_p,iter_max=max_iter, iter_restart=iter_restart, verbose=self.verbose)
                    dv2=self.solve_AinvBt(dp, self.__subtol)
                    v2=v1-dv2
                    Bv2=self.Bv(v2, self.__subtol)
                p2=p+dp
            else:
                # divergence residual is already small -> skip the pressure update:
                large_Bv1=False
                v2=v1
                p2=p
            # update business:
            norm_dv2=self.norm_v(v2-v)
            norm_v2=self.norm_v(v2)
            if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: v2 = %e, norm_dv2 = %e"%(correction_step, norm_v2, self.norm_v(v2-v))))
            # current correction size; keep the previous one to estimate
            # the convergence rate chi = eps/eps_old (capped at chi_max):
            eps, eps_old = max(norm_Bv1, norm_dv2), eps
            if eps_old is None:
                chi, chi_old = None, chi
            else:
                chi, chi_old = min(eps/ eps_old, self.__chi_max), chi
            if eps != None:
                if chi !=None:
                    if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: convergence rate = %e, correction = %e"%(correction_step,chi, eps)))
                else:
                    if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: correction = %e"%(correction_step, eps)))
                if eps <= rtol*norm_v2+atol :
                    converged = True
                else:
                    if correction_step>=max_correction_steps:
                        raise CorrectionFailed("Given up after %d correction steps."%correction_step)
                    if chi_old!=None:
                        # adapt the tolerance-control constants from the
                        # change of the observed convergence rate:
                        K_p=max(1,self.__reduction_factor*K_p,(chi-chi_old)/chi_old**2*K_p)
                        K_v=max(1,self.__reduction_factor*K_v,(chi-chi_old)/chi_old**2*K_p)
                        if self.verbose: print(("HomogeneousSaddlePointProblem: step %s: new adjustment factor K = %e"%(correction_step,K_p)))
            correction_step+=1
            v,p =v2, p2
        if self.verbose: print(("HomogeneousSaddlePointProblem: tolerance reached after %s steps."%correction_step))
        return v,p

    #========================================================================
    def setTolerance(self,tolerance=1.e-4):
        """
        Sets the relative tolerance for (v,p).

        :param tolerance: tolerance to be used
        :type tolerance: non-negative ``float``
        """
        if tolerance<0:
            raise ValueError("tolerance must be positive.")
        self.__rtol=tolerance

    def getTolerance(self):
        """
        Returns the relative tolerance.

        :return: relative tolerance
        :rtype: ``float``
        """
        return self.__rtol

    def setAbsoluteTolerance(self,tolerance=0.):
        """
        Sets the absolute tolerance.

        :param tolerance: tolerance to be used
        :type tolerance: non-negative ``float``
        """
        if tolerance<0:
            raise ValueError("tolerance must be non-negative.")
        self.__atol=tolerance

    def getAbsoluteTolerance(self):
        """
        Returns the absolute tolerance.

        :return: absolute tolerance
        :rtype: ``float``
        """
        return self.__atol
def MaskFromBoundaryTag(domain, *tags):
    """
    Creates a mask on the Solution(domain) function space where the value is
    one for samples that touch the boundary tagged by tags.

    Usage: m=MaskFromBoundaryTag(domain, "left", "right")

    :param domain: domain to be used
    :type domain: `escript.Domain`
    :param tags: boundary tags
    :type tags: ``str``
    :return: a mask which marks samples that are touching the boundary tagged
             by any of the given tags
    :rtype: `escript.Data` of rank 0
    """
    pde = linearPDEs.LinearPDE(domain, numEquations=1, numSolutions=1)
    # indicator that is 1 on the tagged boundary portions ...
    indicator = escore.Scalar(0., escore.FunctionOnBoundary(domain))
    for tag in tags:
        indicator.setTaggedValue(tag, 1.)
    # ... fed in as a boundary (lowercase y) source, so the right hand side
    # is non-zero exactly for samples touching that boundary:
    pde.setValue(y=indicator)
    return util.whereNonZero(pde.getRightHandSide())
def MaskFromTag(domain, *tags):
    """
    Creates a mask on the Solution(domain) function space where the value is
    one for samples that touch regions tagged by tags.

    Usage: m=MaskFromTag(domain, "ham")

    :param domain: domain to be used
    :type domain: `escript.Domain`
    :param tags: region tags
    :type tags: ``str``
    :return: a mask which marks samples that are touching the regions tagged
             by any of the given tags
    :rtype: `escript.Data` of rank 0
    """
    pde = linearPDEs.LinearPDE(domain, numEquations=1, numSolutions=1)
    # indicator that is 1 on the tagged volume regions ...
    indicator = escore.Scalar(0., escore.Function(domain))
    for tag in tags:
        indicator.setTaggedValue(tag, 1.)
    # ... fed in as a volume (uppercase Y) source, so the right hand side
    # is non-zero exactly for samples touching those regions:
    pde.setValue(Y=indicator)
    return util.whereNonZero(pde.getRightHandSide())
def BoundaryValuesFromVolumeTag(domain, **values):
    """
    Creates a `Data` object on the boundary of the domain. For each keyword
    argument ``tag=value`` the returned object takes the value ``value`` on
    the part of the boundary adjacent to the volume region tagged ``tag``.

    Usage: d=BoundaryValuesFromVolumeTag(domain, ham=1, f=6)

    :param domain: domain to be used
    :type domain: `escript.Domain`
    :param values: maps volume region tag names to the value to be set on
                   the adjacent part of the boundary
    :return: boundary data assembled from the tagged volume regions
    :rtype: `escript.Data` of rank 0

    (The old docstring, copied from `MaskFromTag`, wrongly claimed a mask
    is returned.)
    """
    pde = linearPDEs.LinearPDE(domain, numEquations=1, numSolutions=1)
    out = escore.Scalar(0., escore.FunctionOnBoundary(domain))
    for t, v in values.items():
        # indicator of the tagged volume region:
        d = escore.Scalar(0., escore.Function(domain))
        d.setTaggedValue(t, 1.)
        pde.setValue(Y=d)
        # samples touching the region get a non-zero right hand side; the
        # whereZero(...-1.) picks those boundary samples and the requested
        # value is added there:
        out += v*util.whereZero(util.interpolate(util.whereNonZero(pde.getRightHandSide()), escore.FunctionOnBoundary(domain))-1.)
    return out
| [
"numpy.dot",
"numpy.zeros",
"math.sqrt"
] | [((18362, 18383), 'math.sqrt', 'math.sqrt', (['rhat_dot_r'], {}), '(rhat_dot_r)\n', (18371, 18383), False, 'import math\n'), ((27064, 27120), 'numpy.zeros', 'numpy.zeros', (['(iter_restart, iter_restart)', 'numpy.float64'], {}), '((iter_restart, iter_restart), numpy.float64)\n', (27075, 27120), False, 'import numpy\n'), ((27124, 27164), 'numpy.zeros', 'numpy.zeros', (['iter_restart', 'numpy.float64'], {}), '(iter_restart, numpy.float64)\n', (27135, 27164), False, 'import numpy\n'), ((27169, 27209), 'numpy.zeros', 'numpy.zeros', (['iter_restart', 'numpy.float64'], {}), '(iter_restart, numpy.float64)\n', (27180, 27209), False, 'import numpy\n'), ((27214, 27254), 'numpy.zeros', 'numpy.zeros', (['iter_restart', 'numpy.float64'], {}), '(iter_restart, numpy.float64)\n', (27225, 27254), False, 'import numpy\n'), ((32527, 32587), 'numpy.zeros', 'numpy.zeros', (['(iter_restart + 1, iter_restart)', 'numpy.float64'], {}), '((iter_restart + 1, iter_restart), numpy.float64)\n', (32538, 32587), False, 'import numpy\n'), ((32589, 32629), 'numpy.zeros', 'numpy.zeros', (['iter_restart', 'numpy.float64'], {}), '(iter_restart, numpy.float64)\n', (32600, 32629), False, 'import numpy\n'), ((32634, 32674), 'numpy.zeros', 'numpy.zeros', (['iter_restart', 'numpy.float64'], {}), '(iter_restart, numpy.float64)\n', (32645, 32674), False, 'import numpy\n'), ((32679, 32723), 'numpy.zeros', 'numpy.zeros', (['(iter_restart + 1)', 'numpy.float64'], {}), '(iter_restart + 1, numpy.float64)\n', (32690, 32723), False, 'import numpy\n'), ((32823, 32841), 'math.sqrt', 'math.sqrt', (['r_dot_r'], {}), '(r_dot_r)\n', (32832, 32841), False, 'import math\n'), ((43142, 43156), 'math.sqrt', 'math.sqrt', (['rho'], {}), '(rho)\n', (43151, 43156), False, 'import math\n'), ((10472, 10489), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (10483, 10489), False, 'import numpy\n'), ((19492, 19513), 'math.sqrt', 'math.sqrt', (['rhat_dot_r'], {}), '(rhat_dot_r)\n', (19501, 19513), False, 'import math\n'), 
((20578, 20590), 'math.sqrt', 'math.sqrt', (['s'], {}), '(s)\n', (20587, 20590), False, 'import math\n'), ((28685, 28770), 'math.sqrt', 'math.sqrt', (['(h[iter, iter] * h[iter, iter] + h[iter + 1, iter] * h[iter + 1, iter])'], {}), '(h[iter, iter] * h[iter, iter] + h[iter + 1, iter] * h[iter + 1, iter]\n )\n', (28694, 28770), False, 'import math\n'), ((29262, 29294), 'numpy.zeros', 'numpy.zeros', (['iter', 'numpy.float64'], {}), '(iter, numpy.float64)\n', (29273, 29294), False, 'import numpy\n'), ((34158, 34243), 'math.sqrt', 'math.sqrt', (['(h[iter, iter] * h[iter, iter] + h[iter + 1, iter] * h[iter + 1, iter])'], {}), '(h[iter, iter] * h[iter, iter] + h[iter + 1, iter] * h[iter + 1, iter]\n )\n', (34167, 34243), False, 'import math\n'), ((34901, 34933), 'numpy.zeros', 'numpy.zeros', (['iter', 'numpy.float64'], {}), '(iter, numpy.float64)\n', (34912, 34933), False, 'import numpy\n'), ((37959, 37975), 'math.sqrt', 'math.sqrt', (['beta1'], {}), '(beta1)\n', (37968, 37975), False, 'import math\n'), ((39986, 40001), 'math.sqrt', 'math.sqrt', (['beta'], {}), '(beta)\n', (39995, 40001), False, 'import math\n'), ((40750, 40786), 'math.sqrt', 'math.sqrt', (['(gbar * gbar + beta * beta)'], {}), '(gbar * gbar + beta * beta)\n', (40759, 40786), False, 'import math\n'), ((41479, 41496), 'math.sqrt', 'math.sqrt', (['tnorm2'], {}), '(tnorm2)\n', (41488, 41496), False, 'import math\n'), ((41516, 41533), 'math.sqrt', 'math.sqrt', (['ynorm2'], {}), '(ynorm2)\n', (41525, 41533), False, 'import math\n'), ((59592, 59604), 'math.sqrt', 'math.sqrt', (['f'], {}), '(f)\n', (59601, 59604), False, 'import math\n'), ((18645, 18666), 'math.sqrt', 'math.sqrt', (['rhat_dot_r'], {}), '(rhat_dot_r)\n', (18654, 18666), False, 'import math\n'), ((21024, 21047), 'math.sqrt', 'math.sqrt', (['util.EPSILON'], {}), '(util.EPSILON)\n', (21033, 21047), False, 'import math\n'), ((28476, 28512), 'numpy.zeros', 'numpy.zeros', (['(iter + 1)', 'numpy.float64'], {}), '(iter + 1, numpy.float64)\n', (28487, 
28512), False, 'import numpy\n'), ((31602, 31620), 'math.sqrt', 'math.sqrt', (['r_dot_r'], {}), '(r_dot_r)\n', (31611, 31620), False, 'import math\n'), ((43864, 43894), 'math.sqrt', 'math.sqrt', (['(1.0 + theta * theta)'], {}), '(1.0 + theta * theta)\n', (43873, 43894), False, 'import math\n'), ((19383, 19404), 'math.sqrt', 'math.sqrt', (['rhat_dot_r'], {}), '(rhat_dot_r)\n', (19392, 19404), False, 'import math\n'), ((29423, 29465), 'numpy.dot', 'numpy.dot', (['h[i, i + 1:iter]', 'y[i + 1:iter]'], {}), '(h[i, i + 1:iter], y[i + 1:iter])\n', (29432, 29465), False, 'import numpy\n'), ((31711, 31729), 'math.sqrt', 'math.sqrt', (['r_dot_r'], {}), '(r_dot_r)\n', (31720, 31729), False, 'import math\n'), ((35062, 35104), 'numpy.dot', 'numpy.dot', (['h[i, i + 1:iter]', 'y[i + 1:iter]'], {}), '(h[i, i + 1:iter], y[i + 1:iter])\n', (35071, 35104), False, 'import numpy\n')] |
# coding=UTF-8
from numpy import concatenate, size, ones, zeros
import numpy as np
import maxflow
import cv2
from seamcarving.utils import cli_progress_bar, cli_progress_bar_end
class seam_carving_decomposition(object):
    #
    # Graph-cut based seam carving of an image.
    #
    # X: input image
    # deleteNumberW : Number of columns to be deleted
    # deleteNumberH : Number of rows to be deleted
    #
    def __init__(self, X, deleteNumberW, deleteNumberH, use_integers=True):
        self.X = X
        self.deleteNumberW = deleteNumberW
        self.deleteNumberH = deleteNumberH
        # if True the maxflow graph uses (scaled) integer capacities
        self.use_integers = use_integers
        # one stored seam (column index per row) for every removed seam
        # NOTE(review): ``generate`` runs deleteNumberW+deleteNumberH
        # iterations but this buffer is sized abs(deleteNumberW) only --
        # confirm deleteNumberH is always 0 for this decomposition.
        self.seams = np.empty((abs(deleteNumberW), X.shape[0]))

    def initD(self, Simg):
        # zero matrix with one column less than Simg (per-seam bookkeeping)
        return zeros((size(Simg, 0), size(Simg, 1) - 1))

    def find_neighborhood(self, image, node):
        # the three candidate successors of a flat node index: the pixels
        # below-left, below and below-right of it
        index = np.unravel_index((node), image.shape)
        unraveled = ((index[0] + 1, index[1] - 1), (index[0] + 1, index[1]), (index[0] + 1, index[1] + 1))
        return unraveled

    def find_node(self, index, image):
        # flat node id of a (row, col) index, or None if outside the image
        if index[0] < 0 or index[0] >= image.shape[0] or index[1] >= image.shape[1] or index[1] < 0:
            return None
        else:
            return np.ravel_multi_index(index, image.shape)

    def generate_graph(self, I):
        # Build the maxflow graph whose minimum s-t cut separates the image
        # into a left and a right part; the cut boundary is the seam.
        g = maxflow.Graph[float]()
        i_inf = np.inf
        i_mult = 1
        if self.use_integers:
            # integer graph: use a large finite value as "infinity" and
            # scale the (float) gradients up before truncation
            g = maxflow.Graph[int]()
            i_inf = 10000000
            i_mult = 10000
        nodeids = g.add_grid_nodes(I.shape)
        # per-pixel capacities for the 4 edge directions
        links = zeros((I.shape[0], I.shape[1], 4))
        # channel 0: horizontal forward energy |I(i,j+1) - I(i,j-1)|
        links[:, 1:-1, 0] = np.abs(I[:, 2:] - I[:, 0:-2])
        links[:, -2, 0] = i_inf
        links[:, 0, 0] = i_inf
        # channel 1: diagonal energy |I(i+1,j) - I(i,j-1)| (down-right)
        links[0:-1, 1:, 1] = np.abs(I[1:, 1:] - I[0:-1, 0:-1])
        # channel 2: diagonal energy |I(i-1,j) - I(i,j-1)| (up-right)
        links[1:, 1:, 2] = np.abs(I[0:-1, 1:] - I[1:, 0:-1])
        # channel 3: infinite downward capacity (keeps the seam connected)
        links[:, :, 3] = i_inf
        links = links * i_mult
        # infinite backward (leftward) edges forbid cutting them
        structure = np.array([[i_inf, 0, 0],
                              [i_inf, 0, 0],
                              [i_inf, 0, 0]
                              ])
        g.add_grid_edges(nodeids, structure=structure, symmetric=False)
        # rightward edges carry the horizontal energy
        weights = links[:, :, 0]
        structure = np.zeros((3, 3))
        structure[1, 2] = 1
        g.add_grid_edges(nodeids, structure=structure, weights=weights, symmetric=False)
        # downward edges carry the down-right diagonal energy
        weights = links[:, :, 1]
        structure = np.zeros((3, 3))
        structure[2, 1] = 1
        g.add_grid_edges(nodeids, structure=structure, weights=weights, symmetric=False)
        # upward edges carry the up-right diagonal energy
        weights = links[:, :, 2]
        structure = np.zeros((3, 3))
        structure[0, 1] = 1
        g.add_grid_edges(nodeids, structure=structure, weights=weights, symmetric=False)
        # source is connected to the whole leftmost column ...
        left_most = concatenate((np.arange(I.shape[0]).reshape(1, I.shape[0]), zeros((1, I.shape[0])))).astype(np.uint64)
        left_most = np.ravel_multi_index(left_most, I.shape)
        g.add_grid_tedges(left_most, i_inf, 0)
        # ... and the sink to the whole rightmost column
        right_most = concatenate((np.arange(I.shape[0]).reshape(1, I.shape[0]), ones((1, I.shape[0])) * (size(I, 1) - 1))).astype(np.uint64)
        right_most = np.ravel_multi_index(right_most, I.shape)
        g.add_grid_tedges(right_most, 0, i_inf)
        return g, nodeids

    def graph_cut(self, I):
        # Solve the min-cut and turn the segmentation into a seam: for each
        # row, count the pixels on the source side; the last of them is the
        # seam column of that row.
        g, nodeids = self.generate_graph(I)
        g.maxflow()
        I = g.get_grid_segments(nodeids)
        I = (I == False).sum(1) - 1
        I = I.reshape(I.shape[0], 1)
        return I

    ## Removes the seam described by I from the current state matrices.
    # @I A vector that maps each row to the column of the pixel to remove
    # @Simg The current (grayscale) working image the seams are computed on
    # @Z The current color image the same seam is removed from
    #
    # Returns:
    # The updated (Simg, Z) pair, each one column narrower.
    def apply_seam_carving(self, I, Simg, Z):
        reduced_size_1, reduced_size_2 = size(Simg, 0), size(Simg, 1) - 1
        ## Deletion:
        # Generating a deletion mask n x m. It's a binary matrix that contains True if the pixel should be keeped, False if they should be deleted.
        # The total number of Falses and Trues at each like should be the same.
        # Applying that matrix to a standard numpy array, it efficiently generates a clone matrix with the deleted values
        mask = np.arange(size(Z, 1)) != np.vstack(I)
        SimgCopy = Simg[mask].reshape(reduced_size_1, reduced_size_2)
        ZCopy = Z[mask].reshape(reduced_size_1, reduced_size_2, Z.shape[2])
        return SimgCopy, ZCopy

    ## Starting from the energy map and the path map, it generates vector pix, a vector that maps, for each row, the column of the seam to be merged.
    # @Pot The energy map. The position of minimum value of the last row of Pot represents the starting pixel of the seam (with a bottom-up strategy)
    # @pathMap A matrix that maps, for each position, the best direction to be taken to find the lower energy seam.
    #
    # Returns:
    # @pix the seam coordinates map.
    #
    # Example:
    #   pix = [3, 4, 5, 5, 4, 5]
    #   That maps this list of coordinates:
    #   (0, 3), (1, 4), (2, 5), (3, 5), (4, 5), (5, 5)
    # def generateSeamPath(self, Pot, pathMap):
    #   s_Pot_1 = Pot.shape[0]
    #   pix = empty((s_Pot_1, 1))
    #   Pot_last_line = Pot[-1, :]
    #   # mn, pix[-1] = Pot_last_line.min(axis=0), Pot_last_line.argmin(axis=0)
    #   # Finding the minimum value from Pot's last line's values.
    #   mn = Pot_last_line.min(axis=0)
    #   # Searching the list of indexes that have the minimum energy
    #   pp = where(Pot_last_line == mn)[0]
    #   # If there's more than one, it's random choosen
    #   pix[-1] = pp[int(random() * amax(pp.shape))]
    #   # Starting from the bottom
    #   for ii in reversed(xrange(0, s_Pot_1 - 1)): # xrange(s_Pot_1 - 2, -1, -1):
    #     # Directions expressed in pathMap uses this rule: 0 => upper-left, 1 => upper, 2 => upper-right
    #     # They are remapped to be like that: -1 => upper-left, 0 => upper, 1 => upper-right
    #     # To calculate the coordinate at step ii, you should map with: coordinate(ii + 1) + remapped direction
    #     pix[ii] = pix[ii + 1] + pathMap[ii + 1, int(pix[ii + 1])] - 1
    #   return pix

    def generate(self):
        # Main driver: repeatedly find the minimum-cut seam on the grayscale
        # image and remove it from both the working and the color image.
        # NOTE(review): ``xrange`` implies this module targets Python 2.
        X = self.X
        S = cv2.cvtColor(X, cv2.COLOR_BGR2GRAY).astype(np.float64)
        Z = np.copy(X)
        # Cloning S [To be fixed]
        Simg = np.copy(S)
        # For each seam I want to remove
        num_seams = self.deleteNumberW + self.deleteNumberH
        for i in xrange(num_seams):
            cli_progress_bar(i, num_seams)
            # pix maps each row to the column of the minimum-cut seam
            pix = self.graph_cut(Simg)
            I = pix.transpose()[0]
            self.seams[i] = I
            Simg, Z = self.apply_seam_carving(I, Simg, Z)
        cli_progress_bar_end()
        return Z
| [
"numpy.abs",
"numpy.copy",
"seamcarving.utils.cli_progress_bar_end",
"numpy.ones",
"numpy.ravel_multi_index",
"numpy.size",
"numpy.array",
"numpy.zeros",
"numpy.unravel_index",
"numpy.vstack",
"cv2.cvtColor",
"numpy.arange",
"seamcarving.utils.cli_progress_bar"
] | [((752, 787), 'numpy.unravel_index', 'np.unravel_index', (['node', 'image.shape'], {}), '(node, image.shape)\n', (768, 787), True, 'import numpy as np\n'), ((1383, 1417), 'numpy.zeros', 'zeros', (['(I.shape[0], I.shape[1], 4)'], {}), '((I.shape[0], I.shape[1], 4))\n', (1388, 1417), False, 'from numpy import concatenate, size, ones, zeros\n'), ((1485, 1514), 'numpy.abs', 'np.abs', (['(I[:, 2:] - I[:, 0:-2])'], {}), '(I[:, 2:] - I[:, 0:-2])\n', (1491, 1514), True, 'import numpy as np\n'), ((1635, 1668), 'numpy.abs', 'np.abs', (['(I[1:, 1:] - I[0:-1, 0:-1])'], {}), '(I[1:, 1:] - I[0:-1, 0:-1])\n', (1641, 1668), True, 'import numpy as np\n'), ((1737, 1770), 'numpy.abs', 'np.abs', (['(I[0:-1, 1:] - I[1:, 0:-1])'], {}), '(I[0:-1, 1:] - I[1:, 0:-1])\n', (1743, 1770), True, 'import numpy as np\n'), ((1854, 1909), 'numpy.array', 'np.array', (['[[i_inf, 0, 0], [i_inf, 0, 0], [i_inf, 0, 0]]'], {}), '([[i_inf, 0, 0], [i_inf, 0, 0], [i_inf, 0, 0]])\n', (1862, 1909), True, 'import numpy as np\n'), ((2128, 2144), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2136, 2144), True, 'import numpy as np\n'), ((2319, 2335), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2327, 2335), True, 'import numpy as np\n'), ((2511, 2527), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2519, 2527), True, 'import numpy as np\n'), ((2772, 2812), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['left_most', 'I.shape'], {}), '(left_most, I.shape)\n', (2792, 2812), True, 'import numpy as np\n'), ((3011, 3052), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['right_most', 'I.shape'], {}), '(right_most, I.shape)\n', (3031, 3052), True, 'import numpy as np\n'), ((7187, 7197), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (7194, 7197), True, 'import numpy as np\n'), ((7240, 7250), 'numpy.copy', 'np.copy', (['S'], {}), '(S)\n', (7247, 7250), True, 'import numpy as np\n'), ((7681, 7703), 'seamcarving.utils.cli_progress_bar_end', 'cli_progress_bar_end', ([], 
{}), '()\n', (7701, 7703), False, 'from seamcarving.utils import cli_progress_bar, cli_progress_bar_end\n'), ((1090, 1130), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['index', 'image.shape'], {}), '(index, image.shape)\n', (1110, 1130), True, 'import numpy as np\n'), ((4828, 4841), 'numpy.size', 'size', (['Simg', '(0)'], {}), '(Simg, 0)\n', (4832, 4841), False, 'from numpy import concatenate, size, ones, zeros\n'), ((5253, 5265), 'numpy.vstack', 'np.vstack', (['I'], {}), '(I)\n', (5262, 5265), True, 'import numpy as np\n'), ((7382, 7412), 'seamcarving.utils.cli_progress_bar', 'cli_progress_bar', (['i', 'num_seams'], {}), '(i, num_seams)\n', (7398, 7412), False, 'from seamcarving.utils import cli_progress_bar, cli_progress_bar_end\n'), ((660, 673), 'numpy.size', 'size', (['Simg', '(0)'], {}), '(Simg, 0)\n', (664, 673), False, 'from numpy import concatenate, size, ones, zeros\n'), ((4843, 4856), 'numpy.size', 'size', (['Simg', '(1)'], {}), '(Simg, 1)\n', (4847, 4856), False, 'from numpy import concatenate, size, ones, zeros\n'), ((5238, 5248), 'numpy.size', 'size', (['Z', '(1)'], {}), '(Z, 1)\n', (5242, 5248), False, 'from numpy import concatenate, size, ones, zeros\n'), ((7123, 7158), 'cv2.cvtColor', 'cv2.cvtColor', (['X', 'cv2.COLOR_BGR2GRAY'], {}), '(X, cv2.COLOR_BGR2GRAY)\n', (7135, 7158), False, 'import cv2\n'), ((675, 688), 'numpy.size', 'size', (['Simg', '(1)'], {}), '(Simg, 1)\n', (679, 688), False, 'from numpy import concatenate, size, ones, zeros\n'), ((2713, 2735), 'numpy.zeros', 'zeros', (['(1, I.shape[0])'], {}), '((1, I.shape[0]))\n', (2718, 2735), False, 'from numpy import concatenate, size, ones, zeros\n'), ((2933, 2954), 'numpy.ones', 'ones', (['(1, I.shape[0])'], {}), '((1, I.shape[0]))\n', (2937, 2954), False, 'from numpy import concatenate, size, ones, zeros\n'), ((2667, 2688), 'numpy.arange', 'np.arange', (['I.shape[0]'], {}), '(I.shape[0])\n', (2676, 2688), True, 'import numpy as np\n'), ((2887, 2908), 'numpy.arange', 'np.arange', 
(['I.shape[0]'], {}), '(I.shape[0])\n', (2896, 2908), True, 'import numpy as np\n'), ((2958, 2968), 'numpy.size', 'size', (['I', '(1)'], {}), '(I, 1)\n', (2962, 2968), False, 'from numpy import concatenate, size, ones, zeros\n')] |
import pathlib
import numpy as np
import xarray as xr
from numpy import ma
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.style
from matplotlib.colors import LogNorm
from ._base_saver import _BaseSaver
def save_loss(loss_data_dir, img_dir, run_number, vmin=0, vmax=0.01):
    """Plot training/validation/test loss histories and save them as a PNG.

    Reads every ``checkpoint*.nc`` file in *loss_data_dir*, concatenates
    them along the ``epochs`` dimension and plots the loss curve of each
    dataset split on a single figure saved to ``img_dir / f'loss_{run_number}.png'``.

    Args:
        loss_data_dir (pathlib.Path): directory containing checkpoint netCDF files.
        img_dir (pathlib.Path): output directory for the figure.
        run_number: identifier appended to the output file name.
        vmin: lower y-axis limit of the loss plot. Default 0.
        vmax: upper y-axis limit of the loss plot. Default 0.01.
    """
    # NOTE: mpl.style.use / plt.rc mutate matplotlib's global state.
    mpl.style.use('classic')
    fontsize = 32
    fontname = 'Times New Roman'
    plt.rc('xtick', labelsize=fontsize)
    plt.rc('ytick', labelsize=fontsize)
    plt.rc('font', family=fontname)
    axis_font = {'fontname': fontname, 'size': fontsize}
    # Load results (sorted so epochs are concatenated in order).
    # sorted() accepts the generator directly; the extra list() was redundant,
    # and the glob pattern contained no placeholders so the f-string was dropped.
    result_files = sorted(loss_data_dir.glob('checkpoint*.nc'))
    ds = xr.open_mfdataset(result_files, concat_dim='epochs', compat='no_conflicts', combine='nested')
    loss_type = ds.attrs['loss_type']
    epochs = ds['epochs'].values
    # Line style and legend label for each dataset split
    modes = {}
    modes['train'] = ('r-', r'${\rm Training}$')
    modes['val'] = ('b-', r'${\rm Validation}$')
    modes['test'] = ('g-', r'${\rm Test}$')
    fig, ax = plt.subplots(figsize=(12, 12))
    for mode, (ls, name) in modes.items():
        losses = ds[f'{mode}_losses'].values
        ax.plot(epochs, losses, ls, lw=3, label=name)
    ax.set_xlabel(r'${\rm epochs}$', **axis_font)
    loss_label = r'${\rm MSE}$ ${\rm loss}$' if loss_type == 'MSE' else r'${\rm MAE}$ ${\rm loss}$'
    ax.set_ylabel(loss_label, **axis_font)
    ax.set_ylim(ymin=vmin, ymax=vmax)
    ax.legend(prop={'size': fontsize*1.2})
    ax.grid(ls='dashed', lw=1)
    fig.tight_layout()
    fig.savefig(img_dir / f'loss_{run_number}.png')
    plt.close('all')
def to_numpy(var):
    """Return *var* (a torch tensor) as a squeezed numpy array, copying it off the GPU first when necessary."""
    if var.device == 'cpu':
        arr = var.numpy()
    else:
        arr = var.cpu().numpy()
    return np.squeeze(arr)
class _CityTransformerImageSaver(_BaseSaver):
    """Saves figures of reference/predicted fields, one figure per precision level.

    Each panel overlays the (log-transformed) field on the building level-set
    map and marks the release point of every sample with a star.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.clip = kwargs.get('clip')  # scale factor applied after undoing the log10 transform
        self.super_precision = kwargs.get('super_precision', False)  # stored only; not read in this class
        self.n_precision_enhancers = kwargs.get('n_precision_enhancers', 0)
        self.criteria = kwargs.get('criteria', 0.5)  # pixels with zeros_map below this are masked out
        self.alpha = kwargs.get('alpha', 0.5)  # transparency of the field overlay
        self.vmin = kwargs.get('vmin')  # NOTE(review): vmin/vmax are stored but not used by __save_images
        self.vmax = kwargs.get('vmax')
        self.cmap = kwargs.get('cmap', 'hot')
        # NOTE: the calls below mutate matplotlib's global configuration.
        mpl.style.use('classic')
        self.fontsize = 36
        self.fontname = 'Times New Roman'
        plt.rc('xtick', labelsize=self.fontsize)
        plt.rc('ytick', labelsize=self.fontsize)
        plt.rc('font', family=self.fontname)
        self.title_font = {'fontname':self.fontname, 'size':self.fontsize, 'color':'black',
                           'verticalalignment':'bottom'}
        self.axis_font = {'fontname':self.fontname, 'size':self.fontsize}
        # Half-extent of the plotted domain; units presumably metres -- TODO confirm
        self.xmax = 1024
        self.ymax = 1024
        self.extent = [-self.xmax, self.xmax, -self.ymax, self.ymax]
    def _save(self, *args, **kwargs):
        """Save one figure per precision level, for both reference and prediction."""
        levelset = kwargs.get('levelset')
        release_points = kwargs.get('release_points')
        ref = kwargs.get('ref')
        pred = kwargs.get('pred')
        mode = kwargs.get('mode')
        epoch = kwargs.get('epoch')
        n_cols = kwargs.get('n_cols', 4)
        for i_precision in range(self.n_precision_enhancers+1):
            self.__save_images(i_precision, levelset, release_points, ref, mode, n_cols, 'ref', epoch)
            self.__save_images(i_precision, levelset, release_points, pred, mode, n_cols, 'pred', epoch)
    def __save_images(self, i_precision, levelset, release_points, imgs, mode, n_cols, name, epoch):
        # First save images
        if type(imgs) is tuple:
            # (images, zeros_map) pair: pick out the requested precision level
            imgs, zeros_map = imgs
            assert imgs.shape[1] == self.n_precision_enhancers+1
            # Access to the specified precision
            imgs = imgs[:, i_precision]
            imgs, zeros_map = to_numpy(imgs), to_numpy(zeros_map)
        levelset = to_numpy(levelset)
        release_points = to_numpy(release_points)
        ## Create mask based on binary_map and levelset
        # NOTE(review): zeros_map is only bound when imgs was a tuple above;
        # callers appear to always pass a (imgs, zeros_map) tuple -- confirm.
        mask = np.logical_or(zeros_map < self.criteria, levelset >= 0.)
        # Undo the log10 transform, then blank masked pixels with -1 and rescale
        imgs = 10**imgs
        imgs = np.where(mask, -1, imgs) * self.clip
        n_samples = len(imgs)
        n_rows = n_samples // n_cols
        fig, axes = plt.subplots(n_rows, n_cols, figsize=(24,24), subplot_kw={'xticks':[], 'yticks':[]},
                                gridspec_kw=dict(hspace=0.1, wspace=0.1))
        for i, ax in np.ndenumerate(axes.ravel()):
            # Hide non-positive (masked) pixels so LogNorm only sees valid values
            masked_imgs = ma.masked_where(imgs[i] <= 0, imgs[i])
            # Buildings in grayscale first, field blended on top (order matters)
            im = ax.imshow(levelset[i] < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
            im = ax.imshow(masked_imgs, cmap=self.cmap, origin='lower', extent=self.extent, alpha=self.alpha, norm=LogNorm())
            # Add source locations
            x_, y_ = release_points[i]
            ax.plot(x_, y_, '*', markersize=10)
        # Set title and filename
        title = f'{name} (epoch = {epoch:03})'
        filename = f'{mode}_{name}_refine{i_precision}_epoch{epoch:03}.png'
        fig.colorbar(im, ax=axes.ravel().tolist())
        fig.suptitle(title, **self.title_font, y=0.9)
        fig_dir = self.out_dir / mode
        fig.savefig(fig_dir / filename)
        plt.close('all')
class _CityTransformerInverseImageSaver(_BaseSaver):
    """Saves figures comparing reference and predicted source-distance fields.

    Each panel overlays the distance function on the building level-set map
    and marks both the true release point (star) and the estimated one
    (triangle, taken at the arg-min of the distance field).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vmin = kwargs.get('vmin', 0)
        self.vmax = kwargs.get('vmax', 2000)
        self.cmap = kwargs.get('cmap', 'seismic')
        # NOTE: the calls below mutate matplotlib's global configuration.
        mpl.style.use('classic')
        self.fontsize = 36
        self.fontname = 'Times New Roman'
        plt.rc('xtick', labelsize=self.fontsize)
        plt.rc('ytick', labelsize=self.fontsize)
        plt.rc('font', family=self.fontname)
        self.title_font = {'fontname':self.fontname, 'size':self.fontsize, 'color':'black',
                           'verticalalignment':'bottom'}
        self.axis_font = {'fontname':self.fontname, 'size':self.fontsize}
        # NOTE(review): this overwrites the 'cmap' keyword accepted above.
        self.cmap = 'seismic'
        # Half-extent of the plotted domain; units presumably metres -- TODO confirm
        self.xmax = 1024
        self.ymax = 1024
        self.alpha = 0.5
        self.extent = [-self.xmax, self.xmax, -self.ymax, self.ymax]
    def _save(self, *args, **kwargs):
        """Save one figure for the reference field and one for the prediction."""
        levelset = kwargs.get('levelset')
        release_points = kwargs.get('release_points')
        ref = kwargs.get('ref')
        pred = kwargs.get('pred')
        mode = kwargs.get('mode')
        epoch = kwargs.get('epoch')
        n_cols = kwargs.get('n_cols', 4)
        data_dict = {'ref': ref,
                     'pred': pred,}
        for name, data in data_dict.items():
            self.__save_images(levelset, release_points, data, mode, n_cols, name, epoch)
    def __save_images(self, levelset, release_points, imgs, mode, n_cols, name, epoch):
        imgs = to_numpy(imgs)
        levelset = to_numpy(levelset)
        release_points = to_numpy(release_points)
        n_samples = len(imgs)
        n_rows = n_samples // n_cols
        fig, axes = plt.subplots(n_rows, n_cols, figsize=(24,24), subplot_kw={'xticks':[], 'yticks':[]},
                                gridspec_kw=dict(hspace=0.1, wspace=0.1))
        for i, ax in np.ndenumerate(axes.ravel()):
            # Buildings in grayscale first, distance field blended on top (order matters)
            im = ax.imshow(levelset[i] < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
            im = ax.imshow(imgs[i], cmap=self.cmap, origin='lower', extent=self.extent, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax)
            # Add source locations
            x_, y_ = release_points[i]
            # Estimated
            x_pred, y_pred = self.__pred_source_location(imgs[i])
            ax.plot(x_, y_, color='none', marker='*', markeredgecolor='r', markeredgewidth=2, markersize=12)
            ax.plot(x_pred, y_pred, color='none', marker='^', markeredgecolor='b', markeredgewidth=2, markersize=12)
        title = f'{name} (epoch = {epoch:03})'
        filename = f'{mode}_{name}_epoch{epoch}.png'
        fig.colorbar(im, ax=axes.ravel().tolist())
        fig.suptitle(title, **self.title_font, y=0.9)
        fig_dir = self.out_dir / mode
        fig.savefig(fig_dir / filename)
        plt.close('all')
    def __pred_source_location(self, distance_function):
        """Return (x, y) of the minimum of *distance_function* in domain coordinates."""
        # Get the index where the distance function takes the minimum value
        ny, nx = distance_function.shape
        x, y = np.linspace(-self.xmax, self.xmax, nx), np.linspace(-self.ymax, self.ymax, ny)
        idx_y, idx_x = np.unravel_index(np.argmin(distance_function, axis=None), (ny, nx))
        return x[idx_x], y[idx_y]
| [
"xarray.open_mfdataset",
"matplotlib.use",
"numpy.where",
"numpy.logical_or",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.style.use",
"numpy.argmin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"matplotlib.colors.LogNorm"
] | [((100, 114), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (107, 114), True, 'import matplotlib as mpl\n'), ((320, 344), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (333, 344), True, 'import matplotlib as mpl\n'), ((400, 435), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'fontsize'}), "('xtick', labelsize=fontsize)\n", (406, 435), True, 'import matplotlib.pyplot as plt\n'), ((440, 475), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'fontsize'}), "('ytick', labelsize=fontsize)\n", (446, 475), True, 'import matplotlib.pyplot as plt\n'), ((480, 511), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'fontname'}), "('font', family=fontname)\n", (486, 511), True, 'import matplotlib.pyplot as plt\n'), ((804, 901), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['result_files'], {'concat_dim': '"""epochs"""', 'compat': '"""no_conflicts"""', 'combine': '"""nested"""'}), "(result_files, concat_dim='epochs', compat='no_conflicts',\n combine='nested')\n", (821, 901), True, 'import xarray as xr\n'), ((1147, 1177), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (1159, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1723), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1716, 1723), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2407), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (2396, 2407), True, 'import matplotlib as mpl\n'), ((2486, 2526), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'self.fontsize'}), "('xtick', labelsize=self.fontsize)\n", (2492, 2526), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2575), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'self.fontsize'}), "('ytick', labelsize=self.fontsize)\n", (2541, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2620), 
'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'self.fontname'}), "('font', family=self.fontname)\n", (2590, 2620), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4179), 'numpy.logical_or', 'np.logical_or', (['(zeros_map < self.criteria)', '(levelset >= 0.0)'], {}), '(zeros_map < self.criteria, levelset >= 0.0)\n', (4135, 4179), True, 'import numpy as np\n'), ((5336, 5352), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5345, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5660), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (5649, 5660), True, 'import matplotlib as mpl\n'), ((5739, 5779), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'self.fontsize'}), "('xtick', labelsize=self.fontsize)\n", (5745, 5779), True, 'import matplotlib.pyplot as plt\n'), ((5788, 5828), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'self.fontsize'}), "('ytick', labelsize=self.fontsize)\n", (5794, 5828), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5873), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'self.fontname'}), "('font', family=self.fontname)\n", (5843, 5873), True, 'import matplotlib.pyplot as plt\n'), ((8356, 8372), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8365, 8372), True, 'import matplotlib.pyplot as plt\n'), ((4218, 4242), 'numpy.where', 'np.where', (['mask', '(-1)', 'imgs'], {}), '(mask, -1, imgs)\n', (4226, 4242), True, 'import numpy as np\n'), ((4582, 4620), 'numpy.ma.masked_where', 'ma.masked_where', (['(imgs[i] <= 0)', 'imgs[i]'], {}), '(imgs[i] <= 0, imgs[i])\n', (4597, 4620), False, 'from numpy import ma\n'), ((8563, 8601), 'numpy.linspace', 'np.linspace', (['(-self.xmax)', 'self.xmax', 'nx'], {}), '(-self.xmax, self.xmax, nx)\n', (8574, 8601), True, 'import numpy as np\n'), ((8603, 8641), 'numpy.linspace', 'np.linspace', (['(-self.ymax)', 'self.ymax', 'ny'], {}), '(-self.ymax, 
self.ymax, ny)\n', (8614, 8641), True, 'import numpy as np\n'), ((8682, 8721), 'numpy.argmin', 'np.argmin', (['distance_function'], {'axis': 'None'}), '(distance_function, axis=None)\n', (8691, 8721), True, 'import numpy as np\n'), ((4853, 4862), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (4860, 4862), False, 'from matplotlib.colors import LogNorm\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from scipy import sparse
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import matplotlib.colors as colors
from matplotlib import cm
def load_detected_cartels(years, cartel_dir):
    """Load the per-year cartel tables and concatenate them into one frame.

    Each ``cartels-<year>.csv`` file numbers its groups from zero, so the
    group ids are shifted by a running offset to keep them globally unique
    across years. A ``year`` column is added to every row.
    """
    tables = []
    offset = 0
    for yr in years:
        path = "{root}/cartels-{year}.csv".format(root=cartel_dir, year=yr)
        tbl = pd.read_csv(path, sep="\t")
        tbl["year"] = yr
        tbl["group_id"] += offset
        # Next year's ids start right after the largest id seen so far
        offset = np.max(tbl["group_id"].values) + 1
        tables.append(tbl)
    return pd.concat(tables, ignore_index=True)
if __name__ == "__main__":
CARTEL_DIR = sys.argv[1]
OUTPUT = sys.argv[2]
cartel_table = load_detected_cartels(np.arange(2000, 2020), CARTEL_DIR)
# Count the number of detected groups in each year
num_cartel = (
cartel_table.groupby("year")
.apply(lambda x: x[["group_id"]].drop_duplicates().shape[0])
.reset_index()
.rename(columns={0: "num_cartel"})
)
# Compute the size
cartel_sz = (
cartel_table.groupby(["year", "group_id"])
.apply(lambda x: x.shape[0])
.reset_index()
.rename(columns={0: "sz"})
)
# Compute the maximum size for each year
maxsz = (
cartel_sz.groupby("year")
.apply(lambda dg: dg["sz"].max())
.reset_index()
.rename(columns={0: "sz"})
)
maxsz["year"] = maxsz["year"] - 2000
# Set up the canvas
sns.set_style("white")
sns.set(font_scale=1.5)
sns.set_style("ticks")
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
#
# Plot the number of cartels in each year
#
ax = sns.barplot(
data=num_cartel,
x="year",
y="num_cartel",
color=sns.color_palette("Set1").as_hex()[1],
ax=axes[0],
)
# Labels
ax.set_ylabel("Number of detected cartels")
ax.set_xlabel("Year")
# Ticks
ax.set_xticks(np.arange(0, 20, 2))
ax.set_xticklabels(["`%02d" % d for d in np.arange(2000, 2020, 2) - 2000])
#
# Plot the size of cartels detected in each year
#
ax = sns.boxplot(data=cartel_sz, x="year", y="sz",)
# Remove colors of the boxes
for i, box in enumerate(ax.artists):
box.set_edgecolor("black")
box.set_facecolor("white")
for j in range(6 * i, 6 * (i + 1)):
ax.lines[j].set_color("black")
ax.set_yscale("log")
ax.set_ylim(bottom=1, top=100)
ax.set_ylabel("Number of journals in a cartel")
ax.set_xlabel("Year")
ax.set_xticks(np.arange(0, 20, 2))
ax.set_xticklabels(["`%02d" % d for d in np.arange(2000, 2020, 2) - 2000])
# Annotate
axes[0].annotate('(a)', xy=(0.01, 0.9), textcoords = "axes fraction", fontsize = 30)
axes[1].text(0.01, 0.9, '(b)', transform = axes[1].transAxes, fontsize = 30)
plt.savefig(OUTPUT, bbox_inches="tight", dpi=300)
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"numpy.max",
"seaborn.set_style",
"seaborn.boxplot",
"pandas.concat",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((703, 750), 'pandas.concat', 'pd.concat', (['cartel_table_list'], {'ignore_index': '(True)'}), '(cartel_table_list, ignore_index=True)\n', (712, 750), True, 'import pandas as pd\n'), ((1653, 1675), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (1666, 1675), True, 'import seaborn as sns\n'), ((1680, 1703), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.5)'}), '(font_scale=1.5)\n', (1687, 1703), True, 'import seaborn as sns\n'), ((1708, 1730), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (1721, 1730), True, 'import seaborn as sns\n'), ((1748, 1795), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(15, 5)'}), '(nrows=1, ncols=2, figsize=(15, 5))\n', (1760, 1795), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2361), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'cartel_sz', 'x': '"""year"""', 'y': '"""sz"""'}), "(data=cartel_sz, x='year', y='sz')\n", (2327, 2361), True, 'import seaborn as sns\n'), ((3052, 3101), 'matplotlib.pyplot.savefig', 'plt.savefig', (['OUTPUT'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(OUTPUT, bbox_inches='tight', dpi=300)\n", (3063, 3101), True, 'import matplotlib.pyplot as plt\n'), ((901, 922), 'numpy.arange', 'np.arange', (['(2000)', '(2020)'], {}), '(2000, 2020)\n', (910, 922), True, 'import numpy as np\n'), ((2141, 2160), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (2150, 2160), True, 'import numpy as np\n'), ((2761, 2780), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (2770, 2780), True, 'import numpy as np\n'), ((596, 635), 'numpy.max', 'np.max', (["cartel_table['group_id'].values"], {}), "(cartel_table['group_id'].values)\n", (602, 635), True, 'import numpy as np\n'), ((2207, 2231), 'numpy.arange', 'np.arange', (['(2000)', '(2020)', '(2)'], {}), '(2000, 2020, 2)\n', (2216, 2231), True, 'import numpy as np\n'), ((2827, 2851), 'numpy.arange', 
'np.arange', (['(2000)', '(2020)', '(2)'], {}), '(2000, 2020, 2)\n', (2836, 2851), True, 'import numpy as np\n'), ((1958, 1983), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {}), "('Set1')\n", (1975, 1983), True, 'import seaborn as sns\n')] |
import numpy as np
import spectral
from matplotlib import pyplot as plt
# minimum noise filter
def MNF(hydata, output_bands=20, denoise_bands=40, band_range=None, inplace=False):
    """
    Apply a minimum noise fraction (MNF) transform to a hyperspectral dataset.

    *Arguments*:
     - hydata = A HyData instance containing the source dataset (e.g. image or point cloud).
     - output_bands = the number of bands to keep after MNF (dimensionality reduction). Default is 20.
     - denoise_bands = number of high-noise bands to treat as noise for denoising.
     - band_range = the spectral range to perform the MNF over. If (int,int) is passed then the values are treated as
                    min/max band IDs, if (float,float) is passed then values are treated as wavelengths (in nm). If None is
                    passed (default) then the MNF is computed using all bands. Note that wavelengths can only be passed
                    if image is a hyImage object.
     - inplace = True if the original image should be denoised based on the MNF transform. Default is False.

    *Returns*:
     - mnf = a HyData instance containing the MNF bands. Note that only bands 0:*output_bands* will be kept in this dataset.
     - factors = A 2D numpy array containing the factors applied to the input dataset. Useful
                 for plotting/interpreting the regions each MNF band is sensitive to.
    """
    # prepare data for MNF
    wav = hydata.get_wavelengths()
    decomp = False
    if hydata.is_int():
        hydata.decompress()  # MNF doesn't work very well with ints....
        decomp = True  # so we can compress again afterwards
    data = hydata.data

    # get range of bands to include in calculation
    if band_range is None:  # default to all bands
        minb = 0
        maxb = data.shape[-1]
    else:
        minb = hydata.get_band_index(band_range[0])
        maxb = hydata.get_band_index(band_range[1])

    assert minb < maxb, "Error - invalid range... band_range[0] > band_range[1]??"
    assert minb < data.shape[-1], "Error - band_range[0] out of range."
    if maxb == -1 or maxb > data.shape[-1]:
        maxb = data.shape[-1]

    # remove invalid bands (all-nan or all-zero)
    valid_bands = []
    for b in range(minb, maxb):
        if np.isfinite(data[..., b]).any() \
                and not (np.nanmax(data[..., b]) == 0).all():
            valid_bands.append(b)
    data = np.array(data[..., valid_bands])

    # warn if bands have negative values...
    if np.nanmin(data) < 0.0:
        print("Warning - image contains negative pixels. This can cause unstable behaviour...")

    # calculate signal stats (as in spectral.calc_stats(...) but allowing for nans)
    X = data.reshape(-1, data.shape[-1]).T  # reshape to 1D list of pixels for each band
    X = X[:, np.isfinite(np.sum(X, axis=0))]  # drop columns containing nans
    X = X[:, np.sum(X, axis=0) > 0]  # drop columns containing all zeros
    mean = np.mean(X, axis=1)
    cov = np.cov(X)
    n = X.shape[1]
    signal = spectral.GaussianStats(mean, cov, n)

    # calculate noise as per spectral.noise_from_diffs (but allowing for nans)
    if len(data.shape) == 3:  # image data
        deltas = data[:-1, :-1, :] - data[1:, 1:, :]  # estimate noise by subtracting adjacent pixels
    elif len(data.shape) == 2:  # point cloud data
        deltas = data[:-1, :] - data[1:, :]  # estimate noise by subtracting adjacent points
    X = deltas.reshape(-1, deltas.shape[-1]).T
    X = X[:, np.isfinite(np.sum(X, axis=0))]  # drop columns containing nans
    X = X[:, np.sum(X, axis=0) > 0]  # drop columns containing all zeros
    X = X[:, np.sum(X, axis=0) < np.nanpercentile(np.sum(X, axis=0), 50)]  # drop high noise data (these relate to edges)
    mean = np.mean(X, axis=1)
    cov = np.cov(X)
    n = X.shape[1]
    noise = spectral.GaussianStats(mean, cov, n)

    mnfr = spectral.mnf(signal, noise)

    # reduce bands
    reduced = mnfr.reduce(data, num=output_bands)

    # apply sign correction so there are fewer positive pixels than negative ones (sign is arbitrary); this helps
    # maintain consistency for plotting etc. by having a low-valued background with some high-valued regions (<50%)
    sign = np.nanmedian(reduced / np.abs(reduced))  # n.b. this will always be 1.0 or -1.0
    assert np.isfinite(sign), "Weird error - no non-nan values in MNF result?"
    reduced *= sign

    # denoise and export
    denoise = mnfr.denoise(data, num=denoise_bands)

    # update original image bands?
    if inplace:
        data[..., valid_bands] = denoise

    # calculate factors (for determining "important" bands)
    # noinspection PyProtectedMember
    factors = sign * mnfr.get_reduction_transform(num=output_bands)._A

    if wav is not None:  # idiomatic form of "not wav is None"
        wav = wav[valid_bands]

    # compress input dataset (so we don't change it)
    if decomp:
        hydata.compress()

    # prepare output
    out = hydata.copy(data=False)
    out.header.drop_all_bands()  # drop band specific attributes
    out.data = reduced
    out.push_to_header()
    return out, factors
def plotMNF(data, n, factors, wavelengths=None, flip=False, **kwds):
    """
    Utility function for plotting minimum noise fractions and their associated band weights.

    *Arguments*:
     - data = a HyData instance containing the MNF.
     - n = the nth minimum noise fraction will be plotted.
     - factors = the factors array returned by MNF( ... ).
     - wavelengths = the wavelengths corresponding to each factor. If None (default) indices are used instead.
     - flip = True if the sign of the minimum noise fraction and associated weights should be flipped. Default is False.
    *Keywords*:
     - cam = a camera object if data is a HyCloud instance. By default the header file will be searched for cameras.
     - other keywords are passed to HyData.quick_plot( ... ).
    *Returns*:
     - fig, ax = the figure and list of associated axes.
    """
    sign = 1
    if flip:
        sign = -1

    assert data.is_image() or data.is_point(), "Error - MNF data instance must be a HyImage or HyCloud."

    # create plot of mnf band (sign flip is applied in-place and undone below)
    data.data[..., n] *= sign
    kwds['vmin'] = kwds.get('vmin', np.nanpercentile(data.data[..., n], 1))
    kwds['vmax'] = kwds.get('vmax', np.nanpercentile(data.data[..., n], 99))
    if data.is_image():  # plot image
        aspx = data.aspx()
        fig, ax = plt.subplots(1, 2, figsize=(18 * 1.15, 18 * aspx),
                               gridspec_kw={'width_ratios': [10, 1], 'wspace': -0.11})
        data.quick_plot(n, ax=ax[0], **kwds)
    else:  # plot point cloud
        kwds['cam'] = kwds.get("cam", data.header.get_camera())
        cam = kwds['cam']
        assert cam is not None, "Error - no valid camera object found. Try passing 'cam' as a keyword."
        aspx = cam.dims[1] / cam.dims[0]
        fig, ax = plt.subplots(1, 2, figsize=(18 * 1.15, 18 * aspx),
                               gridspec_kw={'width_ratios': [10, 1], 'wspace': -0.11})
        data.quick_plot(bands=n, ax=ax[0], **kwds)
        ax[0].set_xticks([])
        ax[0].set_yticks([])
    data.data[..., n] *= sign  # flip sign back so the caller's data is unchanged

    # plot component weights
    if wavelengths is None:
        # use band indices; np.arange needs the length, not the shape tuple
        # (np.arange(factors[n].shape) raised a TypeError)
        wavelengths = np.arange(factors[n].shape[0])
    assert wavelengths.shape[0] == factors[n].shape[0], "Error - number of wavelengths (%d) != number of factors (%d) " \
                                                        % (wavelengths.shape[0], factors[n].shape[0])
    ax[1].fill(np.hstack([[0], factors[n]*sign, [0]]),
               np.hstack([wavelengths[0], wavelengths, wavelengths[-1]]),
               color='k', alpha=0.2)
    ax[1].plot(factors[n]*sign, wavelengths, color='k')
    ax[1].axvline(0, color='k')
    ax[1].set_title("Band weights")
    ax[1].set_xticks([])
    ax[1].yaxis.tick_right()
    fig.subplots_adjust(wspace=None, hspace=None)
    fig.show()
    return fig, ax
"numpy.mean",
"numpy.abs",
"numpy.nanpercentile",
"numpy.hstack",
"numpy.array",
"spectral.GaussianStats",
"numpy.sum",
"numpy.isfinite",
"numpy.nanmax",
"numpy.nanmin",
"numpy.cov",
"matplotlib.pyplot.subplots",
"numpy.arange",
"spectral.mnf"
] | [((2395, 2427), 'numpy.array', 'np.array', (['data[..., valid_bands]'], {}), '(data[..., valid_bands])\n', (2403, 2427), True, 'import numpy as np\n'), ((2933, 2951), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (2940, 2951), True, 'import numpy as np\n'), ((2964, 2973), 'numpy.cov', 'np.cov', (['X'], {}), '(X)\n', (2970, 2973), True, 'import numpy as np\n'), ((3006, 3042), 'spectral.GaussianStats', 'spectral.GaussianStats', (['mean', 'cov', 'n'], {}), '(mean, cov, n)\n', (3028, 3042), False, 'import spectral\n'), ((3737, 3755), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (3744, 3755), True, 'import numpy as np\n'), ((3766, 3775), 'numpy.cov', 'np.cov', (['X'], {}), '(X)\n', (3772, 3775), True, 'import numpy as np\n'), ((3807, 3843), 'spectral.GaussianStats', 'spectral.GaussianStats', (['mean', 'cov', 'n'], {}), '(mean, cov, n)\n', (3829, 3843), False, 'import spectral\n'), ((3856, 3883), 'spectral.mnf', 'spectral.mnf', (['signal', 'noise'], {}), '(signal, noise)\n', (3868, 3883), False, 'import spectral\n'), ((4272, 4289), 'numpy.isfinite', 'np.isfinite', (['sign'], {}), '(sign)\n', (4283, 4289), True, 'import numpy as np\n'), ((2480, 2495), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (2489, 2495), True, 'import numpy as np\n'), ((6176, 6214), 'numpy.nanpercentile', 'np.nanpercentile', (['data.data[..., n]', '(1)'], {}), '(data.data[..., n], 1)\n', (6192, 6214), True, 'import numpy as np\n'), ((6252, 6291), 'numpy.nanpercentile', 'np.nanpercentile', (['data.data[..., n]', '(99)'], {}), '(data.data[..., n], 99)\n', (6268, 6291), True, 'import numpy as np\n'), ((6375, 6486), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(18 * 1.15, 18 * aspx)', 'gridspec_kw': "{'width_ratios': [10, 1], 'wspace': -0.11}"}), "(1, 2, figsize=(18 * 1.15, 18 * aspx), gridspec_kw={\n 'width_ratios': [10, 1], 'wspace': -0.11})\n", (6387, 6486), True, 'from matplotlib import pyplot as plt\n'), 
((6840, 6951), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(18 * 1.15, 18 * aspx)', 'gridspec_kw': "{'width_ratios': [10, 1], 'wspace': -0.11}"}), "(1, 2, figsize=(18 * 1.15, 18 * aspx), gridspec_kw={\n 'width_ratios': [10, 1], 'wspace': -0.11})\n", (6852, 6951), True, 'from matplotlib import pyplot as plt\n'), ((7206, 7233), 'numpy.arange', 'np.arange', (['factors[n].shape'], {}), '(factors[n].shape)\n', (7215, 7233), True, 'import numpy as np\n'), ((7475, 7515), 'numpy.hstack', 'np.hstack', (['[[0], factors[n] * sign, [0]]'], {}), '([[0], factors[n] * sign, [0]])\n', (7484, 7515), True, 'import numpy as np\n'), ((7530, 7587), 'numpy.hstack', 'np.hstack', (['[wavelengths[0], wavelengths, wavelengths[-1]]'], {}), '([wavelengths[0], wavelengths, wavelengths[-1]])\n', (7539, 7587), True, 'import numpy as np\n'), ((4206, 4221), 'numpy.abs', 'np.abs', (['reduced'], {}), '(reduced)\n', (4212, 4221), True, 'import numpy as np\n'), ((2798, 2815), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2804, 2815), True, 'import numpy as np\n'), ((2863, 2880), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2869, 2880), True, 'import numpy as np\n'), ((3480, 3497), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3486, 3497), True, 'import numpy as np\n'), ((3545, 3562), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3551, 3562), True, 'import numpy as np\n'), ((3618, 3635), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3624, 3635), True, 'import numpy as np\n'), ((2227, 2252), 'numpy.isfinite', 'np.isfinite', (['data[..., b]'], {}), '(data[..., b])\n', (2238, 2252), True, 'import numpy as np\n'), ((3656, 3673), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3662, 3673), True, 'import numpy as np\n'), ((2286, 2309), 'numpy.nanmax', 'np.nanmax', (['data[..., b]'], {}), '(data[..., b])\n', (2295, 2309), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tf op module"""
import tempfile
import os
import logging
import tensorflow as tf
import numpy as np
import tvm
from tvm import te
from tvm.contrib import tf_op
def test_use_tvmdso_op():
    """Build TVM vector-add kernels, export them as shared libraries, and run
    them from TensorFlow through the tvm.contrib.tf_op wrapper (CPU and, when
    available, GPU)."""
    def export_cpu_add_lib():
        """create cpu add op lib"""
        n = te.var("n")  # symbolic length: the kernel works for any vector size
        ph_a = te.placeholder((n,), name='ph_a')
        ph_b = te.placeholder((n,), name='ph_b')
        ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')
        sched = te.create_schedule(ph_c.op)
        fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "c", name="vector_add")
        lib_path = tempfile.mktemp("tvm_add_dll.so")
        fadd_dylib.export_library(lib_path)
        return lib_path
    def export_gpu_add_lib():
        """create gpu add op lib"""
        n = te.var("n")
        ph_a = te.placeholder((n,), name='ph_a')
        ph_b = te.placeholder((n,), name='ph_b')
        ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')
        sched = te.create_schedule(ph_c.op)
        # map the compute axis onto CUDA blocks/threads, 64 threads per block
        b_axis, t_axis = sched[ph_c].split(ph_c.op.axis[0], factor=64)
        sched[ph_c].bind(b_axis, te.thread_axis("blockIdx.x"))
        sched[ph_c].bind(t_axis, te.thread_axis("threadIdx.x"))
        fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "cuda", name="vector_add")
        lib_path = tempfile.mktemp("tvm_add_cuda_dll.so")
        fadd_dylib.export_library(lib_path)
        return lib_path
    def test_add(session, lib_path, tf_device):
        """test add lib with TensorFlow wrapper"""
        module = tf_op.OpModule(lib_path)
        left = tf.placeholder("float32", shape=[4])
        right = tf.placeholder("float32", shape=[4])
        feed_dict = {left: [1.0, 2.0, 3.0, 4.0], right: [5.0, 6.0, 7.0, 8.0]}
        expect = np.asarray([6.0, 8.0, 10.0, 12.0])
        # three ways of declaring the output shape: static list, dynamic
        # tf.shape tensor, and a mixed list containing a shape element
        add1 = module.func("vector_add", output_shape=[4], output_dtype="float")
        add2 = module.func("vector_add", output_shape=tf.shape(left), output_dtype="float")
        add3 = module.func("vector_add", output_shape=[tf.shape(left)[0]], output_dtype="float")
        with tf.device(tf_device):
            output1 = session.run(add1(left, right), feed_dict)
            np.testing.assert_equal(output1, expect)
            output2 = session.run(add2(left, right), feed_dict)
            np.testing.assert_equal(output2, expect)
            output3 = session.run(add3(left, right), feed_dict)
            np.testing.assert_equal(output3, expect)
    def cpu_test(session):
        """test function for cpu"""
        cpu_lib = None
        try:
            cpu_lib = export_cpu_add_lib()
            test_add(session, cpu_lib, "/cpu:0")
        finally:
            # always remove the temporary shared library, even on failure
            if cpu_lib is not None:
                os.remove(cpu_lib)
    def gpu_test(session):
        """test function for gpu"""
        gpu_lib = None
        try:
            gpu_lib = export_gpu_add_lib()
            test_add(session, gpu_lib, "/gpu:0")
        finally:
            if gpu_lib is not None:
                os.remove(gpu_lib)
    # run on whichever targets this TVM build supports
    with tf.Session() as session:
        if tvm.runtime.enabled("cpu"):
            logging.info("Test TensorFlow op on cpu kernel")
            cpu_test(session)
        if tvm.runtime.enabled("gpu"):
            logging.info("Test TensorFlow op on gpu kernel")
            gpu_test(session)
if __name__ == "__main__":
test_use_tvmdso_op()
| [
"tensorflow.device",
"tensorflow.shape",
"tvm.te.var",
"numpy.testing.assert_equal",
"tensorflow.placeholder",
"tensorflow.Session",
"tvm.te.create_schedule",
"numpy.asarray",
"tvm.te.thread_axis",
"tvm.te.placeholder",
"tempfile.mktemp",
"tvm.build",
"tvm.contrib.tf_op.OpModule",
"tvm.te.... | [((1123, 1134), 'tvm.te.var', 'te.var', (['"""n"""'], {}), "('n')\n", (1129, 1134), False, 'from tvm import te\n'), ((1150, 1183), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""ph_a"""'}), "((n,), name='ph_a')\n", (1164, 1183), False, 'from tvm import te\n'), ((1199, 1232), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""ph_b"""'}), "((n,), name='ph_b')\n", (1213, 1232), False, 'from tvm import te\n'), ((1248, 1312), 'tvm.te.compute', 'te.compute', (['ph_a.shape', '(lambda i: ph_a[i] + ph_b[i])'], {'name': '"""ph_c"""'}), "(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')\n", (1258, 1312), False, 'from tvm import te\n'), ((1329, 1356), 'tvm.te.create_schedule', 'te.create_schedule', (['ph_c.op'], {}), '(ph_c.op)\n', (1347, 1356), False, 'from tvm import te\n'), ((1378, 1438), 'tvm.build', 'tvm.build', (['sched', '[ph_a, ph_b, ph_c]', '"""c"""'], {'name': '"""vector_add"""'}), "(sched, [ph_a, ph_b, ph_c], 'c', name='vector_add')\n", (1387, 1438), False, 'import tvm\n'), ((1458, 1491), 'tempfile.mktemp', 'tempfile.mktemp', (['"""tvm_add_dll.so"""'], {}), "('tvm_add_dll.so')\n", (1473, 1491), False, 'import tempfile\n'), ((1640, 1651), 'tvm.te.var', 'te.var', (['"""n"""'], {}), "('n')\n", (1646, 1651), False, 'from tvm import te\n'), ((1667, 1700), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""ph_a"""'}), "((n,), name='ph_a')\n", (1681, 1700), False, 'from tvm import te\n'), ((1716, 1749), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""ph_b"""'}), "((n,), name='ph_b')\n", (1730, 1749), False, 'from tvm import te\n'), ((1765, 1829), 'tvm.te.compute', 'te.compute', (['ph_a.shape', '(lambda i: ph_a[i] + ph_b[i])'], {'name': '"""ph_c"""'}), "(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')\n", (1775, 1829), False, 'from tvm import te\n'), ((1846, 1873), 'tvm.te.create_schedule', 'te.create_schedule', (['ph_c.op'], {}), '(ph_c.op)\n', (1864, 1873), False, 'from tvm import te\n'), 
((2093, 2156), 'tvm.build', 'tvm.build', (['sched', '[ph_a, ph_b, ph_c]', '"""cuda"""'], {'name': '"""vector_add"""'}), "(sched, [ph_a, ph_b, ph_c], 'cuda', name='vector_add')\n", (2102, 2156), False, 'import tvm\n'), ((2176, 2214), 'tempfile.mktemp', 'tempfile.mktemp', (['"""tvm_add_cuda_dll.so"""'], {}), "('tvm_add_cuda_dll.so')\n", (2191, 2214), False, 'import tempfile\n'), ((2401, 2425), 'tvm.contrib.tf_op.OpModule', 'tf_op.OpModule', (['lib_path'], {}), '(lib_path)\n', (2415, 2425), False, 'from tvm.contrib import tf_op\n'), ((2442, 2478), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '[4]'}), "('float32', shape=[4])\n", (2456, 2478), True, 'import tensorflow as tf\n'), ((2495, 2531), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '[4]'}), "('float32', shape=[4])\n", (2509, 2531), True, 'import tensorflow as tf\n'), ((2628, 2662), 'numpy.asarray', 'np.asarray', (['[6.0, 8.0, 10.0, 12.0]'], {}), '([6.0, 8.0, 10.0, 12.0])\n', (2638, 2662), True, 'import numpy as np\n'), ((3895, 3907), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3905, 3907), True, 'import tensorflow as tf\n'), ((3931, 3957), 'tvm.runtime.enabled', 'tvm.runtime.enabled', (['"""cpu"""'], {}), "('cpu')\n", (3950, 3957), False, 'import tvm\n'), ((4061, 4087), 'tvm.runtime.enabled', 'tvm.runtime.enabled', (['"""gpu"""'], {}), "('gpu')\n", (4080, 4087), False, 'import tvm\n'), ((1978, 2006), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (1992, 2006), False, 'from tvm import te\n'), ((2041, 2070), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (2055, 2070), False, 'from tvm import te\n'), ((2948, 2968), 'tensorflow.device', 'tf.device', (['tf_device'], {}), '(tf_device)\n', (2957, 2968), True, 'import tensorflow as tf\n'), ((3046, 3086), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output1', 'expect'], {}), '(output1, expect)\n', (3069, 
3086), True, 'import numpy as np\n'), ((3164, 3204), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output2', 'expect'], {}), '(output2, expect)\n', (3187, 3204), True, 'import numpy as np\n'), ((3282, 3322), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output3', 'expect'], {}), '(output3, expect)\n', (3305, 3322), True, 'import numpy as np\n'), ((3971, 4019), 'logging.info', 'logging.info', (['"""Test TensorFlow op on cpu kernel"""'], {}), "('Test TensorFlow op on cpu kernel')\n", (3983, 4019), False, 'import logging\n'), ((4101, 4149), 'logging.info', 'logging.info', (['"""Test TensorFlow op on gpu kernel"""'], {}), "('Test TensorFlow op on gpu kernel')\n", (4113, 4149), False, 'import logging\n'), ((2799, 2813), 'tensorflow.shape', 'tf.shape', (['left'], {}), '(left)\n', (2807, 2813), True, 'import tensorflow as tf\n'), ((3585, 3603), 'os.remove', 'os.remove', (['cpu_lib'], {}), '(cpu_lib)\n', (3594, 3603), False, 'import os\n'), ((3866, 3884), 'os.remove', 'os.remove', (['gpu_lib'], {}), '(gpu_lib)\n', (3875, 3884), False, 'import os\n'), ((2892, 2906), 'tensorflow.shape', 'tf.shape', (['left'], {}), '(left)\n', (2900, 2906), True, 'import tensorflow as tf\n')] |
import numpy as np
from id3 import id3
# PlayTennis toy dataset: columns are outlook, temperature, humidity,
# windy, and the play/no-play label (last column).
_rows = [
    (0, 2, 1, 0, 0),
    (0, 2, 1, 1, 0),
    (1, 2, 1, 0, 1),
    (2, 1, 1, 0, 1),
    (2, 0, 0, 0, 1),
    (2, 0, 0, 1, 0),
    (1, 0, 0, 1, 1),
    (0, 1, 1, 0, 0),
    (0, 0, 0, 0, 1),
    (2, 1, 0, 0, 1),
    (0, 1, 0, 1, 1),
    (1, 1, 1, 1, 1),
    (1, 2, 0, 0, 1),
    (2, 1, 1, 1, 0),
]
test_playtennis = np.array(_rows)
# Number of discrete values for each of the four feature columns.
test_nclasses = [3, 3, 2, 2]
# Split features from the class label and grow/print the decision tree.
data, target = test_playtennis[:, 0:4], test_playtennis[:, 4]
tree = id3(data, target, test_nclasses)
tree.show()
| [
"numpy.array",
"id3.id3"
] | [((59, 320), 'numpy.array', 'np.array', (['[[0, 2, 1, 0, 0], [0, 2, 1, 1, 0], [1, 2, 1, 0, 1], [2, 1, 1, 0, 1], [2, 0,\n 0, 0, 1], [2, 0, 0, 1, 0], [1, 0, 0, 1, 1], [0, 1, 1, 0, 0], [0, 0, 0, \n 0, 1], [2, 1, 0, 0, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 1], [1, 2, 0, 0, \n 1], [2, 1, 1, 1, 0]]'], {}), '([[0, 2, 1, 0, 0], [0, 2, 1, 1, 0], [1, 2, 1, 0, 1], [2, 1, 1, 0, 1\n ], [2, 0, 0, 0, 1], [2, 0, 0, 1, 0], [1, 0, 0, 1, 1], [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 1], [2, 1, 0, 0, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 1], [1,\n 2, 0, 0, 1], [2, 1, 1, 1, 0]])\n', (67, 320), True, 'import numpy as np\n'), ((533, 565), 'id3.id3', 'id3', (['data', 'target', 'test_nclasses'], {}), '(data, target, test_nclasses)\n', (536, 565), False, 'from id3 import id3\n')] |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MS-SSIM metrics for image diversity evaluation.
More details could be found from section 5.3:
https://arxiv.org/pdf/1710.08446.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from compare_gan.src import image_similarity
import numpy as np
from six.moves import range
import tensorflow as tf
logging = tf.logging
def get_metric_function(generated_images, num_batches):
  """Get a fn returning the ms ssim score for generated images.

  Args:
    generated_images: TF Tensor of shape [batch_size, dim, dim, 3] which
      evaluates to a batch of generated images. Should be in range [0..255].
    num_batches: Number of batches to evaluate; the MS-SSIM scores of all
      batches are averaged into the final result.

  Returns:
    eval_fn: a function which takes a session as an argument and returns the
      average ms ssim score among all the possible image pairs from
      generated_images.
  """
  batch_size = int(generated_images.get_shape()[0])
  assert batch_size > 1
  # Generate all possible image pairs from input set of imgs.
  # pair1/pair2 enumerate the full cross product: pair1 repeats the batch
  # block-wise, pair2 repeats it element-wise.
  pair1 = tf.tile(generated_images, [batch_size, 1, 1, 1])
  pair2 = tf.reshape(
      tf.tile(generated_images, [1, batch_size, 1, 1]), [
          batch_size * batch_size, generated_images.shape[1],
          generated_images.shape[2], generated_images.shape[3]
      ])
  # Compute the mean of the scores (but ignore the 'identical' images - which
  # should get 1.0 from the MultiscaleSSIM): subtract the batch_size diagonal
  # entries and divide by the number of off-diagonal pairs.
  score = tf.reduce_sum(image_similarity.MultiscaleSSIM(pair1,
                                                       pair2)) - batch_size
  score = tf.div(score, batch_size * batch_size - batch_size)
  # Define a function which wraps some session.run calls to generate a large
  # number of images and compute multiscale ssim metric on them.
  def eval_fn(session):
    """Function which wraps session.run calls to compute given metric."""
    logging.info("Computing MS-SSIM score...")
    scores = []
    for _ in range(num_batches):
      scores.append(session.run(score))
    result = np.mean(scores)
    return result
  return eval_fn
| [
"tensorflow.div",
"tensorflow.tile",
"numpy.mean",
"six.moves.range",
"compare_gan.src.image_similarity.MultiscaleSSIM"
] | [((1700, 1748), 'tensorflow.tile', 'tf.tile', (['generated_images', '[batch_size, 1, 1, 1]'], {}), '(generated_images, [batch_size, 1, 1, 1])\n', (1707, 1748), True, 'import tensorflow as tf\n'), ((2236, 2287), 'tensorflow.div', 'tf.div', (['score', '(batch_size * batch_size - batch_size)'], {}), '(score, batch_size * batch_size - batch_size)\n', (2242, 2287), True, 'import tensorflow as tf\n'), ((1777, 1825), 'tensorflow.tile', 'tf.tile', (['generated_images', '[1, batch_size, 1, 1]'], {}), '(generated_images, [1, batch_size, 1, 1])\n', (1784, 1825), True, 'import tensorflow as tf\n'), ((2605, 2623), 'six.moves.range', 'range', (['num_batches'], {}), '(num_batches)\n', (2610, 2623), False, 'from six.moves import range\n'), ((2679, 2694), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2686, 2694), True, 'import numpy as np\n'), ((2110, 2155), 'compare_gan.src.image_similarity.MultiscaleSSIM', 'image_similarity.MultiscaleSSIM', (['pair1', 'pair2'], {}), '(pair1, pair2)\n', (2141, 2155), False, 'from compare_gan.src import image_similarity\n')] |
import random
from kaggle_environments.envs.halite.helpers import *
import numpy as np
###################
# Helper Function #
###################
def index_to_position(index: int, size: int):
    """Map a flat index from observation.halite onto a 2-D board Point.

    The halite list is row-major from the top row of the board while
    Point's y axis grows upward, hence the vertical flip.
    """
    row, col = divmod(index, size)
    return Point(col, size - row - 1)
# TODO: refactor for vectorized calculation
# TODO: refactor for vectorized calculation
def cal_dis(x, y):
    """Return the Manhattan (L1) distance between two point-like vectors."""
    delta = abs(x - y)
    return sum(delta)
def estimate_gain(halite, dis, t, collect_rate=0.25, regen_rate=0.02):
    """Estimate the halite a ship can mine from a cell within t turns.

    Args:
        halite: current halite on the target cell.
        dis: travel distance (turns spent moving) to reach the cell.
        t: total turn budget.
        collect_rate: fraction of the cell mined per collecting turn.
        regen_rate: per-turn halite regeneration rate while travelling.

    Returns:
        Expected amount collected, or 0 when the cell cannot be reached
        in time.
    """
    # Unreachable within the budget: nothing can be collected.
    if dis >= t:
        return 0
    # The cell regenerates while the ship travels.
    grown = halite * (1 + regen_rate) ** max(0, dis - 1)
    # The remaining (t - dis) turns are spent collecting at collect_rate.
    return grown * (1 - (1 - collect_rate) ** (t - dis))
def unify_pos(pos, size):
    """Wrap a possibly out-of-range position onto the toroidal board.

    Example: with size = 5, Point(-2, -7) becomes Point(3, 3).
    """
    wrapped = pos % size
    return wrapped
def get_shorter_move(move, size):
    """Return the shorter of a move and its wrap-around equivalent.

    The board is a torus, so moving +3 on a size-5 board reaches the same
    cell as moving -2; pick whichever displacement has smaller magnitude.
    Example: size = 5, move = 3 -> -2 since abs(-2) < abs(3).
    """
    if move == 0:
        return 0
    # Wrapping the other way around the board covers (size - |move|) cells.
    wrap = move - size if move > 0 else move + size
    return min([move, wrap], key=abs)
#############
# Bot Class #
#############
class SilverBot:
    """Rule-based Halite bot.

    Each turn it rebuilds the Board from the raw observation, runs a local
    "radar" scan around units, and drives every ship through a small state
    machine stored in self.ship_state (EXPLORE / COLLECT / DEPOSIT /
    CONVERT). self.ship_next_pos tracks cells already claimed for the next
    turn to avoid self-collisions.
    """

    def __init__(self, obs, config):
        self.obs = obs
        self.config = config
        self.board = Board(obs, config)
        self.size = config.size
        self.me = self.board.current_player
        # Maps a unit (dx, dy) move to the corresponding ShipAction.
        self.SHIP_ACTION_DICT = {
            (1, 0): ShipAction.EAST,
            (-1, 0): ShipAction.WEST,
            (0, 1): ShipAction.NORTH,
            (0, -1): ShipAction.SOUTH,
            (0, 0): None,
        }
        # TODO: legacy
        self.halite_map = None
        self.unit_map = None
        # Per-unit radar scan results, keyed by unit id (filled by radar()).
        self.unit_radar = {}
        self.radar_params = {}
        # Ship id -> state string: EXPLORE / COLLECT / DEPOSIT / CONVERT.
        self.ship_state = {}
        # Cells claimed for next turn, used for collision avoidance.
        self.ship_next_pos = set()
        # NOTE(review): ship_wait_log is never written or read in this class.
        self.ship_wait_log = {}

    # TODO: legacy
    def get_map(self):
        """
        In the beginning of each turn, update halite & unit map.
        """
        # Rotate the halite map so that 2-D Point can be used as an array index
        self.halite_map = np.rot90(np.reshape(self.board.observation['halite'], (self.size, self.size)), k=3)
        # Initialize unit map with all zeros
        self.unit_map = np.zeros((self.size, self.size))
        # Encode units on the map: ally shipyard +2, ally ship +1,
        # enemy shipyard -2, enemy ship -1 (values accumulate per cell).
        for i, (_, shipyards, ships) in enumerate(self.board.observation['players']):
            if i == self.me.id:
                for index in shipyards.values():
                    self.unit_map[index_to_position(index, self.size)] += 2
                for index, _ in ships.values():
                    self.unit_map[index_to_position(index, self.size)] += 1
            else:
                for index in shipyards.values():
                    self.unit_map[index_to_position(index, self.size)] += -2
                for index, _ in ships.values():
                    self.unit_map[index_to_position(index, self.size)] += -1

    # TODO: refactor for efficiency
    def radar(self, unit: Union[Ship, Shipyard], dis: int = 2):
        """
        Radar Scanning for ship & shipyard.
        Gather information of [ally, enemy, halite, free halite].
        Note: free halite is available halite here, which is estimated gain given number of turns in free area.
        Args:
            unit: Ship or shipyard
            dis: Manhattan Distance for radar scanning
        """
        pos = unit.position
        halite, free_halite = {}, {}
        ally_ship, ally_shipyard = [], []
        enemy_ship, enemy_shipyard = [], []
        # Start scanning every cell within Manhattan distance `dis` of pos.
        for x in range(-dis, dis + 1):
            for y in range(abs(x) - dis, dis - abs(x) + 1):
                scan_pos = unify_pos(pos + (x, y), self.size)
                scan_cell = self.board[scan_pos]
                halite[scan_pos] = scan_cell.halite
                if scan_cell.ship:
                    if scan_cell.ship.player == self.me:
                        if scan_pos != pos:
                            ally_ship.append(scan_cell.position)
                        else:
                            # scan_pos == pos, add ship current position into free_halite.
                            free_halite[scan_pos] = estimate_gain(scan_cell.halite, dis=0, t=dis + 1)
                    else:
                        enemy_ship.append(scan_cell.position)
                        # Enemy ship with rich halite is considered as free_halite
                        # (its cargo would be captured on collision).
                        if isinstance(unit, Ship) and scan_cell.ship.halite > unit.halite:
                            free_halite[scan_pos] = estimate_gain(
                                scan_cell.halite, dis=0, t=dis + 1) + scan_cell.ship.halite
                elif scan_cell.shipyard:
                    if scan_cell.shipyard.player == self.me:
                        ally_shipyard.append(scan_cell.position)
                    else:
                        enemy_shipyard.append(scan_cell.position)
                else:
                    # Cell is empty, calculate estimated halite gain for (dis + 1) turns.
                    free_halite[scan_pos] = estimate_gain(scan_cell.halite, dis=cal_dis(pos, scan_pos), t=dis + 1)
        self.unit_radar[unit.id] = {
            'dis': dis,
            'halite': halite,
            # Note: Different with BronzeBot, the value is float instead of list.
            'free_halite': free_halite,
            'ally_ship': ally_ship,
            'enemy_ship': enemy_ship,
            'ally_shipyard': ally_shipyard,
            'enemy_shipyard': enemy_shipyard,
        }

    def navigate(self, ship: Ship, des: Point):
        """
        Navigate ship to destination, give out optimal action for current turn.
        Args:
            ship: Ship.
            des: destination position.
        """
        # There are actually 4 different paths, find out the shortest one.
        move_x, move_y = unify_pos(des, self.size) - ship.position
        move_x, move_y = get_shorter_move(move_x, self.size), get_shorter_move(move_y, self.size)
        candidate_move = []
        dangerous_move = []
        wait_move = []
        # Classify the one-step x and y moves toward the destination.
        for move in [(np.sign(move_x), 0), (0, np.sign(move_y))]:
            if move != (0, 0):
                pos_access = self.case_analysis(ship, move)
                if pos_access == 'MOVE':
                    candidate_move.append(move)
                elif pos_access == 'DETOUR':
                    dangerous_move.append(move)
                else:
                    wait_move.append(move)
        # Case 1: Randomly choose an action in candidate_move.
        # Case 2: Immediately make detour given dangerous_move.
        # Case 3: Add WAIT order in candidate_move, so the ship has probability to detour or wait.
        if candidate_move:
            ship.next_action = self.SHIP_ACTION_DICT[random.choice(candidate_move)]
        elif dangerous_move:
            self.make_detour(ship, dangerous_move, wait_prob=0)
        else:
            self.make_detour(ship, wait_move, wait_prob=0.5)

    def make_detour(self, ship, not_move_list, wait_prob: float = 0):
        """
        Strategy_1: Randomly assign the ship with an available move action excluding from not_move_list.
        Strategy_2(New): If there's no such move action and wait_prob = 0, check if ship.halite > ConvertCost.
            If so then CONVERT, else leave the ship WAIT.
        """
        candidate_move = []
        for move in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
            if move not in not_move_list:
                if self.case_analysis(ship, move) == 'MOVE':
                    candidate_move.append(move)
        # With wait_prob == 0.5, duplicate the candidates so that an implicit
        # WAIT ((0, 0)) is chosen with probability one half.
        if wait_prob == 0.5:
            candidate_move += [(0, 0)] * len(candidate_move)
        elif wait_prob != 0:
            raise ValueError('Invalid wait_prob value, only 0 or 0.5 is allowed.')
        # Randomly choose a move order for ship.
        if candidate_move:
            ship.next_action = self.SHIP_ACTION_DICT[random.choice(candidate_move)]
        # If not able to move, then CONVERT.
        elif wait_prob == 0 and ship.halite >= self.config.convertCost:
            ship.next_action = ShipAction.CONVERT
            self.ship_state[ship.id] = 'CONVERT'

    def case_analysis(self, ship, move) -> str:
        """
        Check if ship can move to next_pos.
        Args:
            ship: Ship
            move: Tuple
        Returns: 'MOVE' if next_pos is accessible, 'DETOUR' if it is
            dangerous (stronger enemy ship / enemy shipyard), else 'WAIT'.
        """
        next_pos = unify_pos(ship.position + move, self.size)
        next_cell = self.board[next_pos]
        # Basic condition
        # Check next_pos current occupation condition
        cell_condition_1 = next_cell.ship is not None
        cell_condition_2 = next_cell.shipyard is not None
        # DETOUR: enemy ship carrying no more halite than ours (would win a
        # collision), or an enemy shipyard.
        detour_case_1 = cell_condition_1 and next_cell.ship.player != self.me and next_cell.ship.halite <= ship.halite
        detour_case_2 = cell_condition_2 and next_cell.shipyard.player != self.me
        detour_condition = detour_case_1 or detour_case_2
        # MOVE
        # Check if there's any nearby enemy ship for next_pos
        safe_condition = self.find_close_enemy(ship, dis=1, pos=next_pos) == []
        # Check if next_pos is accessible next round
        next_condition = next_cell.position not in self.ship_next_pos
        # Move cases
        move_case_1 = not cell_condition_1 and not cell_condition_2
        move_case_2 = cell_condition_1 and not cell_condition_2 and next_cell.ship.player != self.me and next_cell.ship.halite > ship.halite
        move_case_3 = not cell_condition_1 and cell_condition_2 and next_cell.shipyard.player == self.me
        # Occupying ship has already been assigned an action this turn,
        # so the cell is expected to be vacated (or the ship converts).
        move_case_4 = cell_condition_1 and next_cell.ship.next_action is not None
        move_cases = (move_case_1 or move_case_2 or move_case_3 or move_case_4)
        move_condition = move_cases and safe_condition and next_condition
        if detour_condition:
            return 'DETOUR'
        elif move_condition:
            return 'MOVE'
        else:
            return 'WAIT'

    def course_reversal(self, ship: Ship):
        """
        Command function for DEPOSIT ship navigation: steer toward the
        nearest shipyard (or a ship that is converting if none exist yet).
        """
        if self.me.shipyards:
            nearest_shipyard = min(self.me.shipyards, key=lambda x: cal_dis(ship.position, x.position))
        else:
            # No shipyard yet: head for a ship that was ordered to CONVERT.
            shipyards = [ship for ship in self.me.ships if self.ship_state.get(ship.id) == 'CONVERT']
            nearest_shipyard = min(shipyards, key=lambda x: cal_dis(ship.position, x.position))
        self.navigate(ship, nearest_shipyard.position)

    def find_close_enemy(self, ship: Ship, dis: int = 1, pos: Point = None) -> list:
        """
        Find dangerous enemy ship in given distance.
        Args:
            ship: Ship
            dis: Int, Default = 1. The distance of security_check.
            pos: Point, Default = None. If is given, then take pos as the security check center.
        Returns: List of close enemy, if clear then return an empty list.
        """
        radar = self.unit_radar[ship.id]
        close_enemy = []
        if not pos:
            pos = ship.position
        for enemy_pos in radar['enemy_ship']:
            enemy_ship = self.board[enemy_pos]
            # Only enemies carrying no more halite than us are a threat.
            if 0 < cal_dis(pos, enemy_pos) <= dis and ship.halite >= enemy_ship.halite:
                close_enemy.append(enemy_pos)
        return close_enemy

    def explore_command(self, ship: Ship, radar: dict, deposit_halite: int = 500,
                        security_dis: int = 1, convert_sum: float = 1000):
        """
        Command function for EXPLORE.
        Strategy_1: if ship state is EXPLORE, navigate to the position with max free halite.
        Strategy_2: if ship is in the max free halite position, turn EXPLORE to COLLECT.
        Strategy_3(New): if ship radar area free halite is rich, and there's not ally shipyard nearby then CONVERT.
        """
        # Sum up radar area halite excluding ship current cell.
        halite_sum = np.sum(list(radar['halite'].values())) - ship.cell.halite
        # Check if this area is rich and hasn't been developed (there's no shipyard in 4 distance area).
        if halite_sum >= convert_sum and all(
                [cal_dis(ship.position, shipyard.position) > 4 for shipyard in self.me.shipyards]):
            ship.next_action = ShipAction.CONVERT
            self.ship_state[ship.id] = 'CONVERT'
        else:
            max_free_halite = np.max(list(radar['free_halite'].values()))
            # Check if ship has arrived max free halite position
            if radar['free_halite'][ship.position] == max_free_halite:
                # Change ship state, ship.next_action = None
                self.ship_state[ship.id] = 'COLLECT'
            else:
                # If there's no halite, expand radar distance
                if max_free_halite == 0:
                    self.ship_command(ship, radar['dis'] + 1, deposit_halite, security_dis, convert_sum)
                else:
                    candidate = []
                    for pos, free_halite in radar['free_halite'].items():
                        if free_halite == max_free_halite:
                            candidate.append(pos)
                    # Randomly choose a destination from candidate
                    des = random.choice(candidate)
                    self.navigate(ship, des)

    def ship_command(self, ship: Ship, radar_dis: int = 2, deposit_halite: int = 500,
                     security_dis: int = 1, convert_sum: float = 1000):
        """
        For each turn, update action of each ship.
        Args:
            ship: Ship
            radar_dis: The radar scan distance of ship.
            deposit_halite: The threshold halite value for ship to hold.
            security_dis: The distance for security check.
            convert_sum: The threshold of EXPLORE ship to CONVERT to shipyard.
        """
        # Before giving action, do radar first.
        self.radar(ship, radar_dis)
        radar = self.unit_radar[ship.id]
        # Assign EXPLORE to new ship
        if ship.id not in self.ship_state:
            self.ship_state[ship.id] = 'EXPLORE'
        # DEPOSIT
        # Strategy_1: if ship is in a shipyard, and ship.halite is 0, turn DEPOSIT to EXPLORE.
        # Strategy_2: if ship state is DEPOSIT, navigate to nearest shipyard.
        # Strategy_3: if ship halite is lower than deposit_halite and radar is clear, turn DEPOSIT to EXPLORE.
        if self.ship_state[ship.id] == 'DEPOSIT':
            # If ship has deposited halite to shipyard, assign EXPLORE to ship.
            if ship.cell.shipyard and ship.halite == 0:
                self.ship_state[ship.id] = 'EXPLORE'
                self.ship_command(ship, radar_dis, deposit_halite, security_dis)
            else:
                # Collect enough halite, back to shipyard.
                if ship.halite >= deposit_halite:
                    self.course_reversal(ship)
                else:
                    # Clear, ship back to EXPLORE.
                    if not self.find_close_enemy(ship, security_dis):
                        self.ship_state[ship.id] = 'EXPLORE'
                        self.ship_command(ship, radar_dis, deposit_halite, security_dis)
                    else:
                        # Enemy is nearby, stick to DEPOSIT ship state.
                        self.course_reversal(ship)
        # EXPLORE
        elif self.ship_state[ship.id] == 'EXPLORE':
            if ship.halite >= deposit_halite:
                self.ship_state[ship.id] = 'DEPOSIT'
                self.ship_command(ship, radar_dis, deposit_halite, security_dis)
            else:
                self.explore_command(ship, radar, deposit_halite, security_dis, convert_sum)
        # COLLECT
        # Strategy_1: if ship halite reaches deposit_halite, turn COLLECT to DEPOSIT
        # Strategy_2: if enemy ship shows in radar, turn COLLECT TO DEPOSIT
        elif self.ship_state[ship.id] == 'COLLECT':
            if ship.halite >= deposit_halite:
                self.ship_state[ship.id] = 'DEPOSIT'
                self.ship_command(ship, radar_dis, deposit_halite, security_dis)
            else:
                if not self.find_close_enemy(ship, security_dis):
                    self.explore_command(ship, radar, deposit_halite, security_dis, convert_sum)
                else:
                    self.ship_state[ship.id] = 'DEPOSIT'
                    self.course_reversal(ship)

    def spawn_command(self, max_num_ship):
        """
        Command function for shipyard to SPAWN ship.
        Strategy_1(New): Sort empty_shipyard list so that spawning from richest shipyard.
        Strategy_2(New): Dynamically control the max_num_ship, keep me holding the most number of ships in the game.
        Args:
            max_num_ship: The upper limit of ships.
        """
        # Gather all empty shipyard and sort by radar area's free_halite sum value.
        empty_shipyard = []
        for shipyard in self.me.shipyards:
            if not shipyard.cell.ship:
                empty_shipyard.append(shipyard)
                self.radar(shipyard, dis=2)
        # Ascending sort; pop() below therefore takes the richest first.
        empty_shipyard.sort(key=lambda x: np.sum(
            list(self.unit_radar[x.id]['free_halite'].values())
        ))
        # Dynamically set up the max_num_ship, keep me having the same number of ship with TOP 1 player.
        max_num_ship = max(max_num_ship, len(max(self.obs.players, key=lambda x: x[0])[-1]))
        new_ship = 0
        # Spawn Condition:
        # 1. There are available empty shipyards.
        # 2. Current and next turn ship number is lower than max_num_ship.
        # 3. Player's halite is more than Spawn Cost.
        while len(empty_shipyard) > 0 and len(self.me.ships) + new_ship < max_num_ship and self.me.halite > self.config.spawnCost:
            shipyard = empty_shipyard.pop()
            shipyard.next_action = ShipyardAction.SPAWN
            new_ship += 1
            # Add new ship position into self.ship_next_pos
            self.ship_next_pos.add(shipyard.position)

    def convert_base_command(self):
        """
        Command function for ship to CONVERT to shipyard. This is base strategy to ensure there's always at least one
        shipyard.
        Strategy: if there's no shipyard, randomly pick a ship with min cell halite to convert.
        """
        if not self.me.shipyards:
            min_cell_halite = np.min([ship.cell.halite for ship in self.me.ships])
            ship_candidate = [ship for ship in self.me.ships if ship.cell.halite == min_cell_halite]
            convert_ship = random.choice(ship_candidate)
            convert_ship.next_action = ShipAction.CONVERT
            self.ship_state[convert_ship.id] = 'CONVERT'

    def update_ship_next_pos(self, ship):
        """
        Update self.ship_next_pos by ship.next_action for next turn.
        """
        pos = ship.position
        if ship.next_action is None:
            # Ship waits: it keeps occupying its current cell.
            self.ship_next_pos.add(pos)
        elif ship.next_action != ShipAction.CONVERT:
            if ship.next_action == ShipAction.NORTH:
                next_pos = pos + (0, 1)
            elif ship.next_action == ShipAction.SOUTH:
                next_pos = pos + (0, -1)
            elif ship.next_action == ShipAction.WEST:
                next_pos = pos + (-1, 0)
            else:
                next_pos = pos + (1, 0)
            self.ship_next_pos.add(unify_pos(next_pos, self.size))

    def play(self, radar_dis=2, deposit_halite=500, security_dis=1, convert_sum: float = 1000, max_ship=5):
        """
        Main Function
        Regular flow: SPAWN -> CONVERT -> SHIP MOVE.
        Ending case: CONVERT all ships with enough halite.
        Returns: dict of actions for the Halite engine (self.me.next_actions).
        """
        # print('MY TURN {}'.format(self.board.observation['step']))
        # Strategy: if the current turn is 398 (last turn is 399), make all ships with enough halite CONVERT.
        if self.obs.step == 398:
            for ship in self.me.ships:
                if ship.halite >= self.config.convertCost:
                    ship.next_action = ShipAction.CONVERT
        else:
            self.spawn_command(max_ship)
            self.convert_base_command()
            for ship in self.me.ships:
                # print('-- command {}'.format(ship.id))
                self.ship_command(ship, radar_dis, deposit_halite, security_dis, convert_sum)
                self.update_ship_next_pos(ship)
                # print('---- ship state: {}'.format(self.ship_state[ship.id]))
                # print('---- ship next action: {}'.format(ship.next_action))
                # print('---- ship halite: {}'.format(ship.halite))
        return self.me.next_actions
############
# Launcher #
############
def agent(obs, config):
    """Entry point invoked by the Halite engine once per turn."""
    return SilverBot(obs, config).play(
        radar_dis=2, deposit_halite=300, security_dis=1, convert_sum=1500, max_ship=20
    )
| [
"random.choice",
"numpy.reshape",
"numpy.zeros",
"numpy.sign",
"numpy.min"
] | [((2795, 2827), 'numpy.zeros', 'np.zeros', (['(self.size, self.size)'], {}), '((self.size, self.size))\n', (2803, 2827), True, 'import numpy as np\n'), ((2650, 2718), 'numpy.reshape', 'np.reshape', (["self.board.observation['halite']", '(self.size, self.size)'], {}), "(self.board.observation['halite'], (self.size, self.size))\n", (2660, 2718), True, 'import numpy as np\n'), ((18919, 18971), 'numpy.min', 'np.min', (['[ship.cell.halite for ship in self.me.ships]'], {}), '([ship.cell.halite for ship in self.me.ships])\n', (18925, 18971), True, 'import numpy as np\n'), ((19100, 19129), 'random.choice', 'random.choice', (['ship_candidate'], {}), '(ship_candidate)\n', (19113, 19129), False, 'import random\n'), ((6648, 6663), 'numpy.sign', 'np.sign', (['move_x'], {}), '(move_x)\n', (6655, 6663), True, 'import numpy as np\n'), ((6673, 6688), 'numpy.sign', 'np.sign', (['move_y'], {}), '(move_y)\n', (6680, 6688), True, 'import numpy as np\n'), ((7337, 7366), 'random.choice', 'random.choice', (['candidate_move'], {}), '(candidate_move)\n', (7350, 7366), False, 'import random\n'), ((8472, 8501), 'random.choice', 'random.choice', (['candidate_move'], {}), '(candidate_move)\n', (8485, 8501), False, 'import random\n'), ((13772, 13796), 'random.choice', 'random.choice', (['candidate'], {}), '(candidate)\n', (13785, 13796), False, 'import random\n')] |
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
import dynet as dy
import dynet_modules as dm
import numpy as np
import random
from utils import *
from data import flatten
from time import time
from modules.seq_encoder import SeqEncoder
from modules.bag_encoder import BagEncoder
from modules.tree_encoder import TreeEncoder
class TSPDecoder(Decoder):
    """Word-order decoder that casts linearization as a travelling-salesman
    problem: a biaffine scorer produces pairwise transition scores between
    tokens, which are turned into arc costs and solved with OR-Tools.

    When `full` is False the decoder linearizes each head's subtree
    bottom-up; when True it linearizes the whole sentence at once.
    """

    def __init__(self, args, model, full = False):
        super().__init__(args, model)
        self.train_input_key = 'input_tokens'
        self.train_output_key = 'gold_linearized_tokens'
        self.pred_input_key = 'input_tokens'
        self.pred_output_key = 'linearized_tokens'
        self.vec_key = 'tsp_vec'
        # Optional encoders, enabled via args.tree_vecs.
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder = SeqEncoder(self.args, self.model, 'tsp_seq')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder = BagEncoder(self.args, self.model, 'tsp_bag')
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder = TreeEncoder(self.args, self.model, 'tsp_tree')
        self.full = full
        # Two learned vectors acting as virtual start/end (depot) nodes.
        self.special = self.model.add_lookup_parameters((2, self.args.token_dim))
        self.biaffine = dm.BiaffineAttention(self.model, self.args.token_dim, self.args.hid_dim)
        # Subtree mode feeds each linearized domain back through BiLSTMs.
        if not full:
            self.f_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
            self.b_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
        self.log(f'Initialized <{self.__class__.__name__}>, params = {self.model.parameter_count()}')

    def encode(self, sent):
        # encode: run the enabled encoders, then sum their vectors into vec_key.
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder.encode(sent, 'linearized_tokens' if self.args.pred_seq else 'gold_linearized_tokens')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder.encode(sent)
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder.encode(sent, self.args.pred_tree)
        sum_vecs(sent, self.vec_key, ['feat', 'tsp_seq', 'tsp_bag', 'tsp_tree'])
        # print([t['lemma'] for t in sent['gold_linearized_tokens']])
        # print([t['lemma'] for t in sent.tokens])
        # exit()

    def decode(self, tokens, constraints=[], train_mode=False):
        """Score all token transitions and solve the resulting TSP.

        Returns a dict with 'loss' (dynet expression or 0) and 'seq'
        (tokens in predicted order).
        NOTE(review): `constraints=[]` is a mutable default argument; it is
        only iterated here so this is harmless, but None would be safer.
        """
        loss = 0
        errs = []
        # Index 0 is the virtual depot; token i maps to node i + 1.
        fr_vecs = [self.special[0]] + [t.vecs[self.vec_key] for t in tokens]
        to_vecs = [self.special[1]] + [t.vecs[self.vec_key] for t in tokens]
        score_mat = self.biaffine.attend(fr_vecs, to_vecs)
        scores = score_mat.npvalue()
        if train_mode:
            # Hinge loss pushing the gold transition path (by original_id)
            # to outscore alternatives in both row and column direction.
            oids = [0] + [t['original_id'] for t in tokens]
            gold_path = np.argsort(oids).tolist() + [0]
            trans_mat = dy.transpose(score_mat)
            for i, j in zip(gold_path, gold_path[1:]):
                errs.append(dy.hinge(score_mat[i], j))
                errs.append(dy.hinge(trans_mat[j], i))
            if errs:
                loss = dy.average(errs)
        # Convert (higher-is-better) scores to non-negative integer costs.
        costs = (1000 * (scores.max() - scores)).astype(int).tolist()
        # NOTE(review): guided_local_search is passed as the 3rd positional
        # argument, which binds to solve_tsp's unused beam_size parameter
        # rather than gls — verify whether GLS was meant to be enabled.
        solution = solve_tsp(costs, constraints, self.args.guided_local_search) # first is best
        if not solution:
            # self.log('no solution, remove constraints')
            solution = solve_tsp(costs, [], self.args.guided_local_search)
        assert solution != []
        # Drop the depot at both ends and map node ids back to tokens.
        seq = [tokens[i-1] for i in solution[1:-1]]
        return {'loss': loss,
                'seq': seq}

    def get_subtree_constraints(self, head):
        # Precedence pairs (i, j) in 1-based domain indices, following the
        # partial order stored in head['order'].
        lin_order = [head['domain'].index(t)+1 for t in head['order']]
        constraints = list(zip(lin_order, lin_order[1:]))
        return constraints

    def get_tree_constraints(self, sent):
        # Same as get_subtree_constraints but over the whole sentence.
        constraints = []
        tokens = sent[self.pred_input_key]
        for token in tokens:
            lin_order = [tokens.index(t)+1 for t in token['order']]
            constraints += list(zip(lin_order, lin_order[1:]))
        return constraints

    def predict(self, sent, pipeline=False):
        # NOTE(review): `pipeline` is accepted but unused in this method.
        self.encode(sent)
        if self.full:
            constraints = [] if self.args.no_lin_constraint else self.get_tree_constraints(sent)
            res = self.decode(sent[self.pred_input_key], constraints)
            sent['linearized_tokens'] = res['seq']
        else:
            # Linearize subtrees bottom-up so child orders are fixed first.
            for token in traverse_bottomup(sent.root):
                domain = ([token] + token['pdeps']) if self.args.pred_tree else token['domain']
                if len(domain) > 1:
                    constraints = [] if self.args.no_lin_constraint else self.get_subtree_constraints(token)
                    res = self.decode(domain, constraints)
                    token['linearized_domain'] = res['seq']
                    # add predicted sequential information
                    f_vec = self.f_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq']])[-1]
                    b_vec = self.b_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq'][::-1]])[-1]
                    token.vecs[self.vec_key] += (f_vec + b_vec)
                else:
                    token['linearized_domain'] = [token]
            # After the loop `token` is the root; flatten its nested domains.
            sent['linearized_tokens'] = flatten(token, 'linearized_domain')

    def train_one_step(self, sent):
        total = correct = loss = 0
        t0 = time()
        self.encode(sent)
        if self.full:
            constraints = [] if self.args.no_lin_constraint else self.get_tree_constraints(sent)
            res = self.decode(sent[self.train_input_key], constraints, True)
            loss = res['loss']
            total += 1
            sent['linearized_tokens'] = res['seq']
            correct += int(sent['linearized_tokens'] == sent['gold_linearized_tokens'] )
        else:
            for token in traverse_bottomup(sent.root):
                domain = ([token] + token['pdeps']) if self.args.pred_tree else token['domain']
                if len(domain) > 1:
                    constraints = [] if self.args.no_lin_constraint else self.get_subtree_constraints(token)
                    res = self.decode(domain, constraints, True)
                    token['linearized_domain'] = res['seq']
                    loss += res['loss']
                    total += 1
                    correct += int(token['linearized_domain'] == token['gold_linearized_domain'])
                    # add predicted sequential information
                    f_vec = self.f_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq']])[-1]
                    b_vec = self.b_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq'][::-1]])[-1]
                    token.vecs[self.vec_key] += (f_vec + b_vec)
                else:
                    token['linearized_domain'] = [token]
            sent['linearized_tokens'] = flatten(token, 'linearized_domain')
        loss_value = loss.value() if loss else 0
        return {'time': time()-t0,
                'loss': loss_value,
                'loss_expr': loss,
                'total': total,
                'correct': correct
                }

    def evaluate(self, sents):
        gold_seqs = [sent[self.train_output_key] for sent in sents]
        pred_seqs = [sent[self.pred_output_key] for sent in sents]
        pred_bleu = eval_all(gold_seqs, pred_seqs)
        # NOTE(review): leftover debug print of the first gold sequence.
        print([t['lemma'] for t in gold_seqs[0]])
        return pred_bleu
def solve_tsp(costs, constraints=[], beam_size=1, gls=False):
    """Solve a travelling-salesman-style ordering problem with OR-Tools routing.

    Args:
        costs: square matrix; costs[i][j] is the transition cost from node i to node j.
        constraints: iterable of (i, j) pairs meaning node i must be visited before node j.
        beam_size: accepted but not referenced in this body - TODO confirm caller expectation.
        gls: if True, enable guided local search as the metaheuristic.

    Returns:
        list of node indices in visiting order (starting from node 0),
        or [] if the solver found no assignment.
    """
    # single vehicle, depot at node 0
    manager = pywrapcp.RoutingIndexManager(len(costs), 1, 0)
    routing = pywrapcp.RoutingModel(manager)
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return costs[from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    solver = routing.solver()
    # linear order constraints
    if constraints:
        # the 'Order' dimension counts visited nodes, so CumulVar(i) is i's position
        order_callback_index = routing.RegisterUnaryTransitCallback(lambda x: 1) # always add 1
        routing.AddDimension(order_callback_index, 0, len(costs)+1, True, 'Order')
        order = routing.GetDimensionOrDie('Order')
        for i, j in constraints:
            solver.Add(order.CumulVar(i) < order.CumulVar(j))
    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.GLOBAL_CHEAPEST_ARC
    search_parameters.time_limit.seconds = 1
    search_parameters.solution_limit = 100
    search_parameters.log_search = False
    if gls:
        search_parameters.local_search_metaheuristic = routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
    # Solve the problem.
    assignment = routing.SolveWithParameters(search_parameters)
    if assignment:
        # walk the route from the start node to the end marker
        out = []
        index = routing.Start(0)
        while not routing.IsEnd(index):
            out.append(manager.IndexToNode(index))
            index = assignment.Value(routing.NextVar(index))
        out.append(manager.IndexToNode(index))
        return out
    else:
        return []
| [
"data.flatten",
"ortools.constraint_solver.pywrapcp.RoutingModel",
"modules.bag_encoder.BagEncoder",
"dynet_modules.BiaffineAttention",
"dynet.VanillaLSTMBuilder",
"ortools.constraint_solver.pywrapcp.DefaultRoutingSearchParameters",
"dynet.average",
"dynet.transpose",
"numpy.argsort",
"modules.tre... | [((7525, 7555), 'ortools.constraint_solver.pywrapcp.RoutingModel', 'pywrapcp.RoutingModel', (['manager'], {}), '(manager)\n', (7546, 7555), False, 'from ortools.constraint_solver import pywrapcp\n'), ((8537, 8578), 'ortools.constraint_solver.pywrapcp.DefaultRoutingSearchParameters', 'pywrapcp.DefaultRoutingSearchParameters', ([], {}), '()\n', (8576, 8578), False, 'from ortools.constraint_solver import pywrapcp\n'), ((1217, 1289), 'dynet_modules.BiaffineAttention', 'dm.BiaffineAttention', (['self.model', 'self.args.token_dim', 'self.args.hid_dim'], {}), '(self.model, self.args.token_dim, self.args.hid_dim)\n', (1237, 1289), True, 'import dynet_modules as dm\n'), ((5300, 5306), 'time.time', 'time', ([], {}), '()\n', (5304, 5306), False, 'from time import time\n'), ((802, 846), 'modules.seq_encoder.SeqEncoder', 'SeqEncoder', (['self.args', 'self.model', '"""tsp_seq"""'], {}), "(self.args, self.model, 'tsp_seq')\n", (812, 846), False, 'from modules.seq_encoder import SeqEncoder\n'), ((919, 963), 'modules.bag_encoder.BagEncoder', 'BagEncoder', (['self.args', 'self.model', '"""tsp_bag"""'], {}), "(self.args, self.model, 'tsp_bag')\n", (929, 963), False, 'from modules.bag_encoder import BagEncoder\n'), ((1038, 1084), 'modules.tree_encoder.TreeEncoder', 'TreeEncoder', (['self.args', 'self.model', '"""tsp_tree"""'], {}), "(self.args, self.model, 'tsp_tree')\n", (1049, 1084), False, 'from modules.tree_encoder import TreeEncoder\n'), ((1338, 1411), 'dynet.VanillaLSTMBuilder', 'dy.VanillaLSTMBuilder', (['(1)', 'self.args.token_dim', 'self.args.token_dim', 'model'], {}), '(1, self.args.token_dim, self.args.token_dim, model)\n', (1359, 1411), True, 'import dynet as dy\n'), ((1438, 1511), 'dynet.VanillaLSTMBuilder', 'dy.VanillaLSTMBuilder', (['(1)', 'self.args.token_dim', 'self.args.token_dim', 'model'], {}), '(1, self.args.token_dim, self.args.token_dim, model)\n', (1459, 1511), True, 'import dynet as dy\n'), ((2740, 2763), 'dynet.transpose', 'dy.transpose', 
(['score_mat'], {}), '(score_mat)\n', (2752, 2763), True, 'import dynet as dy\n'), ((5178, 5213), 'data.flatten', 'flatten', (['token', '"""linearized_domain"""'], {}), "(token, 'linearized_domain')\n", (5185, 5213), False, 'from data import flatten\n'), ((6814, 6849), 'data.flatten', 'flatten', (['token', '"""linearized_domain"""'], {}), "(token, 'linearized_domain')\n", (6821, 6849), False, 'from data import flatten\n'), ((2973, 2989), 'dynet.average', 'dy.average', (['errs'], {}), '(errs)\n', (2983, 2989), True, 'import dynet as dy\n'), ((6925, 6931), 'time.time', 'time', ([], {}), '()\n', (6929, 6931), False, 'from time import time\n'), ((2847, 2872), 'dynet.hinge', 'dy.hinge', (['score_mat[i]', 'j'], {}), '(score_mat[i], j)\n', (2855, 2872), True, 'import dynet as dy\n'), ((2902, 2927), 'dynet.hinge', 'dy.hinge', (['trans_mat[j]', 'i'], {}), '(trans_mat[j], i)\n', (2910, 2927), True, 'import dynet as dy\n'), ((2684, 2700), 'numpy.argsort', 'np.argsort', (['oids'], {}), '(oids)\n', (2694, 2700), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image
import sys
#This function will require that Unix directory naming convention is applied, Directories start with a capital letter
#whatOrgan = sys.argv[1] # The organ will be the first argument passed to the function
#isSemantic = sys.argv[2] # The boolean value determining whether it is a semantic network
#img = sys.argv[3] # The location of the image to be processed
#TODO: No error handling regarding the organ and image in question.
#This function will require that Unix directory naming convention is applied, Directories start with a capital letter
#whatOrgan = sys.argv[1] # The organ will be the first argument passed to the function
#isSemantic = sys.argv[2] # The boolean value determining whether it is a semantic network
#img = sys.argv[3] # The location of the image to be processed
#TODO: No error handling regarding the organ and image in question.
def driver(whatOrgan, isSemantic, img):
    """Classify a single image using the model trained for the given organ.

    Args:
        whatOrgan: name of the organ; selects the model directory '../<organ>/'.
        isSemantic: truthy selects the '<organ>_Model_Semantic.h5' model and
            returns the raw predict_classes output; falsy selects
            '<organ>_Model.h5' and returns a single int class label.
        img: path of the image file to classify.

    Returns:
        The predicted class array (semantic) or a single int label (non-semantic).
    """
    img_width = 200
    img_height = 200
    im = image.load_img(img, target_size=(img_width, img_height))
    x = image.img_to_array(im)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    if isSemantic:  # If it is semantic, the semantic network will be run
        model_path = '../' + whatOrgan + '/' + whatOrgan + '_Model_Semantic.h5'
        model = tf.keras.models.load_model(model_path)  # Load the model
        return model.predict_classes(images)
    # Else a normal network is applied
    model_path = '../' + whatOrgan + '/' + whatOrgan + '_Model.h5'
    model = tf.keras.models.load_model(model_path)  # Load the model
    # BUGFIX: take the first predicted label by indexing instead of the
    # fragile int(str(preds).strip('[').strip(']')) string parsing, which
    # breaks whenever more than one prediction is returned.
    return int(np.asarray(model.predict_classes(images)).flatten()[0])
| [
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.models.load_model",
"numpy.vstack",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((805, 861), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img'], {'target_size': '(img_width, img_height)'}), '(img, target_size=(img_width, img_height))\n', (819, 861), False, 'from tensorflow.keras.preprocessing import image\n'), ((870, 892), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (888, 892), False, 'from tensorflow.keras.preprocessing import image\n'), ((901, 926), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (915, 926), True, 'import numpy as np\n'), ((941, 955), 'numpy.vstack', 'np.vstack', (['[x]'], {}), '([x])\n', (950, 955), True, 'import numpy as np\n'), ((1158, 1190), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['file'], {}), '(file)\n', (1184, 1190), True, 'import tensorflow as tf\n'), ((1442, 1474), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['file'], {}), '(file)\n', (1468, 1474), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# plot de la marche aléatoire
from metropolis import metropolis
import numpy as np
import matplotlib.pyplot as plt
# model definition:
def model(param, x):
    """Linear model: param[0] is the slope, param[1] the intercept."""
    slope = param[0]
    intercept = param[1]
    return slope * x + intercept
# generate synthetic data (line y = 4.5x + 12 plus Gaussian noise):
x = np.linspace(-5, 15, 120)
y = 4.5 * x + 12 + 5 * np.random.randn(len(x))
# fit with Metropolis sampling:
p = metropolis(model, x, y, [5, 10], [0.1, 0.2], 5000, 500, 20)
# plot of the random walk
plt.figure()
plt.plot(p[:,0], p[:,1], 'x')
plt.title("{} points".format(p.shape[0]))
plt.xlabel("slope")
plt.ylabel("intercept")
plt.show()
# this plot makes the correlation between the parameters visible
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"metropolis.metropolis",
"matplotlib.pyplot.show"
] | [((249, 273), 'numpy.linspace', 'np.linspace', (['(-5)', '(15)', '(120)'], {}), '(-5, 15, 120)\n', (260, 273), True, 'import numpy as np\n'), ((336, 395), 'metropolis.metropolis', 'metropolis', (['model', 'x', 'y', '[5, 10]', '[0.1, 0.2]', '(5000)', '(500)', '(20)'], {}), '(model, x, y, [5, 10], [0.1, 0.2], 5000, 500, 20)\n', (346, 395), False, 'from metropolis import metropolis\n'), ((428, 440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (438, 440), True, 'import matplotlib.pyplot as plt\n'), ((441, 472), 'matplotlib.pyplot.plot', 'plt.plot', (['p[:, 0]', 'p[:, 1]', '"""x"""'], {}), "(p[:, 0], p[:, 1], 'x')\n", (449, 472), True, 'import matplotlib.pyplot as plt\n'), ((513, 532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""slope"""'], {}), "('slope')\n", (523, 532), True, 'import matplotlib.pyplot as plt\n'), ((533, 556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""intercept"""'], {}), "('intercept')\n", (543, 556), True, 'import matplotlib.pyplot as plt\n'), ((557, 567), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (565, 567), True, 'import matplotlib.pyplot as plt\n')] |
import copy
import operator
import os
import numpy as np
import wandb
import sys
from importlib import import_module
import keras
import keras.backend as K
class WandbCallback(keras.callbacks.Callback):
    """WandB Keras Callback.
    Automatically saves history and summary data. Optionally logs gradients, writes models,
    and saves example images.
    Optionally saves the best model while training.
    Optionally logs weights and gradients during training.
    """
    # NOTE(review): validation_data=[] and labels=[] are mutable default
    # arguments; they are only read here, but the shared-instance hazard
    # remains - consider None sentinels.
    def __init__(self, monitor='val_loss', verbose=0, mode='auto',
                 save_weights_only=False, log_weights=False, log_gradients=False,
                 save_model=True, training_data=None, validation_data=[],
                 labels=[], data_type="image"
                 ):
        """Constructor.
        # Arguments
            monitor: quantity to monitor.
            mode: one of {auto, min, max}.
                'min' - save model when monitor is minimized
                'max' - save model when monitor is maximized
                'auto' - try to guess when to save the model
            save_weights_only: if True, then only the model's weights will be
                saved (`model.save_weights(filepath)`), else the full model
                is saved (`model.save(filepath)`).
            save_model:
                True - save a model when monitor beats all previous epochs
                False - don't save models
            log_weights: if True save the weights in wandb.history
            log_gradients: if True log the training gradients in wandb.history
            training_data: tuple (X,y) needed for calculating gradients
            validation_data: numpy array of validation data
            data_type: the type of data we're saving, default "image"
            labels: list of labels
        """
        if wandb.run is None:
            raise wandb.Error(
                'You must call wandb.init() before WandbCallback()')
        self._validation_data = validation_data
        self.labels = labels
        self.data_type = data_type
        self.monitor = monitor
        self.verbose = verbose
        self.save_weights_only = save_weights_only
        # best model checkpoint lives inside the wandb run directory
        self.filepath = os.path.join(wandb.run.dir, 'model-best.h5')
        self.save_model = save_model
        self.log_weights = log_weights
        self.log_gradients = log_gradients
        self.training_data = training_data
        if self.training_data:
            if len(self.training_data) != 2:
                raise ValueError("training data must be a tuple of length two")
        # From Keras
        if mode not in ['auto', 'min', 'max']:
            print('WandbCallback mode %s is unknown, '
                  'fallback to auto mode.' % (mode))
            mode = 'auto'
        if mode == 'min':
            self.monitor_op = operator.lt
            self.best = float('inf')
        elif mode == 'max':
            self.monitor_op = operator.gt
            self.best = float('-inf')
        else:
            # auto: maximize accuracy-like metrics, minimize everything else
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = operator.gt
                self.best = float('-inf')
            else:
                self.monitor_op = operator.lt
                self.best = float('inf')
    def set_params(self, params):
        # called by Keras with the fit() parameters
        self.params = params
    def set_model(self, model):
        # called by Keras with the model being trained
        self.model = model
    def on_epoch_begin(self, epoch, logs=None):
        pass
    def on_epoch_end(self, epoch, logs=None):
        """Log history row, optional weights/gradients/images, and update summary."""
        # history
        row = copy.copy(wandb.run.history.row)
        row['epoch'] = epoch
        row.update(logs)
        if self.log_weights:
            weights_metrics = self._log_weights()
            row.update(weights_metrics)
        if self.log_gradients:
            gradients_metrics = self._log_gradients()
            row.update(gradients_metrics)
        if self.data_type == "image" and len(self._validation_data) > 0:
            wandb.run.history.row.update({"examples": self._log_images()})
        wandb.run.history.add(row)
        # summary
        self.current = logs.get(self.monitor)
        if self.current is None: # validation data wasn't set
            # print('Can save best model only with %s available, '
            #      'skipping.' % (self.monitor))
            wandb.run.summary.update(row)
            return
        copied = copy.copy(row)
        # NOTE(review): self.best is only advanced inside _save_model; when
        # save_model=False the threshold never moves, so the summary is
        # updated on every epoch that beats the *initial* best - confirm intended.
        if self.monitor_op(self.current, self.best):
            copied.pop('epoch')
            wandb.run.summary.update(copied)
            if self.save_model:
                self._save_model(epoch)
    def on_batch_begin(self, batch, logs=None):
        pass
    def on_batch_end(self, batch, logs=None):
        pass
    def on_train_begin(self, logs=None):
        pass
    def on_train_end(self, logs=None):
        pass
    def _log_images(self):
        """Return wandb.Image objects for 36 randomly chosen validation examples."""
        indices = np.random.choice(self._validation_data.shape[0], 36)
        test_data = self._validation_data[indices]
        labels = np.argmax(self.model.predict(test_data), axis=1)
        if len(self.labels) > 0:
            # map predicted indices to human-readable captions where possible
            captions = []
            for label in labels:
                try:
                    captions.append(self.labels[label])
                except IndexError:
                    captions.append(label)
        else:
            captions = labels
        return [wandb.Image(data, caption=captions[i]) for i, data in enumerate(test_data)]
    def _log_weights(self):
        """Return mean weight (and bias) per layer as a metrics dict."""
        metrics = {}
        for layer in self.model.layers:
            weights = layer.get_weights()
            if len(weights) == 1:
                metrics[layer.name] = np.mean(weights[0])
            elif len(weights) == 2:
                metrics[layer.name + ".weights-mean"] = np.mean(weights[0])
                metrics[layer.name + ".bias-mean"] = np.mean(weights[1])
        return metrics
    def _log_gradients(self):
        """Return mean/stddev of training gradients per trainable weight tensor."""
        if (not self.training_data):
            raise ValueError(
                "Need to pass in training data if logging gradients")
        X_train = self.training_data[0]
        y_train = self.training_data[1]
        metrics = {}
        weights = self.model.trainable_weights # weight tensors
        # filter down weights tensors to only ones which are trainable
        weights = [weight for weight in weights
                   if self.model.get_layer(weight.name.split('/')[0]).trainable]
        gradients = self.model.optimizer.get_gradients(
            self.model.total_loss, weights)  # gradient tensors
        input_tensors = [self.model.inputs[0],  # input data
                         # how much to weight each sample by
                         self.model.sample_weights[0],
                         self.model.targets[0],  # labels
                         K.learning_phase(),  # train or test mode
                         ]
        get_gradients = K.function(inputs=input_tensors, outputs=gradients)
        # evaluate gradients on the full training set with uniform sample weights
        grads = get_gradients([X_train, np.ones(len(y_train)), y_train])
        for (weight, grad) in zip(weights, grads):
            metrics[weight.name.split(':')[0] + ".grad-mean"] = np.mean(grad)
            metrics[weight.name.split(':')[0] + ".grad-stddev"] = np.std(grad)
        return metrics
    def _save_model(self, epoch):
        """Record the new best value and write the model checkpoint to disk."""
        if self.verbose > 0:
            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                  ' saving model to %s'
                  % (epoch, self.monitor, self.best,
                     self.current, self.filepath))
        self.best = self.current
        try:
            if self.save_weights_only:
                self.model.save_weights(self.filepath, overwrite=True)
            else:
                self.model.save(self.filepath, overwrite=True)
        except ImportError:
            # h5py is an optional dependency of keras model saving
            print("Warning: Can't save model without h5py installed")
            self.save_model = False
| [
"numpy.mean",
"wandb.Image",
"numpy.random.choice",
"keras.backend.learning_phase",
"os.path.join",
"wandb.run.history.add",
"wandb.Error",
"wandb.run.summary.update",
"numpy.std",
"keras.backend.function",
"copy.copy"
] | [((2204, 2248), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""model-best.h5"""'], {}), "(wandb.run.dir, 'model-best.h5')\n", (2216, 2248), False, 'import os\n'), ((3534, 3566), 'copy.copy', 'copy.copy', (['wandb.run.history.row'], {}), '(wandb.run.history.row)\n', (3543, 3566), False, 'import copy\n'), ((4026, 4052), 'wandb.run.history.add', 'wandb.run.history.add', (['row'], {}), '(row)\n', (4047, 4052), False, 'import wandb\n'), ((4401, 4415), 'copy.copy', 'copy.copy', (['row'], {}), '(row)\n', (4410, 4415), False, 'import copy\n'), ((4894, 4946), 'numpy.random.choice', 'np.random.choice', (['self._validation_data.shape[0]', '(36)'], {}), '(self._validation_data.shape[0], 36)\n', (4910, 4946), True, 'import numpy as np\n'), ((6889, 6940), 'keras.backend.function', 'K.function', ([], {'inputs': 'input_tensors', 'outputs': 'gradients'}), '(inputs=input_tensors, outputs=gradients)\n', (6899, 6940), True, 'import keras.backend as K\n'), ((1871, 1935), 'wandb.Error', 'wandb.Error', (['"""You must call wandb.init() before WandbCallback()"""'], {}), "('You must call wandb.init() before WandbCallback()')\n", (1882, 1935), False, 'import wandb\n'), ((4334, 4363), 'wandb.run.summary.update', 'wandb.run.summary.update', (['row'], {}), '(row)\n', (4358, 4363), False, 'import wandb\n'), ((4513, 4545), 'wandb.run.summary.update', 'wandb.run.summary.update', (['copied'], {}), '(copied)\n', (4537, 4545), False, 'import wandb\n'), ((5371, 5409), 'wandb.Image', 'wandb.Image', (['data'], {'caption': 'captions[i]'}), '(data, caption=captions[i])\n', (5382, 5409), False, 'import wandb\n'), ((6795, 6813), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (6811, 6813), True, 'import keras.backend as K\n'), ((7131, 7144), 'numpy.mean', 'np.mean', (['grad'], {}), '(grad)\n', (7138, 7144), True, 'import numpy as np\n'), ((7211, 7223), 'numpy.std', 'np.std', (['grad'], {}), '(grad)\n', (7217, 7223), True, 'import numpy as np\n'), ((5651, 5670), 
'numpy.mean', 'np.mean', (['weights[0]'], {}), '(weights[0])\n', (5658, 5670), True, 'import numpy as np\n'), ((5763, 5782), 'numpy.mean', 'np.mean', (['weights[0]'], {}), '(weights[0])\n', (5770, 5782), True, 'import numpy as np\n'), ((5836, 5855), 'numpy.mean', 'np.mean', (['weights[1]'], {}), '(weights[1])\n', (5843, 5855), True, 'import numpy as np\n')] |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import six
import runtime.temp_file as temp_file
import xgboost as xgb
from runtime import db
from runtime.dbapi.paiio import PaiIOConnection
from runtime.feature.compile import compile_ir_feature_columns
from runtime.feature.derivation import get_ordered_field_descs
from runtime.model import EstimatorType, Model
from runtime.pai.pai_distributed import define_tf_flags
from runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset
from runtime.xgboost.feature_column import ComposedColumnTransformer
FLAGS = define_tf_flags()
def predict(datasource,
            select,
            result_table,
            result_column_names,
            train_label_idx,
            model,
            extra_result_cols=[],
            pai_table=None):
    """Run batch prediction with a saved XGBoost model and store the results.

    Args:
        datasource (str): connection string for the database holding the data.
        select (str): SQL statement selecting the prediction input rows.
        result_table (str): name of the table to write predictions into.
        result_column_names (list[str]): column names of the result table.
        train_label_idx (int): index of the trained label column inside
            result_column_names (that column is skipped when copying features).
        model: a runtime.model.Model instance, or (str) the name of a model
            to load from the database.
        extra_result_cols (list): NOTE(review): accepted but never referenced
            in this body - confirm whether it should be used.
        pai_table (str): when set, input is read from this PAI table via
            PaiIOConnection instead of `datasource`.
    """
    bst = xgb.Booster()
    if isinstance(model, six.string_types):
        # NOTE(typhoonzero): must run Model.load_from_db in a temp
        # directory, calling pyodps in current directory on PAI
        # workers will cause paiio fails.
        with temp_file.TemporaryDirectory(as_cwd=True):
            model = Model.load_from_db(datasource, model)
            bst.load_model("my_model")
    else:
        assert isinstance(model,
                          Model), "not supported model type %s" % type(model)
        bst.load_model("my_model")
    model_params = model.get_meta("attributes")
    fc_map_ir = model.get_meta("features")
    feature_columns = compile_ir_feature_columns(fc_map_ir,
                                                 EstimatorType.XGBOOST)
    field_descs = get_ordered_field_descs(fc_map_ir)
    feature_column_names = [fd.name for fd in field_descs]
    feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True))
                          for fd in field_descs])
    transform_fn = ComposedColumnTransformer(
        feature_column_names, *feature_columns["feature_columns"])
    is_pai = True if pai_table else False
    if is_pai:
        conn = PaiIOConnection.from_table(pai_table)
    else:
        conn = db.connect_with_data_source(datasource)
    with temp_file.TemporaryDirectory() as tmp_dir_name:
        pred_fn = os.path.join(tmp_dir_name, "predict.txt")
        raw_data_dir = os.path.join(tmp_dir_name, "predict_raw_dir")
        # materialize the input as DMatrix batches (10000 rows each)
        dpred = xgb_dataset(datasource=datasource,
                            fn=pred_fn,
                            dataset_sql=select,
                            feature_metas=feature_metas,
                            feature_column_names=feature_column_names,
                            label_meta=None,
                            cache=True,
                            batch_size=10000,
                            transform_fn=transform_fn,
                            raw_data_dir=raw_data_dir,
                            is_pai=is_pai,
                            pai_table=pai_table,
                            pai_single_file=True,
                            feature_column_code=fc_map_ir)
        print("Start predicting XGBoost model...")
        for idx, pred_dmatrix in enumerate(dpred):
            # locate the raw feature dump matching this batch
            if is_pai:
                feature_file_name = os.path.join(tmp_dir_name,
                                                 "predict.txt.raw")
            else:
                feature_file_name = os.path.join(
                    tmp_dir_name, "predict_raw_dir/predict.txt_%d" % idx)
            preds = _calc_predict_result(bst, pred_dmatrix, model_params)
            _store_predict_result(preds, result_table, result_column_names,
                                  train_label_idx, feature_file_name, conn)
        print("Done predicting. Predict table : %s" % result_table)
    conn.close()
def _calc_predict_result(bst, dpred, model_params):
"""
Calculate the prediction result.
Args:
bst: the XGBoost booster object.
dpred: the XGBoost DMatrix input data to predict.
model_params (dict): the XGBoost model parameters.
Returns:
The prediction result.
"""
preds = bst.predict(dpred)
preds = np.array(preds)
# TODO(yancey1989): should save train_params and model_params
# not only on PAI submitter
# TODO(yancey1989): output the original result for various
# objective function.
obj = model_params.get("objective", "")
# binary:hinge output class labels
if obj == "binary:logistic":
preds = (preds > 0.5).astype(int)
elif obj == "multi:softprob":
preds = np.argmax(np.array(preds), axis=1)
elif obj == "multi:softmax":
# multi:softmax output class labels
# Need to convert to int. Otherwise, the
# table writer of MaxCompute would cause
# error because of writing float values.
preds = np.array(preds).astype(int)
# TODO(typhoonzero): deal with binary:logitraw when needed.
return preds
def _store_predict_result(preds, result_table, result_column_names,
                          train_label_idx, feature_file_name, conn):
    """
    Save the prediction result in the table.

    Args:
        preds: the prediction result to save.
        result_table (str): the result table name.
        result_column_names (list[str]): the result column names.
        train_label_idx (int): the index where the trained label is inside
            result_column_names.
        feature_file_name (str): the file path where the feature dumps.
        conn: the database connection object.

    Returns:
        None.
    """
    with db.buffered_db_writer(conn, result_table, result_column_names) as w:
        with open(feature_file_name, "r") as feature_file_read:
            line_no = 0
            for line in feature_file_read.readlines():
                if not line:
                    break
                # copy every feature field except the trained label column
                row = [
                    item for i, item in enumerate(line.strip().split(
                        DMATRIX_FILE_SEP)) if i != train_label_idx
                ]
                # append this row's prediction as the final column
                row.append(str(preds[line_no]))
                w.write(row)
                line_no += 1
| [
"runtime.feature.compile.compile_ir_feature_columns",
"runtime.dbapi.paiio.PaiIOConnection.from_table",
"runtime.xgboost.dataset.xgb_dataset",
"os.path.join",
"runtime.db.connect_with_data_source",
"runtime.pai.pai_distributed.define_tf_flags",
"runtime.temp_file.TemporaryDirectory",
"numpy.array",
... | [((1146, 1163), 'runtime.pai.pai_distributed.define_tf_flags', 'define_tf_flags', ([], {}), '()\n', (1161, 1163), False, 'from runtime.pai.pai_distributed import define_tf_flags\n'), ((1409, 1422), 'xgboost.Booster', 'xgb.Booster', ([], {}), '()\n', (1420, 1422), True, 'import xgboost as xgb\n'), ((2063, 2123), 'runtime.feature.compile.compile_ir_feature_columns', 'compile_ir_feature_columns', (['fc_map_ir', 'EstimatorType.XGBOOST'], {}), '(fc_map_ir, EstimatorType.XGBOOST)\n', (2089, 2123), False, 'from runtime.feature.compile import compile_ir_feature_columns\n'), ((2191, 2225), 'runtime.feature.derivation.get_ordered_field_descs', 'get_ordered_field_descs', (['fc_map_ir'], {}), '(fc_map_ir)\n', (2214, 2225), False, 'from runtime.feature.derivation import get_ordered_field_descs\n'), ((2425, 2514), 'runtime.xgboost.feature_column.ComposedColumnTransformer', 'ComposedColumnTransformer', (['feature_column_names', "*feature_columns['feature_columns']"], {}), "(feature_column_names, *feature_columns[\n 'feature_columns'])\n", (2450, 2514), False, 'from runtime.xgboost.feature_column import ComposedColumnTransformer\n'), ((4667, 4682), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (4675, 4682), True, 'import numpy as np\n'), ((2592, 2629), 'runtime.dbapi.paiio.PaiIOConnection.from_table', 'PaiIOConnection.from_table', (['pai_table'], {}), '(pai_table)\n', (2618, 2629), False, 'from runtime.dbapi.paiio import PaiIOConnection\n'), ((2655, 2694), 'runtime.db.connect_with_data_source', 'db.connect_with_data_source', (['datasource'], {}), '(datasource)\n', (2682, 2694), False, 'from runtime import db\n'), ((2705, 2735), 'runtime.temp_file.TemporaryDirectory', 'temp_file.TemporaryDirectory', ([], {}), '()\n', (2733, 2735), True, 'import runtime.temp_file as temp_file\n'), ((2771, 2812), 'os.path.join', 'os.path.join', (['tmp_dir_name', '"""predict.txt"""'], {}), "(tmp_dir_name, 'predict.txt')\n", (2783, 2812), False, 'import os\n'), ((2836, 2881), 
'os.path.join', 'os.path.join', (['tmp_dir_name', '"""predict_raw_dir"""'], {}), "(tmp_dir_name, 'predict_raw_dir')\n", (2848, 2881), False, 'import os\n'), ((2899, 3245), 'runtime.xgboost.dataset.xgb_dataset', 'xgb_dataset', ([], {'datasource': 'datasource', 'fn': 'pred_fn', 'dataset_sql': 'select', 'feature_metas': 'feature_metas', 'feature_column_names': 'feature_column_names', 'label_meta': 'None', 'cache': '(True)', 'batch_size': '(10000)', 'transform_fn': 'transform_fn', 'raw_data_dir': 'raw_data_dir', 'is_pai': 'is_pai', 'pai_table': 'pai_table', 'pai_single_file': '(True)', 'feature_column_code': 'fc_map_ir'}), '(datasource=datasource, fn=pred_fn, dataset_sql=select,\n feature_metas=feature_metas, feature_column_names=feature_column_names,\n label_meta=None, cache=True, batch_size=10000, transform_fn=\n transform_fn, raw_data_dir=raw_data_dir, is_pai=is_pai, pai_table=\n pai_table, pai_single_file=True, feature_column_code=fc_map_ir)\n', (2910, 3245), False, 'from runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset\n'), ((6101, 6163), 'runtime.db.buffered_db_writer', 'db.buffered_db_writer', (['conn', 'result_table', 'result_column_names'], {}), '(conn, result_table, result_column_names)\n', (6122, 6163), False, 'from runtime import db\n'), ((1653, 1694), 'runtime.temp_file.TemporaryDirectory', 'temp_file.TemporaryDirectory', ([], {'as_cwd': '(True)'}), '(as_cwd=True)\n', (1681, 1694), True, 'import runtime.temp_file as temp_file\n'), ((1716, 1753), 'runtime.model.Model.load_from_db', 'Model.load_from_db', (['datasource', 'model'], {}), '(datasource, model)\n', (1734, 1753), False, 'from runtime.model import EstimatorType, Model\n'), ((3754, 3799), 'os.path.join', 'os.path.join', (['tmp_dir_name', '"""predict.txt.raw"""'], {}), "(tmp_dir_name, 'predict.txt.raw')\n", (3766, 3799), False, 'import os\n'), ((3903, 3969), 'os.path.join', 'os.path.join', (['tmp_dir_name', "('predict_raw_dir/predict.txt_%d' % idx)"], {}), "(tmp_dir_name, 
'predict_raw_dir/predict.txt_%d' % idx)\n", (3915, 3969), False, 'import os\n'), ((5089, 5104), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (5097, 5104), True, 'import numpy as np\n'), ((5354, 5369), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (5362, 5369), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
A component of a findNeighbour4 server which provides relatedness information for bacterial genomes.
It does so using PCA, and supports PCA based cluster generation.
The associated classes compute a variation model for samples in a findNeighbour4 server.
Computation uses data in MongoDb, and is not memory intensive, using configuration information in a
config file.
Functionality is provided in following classes:
* VariationModel - stores results of producing variant matrix and running PCA
* VariantMatrix - computes sample x variant matrix (requires: PERSIST object for mongodb access; server configuration file)
* PCARunner - runs PCA on VariantMatrix
Unit testing is facilitated by a
* PersistenceTest class. This exposes a small subset of the fn3persist object's methods, sufficient to test PCA. It can be used to store subsets of data for testing purposes
without then need to access a real fn3persistence data store.
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 <NAME> <EMAIL>
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
"""
# import libraries
import os
import logging
import warnings
import datetime
import random
from typing import Tuple, Set
from collections import defaultdict
import hashlib
import json
import pathlib
import pandas as pd
import numpy as np
from scipy.stats import poisson
import progressbar
import sqlalchemy
from scipy.stats import binom_test, median_abs_deviation
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
class PersistenceTest:
    """a class which mimics some methods available in an fn3persistence object, sufficient to unit test PCA generation.

    Only these methods are implemented:
    __init__
    refcompressedsequence_guids
    refcompressedsequence_read

    Additionally, a load_data method is provided which loads data into the object.
    Data in the correct format can be generated by utils/temporal_subsets.py
    """

    def __init__(self, **kwargs):
        """constructs the object. Any parameters are accepted, and none have any effect"""
        self.seqs = {}
        self.sample_ids = set([])

    def load_data(self, sample_ids_file, sequences_file):
        """loads sample ids and reference compressed sequences from json files.

        Args:
            sample_ids_file: path to a json file containing a list of sample ids
            sequences_file: path to a json file mapping guid -> reference compressed sequence

        Raises:
            KeyError: if sample_ids contains ids with no corresponding sequence
        """
        # BUGFIX: the supplied file paths were previously ignored and
        # hard-coded 'testdata/pca/...' paths were read instead.
        with open(sequences_file, "rt") as f:
            self.seqs = json.load(f)

        for guid in self.seqs.keys():
            # NOTE(review): this key list looks like a redaction/placeholder
            # artifact - confirm the intended per-sequence keys to set-ify.
            for key in ["<KEY>"]:
                self.seqs[guid][key] = set(self.seqs[guid][key])

        with open(sample_ids_file, "rt") as f:
            self.sample_ids = set(json.load(f))

        # sanity check
        # check there are no samples in sample_ids which are not present in seqs
        # sample_ids are allowed to be a subset of seqs, but
        # no samples should exists in sample_ids which aren't in seqs
        missing = self.sample_ids - set(self.seqs.keys())
        if len(missing) > 0:
            raise KeyError(
                "Provided with sample_ids which are not in seqs. There are {0} such sequences. Examples are: {1}".format(
                    len(missing), missing
                )
            )

    def refcompressedsequence_guids(self):
        """returns the set of loaded sample ids"""
        return self.sample_ids

    def refcompressedsequence_read(self, guid):
        """read a single sequence; returns None if guid is not a loaded sample id"""
        if guid not in self.sample_ids:
            return None
        return self.seqs[guid]

    def refcompressedsequence_read_all(self):
        """reads all sequences, returning a generator of (guid, sequence) tuples"""
        for guid in self.sample_ids:
            yield guid, self.seqs[guid]
class MNStats:
    """computes the number of M and N bases in a reference compressed object"""

    def __init__(self, select_positions, analysed_reference_length):
        """input:
        select_positions: the positions contributing to the pca model, as generated by ModelBuilder.
        analysed_reference_length: the number of reference bases analysed."""
        self.select_positions = select_positions
        self.analysed_reference_length = analysed_reference_length

    def examine(self, obj):
        """examines the reference compressed object obj,
        reporting
        * counts of Ns and Ms in the sequence, split by whether they fall
          inside select_positions
        * "Test 2" (binomial test, as per the findNeighbour3 paper): whether
          the frequency of Ns/Ms inside select_positions exceeds that seen
          elsewhere, which is indicative of a mixed sample."""
        n_model = len(self.select_positions)
        n_outside = self.analysed_reference_length - n_model
        report = {
            "m_in_model": 0,
            "n_in_model": 0,
            "model_positions": n_model,
            "reference_positions": self.analysed_reference_length,
        }
        for base_l, base in (("m", "M"), ("n", "N")):
            # positions called M/N; absent key means none of that base
            base_positions = obj.get(base, ())
            report["{0}_total".format(base_l)] = len(base_positions)
            # how many of the missing calls fall inside the model positions
            in_model = sum(
                1 for pos in base_positions if pos in self.select_positions
            )
            report["{0}_in_model".format(base_l)] += in_model
            # expected proportion, estimated from positions outside the model
            p_expected = (
                report["{0}_total".format(base_l)]
                - report["{0}_in_model".format(base_l)]
            ) / n_outside
            report["{0}_expected_proportion".format(base_l)] = p_expected
            report["{0}_observed_proportion".format(base_l)] = (
                report["{0}_in_model".format(base_l)] / n_model
            )
            # one-sided binomial test: are M/N enriched among model positions?
            report["{0}_p_value".format(base_l)] = binom_test(
                report["{0}_in_model".format(base_l)],
                n_model,
                p_expected,
                alternative="greater",
            )
        return report
class VariationModel:
    """Stores a VariantMatrix, the output of a PCA of the matrix, and (optionally) a clustering of the principal components.

    You should not normally have to call this class directly to create a VariationModel - the VariantMatrix class would do this for you.
    - You might wish to instantiate this class directly if you are restoring a previously serialised VariationModel - see constructor"""

    def __init__(self):
        """creates a new, empty Variation model.

        Components are stored in self.model, a write-once dictionary
        accessed via the [] operator; 'built' is False until finish() runs."""
        self.model = {"built": False}
        return

    def __getitem__(self, key):
        """returns the model component stored under *key*.

        Raises:
            KeyError: if *key* has not been stored."""
        if key not in self.model:
            raise KeyError(f"Key {key} not found")
        return self.model[key]

    def __setitem__(self, key, value):
        """adds a key-value pair to the model.

        Keys are write-once: overwriting an existing key raises KeyError,
        protecting stored model components from accidental replacement."""
        if key in self.model.keys():
            raise KeyError(f"Cannot replace key {key}")
        else:
            self.model[key] = value

    def _coefficients_hash(self):
        """computes a hash on the coefficients in the variant model.
        This is useful for version tracking & storing patterns of masking."""
        # md5 is used as a fingerprint only, not for security
        h = hashlib.md5()
        h.update(self.model["eigenvectors"].to_csv().encode("utf-8"))
        md5_l = h.hexdigest()
        return "{0}".format(md5_l)

    def finish(self):
        """completes construction of the VariationModel: stamps the build time,
        stores a hash of the eigenvectors, and marks the model as built."""
        self.model["build_time"] = datetime.datetime.now().isoformat()
        self.model["coefficients_hash"] = self._coefficients_hash()
        self.model["built"] = True

    def to_sqlite(
        self,
        outputdir="",
        analysis_name="pca_output",
        rebuild_databases_if_present=True,
    ):
        """write output to sqlite database

        Inputs
        =======
        outputdir                     the directory the SQLite database goes into. will create if it does not exist
        analysis_name                 name of the analysis.  Will become the first part of the file name
        rebuild_databases_if_present  delete any existing SQLite database

        Returns
        =======
        path to sqlite database
        """
        # ensure the outputdir exists
        pathlib.Path(outputdir).mkdir(parents=True, exist_ok=True)

        # configure sqlite file for output.
        sqlite_file = os.path.join(outputdir, "{0}.sqlite".format(analysis_name))
        engine = sqlalchemy.create_engine(
            "sqlite:///{0}".format(sqlite_file), echo=False
        )

        # run checks on sqlite file
        if rebuild_databases_if_present:
            try:
                os.unlink(sqlite_file)
            except FileNotFoundError:
                pass

        # open connection
        conn = engine.connect()

        metadata = []
        for key in self.model:
            if not (
                key == "variant_matrix"
                or isinstance(self.model[key], PCA)
                or key == "transformed_coordinates"
            ):  # we don't serialise these; one is massive and the other can't be serialised
                logging.info("Writing {0}".format(key))
                native_type = type(self.model[key])
                if native_type in [bool, int, float, str]:
                    # scalars accumulate into a single Metadata table
                    metadata.append(
                        {
                            "variable": key,
                            "value": str(self.model[key]),
                            "native_type": str(native_type),
                        }
                    )
                elif type(self.model[key]) in [set, list]:
                    # sets are sorted for deterministic output; lists keep order
                    if type(self.model[key]) == set:
                        list_data = sorted(list(self.model[key]))
                    else:
                        list_data = self.model[key]
                    tmp = pd.DataFrame(list_data, columns=[key])
                    tmp.to_sql(key, conn, if_exists="fail")
                elif type(self.model[key]) in [dict, defaultdict]:
                    # flatten {key: scalar-or-list} into (key, value) records
                    output_records = []
                    for this_key in self.model[key].keys():
                        item = self.model[key][this_key]
                        if type(item) in [float, bool, int, str]:
                            item = [item]
                        if not type(item) == list:
                            raise TypeError(
                                "Can only export dictionaries which are of key:list or key:scalar format; the list element is of type {0} : {1}".format(
                                    type(item), item
                                )
                            )
                        for list_element in item:
                            output_records.append(
                                {key: this_key, "value": list_element}
                            )
                    tmp = pd.DataFrame.from_records(output_records)
                    # BUGFIX: the dataframe was previously built but never
                    # persisted; write it out like the list/set branch does.
                    tmp.to_sql(key, conn, if_exists="fail")
                elif type(self.model[key]) == np.int64:
                    # numpy scalars go into Metadata, converted to python int
                    metadata.append(
                        {"variable": key, "value": str(int(self.model[key]))}
                    )
                elif type(self.model[key]) == pd.core.frame.DataFrame:
                    # these types of dataframe have indices which are sample_ids.
                    # We relabel the index sample_id before export.
                    df_to_write = self.model[key]
                    if key in ["mix_quality_info", "suspect_quality_seqs"]:
                        # BUGFIX: rename_axis returns a new frame; the original
                        # call discarded its result, so the index was never
                        # actually relabelled.
                        df_to_write = df_to_write.rename_axis("sample_id")
                    df_to_write.to_sql(key, conn, if_exists="fail")
                else:
                    warnings.warn(
                        "Not handled {0} with class {1}".format(
                            key, type(self.model[key])
                        )
                    )
        metadata_df = pd.DataFrame.from_records(metadata)
        metadata_df.to_sql("Metadata", conn, if_exists="fail")
        conn.close()
        return sqlite_file
class VariantMatrix:
    """In charge of producing a sample x SNP matrix"""

    def __init__(self, CONFIG, PERSIST, show_bar=True):
        """Construct a variant matrix

        Parameters:
        CONFIG: a configuration dictionary, as produced by findn.common_utils.ConfigManager.read_config()
        PERSIST: a persistence object providing access to stored sequence data.

                Any of the following will work:
                findn.mongoStore.fn3persistence object, or
                findn.rdbmstore.fn3persistence, or
                localstore.localstoreutils.LocalStore object (fast access from a local tar file - preferred for large datasets), or
                PersistenceTest object, the latter being useful for unit testing.
        show_bar: whether or not to show a progress bar
        """

        # store the persistence object as part of the object
        self.PERSIST = PERSIST

        # set easy to read properties from the config
        # analysed length = reference length minus the excluded positions
        self.analysed_reference_length = len(CONFIG["reference"]) - len(
            set(CONFIG["excludePositions"])
        )

        # store whether we're using bars for display
        self.show_bar = show_bar

        # we start without any variation model
        self._reset()

    def _reset(self):
        """clears existing variation model and pca result"""
        self.vm = VariationModel()
        self._invalid = set()  # invalid guids for which we can't compute pcs
        self.model = {"built": False, "sample_id": []}
        self.validation_data = None

    def guids(self):
        """returns list of guids currently in the persistence object"""
        return self.PERSIST.refcompressedsequence_guids()

    def _column_name(self, pos, base):
        """given a base at a position, returns a position:base string suitable for use as a pandas column name"""
        return f"{pos}:{base}"

    def get_position_counts(self, guids=None) -> Tuple[set, dict, dict]:
        """returns positions of variation across the genome

        Parameters:
        guids : a set of sequence identifiers to analyse.  If None, all samples in self.PERSIST are analysed

        Returns:
        a tuple consisting of:
        the sample ids (guids) analysed (set)
        a dictionary consisting of the positions of variation, and the numbers of samples at each position; example: {28281: 2692, 5387: 2704, 23603: 2722, 23270: 2708, 6953: 2685, 16175: 2689, 24913: 2707, ...}
        a dictionary consisting of the positions where missingness/gaps (either N, - or IUPAC mixture codes) are present, and the numbers of samples at each position.  Format as above"""

        vmodel = defaultdict(int)  # variants
        mmodel = defaultdict(int)  # missingness

        # set default values
        if guids is None:
            guids = self.guids()
        guids_analysed = set()

        if self.show_bar:
            bar = progressbar.ProgressBar(max_value=len(guids))

        num_loaded = 0
        # iterate over every stored sample, keeping only those requested
        for guid, refcompressed_sample in self.PERSIST.refcompressedsequence_read_all():
            if guid in guids:
                num_loaded +=1
                if self.show_bar:
                    bar.update(num_loaded)

                # only valid samples contribute to the counts
                if refcompressed_sample["invalid"] == 0:
                    guids_analysed.add(guid)

                    # for definite calls, compute variation at each position
                    for base in ["A", "C", "G", "T"]:
                        var_positions = refcompressed_sample.get(base, [])
                        for var_pos in var_positions:
                            vmodel[var_pos] += 1

                    # compute missingness/gaps if it's mixed (M) or N
                    for base in ["M", "N"]:  # compute missingness/gaps if it's mixed or N
                        missingness_positions = refcompressed_sample.get(base, [])
                        for missingness_pos in missingness_positions:
                            mmodel[missingness_pos] += 1

        if self.show_bar:
            bar.finish()
        return guids_analysed, vmodel, mmodel

    def get_missingness_cutoff(self, positions: Set[int], mmodel: dict) -> int:
        """computes a missingness cutoff, applicable at a per-sequence level.

        Samples which have high levels of missingness (i.e. N, -, or IUPAC mixture codes)
        may be unsuitable for incorporation into PCA models.  Some level of missingness is expected,
        but samples with very high missingness may compromise modelling.

        Parameters:
        positions: a set of integers, representing the positions across which missingness is to be estimated
        mmodel: a missingness model, which is a dictionary of the form {28281: 2692, 5387: 2704, 23603: 2722}
                where 28281 is a position, and 2692 is the number of sequences with missingness at that position.
                this kind of dictionary is generated by .get_position_counts

        Returns:
        a estimate of how many missing positions are unexpected.  This is based on the idea that the number of missing positions
        for the majority of samples is estimated by Poisson process; the cutoff returned approximates the 99.9% upper confidence
        interval on the expected number of missing positions if this is true, and mu = 2 * the median missingness.

        Note: This criterion is somewhat arbitrary.
        The impact of this approximation has not been systematically evaluated, and could be the subject of further work.
        """
        missingness = list(map(lambda pos: mmodel.get(pos, 0), positions))
        median_missingness = np.median(
            missingness
        )  # study median, as this will be relatively insensitive to samples with high missingness

        # missingness_distribution = Counter(missingness)
        # use Poisson cdf with mu = 2x median_missingness; find 99% CI
        upper_cutoff = poisson.ppf(
            0.999, 2 * median_missingness
        )  # crude approximation to upper CI
        return upper_cutoff

    def build(
        self,
        min_variant_freq=None,
        num_train_on=None,
        deterministic=True,
        select_from=None,
    ):
        """builds the sample x SNP matrix and stores it in self.vm

        input:
            min_variant_freq: the minimum proportion of samples with variation at that site for the site to be included.  If none, is set to 3/train_on, i.e. each variant has to appear 3 times to be considered
            num_train_on:     only compute PCA on a subset of train_on samples.  Set to None for all samples.
            deterministic:    if num_train on is not None, setting deterministic = True (default) ensures the same samples are analysed each time.  If num_train_on is None, has no effect.
            select_from:      only build a model from the sample_ids in the list provided.  If None, has no effect

        returns:
            None; results are stored in self.vm (a VariationModel)
        """

        # determine guids there in the database
        guids = self.guids()
        if select_from is not None:
            if not isinstance(select_from, list):
                raise TypeError(
                    "Select from must be a list, not {0}".format(type(select_from))
                )
            select_from = set(select_from)
            guids = set(guids).intersection(select_from)
            guids = list(guids)

        ########################################################################################################
        # if we have been told to use a subset of these to build the model, construct that subset.
        if num_train_on is None:  # if we are not told how many to use then we use
            num_train_on = len(guids)  # all samples
        else:
            # randomise order for model training purposes if required
            if not deterministic:
                random.shuffle(
                    guids
                )  # makes pipeline non-deterministic if not all samples are analysed
            else:
                guids = sorted(guids)  # keep order constant between runs
            if num_train_on < len(guids):
                guids = guids[:num_train_on]
        self.vm["num_train_on"] = num_train_on  # persist parameters used
        #########################################################################################################

        #########################################################################################################
        # if minimum variation is not set, only analyse variants seen at least 2 times.
        if min_variant_freq is None:
            min_variant_freq = 2 / num_train_on
        #########################################################################################################

        #########################################################################################################
        # determine the variation model.  In the first stage, we analyse by position
        # positions with unexpectedly high levels of missingness are excluded, as these may be hard to call.
        logging.info(
            "Assessing per-base variation from {0} samples".format(num_train_on)
        )
        guids_analysed_stage1, vmodel, mmodel = self.get_position_counts(guids)

        # store variant model
        self.vm["variant_frequencies"] = vmodel
        self.vm["min_variant_freq"] = min_variant_freq
        self.vm["analysed_reference_length"] = self.analysed_reference_length

        # from the totality of the variation, select positions with > cutoff % variation
        cutoff_variant_number = num_train_on * min_variant_freq
        self.vm["cutoff_variant_number"] = cutoff_variant_number
        select_positions = set()
        for pos, variant_count in vmodel.items():
            if variant_count >= cutoff_variant_number:
                select_positions.add(pos)
        logging.info(
            "Found {0} positions which vary at frequencies more than {1}.".format(
                len(select_positions), min_variant_freq
            )
        )

        if len(select_positions) == 0:
            raise ValueError(
                "No variation found above cutoff. normally this is because you ran the PCA operation against an empty database; this is what happens if you omit a config file parameter, when a test database is examined by default. Cannot continue"
            )
        self.model["variant_positions_gt_cutoff_variant_number"] = len(
            select_positions
        )  # store result
        upper_cutoff = self.get_missingness_cutoff(
            select_positions, mmodel
        )  # define cutoff
        self.vm["max_ok_missingness"] = float(upper_cutoff)
        self.vm["max_ok_missingness_pc"] = int(100 * upper_cutoff / num_train_on)

        # remove any positions with high levels of missingness from the variation model to be used in the PCA
        num_removed = 0
        for pos in mmodel.keys():  # positions
            if mmodel[pos] > upper_cutoff and pos in select_positions:
                num_removed += 1
                select_positions.remove(pos)
        self.select_positions = select_positions
        pc_removed = int(100 * (num_removed / (len(select_positions) + num_removed)))
        logging.info(
            f"Removed {num_removed} positions with missingness > cutoff {upper_cutoff} sequences. Represents removal of {pc_removed} %"
        )
        logging.info(
            f"There remain {len(select_positions)} positions which vary at frequencies more than {min_variant_freq} and pass the missingness cutoff."
        )
        self.vm["variant_positions_ok_missingness"] = len(select_positions)

        #########################################################################################################
        #########################################################################################################
        # determine the variation model.  In the second stage, we analyse by sample.
        # find any samples which have a high number of missing or uncertain bases in the positions of variation
        # vs in other positions.  Such samples may be mixed.
        self.mns = MNStats(select_positions, self.vm.model["analysed_reference_length"])
        logging.info(
            "Scanning for samples with unexpectedly high missingness (N), or likely to be mixed (M) in variation model"
        )
        if self.show_bar:
            bar = progressbar.ProgressBar(max_value=len(guids_analysed_stage1))

        guid2missing = {}
        nLoaded = 0

        for guid, obj in self.PERSIST.refcompressedsequence_read_all():
            if guid in guids_analysed_stage1:
                nLoaded +=1
                if self.show_bar:
                    bar.update(nLoaded)
                for base in [
                    "M",
                    "N",
                ]:  # compute how many bases in this position are either M or N

                    # examine all missing (N/M) sites, adding to a missingness model
                    try:
                        for pos in obj[base]:
                            if pos in select_positions:
                                try:
                                    mmodel[pos] = mmodel[pos] + 1
                                except KeyError:
                                    if pos not in vmodel.keys():
                                        mmodel[pos] = 1  # first occurrence at this position
                    except KeyError:
                        pass  # if there are no M,N then we can ignore these

                ## do binomial test for unexpectedly high missingness in the variant sites, as well as an n/m count
                guid2missing[guid] = self.mns.examine(obj)
        if self.show_bar:
            bar.finish()

        #########################################################################################################
        # reject samples with higher missingness in the variation model vs. other sites.
        logging.info("Scan complete.  Collating information.")
        # collate mixture quality information, and identify low quality (mixed, \
        # as judged by high Ns or Ms in the variant sites)
        mix_quality_info = pd.DataFrame.from_dict(guid2missing, orient="index")
        self.vm["mix_quality_info"] = mix_quality_info

        # identify any mixed samples.  we don't build the model from these.
        # mixed are defined as having significantly more N or M in the variant
        # positions than in other bases.
        # Note: this impact of this step has been evaluated in TB, but not as extensively in SARS-COV-2
        mix_quality_cutoff = 0.01 / len(
            mix_quality_info.index
        )  # 0.01 divided by the number of samples analysed; Bonferroni adj.
        suspect_quality = mix_quality_info.query(
            "m_p_value < {0} or n_p_value < {0}".format(mix_quality_cutoff)
        )
        self.vm["suspect_quality_seqs"] = suspect_quality
        n_suspect = len(set(suspect_quality.index.to_list()))
        pc_suspect = int(100 * n_suspect / len(mix_quality_info.index))
        logging.info(
            "Identified {0} ({1}%) sequences with higher N/M in variant vs. non-variant bases (composition p cutoff {2}); excluded from model as may be mixed.".format(
                n_suspect, pc_suspect, mix_quality_cutoff
            )
        )
        guids_analysed_stage2 = guids_analysed_stage1 - set(
            suspect_quality.index.to_list()
        )

        #########################################################################################################
        # build a variation matrix for variant sites which pass, and samples which pass
        vmodel = {}
        nLoaded = 0
        logging.info(
            "Gathering variation for matrix construction from {0} unmixed samples into dictionary ".format(
                len(guids_analysed_stage2)
            )
        )
        if self.show_bar:
            bar = progressbar.ProgressBar(max_value=len(guids_analysed_stage2))
        self.model["sample_id"] = []
        for guid, obj in self.PERSIST.refcompressedsequence_read_all():
            if guid in guids_analysed_stage2:
                nLoaded += 1
                if self.show_bar:
                    bar.update(nLoaded)

                # for invalid samples, compute nothing
                if obj["invalid"] == 1:
                    self._invalid.add(guid)
                else:
                    # compute a variation model - a list of bases and variants where variation occurs
                    variants = {}  # variation for this guid

                    # for definite calls, compute variation at each position
                    # positions of variation where a call was made
                    for base in set(["A", "C", "G", "T"]).intersection(obj.keys()):
                        target_positions = select_positions.intersection(obj[base])
                        # one matrix column per (position, base); 1 = call made
                        called_positions = dict(
                            (self._column_name(pos, base), 1) for pos in target_positions
                        )
                        variants = {**variants, **called_positions}
                    vmodel[guid] = variants
        if self.show_bar:
            bar.finish()

        #########################################################################################################
        # build a variation matrix for variant sites using pandas - may take minutes for giant matrices
        logging.info(
            "Building variant matrix as pandas DataFrame.  May take several minutes for huge matrices."
        )
        t0 = datetime.datetime.now()
        vmodel = pd.DataFrame.from_dict(vmodel, orient="index")
        vmodel.fillna(value=0, inplace=True)  # if not completed, then it's reference
        # unless it's null, which we are ignoring at present- we have preselected sites as having low null frequencies
        t1 = datetime.datetime.now()
        elapsed = (t1 - t0).total_seconds()
        logging.info(
            "Matrix construction complete.  There are {0} sequences in the variation model, which took {1} seconds to build".format(
                len(vmodel.index), elapsed
            )
        )
        self.vm["variant_matrix"] = vmodel
        return None
class PCARunner:
    """Performs PCA on a VariantMatrix"""

    def __init__(self, snp_matrix: VariantMatrix, show_bar=True):
        """binds the runner to the VariationModel held by *snp_matrix*.

        Parameters:
            snp_matrix: a VariantMatrix whose .build() has populated snp_matrix.vm
            show_bar:   whether or not to show a progress bar
        """
        self.vm = snp_matrix.vm
        self.transformed_coordinates = None
        self.show_bar = show_bar

    def run(
        self, n_components, pca_parameters=None, deterministic=True
    ) -> VariationModel:
        """conducts pca on a snp_matrix, storing the results in the snp_matrix's VariantModel object.

        input:
            n_components: the maximum number of components to extract.
            pca_parameters: a dictionary of parameters passed to the scikit-learn PCA command.
                            The contents of the dictionary are passed as-is to the PCA command, without any checking.
                            Defaults to an empty dictionary.
            deterministic: unused; retained for interface compatibility.

        returns:
            the populated VariationModel (also available as self.vm)
        """
        # avoid the mutable-default-argument pitfall; None means 'no extra parameters'
        if pca_parameters is None:
            pca_parameters = {}

        logging.info("Performing pca, extracting {0} components".format(n_components))
        self.vm["n_pca_components"] = n_components

        # if necessary, can perform incremental PCA see https://stackoverflow.com/questions/31428581/incremental-pca-on-big-data
        t0 = datetime.datetime.now()
        pca = PCA(n_components=n_components, **pca_parameters)
        variant_matrix = self.vm["variant_matrix"]
        pca.fit(variant_matrix)
        t1 = datetime.datetime.now()
        elapsed = (t1 - t0).total_seconds()
        logging.info("PCA took {0} seconds".format(elapsed))

        contribs = []
        # summarise the positions and variants responsible for each pc
        pc2contributing_pos = {}
        contributing_basepos = set()
        contributing_pos = set()
        for i, row in enumerate(pca.components_, 0):
            # mark values far from the median, which is close to zero
            # this information is not used, but it is retained for depiction purposes if needed
            row_median = np.median(row)
            row_mad = median_abs_deviation(row)
            row_upper_ci = row_median + 3 * row_mad
            row_lower_ci = row_median - 3 * row_mad
            pc2contributing_pos[i] = set()
            for j, cell in enumerate(row, 0):
                if cell > row_upper_ci or cell < row_lower_ci:
                    outside_3mad = True
                else:
                    outside_3mad = False
                pos = int(variant_matrix.columns[j].split(":")[0])
                allele = variant_matrix.columns[j].split(":")[1]

                # indicate whether positions are strongly weighted
                if outside_3mad:
                    pc2contributing_pos[i].add(pos)
                    contributing_basepos.add(variant_matrix.columns[j])
                    contributing_pos.add(pos)
                contribs.append(
                    {
                        "pc": i,
                        "pos": pos,
                        "allele": allele,
                        "col": variant_matrix.columns[j],
                        "weight": cell,
                        "outside_3mad": outside_3mad,
                    }
                )
            pc2contributing_pos[i] = sorted(
                list(pc2contributing_pos[i])
            )  # can be json serialised, unlike set

        # report eigenvectors which are different from median +- 3 median absolute deviations
        self.eigenvectors = pd.DataFrame.from_records(contribs)
        if len(self.eigenvectors.index) == 0:
            raise KeyError(
                "PCA problem. No eigenvectors found. Contributions found are as follows: {0}. This usually means there is insufficient data to build PCs. Try increasing the sample number".format(
                    contribs
                )
            )

        # compute transformed_coordinates for the samples on which the fit was performed.
        logging.info("Computing transformed_coordinates")
        transformed_coordinates_dict = {}
        for guid, tcs in zip(variant_matrix.index, pca.transform(variant_matrix)):
            transformed_coordinates_dict[guid] = tcs
        self.transformed_coordinates = pd.DataFrame.from_dict(
            transformed_coordinates_dict, orient="index"
        )
        self.transformed_coordinates.columns = range(n_components)

        # persist everything of interest into the VariationModel
        self.vm["pca"] = pca
        self.vm["transformed_coordinates"] = self.transformed_coordinates
        self.vm["eigenvectors"] = self.eigenvectors
        self.vm["explained_variance_ratio"] = list(pca.explained_variance_ratio_)
        self.vm["n_contributing_positions"] = len(contributing_pos)
        self.vm["pc2_contributing_positions"] = pc2contributing_pos
        self.vm["n_contributing_variants"] = len(contributing_basepos)
        self.vm["contributing_basepos"] = contributing_basepos
        self.vm["contributing_pos"] = contributing_pos
        self.vm["sample_id"] = variant_matrix.index.tolist()
        self.vm["pos_per_pc"] = [
            len(x) for x in self.vm.model["pc2_contributing_positions"].values()
        ]
        logging.info(
            "PCA completed, identified {0} strongly contributing base/positions".format(
                len(contributing_basepos)
            )
        )
        self.vm.finish()
        return self.vm

    def cluster(self, initial_cats_per_unit=8):
        """clusters the transformed_coordinates obtained by run()

        Categorises transformed_coordinates.  Uses kmeans clustering, and uses a crude approximation to estimate the number of clusters.
        The technique used operates per pc; we bin transformed_coordinates into bins 1/initial_cats_per_unit wide, and count the non-zero bins.  This is used as an estimate of
        the numbers of clusters, and the pca is provided with the bin centres as a set of starting values.
        Empirically, the technique was found to provide better discrimination of emerging SARS-CoV-2 genomes than approaches based on model fitting,
        such as Gaussian mixture modelling, although it undoubtedly splits some PCs arbitrarily.

        Parameters:
        initial_cats_per_unit: (default 8).  Used to estimate the number of k-means clusters to generate.

        Outputs:
        self.vm: the VariantModel object generated by this routine

        Sets:
        self.transformed_coordinate_categories: a data frame containing cluster names for each cluster
        """
        # check there is a model
        if self.transformed_coordinates is None:
            raise NotImplementedError(
                "No transformed_coordinates.  You must call .run() before calling .cluster()"
            )

        t0 = datetime.datetime.now()  # startup time

        # prepare data for clustering
        tc = (
            self.transformed_coordinates.copy()
        )  # transformed_coordinates.  option to drop PCs of technical origin could be dropped.
        if self.show_bar:
            bar = progressbar.ProgressBar(
                max_value=len(self.transformed_coordinates.columns.to_list())
            )

        logging.info("Clustering transformed_coordinates")
        per_pc_frames = []  # per-PC results, concatenated once after the loop
        for i, col in enumerate(tc.columns):
            if self.show_bar:
                bar.update(i)
            this_tc = tc[col].to_frame()
            this_tc.columns = ["transformed_coordinate"]
            this_tc["pc"] = col
            this_tc["initial_cat"] = [
                int(x * initial_cats_per_unit)
                for x in this_tc["transformed_coordinate"]
            ]

            # how many non-zero categories
            cats = this_tc.groupby(["initial_cat"])["transformed_coordinate"].describe()

            # convert to arrays to fit; the bin centres seed the k-means
            to_fit = this_tc["transformed_coordinate"].to_numpy().reshape(-1, 1)
            centres = cats["mean"].to_numpy().reshape(-1, 1)
            km = KMeans(n_clusters=len(cats.index), n_init=1, init=centres).fit(to_fit)
            this_tc["cat"] = km.labels_
            this_tc["sample_id"] = this_tc.index

            # store a pc_cat field. useful for searching later.
            pc_cats = [str(col) + "_" + str(x) for x in this_tc["cat"]]
            this_tc["pc_cat"] = pc_cats
            # BUGFIX: drop() returns a copy; the original discarded the result,
            # leaving the scratch 'initial_cat' column in the output.
            this_tc = this_tc.drop(["initial_cat"], axis=1)
            per_pc_frames.append(this_tc)

        # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # accumulate frames and concatenate once (also O(n) rather than O(n^2)).
        if len(per_pc_frames) == 1:
            tcs = per_pc_frames[0]  # preserve original behaviour: a single PC keeps its index
        else:
            tcs = pd.concat(per_pc_frames, ignore_index=True)
        self.vm["transformed_coordinate_categories"] = tcs
        self.vm.finish()
        if self.show_bar:
            bar.finish()
        t1 = datetime.datetime.now()
        elapsed = (t1 - t0).total_seconds()
        logging.info(
            "Transformed coordinate clustering took {0} seconds".format(elapsed)
        )
        return self.vm
| [
"pandas.DataFrame.from_records",
"numpy.median",
"scipy.stats.median_abs_deviation",
"hashlib.md5",
"random.shuffle",
"pathlib.Path",
"sklearn.decomposition.PCA",
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"scipy.stats.poisson.ppf",
"datetime.datetime.now",
"collections.defaultdict",
... | [((7940, 7953), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (7951, 7953), False, 'import hashlib\n'), ((12582, 12617), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['metadata'], {}), '(metadata)\n', (12607, 12617), True, 'import pandas as pd\n'), ((15387, 15403), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (15398, 15403), False, 'from collections import defaultdict\n'), ((15433, 15449), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (15444, 15449), False, 'from collections import defaultdict\n'), ((18422, 18444), 'numpy.median', 'np.median', (['missingness'], {}), '(missingness)\n', (18431, 18444), True, 'import numpy as np\n'), ((18709, 18751), 'scipy.stats.poisson.ppf', 'poisson.ppf', (['(0.999)', '(2 * median_missingness)'], {}), '(0.999, 2 * median_missingness)\n', (18720, 18751), False, 'from scipy.stats import poisson\n'), ((23932, 24080), 'logging.info', 'logging.info', (['f"""Removed {num_removed} positions with missingness > cutoff {upper_cutoff} sequences. Represents removal of {pc_removed} %"""'], {}), "(\n f'Removed {num_removed} positions with missingness > cutoff {upper_cutoff} sequences. Represents removal of {pc_removed} %'\n )\n", (23944, 24080), False, 'import logging\n'), ((24935, 25066), 'logging.info', 'logging.info', (['"""Scanning for samples with unexpectedly high missingness (N), or likely to be mixed (M) in variation model"""'], {}), "(\n 'Scanning for samples with unexpectedly high missingness (N), or likely to be mixed (M) in variation model'\n )\n", (24947, 25066), False, 'import logging\n'), ((26687, 26741), 'logging.info', 'logging.info', (['"""Scan complete. Collating information."""'], {}), "('Scan complete. 
Collating information.')\n", (26699, 26741), False, 'import logging\n'), ((26911, 26963), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['guid2missing'], {'orient': '"""index"""'}), "(guid2missing, orient='index')\n", (26933, 26963), True, 'import pandas as pd\n'), ((30191, 30306), 'logging.info', 'logging.info', (['"""Building variant matrix as pandas DataFrame. May take several minutes for huge matrices."""'], {}), "(\n 'Building variant matrix as pandas DataFrame. May take several minutes for huge matrices.'\n )\n", (30203, 30306), False, 'import logging\n'), ((30332, 30355), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30353, 30355), False, 'import datetime\n'), ((30374, 30420), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['vmodel'], {'orient': '"""index"""'}), "(vmodel, orient='index')\n", (30396, 30420), True, 'import pandas as pd\n'), ((30639, 30662), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30660, 30662), False, 'import datetime\n'), ((32028, 32051), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32049, 32051), False, 'import datetime\n'), ((32066, 32114), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components, **pca_parameters)\n', (32069, 32114), False, 'from sklearn.decomposition import PCA\n'), ((32211, 32234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32232, 32234), False, 'import datetime\n'), ((34229, 34264), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['contribs'], {}), '(contribs)\n', (34254, 34264), True, 'import pandas as pd\n'), ((34700, 34749), 'logging.info', 'logging.info', (['"""Computing transformed_coordinates"""'], {}), "('Computing transformed_coordinates')\n", (34712, 34749), False, 'import logging\n'), ((34967, 35035), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['transformed_coordinates_dict'], {'orient': '"""index"""'}), 
"(transformed_coordinates_dict, orient='index')\n", (34989, 35035), True, 'import pandas as pd\n'), ((37484, 37507), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (37505, 37507), False, 'import datetime\n'), ((37892, 37942), 'logging.info', 'logging.info', (['"""Clustering transformed_coordinates"""'], {}), "('Clustering transformed_coordinates')\n", (37904, 37942), False, 'import logging\n'), ((39344, 39367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39365, 39367), False, 'import datetime\n'), ((2800, 2812), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2809, 2812), False, 'import json\n'), ((32780, 32794), 'numpy.median', 'np.median', (['row'], {}), '(row)\n', (32789, 32794), True, 'import numpy as np\n'), ((32817, 32842), 'scipy.stats.median_abs_deviation', 'median_abs_deviation', (['row'], {}), '(row)\n', (32837, 32842), False, 'from scipy.stats import binom_test, median_abs_deviation\n'), ((3068, 3080), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3077, 3080), False, 'import json\n'), ((8206, 8229), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8227, 8229), False, 'import datetime\n'), ((8997, 9020), 'pathlib.Path', 'pathlib.Path', (['outputdir'], {}), '(outputdir)\n', (9009, 9020), False, 'import pathlib\n'), ((9407, 9429), 'os.unlink', 'os.unlink', (['sqlite_file'], {}), '(sqlite_file)\n', (9416, 9429), False, 'import os\n'), ((20550, 20571), 'random.shuffle', 'random.shuffle', (['guids'], {}), '(guids)\n', (20564, 20571), False, 'import random\n'), ((10587, 10625), 'pandas.DataFrame', 'pd.DataFrame', (['list_data'], {'columns': '[key]'}), '(list_data, columns=[key])\n', (10599, 10625), True, 'import pandas as pd\n'), ((11698, 11739), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['output_records'], {}), '(output_records)\n', (11723, 11739), True, 'import pandas as pd\n')] |
"""State distinguishability."""
from typing import List
import cvxpy
import numpy as np
from .state_helper import __is_states_valid, __is_probs_valid
def state_distinguishability(
    states: List[np.ndarray], probs: List[float] = None, dist_method: str = "min-error"
) -> float:
    r"""
    Compute probability of state distinguishability [ELD03]_.

    The "quantum state distinguishability" problem involves a collection of :math:`n` quantum states

    .. math::
        \rho = \{ \rho_0, \ldots, \rho_n \},

    as well as a list of corresponding probabilities

    .. math::
        p = \{ p_0, \ldots, p_n \}.

    Alice chooses :math:`i` with probability :math:`p_i` and creates the state :math:`\rho_i` Bob
    wants to guess which state he was given from the collection of states.

    One can specify the distinguishability method using the :code:`dist_method` argument.
    For :code:`dist_method = "min-error"`, this is the default method that yields the probability of
    distinguishing quantum states that minimize the probability of error.
    For :code:`dist_method = "unambiguous"`, Alice and Bob never provide an incorrect answer,
    although it is possible that their answer is inconclusive.

    When :code:`dist_method = "min-error"`, this function implements the following semidefinite
    program that provides the optimal probability with which Bob can conduct quantum state
    distinguishability.

    .. math::
        \begin{align*}
            \text{maximize:} \quad & \sum_{i=0}^n p_i \langle M_i, \rho_i \rangle \\
            \text{subject to:} \quad & M_0 + \ldots + M_n = \mathbb{I},\\
                                     & M_0, \ldots, M_n \geq 0
        \end{align*}

    When :code:`dist_method = "unambiguous"`, this function implements the following semidefinite
    program that provides the optimal probability with which Bob can conduct unambiguous quantum
    state distinguishability.

    .. math::
        \begin{align*}
            \text{maximize:} \quad & \sum_{i=0}^n p_i \langle M_i, \rho_i \rangle \\
            \text{subject to:} \quad & M_0 + \ldots + M_{n+1} = \mathbb{I},\\
                                     & \langle M_i, \rho_j \rangle = 0,
                                       \quad 1 \leq i, j \leq n, \quad i \not= j. \\
                                     & M_0, \ldots, M_n \geq 0
        \end{align*}

    Examples
    ==========

    State distinguishability for two state density matrices.

    >>> from toqito.states import basis, bell
    >>> from toqito.state_opt import state_distinguishability
    >>> e_0, e_1 = basis(2, 0), basis(2, 1)
    >>> e_00 = e_0 * e_0.conj().T
    >>> e_11 = e_1 * e_1.conj().T
    >>> states = [e_00, e_11]
    >>> probs = [1 / 2, 1 / 2]
    >>> res = state_distinguishability(states, probs)
    0.5000000000006083

    References
    ==========
    .. [ELD03] Eldar, <NAME>.
        "A semidefinite programming approach to optimal unambiguous
        discrimination of quantum states."
        IEEE Transactions on information theory 49.2 (2003): 446-456.
        https://arxiv.org/abs/quant-ph/0206093

    :param states: A list of states provided as either matrices or vectors.
    :param probs: Respective list of probabilities each state is selected.
    :param dist_method: Method of distinguishing to use.
    :return: The optimal probability with which Bob can distinguish the state.
    """
    obj_func = []
    measurements = []
    constraints = []

    __is_states_valid(states)
    if probs is None:
        # Default to a uniform prior over the given states.
        probs = [1 / len(states)] * len(states)
    __is_probs_valid(probs)

    dim_x, dim_y = states[0].shape

    # The variable `states` is provided as a list of vectors. Transform them
    # into density matrices.
    if dim_y == 1:
        for i, state_ket in enumerate(states):
            states[i] = state_ket * state_ket.conj().T

    # Unambiguous state discrimination has an additional constraint on the states and measurements.
    if dist_method == "unambiguous":
        # Note we have one additional (inconclusive-outcome) measurement operator
        # in the unambiguous case.
        for i in range(len(states) + 1):
            measurements.append(cvxpy.Variable((dim_x, dim_x), PSD=True))

        # Enforce <M_i, rho_j> = 0 for all i != j so that a conclusive outcome is
        # never wrong.  (Bug fix: the constraint previously paired `states[i]`
        # with `measurements[i]`, ignoring `j` entirely, which contradicts the
        # SDP stated in the docstring and zeroes out the objective.)
        for i, _ in enumerate(states):
            for j, _ in enumerate(states):
                if i != j:
                    constraints.append(cvxpy.trace(states[j].conj().T @ measurements[i]) == 0)

    if dist_method == "min-error":
        for i, _ in enumerate(states):
            measurements.append(cvxpy.Variable((dim_x, dim_x), PSD=True))

    # Objective function is the inner product between the states and measurements.
    for i, _ in enumerate(states):
        obj_func.append(probs[i] * cvxpy.trace(states[i].conj().T @ measurements[i]))

    # The measurement operators must form a valid POVM (sum to identity).
    constraints.append(sum(measurements) == np.identity(dim_x))

    objective = cvxpy.Maximize(sum(obj_func))
    problem = cvxpy.Problem(objective, constraints)
    sol_default = problem.solve()

    return sol_default
| [
"numpy.identity",
"cvxpy.Variable",
"cvxpy.Problem"
] | [((4962, 4999), 'cvxpy.Problem', 'cvxpy.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (4975, 4999), False, 'import cvxpy\n'), ((4881, 4899), 'numpy.identity', 'np.identity', (['dim_x'], {}), '(dim_x)\n', (4892, 4899), True, 'import numpy as np\n'), ((4163, 4203), 'cvxpy.Variable', 'cvxpy.Variable', (['(dim_x, dim_x)'], {'PSD': '(True)'}), '((dim_x, dim_x), PSD=True)\n', (4177, 4203), False, 'import cvxpy\n'), ((4589, 4629), 'cvxpy.Variable', 'cvxpy.Variable', (['(dim_x, dim_x)'], {'PSD': '(True)'}), '((dim_x, dim_x), PSD=True)\n', (4603, 4629), False, 'import cvxpy\n')] |
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
import numpy as np
import json
import os.path
import subprocess
import random
from operator import itemgetter
import sklearn.metrics as metrics
np.set_printoptions(linewidth=1000000000)
torch.cuda.manual_seed(1)

training_data = []
testing_data = []

# Load PSSM feature files.  Each filename in pssm_list.tsv looks like
# "<id1>_<id2>_<chain>"; per-residue rows hold 20 PSSM scores in columns 3:23.
I = open("pssm_list.tsv","r").readlines()
pssm_data = list(map(str.strip, I))
pdb_features = dict()
for i in pssm_data:
	I = iter(list(map(str.strip,open("PSSM/"+i,"r").readlines())))
	r = i.split("_")
	pdb = r[0]+"_"+r[1]
	ch = r[2]
	if not pdb in pdb_features:
		pdb_features[pdb] = dict()
	next(I)
	for j in I:
		r = j.split(" ")
		res_id = r[1]
		# Key residues as "<residue-id><chain>"; store the 20-dim PSSM vector.
		pdb_features[pdb][res_id+ch] = dict()
		pdb_features[pdb][res_id+ch]['pssm'] = list(map(float,r[3:23]))

# Attach the spatial-neighbour list ('vd') of each residue from the
# tab-separated NEIGHBOURS files: first field is the residue, rest are its
# neighbours.
for pdb in pdb_features.keys():
	I = iter(list(map(str.strip,open("NEIGHBOURS/"+pdb+"_u.vd","r").readlines())))
	for j in I:
		r = iter(j.split("\t"))
		res_ch = next(r)
		pdb_features[pdb][res_ch]['vd'] = list(r)

# Native residue-residue interactions (contacts): rri[pdb]["res_i:res_j"] = True.
rri = dict()
PDB = list(map(str.strip, open("rri_list.tsv","r").readlines()))
for i in PDB:
	if not i in rri:
		rri[i] = dict()
	J = iter(list(map(str.strip,open("pairPred_contactMap/"+i+".int","r").readlines())))
	for j in J:
		r = j.split("\t")
		rri[i][r[0]+":"+r[1]]=True
def get_native_rri(pdb, res_i, res_j):
	"""Return a cuda LongTensor label: 1 if (res_i, res_j) is a native
	contact recorded in the global ``rri[pdb]`` map, otherwise 0."""
	label = 1 if "%s:%s" % (res_i, res_j) in rri[pdb] else 0
	return autograd.Variable(torch.LongTensor([label])).cuda()
def weights_init(m):
	"""Initialize Linear-type modules in place: Xavier-normal weights and
	zero bias.  Intended to be used via ``model.apply(weights_init)``.

	:param m: a submodule; only modules whose class name contains 'Linear'
		are touched, everything else is left unchanged.
	"""
	classname = m.__class__.__name__
	if classname.find('Linear') != -1:
		# `xavier_normal_` is the in-place form; the old `xavier_normal`
		# spelling is deprecated in modern PyTorch.
		nn.init.xavier_normal_(m.weight)
		m.bias.data.fill_(0)
class DyNet(nn.Module):
	"""Two-branch network scoring a residue pair for interaction.

	One branch embeds each residue's neighbourhood (residue PSSM concatenated
	with each neighbour's PSSM, mean-pooled); the other scores the direct
	pair from both PSSMs plus both pooled neighbourhood embeddings.
	Features are read from the module-level global ``pdb_features``.
	"""
	def __init__( self, input_dim=20, direct_pair_dim=512, neighbour_pair_dim=512, neighbour_pair_out_dim=256, out_size=2 ):
		# input_dim: length of one PSSM feature vector (20 amino acids).
		# out_size: 2 classes (non-interacting / interacting).
		super(DyNet, self).__init__()
		self.input_dim = input_dim
		self.direct_pair_dim = direct_pair_dim
		self.neighbour_pair_dim = neighbour_pair_dim
		self.neighbour_pair_out_dim = neighbour_pair_out_dim
		self.out_size = out_size
		#NN DIRECT PAIR
		# Input: PSSMs of both residues + pooled neighbourhood embeddings of both.
		self.drop_direct_pair = nn.Dropout(p=0.5)
		self.direct_pair = nn.Linear(2*input_dim+2*neighbour_pair_out_dim, direct_pair_dim)
		self.direct_pair_out = nn.Linear(direct_pair_dim, out_size)
		#NN NEIGHBOURS
		# Input: (partner PSSM, neighbour PSSM) concatenation per neighbour.
		self.drop_neighbour_pair = nn.Dropout(p=0.5)
		self.neighbour_pair = nn.Linear(2*input_dim, neighbour_pair_dim)
		self.neighbour_pair_out = nn.Linear(neighbour_pair_dim, neighbour_pair_out_dim)
	def prepare_data(self, pdb_i, res_i, pdb_j, res_j):
		"""Build the four cuda input tensors for one residue pair.

		Returns (a_i, A_i, b_j, B_j): the two residue PSSM row vectors and,
		per residue, the matrix of (partner PSSM ++ neighbour PSSM) rows over
		its neighbour list ('vd').  Assumes both residues have a 'vd' entry.
		"""
		a_i = torch.unsqueeze( torch.FloatTensor(pdb_features[pdb_i][res_i]['pssm']),dim=0 )
		b_j = torch.unsqueeze( torch.FloatTensor(pdb_features[pdb_j][res_j]['pssm']),dim=0 )
		# flag marks the first neighbour row (starts the concatenation).
		flag = True
		for i in pdb_features[pdb_i][res_i]['vd']:
			v = list(pdb_features[pdb_j][res_j]['pssm'])
			v.extend( pdb_features[pdb_i][i]['pssm'] )
			v = torch.unsqueeze(torch.FloatTensor(v),dim=0)
			if not flag:
				A_i = torch.cat( (A_i, v) ,dim=0 )
			else:
				flag = False
				A_i = v
		flag = True
		for j in pdb_features[pdb_j][res_j]['vd']:
			v = list(pdb_features[pdb_i][res_i]['pssm'])
			v.extend(pdb_features[pdb_j][j]['pssm'])
			v = torch.unsqueeze(torch.FloatTensor(v),dim=0)
			if not flag:
				B_j = torch.cat( (B_j, v),dim=0 )
			else:
				flag = False
				B_j = v
		return autograd.Variable(a_i).cuda(), autograd.Variable(A_i).cuda(), autograd.Variable(b_j).cuda(), autograd.Variable(B_j).cuda()
	def forward(self, pdb_i, res_i, pdb_j, res_j ):
		"""Return log-softmax class scores (shape 1 x out_size) for the pair."""
		a_i, A_i, b_j, B_j = self.prepare_data( pdb_i, res_i, pdb_j, res_j )
		# Neighbourhood branch for residue i: embed each neighbour row, then
		# mean-pool over neighbours.
		N_i = self.neighbour_pair(A_i)
		N_i = self.drop_neighbour_pair(N_i)
		N_i = F.relu(N_i)
		N_i = self.neighbour_pair_out(N_i)
		N_i = F.relu( N_i )
		N_i = torch.mean(N_i,0)
		# Same branch (shared weights) for residue j.
		N_j = self.neighbour_pair(B_j)
		N_j = self.drop_neighbour_pair(N_j)
		N_j = F.relu(N_j)
		N_j = self.neighbour_pair_out(N_j)
		N_j = F.relu( N_j )
		N_j = torch.mean(N_j,0)
		# Direct-pair branch on the concatenated features.
		v_in = torch.cat([a_i,b_j,N_i,N_j],dim=1)
		out = self.direct_pair(v_in)
		out = self.drop_direct_pair(out)
		out = F.relu(out)
		out = self.direct_pair_out(out)
		out = F.log_softmax( out )
		return out
# Hyper-parameters for a leave-one-complex-out training/evaluation experiment.
input_dim=20
direct_pair_dim=512
neighbour_pair_dim=512
neighbour_pair_out_dim=1
out_size=2
model = DyNet(input_dim=input_dim, direct_pair_dim=direct_pair_dim, neighbour_pair_dim=neighbour_pair_dim, neighbour_pair_out_dim=neighbour_pair_out_dim, out_size=out_size)
model.cuda()
print(model)
loss_function = nn.NLLLoss()
#optimizer = optim.Adam(model.parameters(), lr=0.01)
N = len(training_data)
current_n = 1
print("Neural networking ...")
# Leave-one-out: for each target complex, train a fresh model on all the
# others and periodically evaluate on the held-out target.
for target in PDB:
	lr = 0.1
	# NOTE(review): duplicated assignment `model = model = ...` — harmless but
	# presumably a typo for a single assignment.
	model = model = DyNet(input_dim=input_dim, direct_pair_dim=direct_pair_dim, neighbour_pair_dim=neighbour_pair_dim, neighbour_pair_out_dim=neighbour_pair_out_dim, out_size=out_size)
	model.cuda()
	for epoch in range(100):
		optimizer = optim.SGD(model.parameters(), lr=lr)
		#lr *= 0.9
		curr_n = len(PDB)
		results = list()
		for pdb in PDB:
			curr_n -=1
			if pdb == target:
				continue
			print("%d:%d - %s \r"%(epoch,curr_n,pdb),end="")
			# Positive examples: every native contact of this complex.
			for R in list(rri[pdb].keys()):
				[res_i,res_j] = R.split(":")
				model.zero_grad()
				optimizer.zero_grad()
				predicted = model( pdb+"_r", res_i, pdb+"_l", res_j )
				native = get_native_rri( pdb,res_i,res_j )
				loss = loss_function( predicted, native )
				loss.backward()
				optimizer.step()
				results.append( [1,predicted.data.cpu()[0,1]] )
			# Negative examples: random residue pairs, same count as positives.
			neg = len( list(rri[pdb].keys()) )
			while(neg>0):
				res_i = random.choice( list(pdb_features[pdb+"_r"].keys()) )
				res_j = random.choice( list(pdb_features[pdb+"_l"].keys()) )
				# NOTE(review): `rri` is keyed by pdb names, so this membership
				# test never matches a contact key — presumably `rri[pdb]` was
				# intended to skip true positives; confirm before relying on it.
				if res_i+":"+res_j in rri:
					continue
				if not "vd" in pdb_features[pdb+"_r"][res_i]:
					continue
				if not "vd" in pdb_features[pdb+"_l"][res_j]:
					continue
				neg -= 1
				model.zero_grad()
				optimizer.zero_grad()
				predicted = model( pdb+"_r", res_i, pdb+"_l", res_j )
				native = get_native_rri( pdb,res_i,res_j )
				loss = loss_function( predicted, native )
				loss.backward()
				optimizer.step()
				results.append( [0,predicted.data.cpu()[0,1]] )
		# Training AUC over this epoch's (label, score) pairs.
		soreted_res = np.array(sorted(results, key=itemgetter(1),reverse=True))
		fpr, tpr, thresholds = metrics.roc_curve(soreted_res[:,0], soreted_res[:,1], pos_label=1)
		auc = metrics.auc(fpr, tpr)
		print("Training %s:%d AUC=%0.4f\n"%(target,epoch,auc),end="")
		if epoch % 10 == 0:
			# Evaluate on the held-out target: score every candidate pair plus
			# all native contacts, then report AUC and precision@k.
			print("Evaluating %s:%d\n"%(target,epoch),end="")
			model.train(mode=False)
			results = list()
			for res_i in list(pdb_features[target+"_r"].keys()):
				for res_j in list(pdb_features[target+"_l"].keys()):
					# NOTE(review): same `in rri` issue as above — presumably
					# `rri[target]` was intended here.
					if res_i+":"+res_j in rri:
						continue
					if not "vd" in pdb_features[target+"_r"][res_i]:
						continue
					if not "vd" in pdb_features[target+"_l"][res_j]:
						continue
					predicted = model( target+"_r", res_i, target+"_l", res_j )
					results.append( [0,predicted.data.cpu()[0,1]] )
			for R in list(rri[target].keys()):
				[res_i,res_j] = R.split(":")
				predicted = model( target+"_r", res_i, target+"_l", res_j )
				results.append( [1,predicted.data.cpu()[0,1]] )
			soreted_res = np.array(sorted(results, key=itemgetter(1),reverse=True))
			fpr, tpr, thresholds = metrics.roc_curve(soreted_res[:,0], soreted_res[:,1], pos_label=1)
			auc = metrics.auc(fpr, tpr)
			p_10 = np.sum(soreted_res[0:10,0])/10
			p_100 = np.sum(soreted_res[0:100,0])/100
			p_500 = np.sum(soreted_res[0:500,0])/500
			print( "%s - AUC=%0.4f - P10=%0.4f - P100=%0.4f - P500=%0.4f" % (target, auc, p_10, p_100, p_500) )
			model.train(mode=True)
| [
"torch.nn.Dropout",
"torch.mean",
"sklearn.metrics.auc",
"torch.LongTensor",
"torch.nn.init.xavier_normal",
"numpy.sum",
"sklearn.metrics.roc_curve",
"torch.nn.NLLLoss",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.functional.log_softmax",
"operator.itemgetter",
"torch.cuda.manua... | [((301, 342), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(1000000000)'}), '(linewidth=1000000000)\n', (320, 342), True, 'import numpy as np\n'), ((343, 368), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (365, 368), False, 'import torch\n'), ((4918, 4930), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (4928, 4930), True, 'import torch.nn as nn\n'), ((1685, 1716), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['m.weight'], {}), '(m.weight)\n', (1706, 1716), True, 'import torch.nn as nn\n'), ((2253, 2270), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2263, 2270), True, 'import torch.nn as nn\n'), ((2298, 2368), 'torch.nn.Linear', 'nn.Linear', (['(2 * input_dim + 2 * neighbour_pair_out_dim)', 'direct_pair_dim'], {}), '(2 * input_dim + 2 * neighbour_pair_out_dim, direct_pair_dim)\n', (2307, 2368), True, 'import torch.nn as nn\n'), ((2394, 2430), 'torch.nn.Linear', 'nn.Linear', (['direct_pair_dim', 'out_size'], {}), '(direct_pair_dim, out_size)\n', (2403, 2430), True, 'import torch.nn as nn\n'), ((2499, 2516), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2509, 2516), True, 'import torch.nn as nn\n'), ((2547, 2591), 'torch.nn.Linear', 'nn.Linear', (['(2 * input_dim)', 'neighbour_pair_dim'], {}), '(2 * input_dim, neighbour_pair_dim)\n', (2556, 2591), True, 'import torch.nn as nn\n'), ((2624, 2677), 'torch.nn.Linear', 'nn.Linear', (['neighbour_pair_dim', 'neighbour_pair_out_dim'], {}), '(neighbour_pair_dim, neighbour_pair_out_dim)\n', (2633, 2677), True, 'import torch.nn as nn\n'), ((4028, 4039), 'torch.nn.functional.relu', 'F.relu', (['N_i'], {}), '(N_i)\n', (4034, 4039), True, 'import torch.nn.functional as F\n'), ((4097, 4108), 'torch.nn.functional.relu', 'F.relu', (['N_i'], {}), '(N_i)\n', (4103, 4108), True, 'import torch.nn.functional as F\n'), ((4125, 4143), 'torch.mean', 'torch.mean', (['N_i', '(0)'], {}), '(N_i, 0)\n', 
(4135, 4143), False, 'import torch\n'), ((4241, 4252), 'torch.nn.functional.relu', 'F.relu', (['N_j'], {}), '(N_j)\n', (4247, 4252), True, 'import torch.nn.functional as F\n'), ((4310, 4321), 'torch.nn.functional.relu', 'F.relu', (['N_j'], {}), '(N_j)\n', (4316, 4321), True, 'import torch.nn.functional as F\n'), ((4338, 4356), 'torch.mean', 'torch.mean', (['N_j', '(0)'], {}), '(N_j, 0)\n', (4348, 4356), False, 'import torch\n'), ((4372, 4410), 'torch.cat', 'torch.cat', (['[a_i, b_j, N_i, N_j]'], {'dim': '(1)'}), '([a_i, b_j, N_i, N_j], dim=1)\n', (4381, 4410), False, 'import torch\n'), ((4500, 4511), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (4506, 4511), True, 'import torch.nn.functional as F\n'), ((4566, 4584), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['out'], {}), '(out)\n', (4579, 4584), True, 'import torch.nn.functional as F\n'), ((6835, 6903), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['soreted_res[:, 0]', 'soreted_res[:, 1]'], {'pos_label': '(1)'}), '(soreted_res[:, 0], soreted_res[:, 1], pos_label=1)\n', (6852, 6903), True, 'import sklearn.metrics as metrics\n'), ((6912, 6933), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (6923, 6933), True, 'import sklearn.metrics as metrics\n'), ((2766, 2819), 'torch.FloatTensor', 'torch.FloatTensor', (["pdb_features[pdb_i][res_i]['pssm']"], {}), "(pdb_features[pdb_i][res_i]['pssm'])\n", (2783, 2819), False, 'import torch\n'), ((2859, 2912), 'torch.FloatTensor', 'torch.FloatTensor', (["pdb_features[pdb_j][res_j]['pssm']"], {}), "(pdb_features[pdb_j][res_j]['pssm'])\n", (2876, 2912), False, 'import torch\n'), ((7936, 8004), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['soreted_res[:, 0]', 'soreted_res[:, 1]'], {'pos_label': '(1)'}), '(soreted_res[:, 0], soreted_res[:, 1], pos_label=1)\n', (7953, 8004), True, 'import sklearn.metrics as metrics\n'), ((8015, 8036), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (8026, 
8036), True, 'import sklearn.metrics as metrics\n'), ((1557, 1578), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (1573, 1578), False, 'import torch\n'), ((3133, 3153), 'torch.FloatTensor', 'torch.FloatTensor', (['v'], {}), '(v)\n', (3150, 3153), False, 'import torch\n'), ((3202, 3228), 'torch.cat', 'torch.cat', (['(A_i, v)'], {'dim': '(0)'}), '((A_i, v), dim=0)\n', (3211, 3228), False, 'import torch\n'), ((3502, 3522), 'torch.FloatTensor', 'torch.FloatTensor', (['v'], {}), '(v)\n', (3519, 3522), False, 'import torch\n'), ((3571, 3597), 'torch.cat', 'torch.cat', (['(B_j, v)'], {'dim': '(0)'}), '((B_j, v), dim=0)\n', (3580, 3597), False, 'import torch\n'), ((8051, 8079), 'numpy.sum', 'np.sum', (['soreted_res[0:10, 0]'], {}), '(soreted_res[0:10, 0])\n', (8057, 8079), True, 'import numpy as np\n'), ((8096, 8125), 'numpy.sum', 'np.sum', (['soreted_res[0:100, 0]'], {}), '(soreted_res[0:100, 0])\n', (8102, 8125), True, 'import numpy as np\n'), ((8143, 8172), 'numpy.sum', 'np.sum', (['soreted_res[0:500, 0]'], {}), '(soreted_res[0:500, 0])\n', (8149, 8172), True, 'import numpy as np\n'), ((1500, 1521), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (1516, 1521), False, 'import torch\n'), ((3676, 3698), 'torch.autograd.Variable', 'autograd.Variable', (['a_i'], {}), '(a_i)\n', (3693, 3698), True, 'import torch.autograd as autograd\n'), ((3707, 3729), 'torch.autograd.Variable', 'autograd.Variable', (['A_i'], {}), '(A_i)\n', (3724, 3729), True, 'import torch.autograd as autograd\n'), ((3738, 3760), 'torch.autograd.Variable', 'autograd.Variable', (['b_j'], {}), '(b_j)\n', (3755, 3760), True, 'import torch.autograd as autograd\n'), ((3769, 3791), 'torch.autograd.Variable', 'autograd.Variable', (['B_j'], {}), '(B_j)\n', (3786, 3791), True, 'import torch.autograd as autograd\n'), ((6779, 6792), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (6789, 6792), False, 'from operator import itemgetter\n'), ((7877, 7890), 
'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (7887, 7890), False, 'from operator import itemgetter\n')] |
import os
import numpy
import pygeoprocessing
from osgeo import gdal
from osgeo import osr
# I'm assuming that our synthetic raster here will cover the globe, just not
# have any real-world values.
# These details are copied from another raster I have, so roughly 5 arcseconds
# resolution
# NOTE(review): 0.0833... degrees is ~5 arcminutes, not arcseconds — confirm.
ORIGIN = (-180, 90)  # upper-left corner (lon, lat) of the global extent
PIXELSIZE = (0.083333333333333, -0.083333333333333)  # negative y -> north-up
COLS, ROWS = (4320, 2160)  # 360/0.0833 x 180/0.0833 pixels
SRS = osr.SpatialReference()
SRS.ImportFromEPSG(4326)  # WGS84 geographic coordinates
TARGET_RASTER = os.path.join(os.path.dirname(__file__), 'raster.tif')
# Tiled, LZW-compressed GeoTIFF creation options.
GTIFF_OPTIONS = [
    'COMPRESS=LZW',
    'TILED=YES',
    'BLOCKXSIZE=256',
    'BLOCKYSIZE=256',
]
NODATA_INT32 = -9999
NODATA_FLOAT32 = float(numpy.finfo(numpy.float32).min)
def create(target_filepath):
    """Write a synthetic global Int32 GeoTIFF to ``target_filepath``.

    The raster is created with the module-level extent/projection constants,
    each block is filled with an increasing ramp value plus small random
    integer noise, and average-resampled overviews are built at the end.
    """
    gtiff_driver = gdal.GetDriverByName('GTiff')
    raster = gtiff_driver.Create(
        target_filepath, COLS, ROWS, 1, gdal.GDT_Int32, options=GTIFF_OPTIONS)
    raster.SetProjection(SRS.ExportToWkt())
    raster.SetGeoTransform(
        [ORIGIN[0], PIXELSIZE[0], 0, ORIGIN[1], 0, PIXELSIZE[1]])
    band = raster.GetRasterBand(1)
    band.SetNoDataValue(NODATA_INT32)
    band = None
    raster = None

    # Reopen for update and fill block by block.
    fill_value = 0
    raster = gdal.OpenEx(target_filepath, gdal.GA_Update)
    band = raster.GetRasterBand(1)
    for block_info in pygeoprocessing.iterblocks((target_filepath, 1),
                                                largest_block=-1,
                                                offset_only=True):
        block_shape = (block_info['win_ysize'], block_info['win_xsize'])
        block = numpy.full(block_shape, fill_value, dtype=numpy.int32)
        block += numpy.random.randint(
            -30, 31, size=block.size).reshape(block.shape)  # make a little noise.
        band.WriteArray(block, xoff=block_info['xoff'],
                        yoff=block_info['yoff'])
        fill_value += 5
    band = None
    raster = None

    # Build pyramids so the raster renders quickly at low zoom levels.
    raster = gdal.OpenEx(target_filepath, gdal.GA_Update)
    gdal.SetConfigOption("COMPRESS_OVERVIEW", "LZW")
    raster.BuildOverviews("AVERAGE", [2, 4, 8, 16, 32, 64, 128, 256])
    raster = None
if __name__ == '__main__':
    # Generate the synthetic raster next to this script.
    create(TARGET_RASTER)
| [
"pygeoprocessing.iterblocks",
"osgeo.osr.SpatialReference",
"osgeo.gdal.SetConfigOption",
"os.path.dirname",
"numpy.random.randint",
"numpy.finfo",
"numpy.full",
"osgeo.gdal.GetDriverByName",
"osgeo.gdal.OpenEx"
] | [((396, 418), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (416, 418), False, 'from osgeo import osr\n'), ((474, 499), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (489, 499), False, 'import os\n'), ((736, 765), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (756, 765), False, 'from osgeo import gdal\n'), ((1210, 1254), 'osgeo.gdal.OpenEx', 'gdal.OpenEx', (['target_filepath', 'gdal.GA_Update'], {}), '(target_filepath, gdal.GA_Update)\n', (1221, 1254), False, 'from osgeo import gdal\n'), ((1326, 1414), 'pygeoprocessing.iterblocks', 'pygeoprocessing.iterblocks', (['(target_filepath, 1)'], {'largest_block': '(-1)', 'offset_only': '(True)'}), '((target_filepath, 1), largest_block=-1,\n offset_only=True)\n', (1352, 1414), False, 'import pygeoprocessing\n'), ((1972, 2016), 'osgeo.gdal.OpenEx', 'gdal.OpenEx', (['target_filepath', 'gdal.GA_Update'], {}), '(target_filepath, gdal.GA_Update)\n', (1983, 2016), False, 'from osgeo import gdal\n'), ((2021, 2069), 'osgeo.gdal.SetConfigOption', 'gdal.SetConfigOption', (['"""COMPRESS_OVERVIEW"""', '"""LZW"""'], {}), "('COMPRESS_OVERVIEW', 'LZW')\n", (2041, 2069), False, 'from osgeo import gdal\n'), ((660, 686), 'numpy.finfo', 'numpy.finfo', (['numpy.float32'], {}), '(numpy.float32)\n', (671, 686), False, 'import numpy\n'), ((1526, 1619), 'numpy.full', 'numpy.full', (["(block_info['win_ysize'], block_info['win_xsize'])", 'index'], {'dtype': 'numpy.int32'}), "((block_info['win_ysize'], block_info['win_xsize']), index, dtype\n =numpy.int32)\n", (1536, 1619), False, 'import numpy\n'), ((1659, 1705), 'numpy.random.randint', 'numpy.random.randint', (['(-30)', '(31)'], {'size': 'array.size'}), '(-30, 31, size=array.size)\n', (1679, 1705), False, 'import numpy\n')] |
"""
Distance measures that can be used for various torch.tensor operations
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.autograd import Variable
from scipy.spatial.distance import cosine
def get_predict_token_vector(pred, target, k=10, s=1, tau=1):
    """
    Sample a candidate token embedding for each prediction.

    pred: l_2 normed prediction vector (batch_size*sent_length x dim)
    target: embedding matrix (vocab_size x dim)
    k: choose top k values
    s: number of samples to choose for each output
       (we can average embeddings of multiple samples drawn from top k)
    tau: temperature for controlling softmax kurtosis

    Returns (cand, cand_ind): the sampled embedding rows (with
    requires_grad=True) and their vocabulary indices.
    """
    cos_sims = pairwise_distances(pred, target)
    # Keep the k most similar vocabulary entries per row.
    vals, inds = torch.topk(cos_sims, k)
    # Temperature-scaled distribution over the top-k candidates.
    sample_probs = F.softmax(vals / tau, 1)
    # Draw one candidate per row from the top-k distribution.
    # (Removed an unused `sample_size` local from the original.)
    samp_inds = Categorical(sample_probs).sample()
    cand_ind = inds[list(range(sample_probs.size(0))), samp_inds]
    cand = Variable(target[cand_ind], requires_grad=True)
    return cand, cand_ind
def get_nearest_token(pred, vocab, k=1):
    """Return indices of the top-k vocab rows scoring highest against pred."""
    scores = pairwise_distances(pred, vocab)
    _, top_inds = torch.topk(scores, k)
    return top_inds
def pairwise_distances(x, y, clamp=False):
    """Flattened matrix of squared Euclidean distances between rows of x and y.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a, b>.  When
    ``clamp`` is set, values are floored at 0 (guards tiny negative rounding).
    """
    sq_x = x.pow(2).sum(1).view(-1, 1)
    sq_y = y.pow(2).sum(1).view(1, -1)
    cross = torch.mm(x, y.t())
    dist = (sq_x + sq_y - 2.0 * cross).flatten()
    return torch.clamp(dist, 0.0, np.inf) if clamp else dist
return dist
def pearson_correlation(x, y):
    """Row-wise normalized dot products of x and y, clamped to [0, 1].

    NOTE: no mean-centering is applied, so this is a cosine-similarity-style
    quantity rather than a true Pearson coefficient.
    """
    dot = torch.sum(x * y, dim=1).view(-1, 1)
    norm_x = torch.sqrt(torch.sum(x ** 2, dim=1)).view(1, -1)
    norm_y = torch.sqrt(torch.sum(y ** 2, dim=1)).view(1, -1)
    ratio = dot / (norm_x * norm_y)
    return torch.clamp(ratio, 0.0, 1)
def cosine_distance(x1, x2=None, eps=1e-8):
    """Pairwise cosine similarity matrix between rows of x1 and x2.

    If ``x2`` is omitted, similarities within ``x1`` are computed.  ``eps``
    guards against division by zero for zero-norm rows.
    """
    if x2 is None:
        x2 = x1
    norms1 = x1.norm(p=2, dim=1, keepdim=True)
    norms2 = norms1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
    denom = (norms1 * norms2.t()).clamp(min=eps)
    return torch.mm(x1, x2.t()) / denom
def csim_np(x, y):
    """Mean row-wise cosine similarity between two equally-sized 2-D arrays
    (numpy version)."""
    print(x.shape)
    print(y.shape)
    assert x.shape[0] == y.shape[0]
    n_rows = x.shape[0]
    # cos_sim(u, v) = 1 - cos_dist(u, v)
    total = sum(1 - cosine(x[i, :], y[i, :]) for i in range(n_rows))
    return total / n_rows
def exp_dist(x, y):
    """Exponentially decaying similarity exp(-|x - y|), clamped to [0, 1]."""
    diff = abs(x - y)
    return torch.clamp(torch.exp(-diff), 0.0, 1)
def batthacaryya_dist(output, target):
    """Bhattacharyya-style coefficient: sum over sqrt(|output * target|)."""
    prod = torch.abs(output * target)
    return torch.sqrt(prod).sum()
def hamming_distance(pred, target, weight=1):
    """Fraction of positions where ``pred`` and ``target`` disagree.

    So far, just used for comparing binary codes.  Accepts either
    ``torch.Tensor`` or ``numpy.ndarray`` inputs; for tensors an optional
    per-position ``weight`` vector can mask positions (a zero weight makes
    that position always agree).
    """
    if isinstance(pred, torch.Tensor):
        # Bug fix: the original condition `weight != 1 or weight != None`
        # was always true, so any caller-supplied weight was clobbered.
        # Build the default all-ones weight only when none was given, and
        # place it on the target's device instead of hard-coding cuda.
        if weight is None or (isinstance(weight, int) and weight == 1):
            weight = torch.ones(target.size(1), device=target.device)
        return round(float(torch.sum((pred * weight != target * weight))) / pred.numel(), 4)
    elif isinstance(pred, np.ndarray):
        # Bug fix: numpy arrays have no `.numel()`; `.size` is the
        # equivalent element count.
        return np.count_nonzero(pred != target) / pred.size
if __name__ == "__main__":
x = torch.randn((10000, 100))
y = torch.randn((1, 100))
result = pairwise_distances(x, y).flatten()
idx = torch.topk(result, result.size(0))[1]
| [
"torch.mul",
"scipy.spatial.distance.cosine",
"torch.distributions.Categorical",
"torch.topk",
"torch.transpose",
"numpy.count_nonzero",
"torch.mm",
"torch.sum",
"torch.autograd.Variable",
"torch.nn.functional.softmax",
"torch.clamp",
"torch.randn"
] | [((734, 757), 'torch.topk', 'torch.topk', (['cos_sims', 'k'], {}), '(cos_sims, k)\n', (744, 757), False, 'import torch\n'), ((777, 801), 'torch.nn.functional.softmax', 'F.softmax', (['(vals / tau)', '(1)'], {}), '(vals / tau, 1)\n', (786, 801), True, 'import torch.nn.functional as F\n'), ((1117, 1163), 'torch.autograd.Variable', 'Variable', (['target[cand_ind]'], {'requires_grad': '(True)'}), '(target[cand_ind], requires_grad=True)\n', (1125, 1163), False, 'from torch.autograd import Variable\n'), ((1339, 1362), 'torch.topk', 'torch.topk', (['cos_sims', 'k'], {}), '(cos_sims, k)\n', (1349, 1362), False, 'import torch\n'), ((1489, 1513), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (1504, 1513), False, 'import torch\n'), ((1890, 1913), 'torch.clamp', 'torch.clamp', (['pc', '(0.0)', '(1)'], {}), '(pc, 0.0, 1)\n', (1901, 1913), False, 'import torch\n'), ((3243, 3268), 'torch.randn', 'torch.randn', (['(10000, 100)'], {}), '((10000, 100))\n', (3254, 3268), False, 'import torch\n'), ((3277, 3298), 'torch.randn', 'torch.randn', (['(1, 100)'], {}), '((1, 100))\n', (3288, 3298), False, 'import torch\n'), ((1638, 1668), 'torch.clamp', 'torch.clamp', (['dist', '(0.0)', 'np.inf'], {}), '(dist, 0.0, np.inf)\n', (1649, 1668), False, 'import torch\n'), ((1005, 1030), 'torch.distributions.Categorical', 'Categorical', (['sample_probs'], {}), '(sample_probs)\n', (1016, 1030), False, 'from torch.distributions import Categorical\n'), ((2450, 2474), 'scipy.spatial.distance.cosine', 'cosine', (['x[i, :]', 'y[i, :]'], {}), '(x[i, :], y[i, :])\n', (2456, 2474), False, 'from scipy.spatial.distance import cosine\n'), ((2653, 2678), 'torch.mul', 'torch.mul', (['output', 'target'], {}), '(output, target)\n', (2662, 2678), False, 'import torch\n'), ((3157, 3189), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred != target)'], {}), '(pred != target)\n', (3173, 3189), True, 'import numpy as np\n'), ((1589, 1605), 'torch.mm', 'torch.mm', (['x', 'y_t'], 
{}), '(x, y_t)\n', (1597, 1605), False, 'import torch\n'), ((3037, 3080), 'torch.sum', 'torch.sum', (['(pred * weight != target * weight)'], {}), '(pred * weight != target * weight)\n', (3046, 3080), False, 'import torch\n')] |
import numpy as np
from PIL import Image
from numpy import array
class ImgUtils(object):
    """Static helpers for reading images and arranging them into mosaics."""

    @staticmethod
    def read_image_bytes(filename):
        """Return the raw bytes of the file at ``filename``."""
        with open(filename, mode='rb') as file:
            return file.read()

    @staticmethod
    def read_image_numpy(filename, w, h):
        """Load an image, resize it to (w, h), and return an RGB numpy array."""
        resized = Image.open(filename).resize((w, h))
        return array(resized.convert('RGB'))

    @staticmethod
    def scale(arr):
        """Map 8-bit pixel values into [0, 1]."""
        return arr / 255.0

    @staticmethod
    def mosaic_images(images_tensor, ncols, grayscale=False):
        """Tile a stack of equally-sized square images into one grid image.

        Images are laid out row-major, ``ncols`` per row, separated by a
        one-pixel border filled with ones.
        """
        img_size = images_tensor.shape[1]
        col_size = ncols * (img_size + 1) - 1
        nrows = int(np.ceil(images_tensor.shape[0] / ncols))
        row_size = nrows * (img_size + 1) - 1
        grid_shape = (row_size, col_size) if grayscale else (row_size, col_size, 3)
        final = np.ones(grid_shape)
        for idx in range(images_tensor.shape[0]):
            row, col = divmod(idx, ncols)
            x = col * (img_size + 1)
            y = row * (img_size + 1)
            final[y:y + img_size, x:x + img_size] = images_tensor[idx]
        return final
| [
"numpy.ceil",
"PIL.Image.open",
"numpy.ones",
"numpy.floor",
"numpy.array"
] | [((384, 394), 'numpy.array', 'array', (['img'], {}), '(img)\n', (389, 394), False, 'from numpy import array\n'), ((650, 689), 'numpy.ceil', 'np.ceil', (['(images_tensor.shape[0] / ncols)'], {}), '(images_tensor.shape[0] / ncols)\n', (657, 689), True, 'import numpy as np\n'), ((780, 809), 'numpy.ones', 'np.ones', (['(row_size, col_size)'], {}), '((row_size, col_size))\n', (787, 809), True, 'import numpy as np\n'), ((844, 876), 'numpy.ones', 'np.ones', (['(row_size, col_size, 3)'], {}), '((row_size, col_size, 3))\n', (851, 876), True, 'import numpy as np\n'), ((300, 320), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (310, 320), False, 'from PIL import Image\n'), ((948, 967), 'numpy.floor', 'np.floor', (['(i / ncols)'], {}), '(i / ncols)\n', (956, 967), True, 'import numpy as np\n')] |
import copy
import numpy as np
import torch
from mmpose.core import (aggregate_results, get_group_preds,
get_multi_stage_outputs)
def test_get_multi_stage_outputs():
    """Exercise ``get_multi_stage_outputs`` over heatmap/AE flag combos,
    flipped outputs, projection size and corner alignment."""
    fake_outputs = [torch.zeros((1, 4, 2, 2))]
    fake_flip_outputs = [torch.ones((1, 4, 2, 2))]
    # outputs_flip
    # With no heatmap channels requested, the heatmap list stays empty.
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=None,
                                num_joints=4, with_heatmaps=[False],
                                with_ae=[True])
    assert heatmaps == []
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=None,
                                num_joints=2, with_heatmaps=[True],
                                with_ae=[True])
    assert len(heatmaps) == 1
    flip_index = [1, 0]
    # Providing flipped outputs yields a second heatmap entry.
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=fake_flip_outputs,
                                num_joints=2, with_heatmaps=[True],
                                with_ae=[True], flip_index=flip_index)
    assert len(heatmaps) == 2
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                tag_per_joint=False,
                                outputs_flip=fake_flip_outputs,
                                num_joints=2, with_heatmaps=[True],
                                with_ae=[True], flip_index=flip_index)
    assert len(heatmaps) == 2
    # with heatmaps & with ae
    # Two-stage outputs: zeros then ones, so averages are easy to predict.
    fake_outputs = [torch.zeros((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
    fake_flip_outputs = [torch.ones((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=None,
                                num_joints=2, with_heatmaps=[True, False],
                                with_ae=[True, True])
    assert torch.allclose(heatmaps[0], torch.tensor(0.))
    # Averaging the zero stage with the flipped ones stage gives 0.5.
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=fake_flip_outputs,
                                num_joints=2, with_heatmaps=[True, True],
                                with_ae=[True, False])
    assert torch.allclose(heatmaps[0], torch.tensor(0.5))
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=fake_flip_outputs,
                                num_joints=2, with_heatmaps=[True, False],
                                with_ae=[True, False], flip_index=flip_index)
    assert torch.allclose(heatmaps[0], torch.tensor(0.))
    # size_projected
    # Heatmaps get resized to the requested projection size.
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=None,
                                num_joints=2, with_heatmaps=[True, True],
                                with_ae=[True, False],
                                size_projected=(8, 8))
    assert heatmaps[0].shape == torch.Size([1, 2, 8, 8])
    outputs, heatmaps, tags = \
        get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
                                outputs_flip=fake_flip_outputs,
                                num_joints=2, with_heatmaps=[True, True],
                                with_ae=[True, False],
                                align_corners=True)
    assert torch.allclose(heatmaps[0], torch.tensor(0.5))
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
def test_aggregate_results():
    """Exercise aggregate_results across project2image / flip_test / scale."""
    base_heatmaps = [torch.zeros((1, 2, 2, 2))]
    base_tags = [torch.zeros((1, 2, 2, 2))]

    # With no prior aggregation the result must be all zeros.
    aggregated, tag_list = aggregate_results(
        scale=1, aggregated_heatmaps=None, tags_list=[],
        heatmaps=base_heatmaps, tags=base_tags,
        test_scale_factor=[1], project2image=True, flip_test=False)
    assert torch.allclose(aggregated, torch.tensor(0.))

    # A prior all-ones aggregation stays all ones after adding zeros.
    prior = torch.ones(1, 2, 2, 2)
    aggregated, tag_list = aggregate_results(
        scale=1, aggregated_heatmaps=prior, tags_list=[],
        heatmaps=base_heatmaps, tags=base_tags,
        test_scale_factor=[1], project2image=True, flip_test=False)
    assert torch.allclose(aggregated, torch.tensor(1.))

    # Same, but with align_corners enabled.
    aggregated, tag_list = aggregate_results(
        scale=1, aggregated_heatmaps=prior, tags_list=[],
        heatmaps=base_heatmaps, tags=base_tags,
        test_scale_factor=[1], project2image=True, flip_test=False,
        align_corners=True)
    assert torch.allclose(aggregated, torch.tensor(1.))

    # Flip testing with a larger prior: output keeps the prior's shape.
    flip_heatmaps = [torch.zeros((1, 2, 2, 2)), torch.ones((1, 2, 2, 2))]
    prior = torch.ones(1, 2, 4, 4)
    aggregated, tag_list = aggregate_results(
        scale=1, aggregated_heatmaps=prior, tags_list=[],
        heatmaps=flip_heatmaps, tags=base_tags,
        test_scale_factor=[1], project2image=False, flip_test=True)
    assert aggregated.shape == torch.Size((1, 2, 4, 4))

    aggregated, tag_list = aggregate_results(
        scale=2, aggregated_heatmaps=prior, tags_list=[],
        heatmaps=flip_heatmaps, tags=base_tags,
        test_scale_factor=[1, 2], project2image=False, flip_test=True)
    assert aggregated.shape == torch.Size((1, 2, 4, 4))
def test_get_group_preds():
    """get_group_preds should return non-empty results with and without UDP."""
    grouped = [np.array([[[0, 0], [1, 1]]])]

    preds = get_group_preds(
        grouped,
        center=np.array([0, 0]),
        scale=np.array([1, 1]),
        heatmap_size=np.array([2, 2]))
    assert not preds == []

    preds = get_group_preds(
        grouped,
        center=np.array([0, 0]),
        scale=np.array([1, 1]),
        heatmap_size=np.array([2, 2]),
        use_udp=True)
    assert not preds == []
| [
"mmpose.core.aggregate_results",
"torch.tensor",
"numpy.array",
"copy.deepcopy",
"torch.Size",
"torch.zeros",
"torch.ones"
] | [((3884, 4060), 'mmpose.core.aggregate_results', 'aggregate_results', ([], {'scale': '(1)', 'aggregated_heatmaps': 'None', 'tags_list': '[]', 'heatmaps': 'fake_heatmaps', 'tags': 'fake_tags', 'test_scale_factor': '[1]', 'project2image': '(True)', 'flip_test': '(False)'}), '(scale=1, aggregated_heatmaps=None, tags_list=[], heatmaps\n =fake_heatmaps, tags=fake_tags, test_scale_factor=[1], project2image=\n True, flip_test=False)\n', (3901, 4060), False, 'from mmpose.core import aggregate_results, get_group_preds, get_multi_stage_outputs\n'), ((4219, 4241), 'torch.ones', 'torch.ones', (['(1)', '(2)', '(2)', '(2)'], {}), '(1, 2, 2, 2)\n', (4229, 4241), False, 'import torch\n'), ((4289, 4478), 'mmpose.core.aggregate_results', 'aggregate_results', ([], {'scale': '(1)', 'aggregated_heatmaps': 'fake_aggr_heatmaps', 'tags_list': '[]', 'heatmaps': 'fake_heatmaps', 'tags': 'fake_tags', 'test_scale_factor': '[1]', 'project2image': '(True)', 'flip_test': '(False)'}), '(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,\n tags_list=[], heatmaps=fake_heatmaps, tags=fake_tags, test_scale_factor\n =[1], project2image=True, flip_test=False)\n', (4306, 4478), False, 'from mmpose.core import aggregate_results, get_group_preds, get_multi_stage_outputs\n'), ((4660, 4869), 'mmpose.core.aggregate_results', 'aggregate_results', ([], {'scale': '(1)', 'aggregated_heatmaps': 'fake_aggr_heatmaps', 'tags_list': '[]', 'heatmaps': 'fake_heatmaps', 'tags': 'fake_tags', 'test_scale_factor': '[1]', 'project2image': '(True)', 'flip_test': '(False)', 'align_corners': '(True)'}), '(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,\n tags_list=[], heatmaps=fake_heatmaps, tags=fake_tags, test_scale_factor\n =[1], project2image=True, flip_test=False, align_corners=True)\n', (4677, 4869), False, 'from mmpose.core import aggregate_results, get_group_preds, get_multi_stage_outputs\n'), ((5129, 5151), 'torch.ones', 'torch.ones', (['(1)', '(2)', '(4)', '(4)'], {}), '(1, 2, 4, 4)\n', (5139, 5151), False, 
'import torch\n'), ((5199, 5388), 'mmpose.core.aggregate_results', 'aggregate_results', ([], {'scale': '(1)', 'aggregated_heatmaps': 'fake_aggr_heatmaps', 'tags_list': '[]', 'heatmaps': 'fake_heatmaps', 'tags': 'fake_tags', 'test_scale_factor': '[1]', 'project2image': '(False)', 'flip_test': '(True)'}), '(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,\n tags_list=[], heatmaps=fake_heatmaps, tags=fake_tags, test_scale_factor\n =[1], project2image=False, flip_test=True)\n', (5216, 5388), False, 'from mmpose.core import aggregate_results, get_group_preds, get_multi_stage_outputs\n'), ((5570, 5762), 'mmpose.core.aggregate_results', 'aggregate_results', ([], {'scale': '(2)', 'aggregated_heatmaps': 'fake_aggr_heatmaps', 'tags_list': '[]', 'heatmaps': 'fake_heatmaps', 'tags': 'fake_tags', 'test_scale_factor': '[1, 2]', 'project2image': '(False)', 'flip_test': '(True)'}), '(scale=2, aggregated_heatmaps=fake_aggr_heatmaps,\n tags_list=[], heatmaps=fake_heatmaps, tags=fake_tags, test_scale_factor\n =[1, 2], project2image=False, flip_test=True)\n', (5587, 5762), False, 'from mmpose.core import aggregate_results, get_group_preds, get_multi_stage_outputs\n'), ((215, 240), 'torch.zeros', 'torch.zeros', (['(1, 4, 2, 2)'], {}), '((1, 4, 2, 2))\n', (226, 240), False, 'import torch\n'), ((267, 291), 'torch.ones', 'torch.ones', (['(1, 4, 2, 2)'], {}), '((1, 4, 2, 2))\n', (277, 291), False, 'import torch\n'), ((1700, 1725), 'torch.zeros', 'torch.zeros', (['(1, 4, 2, 2)'], {}), '((1, 4, 2, 2))\n', (1711, 1725), False, 'import torch\n'), ((1727, 1751), 'torch.ones', 'torch.ones', (['(1, 2, 4, 4)'], {}), '((1, 2, 4, 4))\n', (1737, 1751), False, 'import torch\n'), ((1778, 1802), 'torch.ones', 'torch.ones', (['(1, 4, 2, 2)'], {}), '((1, 4, 2, 2))\n', (1788, 1802), False, 'import torch\n'), ((1804, 1828), 'torch.ones', 'torch.ones', (['(1, 2, 4, 4)'], {}), '((1, 2, 4, 4))\n', (1814, 1828), False, 'import torch\n'), ((2150, 2167), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', 
(2162, 2167), False, 'import torch\n'), ((2501, 2518), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (2513, 2518), False, 'import torch\n'), ((2877, 2894), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2889, 2894), False, 'import torch\n'), ((3284, 3308), 'torch.Size', 'torch.Size', (['[1, 2, 8, 8]'], {}), '([1, 2, 8, 8])\n', (3294, 3308), False, 'import torch\n'), ((3694, 3711), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (3706, 3711), False, 'import torch\n'), ((3766, 3791), 'torch.zeros', 'torch.zeros', (['(1, 2, 2, 2)'], {}), '((1, 2, 2, 2))\n', (3777, 3791), False, 'import torch\n'), ((3810, 3835), 'torch.zeros', 'torch.zeros', (['(1, 2, 2, 2)'], {}), '((1, 2, 2, 2))\n', (3821, 3835), False, 'import torch\n'), ((4176, 4193), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4188, 4193), False, 'import torch\n'), ((4595, 4612), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (4607, 4612), False, 'import torch\n'), ((5012, 5029), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5024, 5029), False, 'import torch\n'), ((5051, 5076), 'torch.zeros', 'torch.zeros', (['(1, 2, 2, 2)'], {}), '((1, 2, 2, 2))\n', (5062, 5076), False, 'import torch\n'), ((5078, 5102), 'torch.ones', 'torch.ones', (['(1, 2, 2, 2)'], {}), '((1, 2, 2, 2))\n', (5088, 5102), False, 'import torch\n'), ((5498, 5522), 'torch.Size', 'torch.Size', (['(1, 2, 4, 4)'], {}), '((1, 2, 4, 4))\n', (5508, 5522), False, 'import torch\n'), ((5872, 5896), 'torch.Size', 'torch.Size', (['(1, 2, 4, 4)'], {}), '((1, 2, 4, 4))\n', (5882, 5896), False, 'import torch\n'), ((5954, 5982), 'numpy.array', 'np.array', (['[[[0, 0], [1, 1]]]'], {}), '([[[0, 0], [1, 1]]])\n', (5962, 5982), True, 'import numpy as np\n'), ((384, 411), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (397, 411), False, 'import copy\n'), ((679, 706), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (692, 706), 
False, 'import copy\n'), ((1001, 1028), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (1014, 1028), False, 'import copy\n'), ((1335, 1362), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (1348, 1362), False, 'import copy\n'), ((1902, 1929), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (1915, 1929), False, 'import copy\n'), ((2240, 2267), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (2253, 2267), False, 'import copy\n'), ((2592, 2619), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (2605, 2619), False, 'import copy\n'), ((2988, 3015), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (3001, 3015), False, 'import copy\n'), ((3381, 3408), 'copy.deepcopy', 'copy.deepcopy', (['fake_outputs'], {}), '(fake_outputs)\n', (3394, 3408), False, 'import copy\n'), ((6059, 6075), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6067, 6075), True, 'import numpy as np\n'), ((6091, 6107), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (6099, 6107), True, 'import numpy as np\n'), ((6130, 6146), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (6138, 6146), True, 'import numpy as np\n'), ((6253, 6269), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6261, 6269), True, 'import numpy as np\n'), ((6285, 6301), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (6293, 6301), True, 'import numpy as np\n'), ((6324, 6340), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (6332, 6340), True, 'import numpy as np\n')] |
import torchvision.transforms as transforms
from torch.autograd import Variable
import os
from PIL import Image
import numpy as np
def image_loader(image_name, imsize):
    """Load an image file, resize it to (imsize, imsize) and return a
    4-D tensor with a leading fake batch dimension."""
    to_tensor = transforms.Compose([
        transforms.Resize((imsize, imsize)),  # scale imported image
        transforms.ToTensor(),                # convert to a torch tensor
    ])
    tensor = Variable(to_tensor(Image.open(image_name)))
    # Fake batch dimension required to fit the network's input dimensions.
    return tensor.unsqueeze(0)
def image_loader_gray(image_name, imsize):
    """Load an image as grayscale, replicate it over three channels and
    return a 4-D tensor with a leading fake batch dimension."""
    preprocess = transforms.Compose([
        transforms.Resize((imsize, imsize)),  # scale imported image
        transforms.ToTensor(),                # convert to a torch tensor
    ])
    gray = np.asarray(Image.open(image_name).convert('L'))
    # Stack the single gray channel three times -> (3, H, W).
    stacked = np.asarray([gray, gray, gray])
    # Back to an H x W x 3 uint8 PIL image for the transform pipeline.
    pil_img = Image.fromarray(np.uint8(stacked).transpose(1, 2, 0))
    tensor = Variable(preprocess(pil_img))
    # Fake batch dimension required to fit the network's input dimensions.
    return tensor.unsqueeze(0)
def save_image(tensor, size, input_size, fname='transferred.png'):
    """Convert a tensor back to a PIL image, resize it and save it under
    the ``result/`` directory.

    Args:
        tensor: image tensor to save (cloned; the caller's tensor is untouched).
        size: shape the tensor is viewed as before conversion, e.g. (C, H, W).
        input_size: (width, height) the output image is resized to.
        fname: file name inside the ``result`` directory.
    """
    unloader = transforms.ToPILImage()  # reconvert into PIL image
    image = tensor.clone().cpu()  # clone so we do not modify the input tensor
    image = image.view(size)
    image = unloader(image).resize(input_size)
    out_path = os.path.join('result', fname)
    # makedirs(exist_ok=True) is race-free, unlike the former
    # os.path.exists + os.mkdir check-then-create pair.
    os.makedirs('result', exist_ok=True)
    image.save(out_path)
| [
"numpy.uint8",
"os.path.exists",
"PIL.Image.open",
"torchvision.transforms.ToPILImage",
"os.path.join",
"numpy.asarray",
"os.mkdir",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] | [((355, 377), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (365, 377), False, 'from PIL import Image\n'), ((1191, 1208), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1201, 1208), True, 'import numpy as np\n'), ((1221, 1254), 'numpy.asarray', 'np.asarray', (['[image, image, image]'], {}), '([image, image, image])\n', (1231, 1254), True, 'import numpy as np\n'), ((1552, 1575), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1573, 1575), True, 'import torchvision.transforms as transforms\n'), ((1777, 1806), 'os.path.join', 'os.path.join', (['"""result"""', 'fname'], {}), "('result', fname)\n", (1789, 1806), False, 'import os\n'), ((1818, 1842), 'os.path.exists', 'os.path.exists', (['"""result"""'], {}), "('result')\n", (1832, 1842), False, 'import os\n'), ((1852, 1870), 'os.mkdir', 'os.mkdir', (['"""result"""'], {}), "('result')\n", (1860, 1870), False, 'import os\n'), ((213, 248), 'torchvision.transforms.Resize', 'transforms.Resize', (['(imsize, imsize)'], {}), '((imsize, imsize))\n', (230, 248), True, 'import torchvision.transforms as transforms\n'), ((282, 303), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (301, 303), True, 'import torchvision.transforms as transforms\n'), ((979, 1014), 'torchvision.transforms.Resize', 'transforms.Resize', (['(imsize, imsize)'], {}), '((imsize, imsize))\n', (996, 1014), True, 'import torchvision.transforms as transforms\n'), ((1048, 1069), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1067, 1069), True, 'import torchvision.transforms as transforms\n'), ((1143, 1165), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1153, 1165), False, 'from PIL import Image\n'), ((1281, 1296), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (1289, 1296), True, 'import numpy as np\n')] |
"""
A visual class containing multiple axes.
"""
import numpy as np
from vispy.visuals import CompoundVisual, LineVisual, TextVisual
class HyperAxisVisual(CompoundVisual):
    """A compound visual: one line segment per axis (origin -> endpoint)
    plus a text label drawn slightly beyond each endpoint."""

    def __init__(self, pos, color="black", labels=None):
        # Segment buffer: even rows stay at the origin, odd rows hold the
        # axis endpoints, so consecutive pairs form origin->endpoint segments.
        n_axes = pos.shape[0]
        self.pos = np.zeros((n_axes * 2, 3))
        for idx in range(n_axes):
            self.pos[2 * idx + 1] = pos[idx]
        self._lines = LineVisual(pos=self.pos, method="gl", color=color,
                                 connect="segments", antialias=True)
        # Labels sit at 1.1x the endpoint so they clear the axis tips.
        self._text = TextVisual(text=labels, color=color, bold=True,
                                italic=True, pos=pos * 1.1, font_size=14,
                                method="gpu")
        CompoundVisual.__init__(self, [self._lines, self._text])

    def set_data(self, pos):
        """Update the axis endpoints (and label positions) in place."""
        for idx in range(pos.shape[0]):
            self.pos[2 * idx + 1] = pos[idx]
        self._lines.set_data(pos=self.pos)
        self._text.pos = pos * 1.1
        self._text.update()
| [
"vispy.visuals.TextVisual",
"numpy.zeros",
"vispy.visuals.CompoundVisual.__init__",
"vispy.visuals.LineVisual"
] | [((252, 283), 'numpy.zeros', 'np.zeros', (['(pos.shape[0] * 2, 3)'], {}), '((pos.shape[0] * 2, 3))\n', (260, 283), True, 'import numpy as np\n'), ((380, 470), 'vispy.visuals.LineVisual', 'LineVisual', ([], {'pos': 'self.pos', 'method': '"""gl"""', 'color': 'color', 'connect': '"""segments"""', 'antialias': '(True)'}), "(pos=self.pos, method='gl', color=color, connect='segments',\n antialias=True)\n", (390, 470), False, 'from vispy.visuals import CompoundVisual, LineVisual, TextVisual\n'), ((488, 595), 'vispy.visuals.TextVisual', 'TextVisual', ([], {'text': 'labels', 'color': 'color', 'bold': '(True)', 'italic': '(True)', 'pos': '(pos * 1.1)', 'font_size': '(14)', 'method': '"""gpu"""'}), "(text=labels, color=color, bold=True, italic=True, pos=pos * 1.1,\n font_size=14, method='gpu')\n", (498, 595), False, 'from vispy.visuals import CompoundVisual, LineVisual, TextVisual\n'), ((601, 657), 'vispy.visuals.CompoundVisual.__init__', 'CompoundVisual.__init__', (['self', '[self._lines, self._text]'], {}), '(self, [self._lines, self._text])\n', (624, 657), False, 'from vispy.visuals import CompoundVisual, LineVisual, TextVisual\n')] |
#-*-coding:utf-8-*-
# date:2021-06-15
# Author: Eric.Lee
# function: easy 3d handpose data iter
import glob
import math
import os
import random
from tqdm import tqdm
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import json
#----------------------
import torch
from manopth import manolayer
# from model.detnet import detnet
from utils import func, bone, AIK, smoother
from utils.LM_new import LM_Solver
import numpy as np
import matplotlib.pyplot as plt
from utils import vis
from op_pso import PSO
import open3d
from mpl_toolkits.mplot3d import Axes3D
import time
#----------------------
def draw_handpose_2d(img_, hand_, x, y, thick=3):
    """Draw the 21-keypoint hand skeleton on ``img_`` in place.

    Args:
        img_: BGR image drawn on in place.
        hand_: dict mapping str keypoint index ("0".."20") to
            ``{"x": ..., "y": ...}`` pixel coordinates.
        x, y: offset added to every keypoint coordinate.
        thick: line thickness in pixels.
    """
    # One BGR color per finger (same palette/order as before).
    colors = [(0, 215, 255), (255, 115, 55), (5, 255, 55),
              (25, 15, 255), (225, 15, 55)]
    # Each finger is the joint chain wrist(0) -> base -> ... -> tip;
    # this reproduces exactly the 20 segments formerly drawn one by one.
    fingers = [[0, 1, 2, 3, 4],
               [0, 5, 6, 7, 8],
               [0, 9, 10, 11, 12],
               [0, 13, 14, 15, 16],
               [0, 17, 18, 19, 20]]
    for color, chain in zip(colors, fingers):
        for a, b in zip(chain[:-1], chain[1:]):
            pt_a = (int(hand_[str(a)]['x'] + x), int(hand_[str(a)]['y'] + y))
            pt_b = (int(hand_[str(b)]['x'] + x), int(hand_[str(b)]['y'] + y))
            cv2.line(img_, pt_a, pt_b, color, thick)
def img_agu_channel_same(img_):
    """Return a 3-channel uint8 image whose channels all hold the
    grayscale version of ``img_`` (augmentation: drop color)."""
    gray = cv2.cvtColor(img_, cv2.COLOR_RGB2GRAY)
    out = np.zeros(img_.shape, dtype=np.uint8)
    for channel in range(3):
        out[:, :, channel] = gray
    return out
def prewhiten(x):
    """Whiten an image array: subtract the mean and divide by the
    standard deviation, clamped below at 1/sqrt(x.size) so that
    nearly-constant inputs do not blow up."""
    mu = np.mean(x)
    sigma = np.maximum(np.std(x), 1.0 / np.sqrt(x.size))
    return np.multiply(np.subtract(x, mu), 1 / sigma)
# Image brightness / contrast augmentation.
def contrast_img(img, c, b):
    """Adjust contrast and brightness: dst = c*img + (1-c)*black + b.

    ``c`` scales contrast; ``b`` is added to every channel of every pixel.
    """
    rows, cols, channels = img.shape
    # All-zero (black) image with the same shape and dtype as the input.
    black = np.zeros([rows, cols, channels], img.dtype)
    return cv2.addWeighted(img, c, black, 1 - c, b)
def letterbox(img, height=416, augment=False, color=(127.5, 127.5, 127.5)):
    """Resize a rectangular image to a padded square of side ``height``.

    Returns ``(padded_img, ratio, dw, dh)`` where ``ratio`` is the
    new/old scale factor and ``dw``/``dh`` are the per-side horizontal
    and vertical paddings (fractional, before rounding).
    """
    shape = img.shape[:2]  # (height, width)
    ratio = float(height) / max(shape)
    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))  # (w, h)
    dw = (height - new_shape[0]) / 2  # width padding per side
    dh = (height - new_shape[1]) / 2  # height padding per side
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    if augment:
        # Randomly pick an interpolation; None means cv2's default.
        interp = np.random.choice([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
                                  None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
                                  cv2.INTER_AREA, cv2.INTER_CUBIC,
                                  cv2.INTER_LANCZOS4])
        if interp is None:
            img = cv2.resize(img, new_shape)
        else:
            img = cv2.resize(img, new_shape, interpolation=interp)
    else:
        img = cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST)
    # Pad to the final square with a constant border color.
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, ratio, dw, dh
def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Splat a 2-D Gaussian of the given radius onto ``heatmap`` at
    ``center`` (x, y).  Overlaps keep the element-wise maximum (scaled
    by ``k``); the heatmap is modified in place and also returned."""
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    cx, cy = int(center[0]), int(center[1])
    height, width = heatmap.shape[0:2]
    # Clip the Gaussian window to the heatmap bounds.
    left, right = min(cx, radius), min(width - cx, radius + 1)
    top, bottom = min(cy, radius), min(height - cy, radius + 1)
    hm_patch = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    g_patch = gaussian[radius - top:radius + bottom,
                       radius - left:radius + right]
    if min(g_patch.shape) > 0 and min(hm_patch.shape) > 0:
        np.maximum(hm_patch, g_patch * k, out=hm_patch)
    return heatmap
def gaussian2D(shape, sigma=1):
    """Return an unnormalised 2-D Gaussian kernel of the given shape.

    The peak value is 1 at the kernel centre; entries below machine
    epsilon relative to the peak are zeroed.
    """
    half_h = (shape[0] - 1.) / 2.
    half_w = (shape[1] - 1.) / 2.
    y, x = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
    kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
def draw_msra_gaussian(heatmap, center, sigma):
    """Add an MSRA-style Gaussian peak at ``center`` (x, y) to
    ``heatmap`` in place, keeping the element-wise maximum where the
    window overlaps existing values.  Returns the heatmap."""
    tmp_size = sigma * 3
    mu_x = int(center[0] + 0.5)
    mu_y = int(center[1] + 0.5)
    w, h = heatmap.shape[0], heatmap.shape[1]
    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]          # upper-left
    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]  # bottom-right
    # Window entirely outside the heatmap: nothing to draw.
    if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
        return heatmap
    size = 2 * tmp_size + 1
    x_range = np.arange(0, size, 1, np.float32)
    y_range = x_range[:, np.newaxis]
    center_idx = size // 2
    g = np.exp(-((x_range - center_idx) ** 2 + (y_range - center_idx) ** 2)
               / (2 * sigma ** 2))
    # Intersect the Gaussian window with the heatmap bounds.
    gx0, gx1 = max(0, -ul[0]), min(br[0], h) - ul[0]
    gy0, gy1 = max(0, -ul[1]), min(br[1], w) - ul[1]
    hx0, hx1 = max(0, ul[0]), min(br[0], h)
    hy0, hy1 = max(0, ul[1]), min(br[1], w)
    patch = heatmap[hy0:hy1, hx0:hx1]
    heatmap[hy0:hy1, hx0:hx1] = np.maximum(patch, g[gy0:gy1, gx0:gx1])
    return heatmap
def get_heatmap(img_fix_size, x1y1x2y2, handpose_2d, ratio, dw, dh,
                offset_x1=0, offset_y1=0, radius=20, vis=False):
    """Build per-keypoint Gaussian heatmaps for the 21 hand joints.

    Each 2-D keypoint is shifted by (offset_x1, offset_y1), scaled by
    the letterbox ratio and translated by the letterbox padding so it
    lands on ``img_fix_size``.  Returns ``(hm, hm_w)``: the HxWx21
    heatmap stack and its channel-wise maximum.
    """
    num_joints = 21
    hm = np.zeros((img_fix_size.shape[0], img_fix_size.shape[1], num_joints),
                  dtype=np.float32)
    # umich-style splatting is always used; the msra variant is kept
    # selectable here for reference.
    draw_gaussian = draw_msra_gaussian if False else draw_umich_gaussian
    pad_x, pad_y = round(dw - 0.1), round(dh - 0.1)
    for k in range(num_joints):
        joint = handpose_2d[str(k)]
        x = (joint["x"] - offset_x1) * ratio + pad_x
        y = (joint["y"] - offset_y1) * ratio + pad_y
        draw_gaussian(hm[:, :, k], (x, y), radius)  # writes into the hm view
        if vis:
            cv2.namedWindow("hm", 0)
            cv2.imshow("hm", hm[:, :, k])
            cv2.circle(img_fix_size, (int(x), int(y)), 3, (250, 60, 255), -1)
            cv2.namedWindow("fix_size", 0)
            cv2.imshow("fix_size", img_fix_size)
            cv2.waitKey(1)
    hm_w = hm.max(axis=2)
    if vis:
        cv2.namedWindow("hm_w", 0)
        cv2.imshow("hm_w", hm_w)
    return hm, hm_w
class LoadImagesAndLabels(Dataset):
    def __init__(self, ops, img_size=(256,256), flag_agu = False,vis = False):
        """Index the 3-D handpose training set.

        Scans ``ops.train_path`` for ``*_N.jpg`` images with matching
        ``*_N.json`` labels and caches, per sample: image path, 2-D hand
        bbox crop coordinates, 2-D keypoints and 3-D joint coordinates.
        When ``vis`` is True each sample is additionally rendered with a
        MANO hand mesh in an open3d window while scanning (debug only).

        Args:
            ops: config object; only ``ops.train_path`` is read here.
            img_size: (height, width) the network input is letterboxed to.
            flag_agu: stored for use by ``__getitem__`` (augmentation switch).
            vis: enable the debug/visualisation path.
        """
        print('img_size (height,width) : ',img_size[0],img_size[1])
        print("train_path : {}".format(ops.train_path))
        g_side = "right"
        path = ops.train_path
        #----------------------- visualisation-only setup (MANO + open3d)
        if vis:
            pose, shape = func.initiate("zero")
            pre_useful_bone_len = np.zeros((1, 15)) # skeleton bone-length buffer
            solver = LM_Solver(num_Iter=666, th_beta=shape.cpu(), th_pose=pose.cpu(), lb_target=pre_useful_bone_len,
                            weight=1e-5)
            pose0 = torch.eye(3).repeat(1, 16, 1, 1)
            mano = manolayer.ManoLayer(flat_hand_mean=True,
                                    side=g_side,
                                    mano_root='./mano/models',
                                    use_pca=False,
                                    root_rot_mode='rotmat',
                                    joint_rot_mode='rotmat')
            print('start ~')
            point_fliter = smoother.OneEuroFilter(4.0, 0.0)
            mesh_fliter = smoother.OneEuroFilter(4.0, 0.0)
            shape_fliter = smoother.OneEuroFilter(1.5, 0.0)
            #--------------------------- configure the point cloud viewer
            view_mat = np.array([[1.0, 0.0, 0.0],
                                [0.0, -1.0, 0],
                                [0.0, 0, -1.0]])
            mesh = open3d.geometry.TriangleMesh()
            hand_verts, j3d_recon = mano(pose0, shape.float())
            mesh.triangles = open3d.utility.Vector3iVector(mano.th_faces)
            hand_verts = hand_verts.clone().detach().cpu().numpy()[0]
            mesh.vertices = open3d.utility.Vector3dVector(hand_verts)
            viewer = open3d.visualization.Visualizer()
            viewer.create_window(width=640, height=640, window_name='HandPose3d_Mesh')
            viewer.add_geometry(mesh)
            viewer.update_renderer()
            renderOptions = viewer.get_render_option ()
            renderOptions.background_color = np.asarray([120/255,120/255,120/255]) # set background color
            # axis_pcd = open3d.create_mesh_coordinate_frame(size=0.5, origin=[0, 0, 0])
            # vis.add_geometry(axis_pcd)
            pts_flag = True
            if pts_flag:
                test_pcd = open3d.geometry.PointCloud() # point cloud for joint markers
                viewer.add_geometry(test_pcd)
            print('start pose estimate')
            pre_uv = None
            shape_time = 0
            opt_shape = None
            shape_flag = True
        #----------------------------------------------------------------------- dataset index accumulators
        file_list = []
        label_list = []
        bbox_list = []
        handpose_2d_x1y1x2y2_list = []
        handpose_2d_pts_hand_list = []
        handpose_3d_xyz_list = []
        idx = 0
        for f_ in os.listdir(path):
            if ".jpg" in f_:
                thr = 0
                num_ = int(f_.split("_")[-1].replace(".jpg",""))
                file_img = path + f_
                # label file shares the sample index (offset by thr, here 0)
                file_json = file_img.replace("_{}.jpg".format(num_),"_{}.json".format(num_+thr))
                if not os.access(file_json,os.F_OK):
                    continue
                #-----------------------------
                file_list.append(file_img)
                label_list.append(file_json)
                #-----------------------------
                # print(file_json)
                f = open(file_json, encoding='utf-8')# read the json label file
                dict_x = json.load(f)
                f.close()
                # print(dict_x)
                #--------------------
                if vis:
                    img = cv2.imread(file_img)
                    if g_side == "left":
                        img = cv2.flip(img,1)
                bbox = dict_x["bbox"]
                handpose_2d = dict_x["handpose_2d"]
                #-----------------
                x1_,y1_,x2_,y2_ = handpose_2d["x1y1x2y2"]
                x1_,y1_,x2_,y2_ = int(x1_),int(y1_),int(x2_),int(y2_)
                gt_3d_joints = dict_x["handpose_3d_xyz"]
                #
                handpose_2d_x1y1x2y2_list.append((x1_,y1_,x2_,y2_))
                handpose_2d_pts_hand_list.append(handpose_2d["pts_hand"])
                handpose_3d_xyz_list.append(gt_3d_joints)
                if vis:
                    img_fix_size,ratio, dw, dh = letterbox(img[y1_:y2_,x1_:x2_], height=img_size[0], color=(0,0,0))
                    hm,hm_w = get_heatmap(img_fix_size,handpose_2d["x1y1x2y2"],handpose_2d["pts_hand"],ratio, dw, dh,vis=False)
                    cv2.namedWindow("fix_size",0)
                    cv2.imshow("fix_size",img_fix_size)
                    hm_w = np.expand_dims(hm_w,2)
                    print("hm.shape : {}".format(hm.shape))
                    print("hm_w.shape : {}".format(hm_w.shape))
                    print("img_fix_size.shape : {}".format(img_fix_size.shape))
                    img_fix_size_r = img_fix_size.astype(np.float32)
                    img_fix_size_r = (img_fix_size_r-128.)/256.
                    #-------------------------------------------------- fuse image and heatmaps channel-wise
                    image_fusion = np.concatenate((img_fix_size_r,hm),axis=2)
                    print(" A image_fusion.shape : {}".format(image_fusion.shape))
                    image_fusion = image_fusion.transpose(2, 0, 1)
                    print(" B image_fusion.shape : {}".format(image_fusion.shape))
                    image_fusion = np.expand_dims(image_fusion,0)
                    print(" C image_fusion.shape : {}".format(image_fusion.shape))
                    # img_fix_size_r = np.expand_dims(img_fix_size_r,0)
                    # print(hm.shape ," 《《-------------》》",img_fix_size_r.shape)
                    # #--------------------------------------------------
                    # image_fusion = np.concatenate((img_fix_size_r,hm_w),axis=0)
                    #-----------------
                    cv2.rectangle(img, (int(bbox[0]),int(bbox[1])), (int(bbox[2]),int(bbox[3])), (255,0,255), 5) # draw hollow rectangle
                    pts_hand2d = handpose_2d["pts_hand"]
                    draw_handpose_2d(img,pts_hand2d,x1_,y1_,2)
                    #---------------------------------
                    gt_3d_joints= np.array(gt_3d_joints)
                    print(gt_3d_joints.shape)
                    if g_side == "left":
                        print("------------------->>. left")
                        gt_3d_joints[:,0] *=(-1.)
                    gt_3d_joints = torch.tensor(gt_3d_joints).squeeze(0)
                    gt_3d_joints= gt_3d_joints.cuda()
                    print(gt_3d_joints.size())
                    #------------------------------
                    # now_uv = result['uv'].clone().detach().cpu().numpy()[0, 0]
                    # now_uv = now_uv.astype(np.float)
                    trans = np.zeros((1, 3))
                    # trans[0, 0:2] = now_uv - 16.0
                    trans = trans / 16.0
                    new_tran = np.array([[trans[0, 1], trans[0, 0], trans[0, 2]]])
                    gt_3d_joints = gt_3d_joints.clone().detach().cpu().numpy()
                    flited_joints = point_fliter.process(gt_3d_joints)
                    # fliter_ax.cla()
                    #
                    # filted_ax = vis.plot3d(flited_joints + new_tran, fliter_ax)
                    pre_useful_bone_len = bone.caculate_length(gt_3d_joints, label="useful")
                    NGEN = 0 # number of PSO iterations
                    popsize = 100
                    low = np.zeros((1, 10)) - 3.0
                    up = np.zeros((1, 10)) - 2.0
                    parameters = [NGEN, popsize, low, up]
                    # particle-swarm fit of the MANO shape to the bone lengths
                    pso = PSO(parameters, pre_useful_bone_len.reshape((1, 15)),g_side)
                    pso.main(solver)
                    if True:#opt_shape is None:
                        opt_shape = pso.ng_best
                        opt_shape = shape_fliter.process(opt_shape)
                    opt_tensor_shape = torch.tensor(opt_shape, dtype=torch.float)
                    _, j3d_p0_ops = mano(pose0, opt_tensor_shape)
                    template = j3d_p0_ops.cpu().numpy().squeeze(0) / 1000.0  # template, m 21*3
                    ratio = np.linalg.norm(template[9] - template[0]) / np.linalg.norm(gt_3d_joints[9] - gt_3d_joints[0])
                    j3d_pre_process = gt_3d_joints * ratio  # template, m
                    j3d_pre_process = j3d_pre_process - j3d_pre_process[0] + template[0]
                    pose_R = AIK.adaptive_IK(template, j3d_pre_process)
                    pose_R = torch.from_numpy(pose_R).float()
                    # reconstruction
                    hand_verts, j3d_recon = mano(pose_R, opt_tensor_shape.float())
                    hand_verts[:,:,:] = hand_verts[:,:,:]*(0.85)
                    # print(j3d_recon.size())
                    mesh.triangles = open3d.utility.Vector3iVector(mano.th_faces)
                    hand_verts = hand_verts.clone().detach().cpu().numpy()[0]
                    hand_verts = mesh_fliter.process(hand_verts)
                    hand_verts = np.matmul(view_mat, hand_verts.T).T
                    if g_side == "right":
                        hand_verts[:, 0] = hand_verts[:, 0] - 80
                    else:
                        hand_verts[:, 0] = hand_verts[:, 0] + 80
                    hand_verts[:, 1] = hand_verts[:, 1] - 0
                    mesh_tran = np.array([[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]])
                    hand_verts = hand_verts - 100 * mesh_tran
                    mesh.vertices = open3d.utility.Vector3dVector(hand_verts)
                    # mesh.paint_uniform_color([252 / 255, 224 / 255, 203 / 255])
                    mesh.paint_uniform_color([238 / 255, 188 / 255, 158 / 255])
                    mesh.compute_triangle_normals()
                    mesh.compute_vertex_normals()
                    #-----------
                    if pts_flag:
                        if False:
                            j3d_ = j3d_recon.detach().cpu().numpy()
                            j3d_[0][:,1] *=(-1.)
                            # j3d_[0][:,0] +=trans[0,0]
                            j3d_[0] = j3d_[0] - 100 * mesh_tran
                            j3d_[0][:,0] -=50
                            j3d_[0][:,1] -=30
                            # print(j3d_.shape,j3d_)
                            test_pcd.points = open3d.utility.Vector3dVector(j3d_[0]) # set point-cloud coordinates
                        else:
                            # test_pcd.points = open3d.utility.Vector3dVector(hand_verts)
                            gt_3d_joints[:,1] *=-1.
                            gt_3d_joints = gt_3d_joints*70
                            gt_3d_joints[:,1] -= 40
                            gt_3d_joints[:,0] -= 0
                            print("gt_3d_joints",gt_3d_joints.shape)
                            test_pcd.points = open3d.utility.Vector3dVector(gt_3d_joints)
                            # test_pcd.points = open3d.utility.Vector3dVector(gt_3d_joints[1,:].reshape(1,3))
                            # rgb = np.asarray([250,0,250])
                            # rgb_t = np.transpose(rgb)
                            # test_pcd.colors = open3d.utility.Vector3dVector(rgb_t.astype(np.float) / 255.0)
                    # print("hand_verts shape",hand_verts)
                    #-----------
                    viewer.update_geometry(mesh)
                    if pts_flag:
                        viewer.update_geometry(test_pcd)
                    viewer.poll_events()
                    viewer.update_renderer()
                    cv2.namedWindow("img",0)
                    cv2.imshow("img",img)
                    cv2.waitKey(1)
        #-----------------------------------------------------------------------
        if vis:
            cv2.destroyAllWindows()
        #
        print()
        # cache the per-sample index lists on the instance
        self.files = file_list
        self.img_size = img_size
        self.flag_agu = flag_agu
        self.vis = vis
        # label_list = []
        self.bbox_list = bbox_list
        self.x1y1x2y2_2d_list = handpose_2d_x1y1x2y2_list
        self.pts_hand_2d_list = handpose_2d_pts_hand_list
        self.xyz_3d_list = handpose_3d_xyz_list
def __len__(self):
return len(self.files)
def __getitem__(self, index):
# gt_3d_joints = dict_x["handpose_3d_xyz"]
# #
# handpose_2d_x1y1x2y2_list.append((x1_,y1_,x2_,y2_))
# handpose_2d_pts_hand_list.append(handpose_2d["pts_hand"])
# handpose_3d_xyz_list.append(gt_3d_joints)
img_path = self.files[index]
x1y1x2y2 = self.x1y1x2y2_2d_list[index]
pts_hand = self.pts_hand_2d_list[index]
gt_3d_joints = self.xyz_3d_list[index]
img = cv2.imread(img_path) # BGR
x1_,y1_,x2_,y2_ = x1y1x2y2
hand_w = int((x2_-x1_)/2)
hand_h = int((y2_-y1_)/2)
offset_x1 = random.randint(-hand_w,int(hand_w/6))
offset_y1 = random.randint(-hand_h,int(hand_h/6))
offset_x2 = random.randint(-int(hand_w/6),hand_w)
offset_y2 = random.randint(-int(hand_h/6),hand_h)
# print(" A : x1_,y1_,x2_,y2_ : ",x1_,y1_,x2_,y2_)
x1_new = x1_+offset_x1
y1_new = y1_+offset_y1
x2_new = x2_+offset_x2
y2_new = y2_+offset_y2
x1_new = np.clip(x1_,0,img.shape[1]-1)
x2_new = np.clip(x2_,0,img.shape[1]-1)
y1_new = np.clip(y1_,0,img.shape[0]-1)
y2_new = np.clip(y2_,0,img.shape[0]-1)
offset_x1 = x1_new - x1_
offset_y1 = y1_new - y1_
offset_x2 = x2_new - x2_
offset_y2 = y2_new - y2_
# print(" B : x1_,y1_,x2_,y2_ : ",x1_,y1_,x2_,y2_)
x1_ = x1_new
y1_ = y1_new
x2_ = x2_new
y2_ = y2_new
#-------------------------------------
# if self.vis:
# aa = img[y1_:y2_,x1_:x2_]
# for k in range(21):
# x,y = (pts_hand[str(k)]["x"]-offset_x1),(pts_hand[str(k)]["y"]-offset_y1)
#
#
# cv2.circle(aa, (int(x),int(y)), 3, (250,60,255),-1)
# cv2.namedWindow("fix_size_a",0)
# cv2.imshow("fix_size_a",aa)
#-------------------------------------
# print("self.img_size : ",self.img_size)
img_,ratio, dw, dh = letterbox(img[y1_:y2_,x1_:x2_], height=self.img_size[0], color=(0,0,0))
hm,hm_w = get_heatmap(img_,x1y1x2y2,pts_hand,ratio, dw, dh,offset_x1,offset_y1,vis=self.vis)
if self.vis:
cv2.namedWindow("fix_size",0)
cv2.imshow("fix_size",img_)
hm_w = np.expand_dims(hm_w,2)
if self.vis:
print("hm.shape : {}".format(hm.shape))
print("hm_w.shape : {}".format(hm_w.shape))
print("img_fix_size.shape : {}".format(img_.shape))
#-------------------------------------
#-------------------------------------
if self.flag_agu == True:
if random.random() > 0.5:
c = float(random.randint(80,120))/100.
b = random.randint(-10,10)
img_ = contrast_img(img_, c, b)
if self.flag_agu == True:
if random.random() > 0.9:
# print('agu hue ')
img_hsv=cv2.cvtColor(img_,cv2.COLOR_BGR2HSV)
hue_x = random.randint(-10,10)
# print(cc)
img_hsv[:,:,0]=(img_hsv[:,:,0]+hue_x)
img_hsv[:,:,0] =np.maximum(img_hsv[:,:,0],0)
img_hsv[:,:,0] =np.minimum(img_hsv[:,:,0],180)#范围 0 ~180
img_=cv2.cvtColor(img_hsv,cv2.COLOR_HSV2BGR)
if self.flag_agu == True:
if random.random() > 0.95:
img_ = img_agu_channel_same(img_)
if self.vis:
cv2.namedWindow("fix_size_agu",0)
cv2.imshow("fix_size_agu",img_)
cv2.waitKey(1)
img_fix_size = img_.astype(np.float32)
img_fix_size_r = img_fix_size.astype(np.float32)
img_fix_size_r = (img_fix_size_r-128.)/256.
#--------------------------------------------------
image_fusion = np.concatenate((img_fix_size_r,hm),axis=2)
if self.vis:
print(" A image_fusion.shape : {}".format(image_fusion.shape))
image_fusion = image_fusion.transpose(2, 0, 1)
if self.vis:
print(" B image_fusion.shape : {}".format(image_fusion.shape))
# image_fusion = np.expand_dims(image_fusion,0)
# if self.vis:
# print(" C image_fusion.shape : {}".format(image_fusion.shape))
# cv2.waitKey(0)
gt_3d_joints = np.array(gt_3d_joints).ravel()
if self.vis:
print(gt_3d_joints.shape)
print(image_fusion.shape)
return image_fusion,gt_3d_joints
| [
"numpy.clip",
"numpy.sqrt",
"utils.AIK.adaptive_IK",
"cv2.imshow",
"torch.from_numpy",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.linalg.norm",
"numpy.arange",
"numpy.mean",
"os.listdir",
"torch.eye",
"open3d.visualization.Visualizer",
"numpy.asarray",
"numpy.subtract",
"numpy.exp"... | [((3618, 3654), 'numpy.zeros', 'np.zeros', (['img_.shape'], {'dtype': 'np.uint8'}), '(img_.shape, dtype=np.uint8)\n', (3626, 3654), True, 'import numpy as np\n'), ((3668, 3706), 'cv2.cvtColor', 'cv2.cvtColor', (['img_', 'cv2.COLOR_RGB2GRAY'], {}), '(img_, cv2.COLOR_RGB2GRAY)\n', (3680, 3706), False, 'import cv2\n'), ((3829, 3839), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3836, 3839), True, 'import numpy as np\n'), ((3850, 3859), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (3856, 3859), True, 'import numpy as np\n'), ((4146, 4189), 'numpy.zeros', 'np.zeros', (['[rows, cols, channels]', 'img.dtype'], {}), '([rows, cols, channels], img.dtype)\n', (4154, 4189), True, 'import numpy as np\n'), ((4200, 4240), 'cv2.addWeighted', 'cv2.addWeighted', (['img', 'c', 'blank', '(1 - c)', 'b'], {}), '(img, c, blank, 1 - c, b)\n', (4215, 4240), False, 'import cv2\n'), ((5379, 5466), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(img, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=color)\n', (5397, 5466), False, 'import cv2\n'), ((6333, 6379), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2 * sigma * sigma))\n', (6339, 6379), True, 'import numpy as np\n'), ((6858, 6891), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)', 'np.float32'], {}), '(0, size, 1, np.float32)\n', (6867, 6891), True, 'import numpy as np\n'), ((6949, 7008), 'numpy.exp', 'np.exp', (['(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))'], {}), '(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n', (6955, 7008), True, 'import numpy as np\n'), ((7240, 7334), 'numpy.maximum', 'np.maximum', (['heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]]', 'g[g_y[0]:g_y[1], g_x[0]:g_x[1]]'], {}), '(heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], g[g_y[0]:g_y[1],\n g_x[0]:g_x[1]])\n', (7250, 7334), True, 'import numpy as np\n'), ((7507, 7596), 'numpy.zeros', 
'np.zeros', (['(img_fix_size.shape[0], img_fix_size.shape[1], num_objs)'], {'dtype': 'np.float32'}), '((img_fix_size.shape[0], img_fix_size.shape[1], num_objs), dtype=np\n .float32)\n', (7515, 7596), True, 'import numpy as np\n'), ((3933, 3953), 'numpy.subtract', 'np.subtract', (['x', 'mean'], {}), '(x, mean)\n', (3944, 3953), True, 'import numpy as np\n'), ((4832, 4999), 'numpy.random.choice', 'np.random.choice', (['[None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, None, cv2.INTER_NEAREST, cv2.\n INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]'], {}), '([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, None, cv2.\n INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.\n INTER_LANCZOS4])\n', (4848, 4999), True, 'import numpy as np\n'), ((5265, 5324), 'cv2.resize', 'cv2.resize', (['img', 'new_shape'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, new_shape, interpolation=cv2.INTER_NEAREST)\n', (5275, 5324), False, 'import cv2\n'), ((6125, 6192), 'numpy.maximum', 'np.maximum', (['masked_heatmap', '(masked_gaussian * k)'], {'out': 'masked_heatmap'}), '(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n', (6135, 6192), True, 'import numpy as np\n'), ((8289, 8315), 'cv2.namedWindow', 'cv2.namedWindow', (['"""hm_w"""', '(0)'], {}), "('hm_w', 0)\n", (8304, 8315), False, 'import cv2\n'), ((8323, 8347), 'cv2.imshow', 'cv2.imshow', (['"""hm_w"""', 'hm_w'], {}), "('hm_w', hm_w)\n", (8333, 8347), False, 'import cv2\n'), ((11286, 11302), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (11296, 11302), False, 'import os\n'), ((21257, 21277), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (21267, 21277), False, 'import cv2\n'), ((21825, 21858), 'numpy.clip', 'np.clip', (['x1_', '(0)', '(img.shape[1] - 1)'], {}), '(x1_, 0, img.shape[1] - 1)\n', (21832, 21858), True, 'import numpy as np\n'), ((21872, 21905), 'numpy.clip', 'np.clip', (['x2_', '(0)', '(img.shape[1] - 1)'], {}), '(x2_, 0, img.shape[1] - 1)\n', (21879, 21905), 
True, 'import numpy as np\n'), ((21919, 21952), 'numpy.clip', 'np.clip', (['y1_', '(0)', '(img.shape[0] - 1)'], {}), '(y1_, 0, img.shape[0] - 1)\n', (21926, 21952), True, 'import numpy as np\n'), ((21966, 21999), 'numpy.clip', 'np.clip', (['y2_', '(0)', '(img.shape[0] - 1)'], {}), '(y2_, 0, img.shape[0] - 1)\n', (21973, 21999), True, 'import numpy as np\n'), ((23105, 23128), 'numpy.expand_dims', 'np.expand_dims', (['hm_w', '(2)'], {}), '(hm_w, 2)\n', (23119, 23128), True, 'import numpy as np\n'), ((24631, 24675), 'numpy.concatenate', 'np.concatenate', (['(img_fix_size_r, hm)'], {'axis': '(2)'}), '((img_fix_size_r, hm), axis=2)\n', (24645, 24675), True, 'import numpy as np\n'), ((3896, 3911), 'numpy.sqrt', 'np.sqrt', (['x.size'], {}), '(x.size)\n', (3903, 3911), True, 'import numpy as np\n'), ((5126, 5152), 'cv2.resize', 'cv2.resize', (['img', 'new_shape'], {}), '(img, new_shape)\n', (5136, 5152), False, 'import cv2\n'), ((5185, 5240), 'cv2.resize', 'cv2.resize', (['img', 'new_shape'], {'interpolation': 'interpolation'}), '(img, new_shape, interpolation=interpolation)\n', (5195, 5240), False, 'import cv2\n'), ((7938, 7962), 'cv2.namedWindow', 'cv2.namedWindow', (['"""hm"""', '(0)'], {}), "('hm', 0)\n", (7953, 7962), False, 'import cv2\n'), ((7974, 8003), 'cv2.imshow', 'cv2.imshow', (['"""hm"""', 'hm[:, :, k]'], {}), "('hm', hm[:, :, k])\n", (7984, 8003), False, 'import cv2\n'), ((8087, 8117), 'cv2.namedWindow', 'cv2.namedWindow', (['"""fix_size"""', '(0)'], {}), "('fix_size', 0)\n", (8102, 8117), False, 'import cv2\n'), ((8129, 8165), 'cv2.imshow', 'cv2.imshow', (['"""fix_size"""', 'img_fix_size'], {}), "('fix_size', img_fix_size)\n", (8139, 8165), False, 'import cv2\n'), ((8177, 8191), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8188, 8191), False, 'import cv2\n'), ((8788, 8809), 'utils.func.initiate', 'func.initiate', (['"""zero"""'], {}), "('zero')\n", (8801, 8809), False, 'from utils import func, bone, AIK, smoother\n'), ((8844, 8861), 'numpy.zeros', 
'np.zeros', (['(1, 15)'], {}), '((1, 15))\n', (8852, 8861), True, 'import numpy as np\n'), ((9104, 9258), 'manopth.manolayer.ManoLayer', 'manolayer.ManoLayer', ([], {'flat_hand_mean': '(True)', 'side': 'g_side', 'mano_root': '"""./mano/models"""', 'use_pca': '(False)', 'root_rot_mode': '"""rotmat"""', 'joint_rot_mode': '"""rotmat"""'}), "(flat_hand_mean=True, side=g_side, mano_root=\n './mano/models', use_pca=False, root_rot_mode='rotmat', joint_rot_mode=\n 'rotmat')\n", (9123, 9258), False, 'from manopth import manolayer\n'), ((9500, 9532), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(4.0)', '(0.0)'], {}), '(4.0, 0.0)\n', (9522, 9532), False, 'from utils import func, bone, AIK, smoother\n'), ((9559, 9591), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(4.0)', '(0.0)'], {}), '(4.0, 0.0)\n', (9581, 9591), False, 'from utils import func, bone, AIK, smoother\n'), ((9619, 9651), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(1.5)', '(0.0)'], {}), '(1.5, 0.0)\n', (9641, 9651), False, 'from utils import func, bone, AIK, smoother\n'), ((9721, 9780), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, -1.0, 0], [0.0, 0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, -1.0, 0], [0.0, 0, -1.0]])\n', (9729, 9780), True, 'import numpy as np\n'), ((9866, 9896), 'open3d.geometry.TriangleMesh', 'open3d.geometry.TriangleMesh', ([], {}), '()\n', (9894, 9896), False, 'import open3d\n'), ((9989, 10033), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mano.th_faces'], {}), '(mano.th_faces)\n', (10018, 10033), False, 'import open3d\n'), ((10132, 10173), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['hand_verts'], {}), '(hand_verts)\n', (10161, 10173), False, 'import open3d\n'), ((10195, 10228), 'open3d.visualization.Visualizer', 'open3d.visualization.Visualizer', ([], {}), '()\n', (10226, 10228), False, 'import open3d\n'), ((10492, 10537), 'numpy.asarray', 'np.asarray', (['[120 / 255, 120 / 255, 120 / 
255]'], {}), '([120 / 255, 120 / 255, 120 / 255])\n', (10502, 10537), True, 'import numpy as np\n'), ((20331, 20354), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20352, 20354), False, 'import cv2\n'), ((23019, 23049), 'cv2.namedWindow', 'cv2.namedWindow', (['"""fix_size"""', '(0)'], {}), "('fix_size', 0)\n", (23034, 23049), False, 'import cv2\n'), ((23061, 23089), 'cv2.imshow', 'cv2.imshow', (['"""fix_size"""', 'img_'], {}), "('fix_size', img_)\n", (23071, 23089), False, 'import cv2\n'), ((24284, 24318), 'cv2.namedWindow', 'cv2.namedWindow', (['"""fix_size_agu"""', '(0)'], {}), "('fix_size_agu', 0)\n", (24299, 24318), False, 'import cv2\n'), ((24330, 24362), 'cv2.imshow', 'cv2.imshow', (['"""fix_size_agu"""', 'img_'], {}), "('fix_size_agu', img_)\n", (24340, 24362), False, 'import cv2\n'), ((24374, 24388), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (24385, 24388), False, 'import cv2\n'), ((10750, 10778), 'open3d.geometry.PointCloud', 'open3d.geometry.PointCloud', ([], {}), '()\n', (10776, 10778), False, 'import open3d\n'), ((11944, 11956), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11953, 11956), False, 'import json\n'), ((23465, 23480), 'random.random', 'random.random', ([], {}), '()\n', (23478, 23480), False, 'import random\n'), ((23563, 23586), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (23577, 23586), False, 'import random\n'), ((23683, 23698), 'random.random', 'random.random', ([], {}), '()\n', (23696, 23698), False, 'import random\n'), ((23766, 23803), 'cv2.cvtColor', 'cv2.cvtColor', (['img_', 'cv2.COLOR_BGR2HSV'], {}), '(img_, cv2.COLOR_BGR2HSV)\n', (23778, 23803), False, 'import cv2\n'), ((23827, 23850), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (23841, 23850), False, 'import random\n'), ((23964, 23995), 'numpy.maximum', 'np.maximum', (['img_hsv[:, :, 0]', '(0)'], {}), '(img_hsv[:, :, 0], 0)\n', (23974, 23995), True, 'import numpy as np\n'), ((24025, 
24058), 'numpy.minimum', 'np.minimum', (['img_hsv[:, :, 0]', '(180)'], {}), '(img_hsv[:, :, 0], 180)\n', (24035, 24058), True, 'import numpy as np\n'), ((24087, 24127), 'cv2.cvtColor', 'cv2.cvtColor', (['img_hsv', 'cv2.COLOR_HSV2BGR'], {}), '(img_hsv, cv2.COLOR_HSV2BGR)\n', (24099, 24127), False, 'import cv2\n'), ((24176, 24191), 'random.random', 'random.random', ([], {}), '()\n', (24189, 24191), False, 'import random\n'), ((25133, 25155), 'numpy.array', 'np.array', (['gt_3d_joints'], {}), '(gt_3d_joints)\n', (25141, 25155), True, 'import numpy as np\n'), ((6390, 6407), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (6398, 6407), True, 'import numpy as np\n'), ((9051, 9063), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (9060, 9063), False, 'import torch\n'), ((11579, 11608), 'os.access', 'os.access', (['file_json', 'os.F_OK'], {}), '(file_json, os.F_OK)\n', (11588, 11608), False, 'import os\n'), ((12103, 12123), 'cv2.imread', 'cv2.imread', (['file_img'], {}), '(file_img)\n', (12113, 12123), False, 'import cv2\n'), ((12187, 12203), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (12195, 12203), False, 'import cv2\n'), ((13021, 13051), 'cv2.namedWindow', 'cv2.namedWindow', (['"""fix_size"""', '(0)'], {}), "('fix_size', 0)\n", (13036, 13051), False, 'import cv2\n'), ((13071, 13107), 'cv2.imshow', 'cv2.imshow', (['"""fix_size"""', 'img_fix_size'], {}), "('fix_size', img_fix_size)\n", (13081, 13107), False, 'import cv2\n'), ((13135, 13158), 'numpy.expand_dims', 'np.expand_dims', (['hm_w', '(2)'], {}), '(hm_w, 2)\n', (13149, 13158), True, 'import numpy as np\n'), ((13603, 13647), 'numpy.concatenate', 'np.concatenate', (['(img_fix_size_r, hm)'], {'axis': '(2)'}), '((img_fix_size_r, hm), axis=2)\n', (13617, 13647), True, 'import numpy as np\n'), ((13916, 13947), 'numpy.expand_dims', 'np.expand_dims', (['image_fusion', '(0)'], {}), '(image_fusion, 0)\n', (13930, 13947), True, 'import numpy as np\n'), ((14714, 14736), 'numpy.array', 'np.array', 
(['gt_3d_joints'], {}), '(gt_3d_joints)\n', (14722, 14736), True, 'import numpy as np\n'), ((15325, 15341), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (15333, 15341), True, 'import numpy as np\n'), ((15466, 15517), 'numpy.array', 'np.array', (['[[trans[0, 1], trans[0, 0], trans[0, 2]]]'], {}), '([[trans[0, 1], trans[0, 0], trans[0, 2]]])\n', (15474, 15517), True, 'import numpy as np\n'), ((15854, 15904), 'utils.bone.caculate_length', 'bone.caculate_length', (['gt_3d_joints'], {'label': '"""useful"""'}), "(gt_3d_joints, label='useful')\n", (15874, 15904), False, 'from utils import func, bone, AIK, smoother\n'), ((16465, 16507), 'torch.tensor', 'torch.tensor', (['opt_shape'], {'dtype': 'torch.float'}), '(opt_shape, dtype=torch.float)\n', (16477, 16507), False, 'import torch\n'), ((16984, 17026), 'utils.AIK.adaptive_IK', 'AIK.adaptive_IK', (['template', 'j3d_pre_process'], {}), '(template, j3d_pre_process)\n', (16999, 17026), False, 'from utils import func, bone, AIK, smoother\n'), ((17359, 17403), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mano.th_faces'], {}), '(mano.th_faces)\n', (17388, 17403), False, 'import open3d\n'), ((17906, 17967), 'numpy.array', 'np.array', (['[[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]]'], {}), '([[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]])\n', (17914, 17967), True, 'import numpy as np\n'), ((18067, 18108), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['hand_verts'], {}), '(hand_verts)\n', (18096, 18108), False, 'import open3d\n'), ((20119, 20144), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (20134, 20144), False, 'import cv2\n'), ((20164, 20186), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (20174, 20186), False, 'import cv2\n'), ((20206, 20220), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (20217, 20220), False, 'import cv2\n'), ((16006, 16023), 'numpy.zeros', 'np.zeros', (['(1, 10)'], 
{}), '((1, 10))\n', (16014, 16023), True, 'import numpy as np\n'), ((16055, 16072), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (16063, 16072), True, 'import numpy as np\n'), ((16698, 16739), 'numpy.linalg.norm', 'np.linalg.norm', (['(template[9] - template[0])'], {}), '(template[9] - template[0])\n', (16712, 16739), True, 'import numpy as np\n'), ((16742, 16791), 'numpy.linalg.norm', 'np.linalg.norm', (['(gt_3d_joints[9] - gt_3d_joints[0])'], {}), '(gt_3d_joints[9] - gt_3d_joints[0])\n', (16756, 16791), True, 'import numpy as np\n'), ((17580, 17613), 'numpy.matmul', 'np.matmul', (['view_mat', 'hand_verts.T'], {}), '(view_mat, hand_verts.T)\n', (17589, 17613), True, 'import numpy as np\n'), ((23514, 23537), 'random.randint', 'random.randint', (['(80)', '(120)'], {}), '(80, 120)\n', (23528, 23537), False, 'import random\n'), ((14970, 14996), 'torch.tensor', 'torch.tensor', (['gt_3d_joints'], {}), '(gt_3d_joints)\n', (14982, 14996), False, 'import torch\n'), ((17056, 17080), 'torch.from_numpy', 'torch.from_numpy', (['pose_R'], {}), '(pose_R)\n', (17072, 17080), False, 'import torch\n'), ((18901, 18939), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['j3d_[0]'], {}), '(j3d_[0])\n', (18930, 18939), False, 'import open3d\n'), ((19401, 19444), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['gt_3d_joints'], {}), '(gt_3d_joints)\n', (19430, 19444), False, 'import open3d\n')] |
"""
desispec.quicklook.palib
Low level functions to be from top level PAs
"""
import numpy as np
from desispec.quicklook import qlexceptions,qllogger
qlog=qllogger.QLLogger("QuickLook",20)
log=qlog.getlog()
def project(x1,x2):
    """
    Return a projection (resampling) matrix relating two wavelength binnings
    by linear interpolation.

    x1: array with the original binning (bin centers)
    x2: array with the new binning (bin centers)

    Return Pr, shape (len(x2), len(x1)), such that data sampled per-bin on
    x1 maps onto x2 via y2 = Pr.dot(y1) in the overlap region (see
    resample_spec, which applies Pr exactly this way).
    """
    x1=np.sort(x1)
    x2=np.sort(x2)
    Pr=np.zeros((len(x2),len(x1)))
    # Bin edges for the original grid: midpoints between centers, with the
    # outermost edges extrapolated half a bin beyond the end centers.
    e1 = np.zeros(len(x1)+1)
    e1[1:-1]=(x1[:-1]+x1[1:])/2.0 # calculate bin edges
    e1[0]=1.5*x1[0]-0.5*x1[1]
    e1[-1]=1.5*x1[-1]-0.5*x1[-2]
    e1lo = e1[:-1] # make upper and lower bounds arrays vs. index
    e1hi = e1[1:]
    # Same edge construction for the target grid.
    e2=np.zeros(len(x2)+1)
    e2[1:-1]=(x2[:-1]+x2[1:])/2.0 # bin edges for resampled grid
    e2[0]=1.5*x2[0]-0.5*x2[1]
    e2[-1]=1.5*x2[-1]-0.5*x2[-2]
    for ii in range(len(e2)-1): # columns
        #- Find indices in x1, containing the element in x2
        #- This is much faster than looping over rows
        k = np.where((e1lo<=e2[ii]) & (e1hi>e2[ii]))[0]
        # this where obtains single e1 edge just below start of e2 bin
        emin = e2[ii]
        emax = e1hi[k]
        if e2[ii+1] < emax : emax = e2[ii+1]
        # Fractional overlap of the first contributing x1 bin with this x2 bin.
        dx = (emax-emin)/(e1hi[k]-e1lo[k])
        Pr[ii,k] = dx # enter first e1 contribution to e2[ii]
        if e2[ii+1] > emax :
            # cross over to another e1 bin contributing to this e2 bin
            l = np.where((e1 < e2[ii+1]) & (e1 > e1hi[k]))[0]
            if len(l) > 0 :
                # several-to-one resample. Just consider 3 bins max. case
                Pr[ii,k[0]+1] = 1.0 # middle bin fully contained in e2
                q = k[0]+2
            else : q = k[0]+1 # point to bin partially contained in current e2 bin
            # Fractional overlap of the last contributing x1 bin.
            emin = e1lo[q]
            emax = e2[ii+1]
            dx = (emax-emin)/(e1hi[q]-e1lo[q])
            Pr[ii,q] = dx
    #- edge: if both grids end at the same center, carry the last bin through
    if x2[-1]==x1[-1]:
        Pr[-1,-1]=1
    return Pr
def resample_spec(wave,flux,outwave,ivar=None):
    """
    Rebin a spectrum onto a new wavelength grid while conserving S/N.

    Algorithm is based on http://www.ast.cam.ac.uk/%7Erfc/vpfit10.2.pdf
    Appendix: B.1

    Args:
        wave : original wavelength array (expected (but not limited) to be native CCD pixel wavelength grid
        flux : df/dx (flux per Angstrom) sampled at wave
        outwave: new wavelength array: expected (but not limited) to be uniform binning
        ivar : inverse variance in the original binning. If not None, the ivar
            on the new binning is also returned.

    Returns:
        newflux, or (newflux, newivar) when ivar is given.

    Note:
        Full resolution computation for resampling is expensive for quicklook.
        desispec.interpolation.resample_flux using weights by ivar does not conserve total S/N.
        Tests with arc lines show much narrow spectral profile, thus not giving realistic psf resolutions
        This algorithm gives the same resolution as obtained for native CCD binning, i.e, resampling has
        insignificant effect. Details,plots in the arc processing note.
    """
    #- convert flux to per bin before projecting to new bins
    flux=flux*np.gradient(wave)
    Pr=project(wave,outwave)
    newflux=Pr.dot(flux)
    #- convert back to df/dx (per angstrom) sampled at outwave
    newflux/=np.gradient(outwave) #- per angstrom
    if ivar is None:
        return newflux
    else:
        #- project variances (not ivar) so total S/N is maintained
        ivar = ivar/(np.gradient(wave))**2.0
        newvar=Pr.dot(ivar**(-1.0)) #- maintaining Total S/N
        # RK: this is just a kludge until we more robustly ensure newvar is correct
        k = np.where(newvar <= 0.0)[0]
        newvar[k] = 0.0000001 # flag bins with no contribution from input grid
        newivar=1/newvar
        #- convert to per angstrom
        newivar*=(np.gradient(outwave))**2.0
        return newflux, newivar
def get_resolution(wave,nspec,psf,usesigma=False):
    """
    Calculate approximate resolution values at the given wavelengths, in the
    format that can directly feed the resolution data of a
    desispec.frame.Frame object.

    wave: wavelength array
    nspec: number of spectra (int)
    psf: desispec.quicklook.qlpsf.PSF like object
    usesigma: allows using sigma from the psf file for resolution computation.
        If the psf file is psfboot, per-fiber xsigma is used; if it comes from
        QL arcs processing, wsigma is used.

    Returns: resolution data of shape (nspec, nband, nwave); nband = 1 when
    usesigma is False, otherwise nband = 21.
    """
    from desispec.quicklook.qlresolution import QuickResolution
    nwave = len(wave)
    # A single diagonal band is kept purely for data-model dimensionality
    # when no sigma-based resolution is requested.
    nband = 21 if usesigma else 1
    resolution_data = np.zeros((nspec, nband, nwave))
    if usesigma:
        # Fill each fiber's block from its per-fiber y-sigma (pixel units).
        for fiber in range(nspec):
            sigma_pix = psf.ysigma(fiber, wave)
            resolution_data[fiber] = QuickResolution(sigma=sigma_pix, ndiag=nband).data
    return resolution_data
| [
"numpy.where",
"numpy.sort",
"numpy.zeros",
"desispec.quicklook.qlresolution.QuickResolution",
"desispec.quicklook.qllogger.QLLogger",
"numpy.gradient"
] | [((155, 189), 'desispec.quicklook.qllogger.QLLogger', 'qllogger.QLLogger', (['"""QuickLook"""', '(20)'], {}), "('QuickLook', 20)\n", (172, 189), False, 'from desispec.quicklook import qlexceptions, qllogger\n'), ((442, 453), 'numpy.sort', 'np.sort', (['x1'], {}), '(x1)\n', (449, 453), True, 'import numpy as np\n'), ((461, 472), 'numpy.sort', 'np.sort', (['x2'], {}), '(x2)\n', (468, 472), True, 'import numpy as np\n'), ((3246, 3266), 'numpy.gradient', 'np.gradient', (['outwave'], {}), '(outwave)\n', (3257, 3266), True, 'import numpy as np\n'), ((4696, 4727), 'numpy.zeros', 'np.zeros', (['(nspec, nband, nwave)'], {}), '((nspec, nband, nwave))\n', (4704, 4727), True, 'import numpy as np\n'), ((3080, 3097), 'numpy.gradient', 'np.gradient', (['wave'], {}), '(wave)\n', (3091, 3097), True, 'import numpy as np\n'), ((1081, 1125), 'numpy.where', 'np.where', (['((e1lo <= e2[ii]) & (e1hi > e2[ii]))'], {}), '((e1lo <= e2[ii]) & (e1hi > e2[ii]))\n', (1089, 1125), True, 'import numpy as np\n'), ((3540, 3563), 'numpy.where', 'np.where', (['(newvar <= 0.0)'], {}), '(newvar <= 0.0)\n', (3548, 3563), True, 'import numpy as np\n'), ((3753, 3773), 'numpy.gradient', 'np.gradient', (['outwave'], {}), '(outwave)\n', (3764, 3773), True, 'import numpy as np\n'), ((4914, 4959), 'desispec.quicklook.qlresolution.QuickResolution', 'QuickResolution', ([], {'sigma': 'thissigma', 'ndiag': 'nband'}), '(sigma=thissigma, ndiag=nband)\n', (4929, 4959), False, 'from desispec.quicklook.qlresolution import QuickResolution\n'), ((1511, 1555), 'numpy.where', 'np.where', (['((e1 < e2[ii + 1]) & (e1 > e1hi[k]))'], {}), '((e1 < e2[ii + 1]) & (e1 > e1hi[k]))\n', (1519, 1555), True, 'import numpy as np\n'), ((3358, 3375), 'numpy.gradient', 'np.gradient', (['wave'], {}), '(wave)\n', (3369, 3375), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 16:45:36 2019
Copyright © 2019 DataRock S.A.S. All rights reserved.
@author: DavidFelipe
Select the objects that would be analized for each layer
"""
# Dependency check: only a missing module is an expected failure here.  The
# original bare `except:` also swallowed unrelated errors (and even
# KeyboardInterrupt); catching ImportError keeps the diagnostic message while
# letting real errors surface.
try:
    import numpy as np
    from operator import itemgetter
    import time
    import progressbar
    import cv2
except ImportError:
    print(" PLEASE REVIEW THE MODULES THAT NEEDS THE SOFTWARE - AN ERROR WAS OCCURRED")
print(" %% THIRD MODULE %%")
print(" -- Select the objects from the minimum -- Check the current progress --")
class MarkProcess:
    """
    Procedure to process the coordinates and the geometric properties of each
    object segmented and found.

    Works over per-layer detection vectors (rows of [x, y, w, h] boxes):
    organizes detections by their distance to the image origin, optionally
    merges near-duplicates, and renders/counts the resulting marks on the
    input image.
    """
    def __init__(self, imageRGB, diameter_range, flagMinimize):
        """
        Args:
            imageRGB: image the detections were produced from.
            diameter_range: distance threshold used to merge nearby detections.
            flagMinimize: 1 to also merge near-duplicates in objectMatch,
                anything else to only organize.
        """
        print("MarkProcess Process")
        self.image = imageRGB
        self.diameter = diameter_range
        self.flag = flagMinimize
        self.diameterMark = 16  # radius used when painting the grouping mask
        self.markGate = 58      # width/height gate above which a blob is counted as merged marks

    def objectMatch(self, vectorL1, vectorL2, vectorL3):
        """
        Each vector is a list of three vectors containing the information
        of each sublayer of each layer.

        Returns the three layers organized by distance; when self.flag == 1
        they are additionally minimized (near-duplicates merged).
        """
        print(" ")
        print("MarkProcess object match process ")
        widgets = [progressbar.Percentage(),
           ' ', progressbar.Bar(),
           ' ', progressbar.ETA(),
           ' ', progressbar.AdaptiveETA()]
        bar = progressbar.ProgressBar(widgets=widgets, maxval=3)
        bar.start()
        inicio = time.time()
        L1_organized = self.layer_organize(vectorL1)
        L2_organized = self.layer_organize(vectorL2)
        L3_organized = self.layer_organize(vectorL3)
        bar.update(1)
        L1_minimized =[]
        L2_minimized =[]
        L3_minimized =[]
        bar.update(2)
        if(self.flag == 1):
            ### Minimize the vector sparse (merge detections closer than self.diameter)
            L10_minimize = self.minimize(L1_organized[0], self.diameter)
            L11_minimize = self.minimize(L1_organized[1], self.diameter)
            L12_minimize = self.minimize(L1_organized[2], self.diameter)
            L20_minimize = self.minimize(L2_organized[0], self.diameter)
            L21_minimize = self.minimize(L2_organized[1], self.diameter)
            L22_minimize = self.minimize(L2_organized[2], self.diameter)
            # NOTE(review): L3_organized is a list when vectorL3 is a list of
            # sublayers, but minimize expects an ndarray — confirm callers
            # always pass a single array for layer 3 in that case.
            L3_minimized = self.minimize(L3_organized, self.diameter)
            L1_minimized = [L10_minimize, L11_minimize, L12_minimize]
            L2_minimized = [L20_minimize, L21_minimize, L22_minimize]
            final = time.time() - inicio
            print(final)
            bar.update(3)
            print(" ")
            print("MarkProcess Ended with minimize function")
            return L1_minimized, L2_minimized, L3_minimized
        else:
            final = time.time() - inicio
            print(final)
            bar.update(3)
            print(" ")
            print("MarkProcess Ended - organized")
            return L1_organized, L2_organized, L3_organized

    def layer_organize(self, vector_layer):
        """Organize a layer: applied per-sublayer when given a list of three
        vectors, otherwise directly on the single vector."""
        if(type(vector_layer) == list):
            vector1_organized = self.organize_vector(vector_layer[0])
            vector2_organized = self.organize_vector(vector_layer[1])
            vector3_organized = self.organize_vector(vector_layer[2])
            vector_organized = [vector1_organized, vector2_organized, vector3_organized]
        else:
            vector_organized = self.organize_vector(vector_layer)
        return vector_organized

    def organize_vector(self, vector):
        """Convert [x, y, w, h] rows into [distance, cx, cy, area] rows sorted
        by distance to the origin.

        NOTE(review): row 0 of the input is skipped (range starts at 1) and a
        placeholder [0,0,0,0] row is prepended to the output — presumably the
        upstream vectors also carry a placeholder first row; confirm.
        """
        vector_metrics = np.array([0,0,0,0])
        x,y = vector.shape
        for i in range(1,x):
            line = vector[i]
            w = line[2]
            h = line[3]
            x1 = int(w / 2)
            y1 = int(h / 2)
            cx = int(line[0] + x1)  # box center
            cy = int(line[1] + y1)
            distance = (cx**2) + (cy**2)
            distance = int(np.sqrt(distance))  # Euclidean distance to image origin
            area = int(w * h)
            line_block = np.array([distance, cx, cy, area])
            vector_metrics = np.vstack((vector_metrics, line_block))
        vector_sort = vector_metrics[vector_metrics[:,0].argsort()]
        return vector_sort

    def minimize(self, vector, diameter):
        """Merge consecutive (distance-sorted) detections whose distance values
        differ by at most `diameter`, replacing each cluster with its mean row.

        NOTE(review): the c/j index bookkeeping is intricate; if the inner loop
        exhausts without crossing the diameter gap, `result` may be the stale
        value from the previous cluster — confirm intended for trailing rows.
        """
        x, y = vector.shape
        widgets = [progressbar.Percentage(),
           ' ', progressbar.Bar(),
           ' ', progressbar.ETA(),
           ' ', progressbar.AdaptiveETA()]
        bar2 = progressbar.ProgressBar(widgets=widgets, maxval=x)
        bar2.start()
        minimize_vector = np.array([0,0,0,0] )
        c = 1
        j = 2
        while(c <= x-2):
            line = vector[c]
            val = line[0]
            array = np.copy(line)
            counter = 0
            while(j <= x-1):
                lineCompare = vector[j]
                difference = lineCompare[0] - val
                if(difference <= diameter):
                    # Still within the merge radius: accumulate into the cluster.
                    array = np.vstack((array, lineCompare))
                    counter += 1
                elif(difference > diameter):
                    try:
                        # Collapse the cluster to its mean row.
                        distance_mean = np.mean(array[:,0])
                        cx_mean = np.average(array[:,1])
                        cy_mean = np.average(array[:,2])
                        area_mean = np.average(array[:,3])
                        result = np.array([distance_mean, cx_mean, cy_mean, area_mean])
                        c = c + counter
                    except:
                        # Single-row cluster: keep it as-is.
                        result = array
                    break
                j += 1
            minimize_vector = np.vstack((minimize_vector, result))
            c += 1
            j = c + 1
            bar2.update(c)
        bar2.update(x)
        return minimize_vector

    def imageMatch(self, vector):
        """Mark the (cx, cy) coordinates of `vector` on a copy of the image
        and return the annotated copy."""
        print(" ")
        x,y = vector.shape
        widgets = [progressbar.Percentage(),
           ' ', progressbar.Bar(),
           ' ', progressbar.ETA(),
           ' ', progressbar.AdaptiveETA()]
        bar = progressbar.ProgressBar(widgets=widgets, maxval=x)
        bar.start()
        mark_image = np.copy(self.image)
        for i in range(0, x):
            bar.update(i)
            line = vector[i]
            cx = int(line[1])
            cy = int(line[2])
            cv2.circle(mark_image, (cx, cy), 10, (255, 255, 255), 2)
            cv2.circle(mark_image, (cx, cy), 4, (0, 0, 255), -1)
        bar.update(x)
        return mark_image

    def meanGeometry(self, vector):
        """Mean area (column 3) of a vector, or the per-sublayer means when
        given a list of three vectors."""
        if(type(vector) == list):
            vector1 = vector[0]
            vector2 = vector[1]
            vector3 = vector[2]
            vector1_mean = np.mean(vector1[:,3])
            vector2_mean = np.mean(vector2[:,3])
            vector3_mean = np.mean(vector3[:,3])
            vector_mean = np.array([vector1_mean, vector2_mean, vector3_mean])
        else:
            vector_mean = np.mean(vector[:,3])
        return vector_mean

    def imageGrouped(self, vector):
        """Mark the coordinates on a copy of the image and paint filled discs
        on a black mask (used later to find overlapping groups).

        Returns (mark_image, mask_image).
        """
        mask_image = np.zeros_like(self.image)
        mark_image = np.copy(self.image)
        x,y = vector.shape
        for i in range(0, x):
            line = vector[i]
            cx = int(line[1])
            cy = int(line[2])
            # The mask radius depends on the working scale; tune diameterMark
            # if the image scale changes.
            cv2.circle(mask_image, (cx, cy), self.diameterMark, (255, 255, 255), -1)
            cv2.circle(mark_image, (cx, cy), 10, (255, 255, 255), 2)
            cv2.circle(mark_image, (cx, cy), 4, (0, 0, 255), -1)
        return mark_image, mask_image

    def Vagroup(self, vector1, vector2, vector3, vector4):
        """Stack four detection vectors, organize and minimize them, and
        return the grouped annotated image, the grouping mask, and the
        [raw, organized, minimized] vectors."""
        grouped = np.vstack([vector1, vector2, vector3, vector4])
        grouped_organized = self.layer_organize(grouped)
        grouped_minimized = self.minimize(grouped_organized, 8)
        ## With the minimized function (result currently unused downstream)
        image_minimized = self.imageGrouped(grouped_minimized)
        ## Raw vector integrated
        image_grouped, mask_image = self.imageGrouped(grouped)
        grouped_vector = [grouped, grouped_organized, grouped_minimized]
        return image_grouped, mask_image, grouped_vector

    def countIntegration(self, image, mask_image):
        """Count objects from the blobs of `mask_image`: blobs larger than
        markGate in one dimension count as 2 merged marks, in both dimensions
        as 3, otherwise as 1.

        Returns (image_mark, mask_image, vector_object, counter).
        """
        x, y, _ = image.shape
        image_mark = np.copy(self.image)
        gray = cv2.cvtColor(mask_image, cv2.COLOR_BGR2GRAY)
        contours,hierachy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        counter = 0
        vector_object = np.array([0,0,0,0])
        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= 5) and (
                h >= 5) and (w <= 500) and (h <= 500)
            if not contour_valid:
                continue
            ## Bounding box for the tracker
            boundingBox = np.array([x,y,w,h])
            # Center of the bounding box
            x1 = int(w / 2)
            y1 = int(h / 2)
            cx = x + x1
            cy = y + y1
            # BUGFIX: test the "both dimensions exceed the gate" case first.
            # In the original the `and` branch came after the `or` branch and
            # was therefore unreachable (the `or` always matched first), so
            # three-mark blobs were under-counted as two.
            if(w > self.markGate and h > self.markGate):
                counter += 3
                cv2.circle(image_mark, (cx, cy), 14, (255, 255, 255), 2)
                cv2.circle(image_mark, (cx, cy), 7, (0, 255, 0), -1)
                cv2.circle(mask_image, (cx, cy), 6, (0, 255, 0), -1)
            elif(w > self.markGate or h > self.markGate):
                cv2.circle(image_mark, (cx, cy), 14, (255, 255, 255), 2)
                cv2.circle(image_mark, (cx, cy), 7, (255, 0, 0), -1)
                cv2.circle(mask_image, (cx, cy), 6, (0, 0, 255), -1)
                counter += 2
            else :
                cv2.circle(image_mark, (cx, cy), 10, (255, 255, 255), 2)
                cv2.circle(image_mark, (cx, cy), 4, (0, 0, 255), -1)
                cv2.circle(mask_image, (cx, cy), 4, (255, 0, 0), -1)
                counter += 1
            ## Vector of the current object
            vector_object = np.vstack((vector_object, boundingBox))
        return image_mark, mask_image, vector_object, counter
| [
"progressbar.Bar",
"numpy.copy",
"numpy.mean",
"numpy.sqrt",
"numpy.average",
"cv2.boundingRect",
"numpy.zeros_like",
"numpy.array",
"cv2.circle",
"progressbar.Percentage",
"numpy.vstack",
"cv2.cvtColor",
"progressbar.ETA",
"cv2.findContours",
"progressbar.AdaptiveETA",
"time.time",
... | [((1436, 1486), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'widgets': 'widgets', 'maxval': '(3)'}), '(widgets=widgets, maxval=3)\n', (1459, 1486), False, 'import progressbar\n'), ((1524, 1535), 'time.time', 'time.time', ([], {}), '()\n', (1533, 1535), False, 'import time\n'), ((3631, 3653), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (3639, 3653), True, 'import numpy as np\n'), ((4602, 4652), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'widgets': 'widgets', 'maxval': 'x'}), '(widgets=widgets, maxval=x)\n', (4625, 4652), False, 'import progressbar\n'), ((4700, 4722), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (4708, 4722), True, 'import numpy as np\n'), ((6318, 6368), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'widgets': 'widgets', 'maxval': 'x'}), '(widgets=widgets, maxval=x)\n', (6341, 6368), False, 'import progressbar\n'), ((6410, 6429), 'numpy.copy', 'np.copy', (['self.image'], {}), '(self.image)\n', (6417, 6429), True, 'import numpy as np\n'), ((7397, 7422), 'numpy.zeros_like', 'np.zeros_like', (['self.image'], {}), '(self.image)\n', (7410, 7422), True, 'import numpy as np\n'), ((7444, 7463), 'numpy.copy', 'np.copy', (['self.image'], {}), '(self.image)\n', (7451, 7463), True, 'import numpy as np\n'), ((8020, 8067), 'numpy.vstack', 'np.vstack', (['[vector1, vector2, vector3, vector4]'], {}), '([vector1, vector2, vector3, vector4])\n', (8029, 8067), True, 'import numpy as np\n'), ((8647, 8666), 'numpy.copy', 'np.copy', (['self.image'], {}), '(self.image)\n', (8654, 8666), True, 'import numpy as np\n'), ((8682, 8726), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_image', 'cv2.COLOR_BGR2GRAY'], {}), '(mask_image, cv2.COLOR_BGR2GRAY)\n', (8694, 8726), False, 'import cv2\n'), ((8755, 8821), 'cv2.findContours', 'cv2.findContours', (['gray', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (8771, 8821), 
False, 'import cv2\n'), ((8866, 8888), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (8874, 8888), True, 'import numpy as np\n'), ((1256, 1280), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (1278, 1280), False, 'import progressbar\n'), ((1307, 1324), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (1322, 1324), False, 'import progressbar\n'), ((1351, 1368), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (1366, 1368), False, 'import progressbar\n'), ((1395, 1420), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (1418, 1420), False, 'import progressbar\n'), ((4052, 4086), 'numpy.array', 'np.array', (['[distance, cx, cy, area]'], {}), '([distance, cx, cy, area])\n', (4060, 4086), True, 'import numpy as np\n'), ((4116, 4155), 'numpy.vstack', 'np.vstack', (['(vector_metrics, line_block)'], {}), '((vector_metrics, line_block))\n', (4125, 4155), True, 'import numpy as np\n'), ((4421, 4445), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (4443, 4445), False, 'import progressbar\n'), ((4472, 4489), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (4487, 4489), False, 'import progressbar\n'), ((4516, 4533), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (4531, 4533), False, 'import progressbar\n'), ((4560, 4585), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (4583, 4585), False, 'import progressbar\n'), ((4849, 4862), 'numpy.copy', 'np.copy', (['line'], {}), '(line)\n', (4856, 4862), True, 'import numpy as np\n'), ((5759, 5795), 'numpy.vstack', 'np.vstack', (['(minimize_vector, result)'], {}), '((minimize_vector, result))\n', (5768, 5795), True, 'import numpy as np\n'), ((6138, 6162), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (6160, 6162), False, 'import progressbar\n'), ((6189, 6206), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (6204, 6206), False, 'import progressbar\n'), ((6233, 
6250), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (6248, 6250), False, 'import progressbar\n'), ((6277, 6302), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (6300, 6302), False, 'import progressbar\n'), ((6587, 6643), 'cv2.circle', 'cv2.circle', (['mark_image', '(cx, cy)', '(10)', '(255, 255, 255)', '(2)'], {}), '(mark_image, (cx, cy), 10, (255, 255, 255), 2)\n', (6597, 6643), False, 'import cv2\n'), ((6656, 6708), 'cv2.circle', 'cv2.circle', (['mark_image', '(cx, cy)', '(4)', '(0, 0, 255)', '(-1)'], {}), '(mark_image, (cx, cy), 4, (0, 0, 255), -1)\n', (6666, 6708), False, 'import cv2\n'), ((6955, 6977), 'numpy.mean', 'np.mean', (['vector1[:, 3]'], {}), '(vector1[:, 3])\n', (6962, 6977), True, 'import numpy as np\n'), ((7004, 7026), 'numpy.mean', 'np.mean', (['vector2[:, 3]'], {}), '(vector2[:, 3])\n', (7011, 7026), True, 'import numpy as np\n'), ((7053, 7075), 'numpy.mean', 'np.mean', (['vector3[:, 3]'], {}), '(vector3[:, 3])\n', (7060, 7075), True, 'import numpy as np\n'), ((7101, 7153), 'numpy.array', 'np.array', (['[vector1_mean, vector2_mean, vector3_mean]'], {}), '([vector1_mean, vector2_mean, vector3_mean])\n', (7109, 7153), True, 'import numpy as np\n'), ((7194, 7215), 'numpy.mean', 'np.mean', (['vector[:, 3]'], {}), '(vector[:, 3])\n', (7201, 7215), True, 'import numpy as np\n'), ((7622, 7694), 'cv2.circle', 'cv2.circle', (['mask_image', '(cx, cy)', 'self.diameterMark', '(255, 255, 255)', '(-1)'], {}), '(mask_image, (cx, cy), self.diameterMark, (255, 255, 255), -1)\n', (7632, 7694), False, 'import cv2\n'), ((7773, 7829), 'cv2.circle', 'cv2.circle', (['mark_image', '(cx, cy)', '(10)', '(255, 255, 255)', '(2)'], {}), '(mark_image, (cx, cy), 10, (255, 255, 255), 2)\n', (7783, 7829), False, 'import cv2\n'), ((7842, 7894), 'cv2.circle', 'cv2.circle', (['mark_image', '(cx, cy)', '(4)', '(0, 0, 255)', '(-1)'], {}), '(mark_image, (cx, cy), 4, (0, 0, 255), -1)\n', (7852, 7894), False, 'import cv2\n'), ((8962, 8987), 
'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (8978, 8987), False, 'import cv2\n'), ((9237, 9259), 'numpy.array', 'np.array', (['[x, y, w, h]'], {}), '([x, y, w, h])\n', (9245, 9259), True, 'import numpy as np\n'), ((10377, 10416), 'numpy.vstack', 'np.vstack', (['(vector_object, boundingBox)'], {}), '((vector_object, boundingBox))\n', (10386, 10416), True, 'import numpy as np\n'), ((2553, 2564), 'time.time', 'time.time', ([], {}), '()\n', (2562, 2564), False, 'import time\n'), ((2817, 2828), 'time.time', 'time.time', ([], {}), '()\n', (2826, 2828), False, 'import time\n'), ((3978, 3995), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), '(distance)\n', (3985, 3995), True, 'import numpy as np\n'), ((9523, 9579), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(14)', '(255, 255, 255)', '(2)'], {}), '(image_mark, (cx, cy), 14, (255, 255, 255), 2)\n', (9533, 9579), False, 'import cv2\n'), ((9596, 9648), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(7)', '(255, 0, 0)', '(-1)'], {}), '(image_mark, (cx, cy), 7, (255, 0, 0), -1)\n', (9606, 9648), False, 'import cv2\n'), ((9665, 9717), 'cv2.circle', 'cv2.circle', (['mask_image', '(cx, cy)', '(6)', '(0, 0, 255)', '(-1)'], {}), '(mask_image, (cx, cy), 6, (0, 0, 255), -1)\n', (9675, 9717), False, 'import cv2\n'), ((5117, 5148), 'numpy.vstack', 'np.vstack', (['(array, lineCompare)'], {}), '((array, lineCompare))\n', (5126, 5148), True, 'import numpy as np\n'), ((9851, 9907), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(14)', '(255, 255, 255)', '(2)'], {}), '(image_mark, (cx, cy), 14, (255, 255, 255), 2)\n', (9861, 9907), False, 'import cv2\n'), ((9924, 9976), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(7)', '(0, 255, 0)', '(-1)'], {}), '(image_mark, (cx, cy), 7, (0, 255, 0), -1)\n', (9934, 9976), False, 'import cv2\n'), ((9993, 10045), 'cv2.circle', 'cv2.circle', (['mask_image', '(cx, cy)', '(6)', '(0, 255, 0)', '(-1)'], {}), '(mask_image, (cx, cy), 6, (0, 
255, 0), -1)\n', (10003, 10045), False, 'import cv2\n'), ((10081, 10137), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(10)', '(255, 255, 255)', '(2)'], {}), '(image_mark, (cx, cy), 10, (255, 255, 255), 2)\n', (10091, 10137), False, 'import cv2\n'), ((10154, 10206), 'cv2.circle', 'cv2.circle', (['image_mark', '(cx, cy)', '(4)', '(0, 0, 255)', '(-1)'], {}), '(image_mark, (cx, cy), 4, (0, 0, 255), -1)\n', (10164, 10206), False, 'import cv2\n'), ((10223, 10275), 'cv2.circle', 'cv2.circle', (['mask_image', '(cx, cy)', '(4)', '(255, 0, 0)', '(-1)'], {}), '(mask_image, (cx, cy), 4, (255, 0, 0), -1)\n', (10233, 10275), False, 'import cv2\n'), ((5292, 5312), 'numpy.mean', 'np.mean', (['array[:, 0]'], {}), '(array[:, 0])\n', (5299, 5312), True, 'import numpy as np\n'), ((5346, 5369), 'numpy.average', 'np.average', (['array[:, 1]'], {}), '(array[:, 1])\n', (5356, 5369), True, 'import numpy as np\n'), ((5403, 5426), 'numpy.average', 'np.average', (['array[:, 2]'], {}), '(array[:, 2])\n', (5413, 5426), True, 'import numpy as np\n'), ((5462, 5485), 'numpy.average', 'np.average', (['array[:, 3]'], {}), '(array[:, 3])\n', (5472, 5485), True, 'import numpy as np\n'), ((5518, 5572), 'numpy.array', 'np.array', (['[distance_mean, cx_mean, cy_mean, area_mean]'], {}), '([distance_mean, cx_mean, cy_mean, area_mean])\n', (5526, 5572), True, 'import numpy as np\n')] |
import numpy as np
import pickle
class MinMaxScaler():
"""Reimplementation of scikit.learn MinMaxSaler. Avoids the need for pickling scikitlearn objects."""
def __init__(self,x_scale,x_min):
self.x_scale = np.array(x_scale).astype(float)
self.x_min = np.array(x_min).astype(float)
def transform(self,X):
X = np.array(X).astype(float)
print(X)
print(self.x_scale)
X*=self.x_scale
X+=self.x_min
return X
def inverse_transform(self,X):
X = np.array(X).astype(float)
X -= self.x_min
X /= self.x_scale
return X
| [
"numpy.array"
] | [((223, 240), 'numpy.array', 'np.array', (['x_scale'], {}), '(x_scale)\n', (231, 240), True, 'import numpy as np\n'), ((276, 291), 'numpy.array', 'np.array', (['x_min'], {}), '(x_min)\n', (284, 291), True, 'import numpy as np\n'), ((346, 357), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (354, 357), True, 'import numpy as np\n'), ((528, 539), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (536, 539), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Created on Fri Mar 30 22:03:29 2018
@author: mohammad
"""
import sys
import os
import glob
import numpy as np
import pandas as pd
import scipy.io
from sklearn.externals import joblib
import physionetchallenge2018_lib as phyc
def classify_record(record_name):
header_file = record_name + '.hea'
signal_file = record_name + '.mat'
# Read model files from the 'models' subdirectory, which are
# generated by 'train_classifier.py'
model_list = []
for f in glob.glob('models/*_model.pkl'):
model_list.append(f)
# Use the average predictions from the models generated on the
# training set
predictions_mean = 0.
for j in range(0, len(model_list)):
this_model = model_list[j]
predictions = run_classifier(header_file, signal_file, this_model)
predictions_mean += predictions
predictions_mean /= len(model_list)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Return a vector of per-sample predictions, as per challenge requirements
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
return predictions_mean
# This function generates the predictions from a single model
def run_classifier(header_file, signal_file, classifier_pickle):
signal_names, Fs, n_samples = phyc.import_signal_names(header_file)
# Get this subject's data as a dataframe
this_data = phyc.get_subject_data_test(signal_file, signal_names)
SaO2 = this_data.get(['SaO2']).values
step = Fs * 60
window_size = Fs * 60
# Initialize the X_subj and Y_subj matricies
X_subj = np.zeros([((n_samples) // step), 1])
for idx, k in enumerate(range(0, (n_samples-step+1), step)):
X_subj[idx, :] = np.var(np.transpose(SaO2[k:k+window_size]), axis=1)
# Load the classifier
my_classifier = joblib.load(classifier_pickle)
# Generate the prediction for the subjects.
predictions = my_classifier.predict_proba(X_subj)
predictions = predictions[:, 1]
predictions = [x * np.ones([window_size]) for x in predictions]
predictions = np.concatenate(predictions)
predictions = np.append(predictions, np.zeros(np.size(this_data, 0)
- np.size(predictions, 0)))
return predictions
if __name__ == '__main__':
for record in sys.argv[1:]:
output_file = os.path.basename(record) + '.vec'
results = classify_record(record)
np.savetxt('vec/' + output_file, results, fmt='%.3f')
| [
"numpy.transpose",
"numpy.ones",
"numpy.size",
"sklearn.externals.joblib.load",
"physionetchallenge2018_lib.import_signal_names",
"numpy.zeros",
"os.path.basename",
"numpy.concatenate",
"numpy.savetxt",
"physionetchallenge2018_lib.get_subject_data_test",
"glob.glob"
] | [((506, 537), 'glob.glob', 'glob.glob', (['"""models/*_model.pkl"""'], {}), "('models/*_model.pkl')\n", (515, 537), False, 'import glob\n'), ((1341, 1378), 'physionetchallenge2018_lib.import_signal_names', 'phyc.import_signal_names', (['header_file'], {}), '(header_file)\n', (1365, 1378), True, 'import physionetchallenge2018_lib as phyc\n'), ((1441, 1494), 'physionetchallenge2018_lib.get_subject_data_test', 'phyc.get_subject_data_test', (['signal_file', 'signal_names'], {}), '(signal_file, signal_names)\n', (1467, 1494), True, 'import physionetchallenge2018_lib as phyc\n'), ((1653, 1685), 'numpy.zeros', 'np.zeros', (['[n_samples // step, 1]'], {}), '([n_samples // step, 1])\n', (1661, 1685), True, 'import numpy as np\n'), ((1880, 1910), 'sklearn.externals.joblib.load', 'joblib.load', (['classifier_pickle'], {}), '(classifier_pickle)\n', (1891, 1910), False, 'from sklearn.externals import joblib\n'), ((2136, 2163), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (2150, 2163), True, 'import numpy as np\n'), ((2503, 2556), 'numpy.savetxt', 'np.savetxt', (["('vec/' + output_file)", 'results'], {'fmt': '"""%.3f"""'}), "('vec/' + output_file, results, fmt='%.3f')\n", (2513, 2556), True, 'import numpy as np\n'), ((1788, 1825), 'numpy.transpose', 'np.transpose', (['SaO2[k:k + window_size]'], {}), '(SaO2[k:k + window_size])\n', (1800, 1825), True, 'import numpy as np\n'), ((2073, 2095), 'numpy.ones', 'np.ones', (['[window_size]'], {}), '([window_size])\n', (2080, 2095), True, 'import numpy as np\n'), ((2419, 2443), 'os.path.basename', 'os.path.basename', (['record'], {}), '(record)\n', (2435, 2443), False, 'import os\n'), ((2214, 2235), 'numpy.size', 'np.size', (['this_data', '(0)'], {}), '(this_data, 0)\n', (2221, 2235), True, 'import numpy as np\n'), ((2288, 2311), 'numpy.size', 'np.size', (['predictions', '(0)'], {}), '(predictions, 0)\n', (2295, 2311), True, 'import numpy as np\n')] |
# %% IMPORTS
# Package imports
from astropy.units import Quantity
from astropy.time import Time
from astropy.coordinates import Angle, SkyCoord
import astropy.constants as apc
from astropy.table import Table
import numpy as np
from py.path import local
# hickle imports
import hickle as hkl
# Set the current working directory to the temporary directory
local.get_temproot().chdir()
# %% FUNCTION DEFINITIONS
def test_astropy_quantity():
for uu in ['m^3', 'm^3 / s', 'kg/pc']:
a = Quantity(7, unit=uu)
hkl.dump(a, "test_ap.h5")
b = hkl.load("test_ap.h5")
assert a == b
assert a.unit == b.unit
a *= a
hkl.dump(a, "test_ap.h5")
b = hkl.load("test_ap.h5")
assert a == b
assert a.unit == b.unit
def test_astropy_constant():
hkl.dump(apc.G, "test_ap.h5")
gg = hkl.load("test_ap.h5")
assert gg == apc.G
hkl.dump(apc.cgs.e, 'test_ap.h5')
ee = hkl.load('test_ap.h5')
assert ee == apc.cgs.e
def test_astropy_table():
t = Table([[1, 2], [3, 4]], names=('a', 'b'), meta={'name': 'test_thing'})
hkl.dump({'a': t}, "test_ap.h5")
t2 = hkl.load("test_ap.h5")['a']
print(t)
print(t.meta)
print(t2)
print(t2.meta)
print(t.dtype, t2.dtype)
assert t.meta == t2.meta
assert t.dtype == t2.dtype
assert np.allclose(t['a'].astype('float32'), t2['a'].astype('float32'))
assert np.allclose(t['b'].astype('float32'), t2['b'].astype('float32'))
def test_astropy_quantity_array():
a = Quantity([1, 2, 3], unit='m')
hkl.dump(a, "test_ap.h5")
b = hkl.load("test_ap.h5")
assert np.allclose(a.value, b.value)
assert a.unit == b.unit
def test_astropy_time_array():
times = ['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00']
t1 = Time(times, format='isot', scale='utc')
hkl.dump(t1, "test_ap2.h5")
t2 = hkl.load("test_ap2.h5")
print(t1)
print(t2)
assert t1.value.shape == t2.value.shape
for ii in range(len(t1)):
assert t1.value[ii] == t2.value[ii]
assert t1.format == t2.format
assert t1.scale == t2.scale
times = [58264, 58265, 58266]
t1 = Time(times, format='mjd', scale='utc')
hkl.dump(t1, "test_ap2.h5")
t2 = hkl.load("test_ap2.h5")
print(t1)
print(t2)
assert t1.value.shape == t2.value.shape
assert np.allclose(t1.value, t2.value)
assert t1.format == t2.format
assert t1.scale == t2.scale
def test_astropy_angle():
for uu in ['radian', 'degree']:
a = Angle(1.02, unit=uu)
hkl.dump(a, "test_ap.h5")
b = hkl.load("test_ap.h5")
assert a == b
assert a.unit == b.unit
def test_astropy_angle_array():
a = Angle([1, 2, 3], unit='degree')
hkl.dump(a, "test_ap.h5")
b = hkl.load("test_ap.h5")
assert np.allclose(a.value, b.value)
assert a.unit == b.unit
def test_astropy_skycoord():
ra = Angle('1d20m', unit='degree')
dec = Angle('33d0m0s', unit='degree')
radec = SkyCoord(ra, dec)
hkl.dump(radec, "test_ap.h5")
radec2 = hkl.load("test_ap.h5")
assert radec.ra == radec2.ra
assert radec.dec == radec2.dec
ra = Angle('1d20m', unit='hourangle')
dec = Angle('33d0m0s', unit='degree')
radec = SkyCoord(ra, dec)
hkl.dump(radec, "test_ap.h5")
radec2 = hkl.load("test_ap.h5")
assert radec.ra == radec2.ra
assert radec.dec == radec2.dec
def test_astropy_skycoord_array():
ra = Angle(['1d20m', '0d21m'], unit='degree')
dec = Angle(['33d0m0s', '-33d01m'], unit='degree')
radec = SkyCoord(ra, dec)
hkl.dump(radec, "test_ap.h5")
radec2 = hkl.load("test_ap.h5")
assert np.allclose(radec.ra.value, radec2.ra.value)
assert np.allclose(radec.dec.value, radec2.dec.value)
assert radec.ra.shape == radec2.ra.shape
assert radec.dec.shape == radec2.dec.shape
ra = Angle([['1d20m', '0d21m'], ['1d20m', '0d21m']], unit='hourangle')
dec = Angle([['33d0m0s', '33d01m'], ['33d0m0s', '33d01m']], unit='degree')
radec = SkyCoord(ra, dec)
hkl.dump(radec, "test_ap.h5")
radec2 = hkl.load("test_ap.h5")
assert np.allclose(radec.ra.value, radec2.ra.value)
assert np.allclose(radec.dec.value, radec2.dec.value)
assert radec.ra.shape == radec2.ra.shape
assert radec.dec.shape == radec2.dec.shape
# %% MAIN SCRIPT
if __name__ == "__main__":
test_astropy_quantity()
test_astropy_constant()
test_astropy_table()
test_astropy_quantity_array()
test_astropy_time_array()
test_astropy_angle()
test_astropy_angle_array()
test_astropy_skycoord()
test_astropy_skycoord_array()
| [
"numpy.allclose",
"astropy.table.Table",
"astropy.coordinates.Angle",
"astropy.coordinates.SkyCoord",
"astropy.time.Time",
"py.path.local.get_temproot",
"hickle.load",
"hickle.dump",
"astropy.units.Quantity"
] | [((817, 846), 'hickle.dump', 'hkl.dump', (['apc.G', '"""test_ap.h5"""'], {}), "(apc.G, 'test_ap.h5')\n", (825, 846), True, 'import hickle as hkl\n'), ((856, 878), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (864, 878), True, 'import hickle as hkl\n'), ((907, 940), 'hickle.dump', 'hkl.dump', (['apc.cgs.e', '"""test_ap.h5"""'], {}), "(apc.cgs.e, 'test_ap.h5')\n", (915, 940), True, 'import hickle as hkl\n'), ((950, 972), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (958, 972), True, 'import hickle as hkl\n'), ((1036, 1106), 'astropy.table.Table', 'Table', (['[[1, 2], [3, 4]]'], {'names': "('a', 'b')", 'meta': "{'name': 'test_thing'}"}), "([[1, 2], [3, 4]], names=('a', 'b'), meta={'name': 'test_thing'})\n", (1041, 1106), False, 'from astropy.table import Table\n'), ((1112, 1144), 'hickle.dump', 'hkl.dump', (["{'a': t}", '"""test_ap.h5"""'], {}), "({'a': t}, 'test_ap.h5')\n", (1120, 1144), True, 'import hickle as hkl\n'), ((1535, 1564), 'astropy.units.Quantity', 'Quantity', (['[1, 2, 3]'], {'unit': '"""m"""'}), "([1, 2, 3], unit='m')\n", (1543, 1564), False, 'from astropy.units import Quantity\n'), ((1570, 1595), 'hickle.dump', 'hkl.dump', (['a', '"""test_ap.h5"""'], {}), "(a, 'test_ap.h5')\n", (1578, 1595), True, 'import hickle as hkl\n'), ((1604, 1626), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (1612, 1626), True, 'import hickle as hkl\n'), ((1639, 1668), 'numpy.allclose', 'np.allclose', (['a.value', 'b.value'], {}), '(a.value, b.value)\n', (1650, 1668), True, 'import numpy as np\n'), ((1808, 1847), 'astropy.time.Time', 'Time', (['times'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "(times, format='isot', scale='utc')\n", (1812, 1847), False, 'from astropy.time import Time\n'), ((1852, 1879), 'hickle.dump', 'hkl.dump', (['t1', '"""test_ap2.h5"""'], {}), "(t1, 'test_ap2.h5')\n", (1860, 1879), True, 'import hickle as hkl\n'), ((1889, 1912), 'hickle.load', 'hkl.load', 
(['"""test_ap2.h5"""'], {}), "('test_ap2.h5')\n", (1897, 1912), True, 'import hickle as hkl\n'), ((2170, 2208), 'astropy.time.Time', 'Time', (['times'], {'format': '"""mjd"""', 'scale': '"""utc"""'}), "(times, format='mjd', scale='utc')\n", (2174, 2208), False, 'from astropy.time import Time\n'), ((2213, 2240), 'hickle.dump', 'hkl.dump', (['t1', '"""test_ap2.h5"""'], {}), "(t1, 'test_ap2.h5')\n", (2221, 2240), True, 'import hickle as hkl\n'), ((2250, 2273), 'hickle.load', 'hkl.load', (['"""test_ap2.h5"""'], {}), "('test_ap2.h5')\n", (2258, 2273), True, 'import hickle as hkl\n'), ((2358, 2389), 'numpy.allclose', 'np.allclose', (['t1.value', 't2.value'], {}), '(t1.value, t2.value)\n', (2369, 2389), True, 'import numpy as np\n'), ((2719, 2750), 'astropy.coordinates.Angle', 'Angle', (['[1, 2, 3]'], {'unit': '"""degree"""'}), "([1, 2, 3], unit='degree')\n", (2724, 2750), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((2756, 2781), 'hickle.dump', 'hkl.dump', (['a', '"""test_ap.h5"""'], {}), "(a, 'test_ap.h5')\n", (2764, 2781), True, 'import hickle as hkl\n'), ((2790, 2812), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (2798, 2812), True, 'import hickle as hkl\n'), ((2825, 2854), 'numpy.allclose', 'np.allclose', (['a.value', 'b.value'], {}), '(a.value, b.value)\n', (2836, 2854), True, 'import numpy as np\n'), ((2923, 2952), 'astropy.coordinates.Angle', 'Angle', (['"""1d20m"""'], {'unit': '"""degree"""'}), "('1d20m', unit='degree')\n", (2928, 2952), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((2963, 2994), 'astropy.coordinates.Angle', 'Angle', (['"""33d0m0s"""'], {'unit': '"""degree"""'}), "('33d0m0s', unit='degree')\n", (2968, 2994), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3007, 3024), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {}), '(ra, dec)\n', (3015, 3024), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3029, 3058), 'hickle.dump', 'hkl.dump', 
(['radec', '"""test_ap.h5"""'], {}), "(radec, 'test_ap.h5')\n", (3037, 3058), True, 'import hickle as hkl\n'), ((3072, 3094), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (3080, 3094), True, 'import hickle as hkl\n'), ((3173, 3205), 'astropy.coordinates.Angle', 'Angle', (['"""1d20m"""'], {'unit': '"""hourangle"""'}), "('1d20m', unit='hourangle')\n", (3178, 3205), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3216, 3247), 'astropy.coordinates.Angle', 'Angle', (['"""33d0m0s"""'], {'unit': '"""degree"""'}), "('33d0m0s', unit='degree')\n", (3221, 3247), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3260, 3277), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {}), '(ra, dec)\n', (3268, 3277), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3282, 3311), 'hickle.dump', 'hkl.dump', (['radec', '"""test_ap.h5"""'], {}), "(radec, 'test_ap.h5')\n", (3290, 3311), True, 'import hickle as hkl\n'), ((3325, 3347), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (3333, 3347), True, 'import hickle as hkl\n'), ((3462, 3502), 'astropy.coordinates.Angle', 'Angle', (["['1d20m', '0d21m']"], {'unit': '"""degree"""'}), "(['1d20m', '0d21m'], unit='degree')\n", (3467, 3502), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3513, 3557), 'astropy.coordinates.Angle', 'Angle', (["['33d0m0s', '-33d01m']"], {'unit': '"""degree"""'}), "(['33d0m0s', '-33d01m'], unit='degree')\n", (3518, 3557), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3570, 3587), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {}), '(ra, dec)\n', (3578, 3587), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3592, 3621), 'hickle.dump', 'hkl.dump', (['radec', '"""test_ap.h5"""'], {}), "(radec, 'test_ap.h5')\n", (3600, 3621), True, 'import hickle as hkl\n'), ((3635, 3657), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (3643, 
3657), True, 'import hickle as hkl\n'), ((3669, 3713), 'numpy.allclose', 'np.allclose', (['radec.ra.value', 'radec2.ra.value'], {}), '(radec.ra.value, radec2.ra.value)\n', (3680, 3713), True, 'import numpy as np\n'), ((3725, 3771), 'numpy.allclose', 'np.allclose', (['radec.dec.value', 'radec2.dec.value'], {}), '(radec.dec.value, radec2.dec.value)\n', (3736, 3771), True, 'import numpy as np\n'), ((3874, 3939), 'astropy.coordinates.Angle', 'Angle', (["[['1d20m', '0d21m'], ['1d20m', '0d21m']]"], {'unit': '"""hourangle"""'}), "([['1d20m', '0d21m'], ['1d20m', '0d21m']], unit='hourangle')\n", (3879, 3939), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((3950, 4018), 'astropy.coordinates.Angle', 'Angle', (["[['33d0m0s', '33d01m'], ['33d0m0s', '33d01m']]"], {'unit': '"""degree"""'}), "([['33d0m0s', '33d01m'], ['33d0m0s', '33d01m']], unit='degree')\n", (3955, 4018), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((4031, 4048), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {}), '(ra, dec)\n', (4039, 4048), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((4053, 4082), 'hickle.dump', 'hkl.dump', (['radec', '"""test_ap.h5"""'], {}), "(radec, 'test_ap.h5')\n", (4061, 4082), True, 'import hickle as hkl\n'), ((4096, 4118), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (4104, 4118), True, 'import hickle as hkl\n'), ((4130, 4174), 'numpy.allclose', 'np.allclose', (['radec.ra.value', 'radec2.ra.value'], {}), '(radec.ra.value, radec2.ra.value)\n', (4141, 4174), True, 'import numpy as np\n'), ((4186, 4232), 'numpy.allclose', 'np.allclose', (['radec.dec.value', 'radec2.dec.value'], {}), '(radec.dec.value, radec2.dec.value)\n', (4197, 4232), True, 'import numpy as np\n'), ((356, 376), 'py.path.local.get_temproot', 'local.get_temproot', ([], {}), '()\n', (374, 376), False, 'from py.path import local\n'), ((497, 517), 'astropy.units.Quantity', 'Quantity', (['(7)'], {'unit': 'uu'}), '(7, unit=uu)\n', (505, 
517), False, 'from astropy.units import Quantity\n'), ((527, 552), 'hickle.dump', 'hkl.dump', (['a', '"""test_ap.h5"""'], {}), "(a, 'test_ap.h5')\n", (535, 552), True, 'import hickle as hkl\n'), ((565, 587), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (573, 587), True, 'import hickle as hkl\n'), ((667, 692), 'hickle.dump', 'hkl.dump', (['a', '"""test_ap.h5"""'], {}), "(a, 'test_ap.h5')\n", (675, 692), True, 'import hickle as hkl\n'), ((705, 727), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (713, 727), True, 'import hickle as hkl\n'), ((1154, 1176), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (1162, 1176), True, 'import hickle as hkl\n'), ((2532, 2552), 'astropy.coordinates.Angle', 'Angle', (['(1.02)'], {'unit': 'uu'}), '(1.02, unit=uu)\n', (2537, 2552), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((2562, 2587), 'hickle.dump', 'hkl.dump', (['a', '"""test_ap.h5"""'], {}), "(a, 'test_ap.h5')\n", (2570, 2587), True, 'import hickle as hkl\n'), ((2600, 2622), 'hickle.load', 'hkl.load', (['"""test_ap.h5"""'], {}), "('test_ap.h5')\n", (2608, 2622), True, 'import hickle as hkl\n')] |
"""
Module which contains all the imports and data available to unit tests
"""
import os
import sys
import json
import time
import shutil
import timeit
import inspect
import logging
import platform
import tempfile
import unittest
import itertools
import subprocess
import numpy as np
import sympy as sp
import trimesh
import collections
from collections import deque
from copy import deepcopy
from trimesh.constants import tol, tol_path
from trimesh.base import Trimesh
try:
from shapely.geometry import Point, Polygon
has_path = True
except ImportError:
has_path = False
python_version = np.array([sys.version_info.major,
sys.version_info.minor])
# python 3
try:
from cStringIO import StringIO
_PY3 = False
except ImportError:
from io import StringIO
from io import BytesIO
_PY3 = True
# are we on linux
is_linux = 'linux' in platform.system().lower()
dir_current = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
dir_models = os.path.abspath(os.path.join(dir_current, '..', 'models'))
dir_2D = os.path.abspath(os.path.join(dir_current, '..', 'models', '2D'))
dir_data = os.path.abspath(os.path.join(dir_current, 'data'))
log = logging.getLogger('trimesh')
log.addHandler(logging.NullHandler())
"""
# block will print who is importing us
for i in inspect.stack():
if i.code_context is None:
continue
if any('import generic' in j for j in i.code_context if j is not None):
file_name = os.path.split(i.filename)[-1]
print('\n\nRunning tests contained in: {}'.format(file_name))
break
"""
def io_wrap(item):
if isinstance(item, str):
return StringIO(item)
if _PY3 and isinstance(item, bytes):
return BytesIO(item)
return item
def _load_data():
data = {}
for file_name in os.listdir(dir_data):
name, extension = os.path.splitext(file_name)
if extension != '.json':
continue
file_path = os.path.join(dir_data, file_name)
with open(file_path, 'r') as file_obj:
data[name] = json.load(file_obj)
data['model_paths'] = [os.path.join(dir_models, f)
for f in os.listdir(dir_models)]
data['2D_files'] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)]
return data
def get_mesh(file_name, *args, **kwargs):
meshes = collections.deque()
for name in np.append(file_name, args):
location = os.path.join(dir_models, name)
log.info('loading mesh from: %s', location)
meshes.append(trimesh.load(location, **kwargs))
if len(meshes) == 1:
return meshes[0]
return list(meshes)
def get_meshes(count=np.inf,
raise_error=False,
only_watertight=True):
"""
Get a list of meshes to test with.
Arguments
----------
count: int, approximate number of meshes you want
raise_error: bool, if True raise a ValueError if a mesh
that should be loadable returns a non- Trimesh object.
Returns
----------
meshes: list, of Trimesh objects
"""
# use deterministic file name order
file_names = np.sort(os.listdir(dir_models))
meshes = deque()
for file_name in file_names:
extension = trimesh.util.split_extension(file_name).lower()
if extension in trimesh.available_formats():
loaded = trimesh.util.make_sequence(get_mesh(file_name))
for i in loaded:
is_mesh = trimesh.util.is_instance_named(i, 'Trimesh')
is_scene = trimesh.util.is_instance_named(i, 'Scene')
if raise_error and not is_mesh and not is_scene:
raise ValueError('%s returned a non- Trimesh object!',
file_name)
if not is_mesh or (only_watertight and not i.is_watertight):
continue
meshes.append(i)
else:
log.warning('%s has no loader, not running test on!',
file_name)
if len(meshes) >= count:
break
return list(meshes)
def get_2D(count=None):
"""
Get Path2D objects to test with.
"""
# if no path loading return empty list
if not has_path:
return []
# all files in the 2D models directory
ls = os.listdir(dir_2D)
# if count isn't passed return all files
if count is None:
count = len(ls)
# save resulting loaded paths
paths = []
for file_name in ls:
# check to see if the file is loadable
ext = trimesh.util.split_extension(file_name)
if ext not in trimesh.available_formats():
continue
# full path
location = os.path.join(dir_2D, file_name)
try:
paths.append(trimesh.load(location))
except BaseException as E:
log.error('failed on: {}'.format(file_name),
exc_info=True)
raise E
# if we don't need every path break
if len(paths) >= count:
break
return paths
data = _load_data()
# formats supported by meshlab
meshlab_formats = ['3ds', 'ply', 'stl', 'obj', 'qobj', 'off', 'ptx', 'vmi',
'bre', 'dae', 'ctm', 'pts', 'apts', 'xyz', 'gts', 'pdb',
'tri', 'asc', 'x3d', 'x3dv', 'wrl']
| [
"logging.getLogger",
"logging.NullHandler",
"trimesh.util.is_instance_named",
"os.listdir",
"collections.deque",
"trimesh.util.split_extension",
"inspect.currentframe",
"os.path.join",
"io.BytesIO",
"os.path.splitext",
"numpy.append",
"numpy.array",
"platform.system",
"trimesh.load",
"tr... | [((606, 664), 'numpy.array', 'np.array', (['[sys.version_info.major, sys.version_info.minor]'], {}), '([sys.version_info.major, sys.version_info.minor])\n', (614, 664), True, 'import numpy as np\n'), ((1228, 1256), 'logging.getLogger', 'logging.getLogger', (['"""trimesh"""'], {}), "('trimesh')\n", (1245, 1256), False, 'import logging\n'), ((1042, 1083), 'os.path.join', 'os.path.join', (['dir_current', '""".."""', '"""models"""'], {}), "(dir_current, '..', 'models')\n", (1054, 1083), False, 'import os\n'), ((1110, 1157), 'os.path.join', 'os.path.join', (['dir_current', '""".."""', '"""models"""', '"""2D"""'], {}), "(dir_current, '..', 'models', '2D')\n", (1122, 1157), False, 'import os\n'), ((1186, 1219), 'os.path.join', 'os.path.join', (['dir_current', '"""data"""'], {}), "(dir_current, 'data')\n", (1198, 1219), False, 'import os\n'), ((1272, 1293), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (1291, 1293), False, 'import logging\n'), ((1849, 1869), 'os.listdir', 'os.listdir', (['dir_data'], {}), '(dir_data)\n', (1859, 1869), False, 'import os\n'), ((2391, 2410), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2408, 2410), False, 'import collections\n'), ((2427, 2453), 'numpy.append', 'np.append', (['file_name', 'args'], {}), '(file_name, args)\n', (2436, 2453), True, 'import numpy as np\n'), ((3229, 3236), 'collections.deque', 'deque', ([], {}), '()\n', (3234, 3236), False, 'from collections import deque\n'), ((4361, 4379), 'os.listdir', 'os.listdir', (['dir_2D'], {}), '(dir_2D)\n', (4371, 4379), False, 'import os\n'), ((1693, 1707), 'io.StringIO', 'StringIO', (['item'], {}), '(item)\n', (1701, 1707), False, 'from io import StringIO\n'), ((1764, 1777), 'io.BytesIO', 'BytesIO', (['item'], {}), '(item)\n', (1771, 1777), False, 'from io import BytesIO\n'), ((1897, 1924), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1913, 1924), False, 'import os\n'), ((1999, 2032), 'os.path.join', 
'os.path.join', (['dir_data', 'file_name'], {}), '(dir_data, file_name)\n', (2011, 2032), False, 'import os\n'), ((2153, 2180), 'os.path.join', 'os.path.join', (['dir_models', 'f'], {}), '(dir_models, f)\n', (2165, 2180), False, 'import os\n'), ((2265, 2288), 'os.path.join', 'os.path.join', (['dir_2D', 'f'], {}), '(dir_2D, f)\n', (2277, 2288), False, 'import os\n'), ((2474, 2504), 'os.path.join', 'os.path.join', (['dir_models', 'name'], {}), '(dir_models, name)\n', (2486, 2504), False, 'import os\n'), ((3191, 3213), 'os.listdir', 'os.listdir', (['dir_models'], {}), '(dir_models)\n', (3201, 3213), False, 'import os\n'), ((4606, 4645), 'trimesh.util.split_extension', 'trimesh.util.split_extension', (['file_name'], {}), '(file_name)\n', (4634, 4645), False, 'import trimesh\n'), ((4757, 4788), 'os.path.join', 'os.path.join', (['dir_2D', 'file_name'], {}), '(dir_2D, file_name)\n', (4769, 4788), False, 'import os\n'), ((893, 910), 'platform.system', 'platform.system', ([], {}), '()\n', (908, 910), False, 'import platform\n'), ((987, 1009), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1007, 1009), False, 'import inspect\n'), ((2105, 2124), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (2114, 2124), False, 'import json\n'), ((2217, 2239), 'os.listdir', 'os.listdir', (['dir_models'], {}), '(dir_models)\n', (2227, 2239), False, 'import os\n'), ((2298, 2316), 'os.listdir', 'os.listdir', (['dir_2D'], {}), '(dir_2D)\n', (2308, 2316), False, 'import os\n'), ((2579, 2611), 'trimesh.load', 'trimesh.load', (['location'], {}), '(location, **kwargs)\n', (2591, 2611), False, 'import trimesh\n'), ((3362, 3389), 'trimesh.available_formats', 'trimesh.available_formats', ([], {}), '()\n', (3387, 3389), False, 'import trimesh\n'), ((4668, 4695), 'trimesh.available_formats', 'trimesh.available_formats', ([], {}), '()\n', (4693, 4695), False, 'import trimesh\n'), ((3290, 3329), 'trimesh.util.split_extension', 'trimesh.util.split_extension', 
(['file_name'], {}), '(file_name)\n', (3318, 3329), False, 'import trimesh\n'), ((3515, 3559), 'trimesh.util.is_instance_named', 'trimesh.util.is_instance_named', (['i', '"""Trimesh"""'], {}), "(i, 'Trimesh')\n", (3545, 3559), False, 'import trimesh\n'), ((3587, 3629), 'trimesh.util.is_instance_named', 'trimesh.util.is_instance_named', (['i', '"""Scene"""'], {}), "(i, 'Scene')\n", (3617, 3629), False, 'import trimesh\n'), ((4827, 4849), 'trimesh.load', 'trimesh.load', (['location'], {}), '(location)\n', (4839, 4849), False, 'import trimesh\n')] |
"""Helper functions for the Taylor-Green vortices application."""
import numpy
def taylor_green_vortex(x, y, t, nu):
"""Return the solution of the Taylor-Green vortex at given time.
Parameters
----------
x : numpy.ndarray
Gridline locations in the x direction as a 1D array of floats.
y : numpy.ndarray
Gridline locations in the y direction as a 1D array of floats.
t : float
Time value.
nu : float
Coefficient of viscosity.
Returns
-------
numpy.ndarray
x-component of the velocity field as a 2D array of floats.
numpy.ndarray
y-component of the velocity field as a 2D array of floats.
numpy.ndarray
pressure field as a 2D array of floats.
"""
X, Y = numpy.meshgrid(x, y)
a = 2 * numpy.pi
u = -numpy.cos(a * X) * numpy.sin(a * Y) * numpy.exp(-2 * a**2 * nu * t)
v = +numpy.sin(a * X) * numpy.cos(a * Y) * numpy.exp(-2 * a**2 * nu * t)
p = (-0.25 * (numpy.cos(2 * a * X) + numpy.cos(2 * a * Y)) *
numpy.exp(-4 * a**2 * nu * t))
return u, v, p
| [
"numpy.exp",
"numpy.meshgrid",
"numpy.sin",
"numpy.cos"
] | [((770, 790), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (784, 790), False, 'import numpy\n'), ((859, 890), 'numpy.exp', 'numpy.exp', (['(-2 * a ** 2 * nu * t)'], {}), '(-2 * a ** 2 * nu * t)\n', (868, 890), False, 'import numpy\n'), ((936, 967), 'numpy.exp', 'numpy.exp', (['(-2 * a ** 2 * nu * t)'], {}), '(-2 * a ** 2 * nu * t)\n', (945, 967), False, 'import numpy\n'), ((1040, 1071), 'numpy.exp', 'numpy.exp', (['(-4 * a ** 2 * nu * t)'], {}), '(-4 * a ** 2 * nu * t)\n', (1049, 1071), False, 'import numpy\n'), ((840, 856), 'numpy.sin', 'numpy.sin', (['(a * Y)'], {}), '(a * Y)\n', (849, 856), False, 'import numpy\n'), ((917, 933), 'numpy.cos', 'numpy.cos', (['(a * Y)'], {}), '(a * Y)\n', (926, 933), False, 'import numpy\n'), ((821, 837), 'numpy.cos', 'numpy.cos', (['(a * X)'], {}), '(a * X)\n', (830, 837), False, 'import numpy\n'), ((898, 914), 'numpy.sin', 'numpy.sin', (['(a * X)'], {}), '(a * X)\n', (907, 914), False, 'import numpy\n'), ((984, 1004), 'numpy.cos', 'numpy.cos', (['(2 * a * X)'], {}), '(2 * a * X)\n', (993, 1004), False, 'import numpy\n'), ((1007, 1027), 'numpy.cos', 'numpy.cos', (['(2 * a * Y)'], {}), '(2 * a * Y)\n', (1016, 1027), False, 'import numpy\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Python permanent wrapper function"""
# pylint: disable=no-self-use
from itertools import chain, product
import pytest
import numpy as np
from scipy.special import factorial as fac
from scipy.linalg import sqrtm
from scipy.stats import unitary_group
from thewalrus import perm, permanent_repeated, brs, ubrs
from thewalrus._permanent import fock_prob, fock_threshold_prob
perm_real = perm
perm_complex = perm
perm_BBFG_real = lambda x: perm(x, method="bbfg")
perm_BBFG_complex = lambda x: perm(x, method="bbfg")
class TestPermanentWrapper:
"""Tests for the Permanent function"""
def test_array_exception(self):
"""Check exception for non-matrix argument"""
with pytest.raises(TypeError):
perm(1)
def test_square_exception(self):
"""Check exception for non-square argument"""
A = np.zeros([2, 3])
with pytest.raises(ValueError):
perm(A)
def test_nan(self):
"""Check exception for non-finite matrix"""
A = np.array([[2, 1], [1, np.nan]])
with pytest.raises(ValueError):
perm(A)
def test_0x0(self):
"""Check 0x0 permanent returns 1"""
A = np.zeros((0, 0))
p = perm(A, method="ryser")
expected = 1
assert p == expected
p = perm(A, method="bbfg")
assert p == expected
def test_1x1(self, random_matrix):
"""Check 1x1 permanent"""
A = np.array([[random_matrix(1)]])
p = perm(A, method="ryser")
expected = A[0, 0]
assert p == expected
p = perm(A, method="bbfg")
assert p == expected
def test_2x2(self, random_matrix):
"""Check 2x2 permanent"""
A = random_matrix(2)
p = perm(A, method="ryser")
expected = A[0, 0] * A[1, 1] + A[0, 1] * A[1, 0]
assert p == expected
p = perm(A, method="bbfg")
assert p == expected
def test_3x3(self, random_matrix):
"""Check 3x3 permanent"""
A = random_matrix(3)
p = perm(A, method="ryser")
expected = (
A[0, 2] * A[1, 1] * A[2, 0]
+ A[0, 1] * A[1, 2] * A[2, 0]
+ A[0, 2] * A[1, 0] * A[2, 1]
+ A[0, 0] * A[1, 2] * A[2, 1]
+ A[0, 1] * A[1, 0] * A[2, 2]
+ A[0, 0] * A[1, 1] * A[2, 2]
)
assert p == expected
p = perm(A, method="bbfg")
assert p == expected
@pytest.mark.parametrize("dtype", [np.float64])
def test_real(self, random_matrix):
"""Check perm(A) == perm_real(A) and perm(A, method="bbfg") == perm_BBFG_real(A) for a random real matrix."""
A = random_matrix(6)
p = perm(A, method="ryser")
expected = perm_real(A)
assert np.allclose(p, expected)
A = random_matrix(6)
A = np.array(A, dtype=np.complex128)
p = perm(A, method="ryser")
expected = perm_real(np.float64(A.real))
assert np.allclose(p, expected)
A = random_matrix(6)
p = perm(A, method="bbfg")
expected = perm_BBFG_real(A)
assert np.allclose(p, expected)
A = random_matrix(6)
A = np.array(A, dtype=np.complex128)
p = perm(A, method="bbfg")
expected = perm_BBFG_real(np.float64(A.real))
assert np.allclose(p, expected)
@pytest.mark.parametrize("dtype", [np.complex128])
def test_complex(self, random_matrix):
"""Check perm(A) == perm_complex(A) and perm(A) == perm_BBFG_complex(A) for a complex."""
A = random_matrix(6)
p = perm(A, method="ryser")
expected = perm_complex(A)
assert np.allclose(p, expected)
A = random_matrix(6)
p = perm(A, method="ryser")
expected = perm_BBFG_complex(A)
assert np.allclose(p, expected)
@pytest.mark.parametrize("dtype", [np.float64])
def test_complex_no_imag(self, random_matrix):
"""Check perm(A) == perm_real(A) and perm(A) == perm_BBFG_real(A) for a complex random matrix with zero imaginary parts."""
A = np.complex128(random_matrix(6))
p = perm(A, method="ryser")
expected = perm_real(A.real)
assert np.allclose(p, expected)
A = np.complex128(random_matrix(6))
p = perm(A, method="ryser")
expected = perm_BBFG_real(A.real)
assert np.allclose(p, expected)
class TestPermanentRepeated:
"""Tests for the repeated permanent"""
def test_rpt_zero(self):
"""Check 2x2 permanent when rpt is all 0"""
A = np.array([[2, 1], [1, 3]])
rpt = [0, 0]
res = permanent_repeated(A, rpt)
assert res == 1.0
def test_2x2(self, random_matrix):
"""Check 2x2 permanent"""
A = random_matrix(2)
p = permanent_repeated(A, [1] * 2)
assert np.allclose(p, A[0, 0] * A[1, 1] + A[1, 0] * A[0, 1])
def test_3x3(self, random_matrix):
"""Check 3x3 permanent"""
A = random_matrix(3)
p = permanent_repeated(A, [1] * 3)
exp = (
A[0, 0] * A[1, 1] * A[2, 2]
+ A[0, 1] * A[1, 2] * A[2, 0]
+ A[0, 2] * A[1, 0] * A[2, 1]
+ A[2, 0] * A[1, 1] * A[0, 2]
+ A[0, 1] * A[1, 0] * A[2, 2]
+ A[0, 0] * A[1, 2] * A[2, 1]
)
assert np.allclose(p, exp)
@pytest.mark.parametrize("n", [6, 8, 10, 15, 20])
def test_ones(self, n):
"""Check all ones matrix has perm(J_n)=n!"""
A = np.array([[1]])
p = permanent_repeated(A, [n])
assert np.allclose(p, fac(n))
def test_brs_HOM():
"""HOM test"""
U = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
n = [1, 1]
d = [1, 1]
assert np.isclose(fock_threshold_prob(n, d, U), fock_prob(n, d, U))
d = [1, 0]
m = [2, 0]
assert np.isclose(fock_threshold_prob(n, d, U), fock_prob(n, m, U))
@pytest.mark.parametrize("eta", [0.2, 0.5, 0.9, 1])
def test_brs_HOM_lossy(eta):
"""lossy HOM dip test"""
T = np.sqrt(eta / 2) * np.array([[1, 1], [1, -1]])
n = [1, 1]
d = [1, 1]
assert np.isclose(fock_prob(n, d, T), fock_threshold_prob(n, d, T))
def test_brs_ZTL():
"""test 3-mode ZTL suppression"""
U = np.fft.fft(np.eye(3)) / np.sqrt(3)
n = [1, 1, 1]
d = [1, 1, 0]
p1 = fock_threshold_prob(n, d, U)
p2 = fock_prob(n, [1, 2, 0], U) + fock_prob(n, [2, 1, 0], U)
assert np.isclose(p1, p2)
n = [1, 1, 1]
d = [1, 1, 1]
p1 = fock_threshold_prob(n, d, U)
p2 = fock_prob(n, d, U)
assert np.isclose(p1, p2)
T = U[:2, :]
d = [1, 1]
p1 = fock_threshold_prob(n, d, T)
p2 = fock_prob(n, [1, 1, 1], U)
assert np.isclose(p1, p2)
d = [1, 0, 0]
p1 = fock_threshold_prob(n, d, U)
p2 = fock_prob(n, [3, 0, 0], U)
assert np.isclose(p1, p2)
n = [1, 2, 0]
d = [0, 1, 1]
p1 = fock_threshold_prob(n, d, U)
p2 = fock_prob(n, [0, 2, 1], U) + fock_prob(n, [0, 1, 2], U)
assert np.isclose(p1, p2)
@pytest.mark.parametrize("eta", [0.2, 0.5, 0.9, 1])
def test_brs_ZTL_lossy(eta):
"""test lossy 3-mode ZTL suppression"""
T = np.sqrt(eta) * np.fft.fft(np.eye(3)) / np.sqrt(3)
n = [1, 1, 1]
d = [1, 1, 0]
p1 = eta**2 * (1 - eta) / 3
p2 = fock_threshold_prob(n, d, T)
assert np.allclose(p1, p2)
@pytest.mark.parametrize("d", [[1, 1, 1], [1, 1, 0], [1, 0, 0]])
def test_brs_ubrs(d):
"""test that brs and ubrs give same results for unitary transformation"""
U = np.fft.fft(np.eye(3)) / np.sqrt(3)
n = np.array([2, 1, 0])
d = np.array(d)
in_modes = np.array(list(chain(*[[i] * j for i, j in enumerate(n) if j > 0])))
click_modes = np.where(d > 0)[0]
U_dn = U[np.ix_(click_modes, in_modes)]
b1 = ubrs(U_dn)
R = sqrtm(np.eye(U.shape[1]) - U.conj().T @ U)[:, in_modes]
E = R.conj().T @ R
b2 = brs(U_dn, E)
assert np.allclose(b1, b2)
@pytest.mark.parametrize("M", range(2, 7))
def test_brs_random(M):
"""test that brs and per agree for random matices"""
n = np.ones(M, dtype=int)
n[np.random.randint(0, M)] = 0
d = np.ones(M, dtype=int)
d[np.random.randint(0, M)] = 0
loss_in = np.random.random(M)
loss_out = np.random.random(M)
U = unitary_group.rvs(M)
T = np.diag(loss_in) @ U @ np.diag(loss_out)
p1 = fock_threshold_prob(n, d, T)
p2 = fock_prob(n, d, T)
assert np.isclose(p1, p2)
@pytest.mark.parametrize("M", range(2, 5))
def test_brs_prob_normed(M):
"""test that fock threshold probability is normalised"""
N = M + 1 # guarentee at least some bunching
in_modes = np.random.choice(np.arange(M), N)
n = np.bincount(in_modes, minlength=M)
loss_in = np.random.random(M)
loss_out = np.random.random(M)
U = unitary_group.rvs(M)
T = np.diag(loss_in) @ U @ np.diag(loss_out)
p_total = 0
for det_pattern in product([0, 1], repeat=M):
p = fock_threshold_prob(n, det_pattern, T)
p_total += p
assert np.isclose(p_total, 1)
def test_fock_thresh_valueerror():
"""test that input checks are raised"""
with pytest.raises(ValueError):
n = [1, 1, 1]
T = np.ones((2, 2))
d = [1, 1]
fock_threshold_prob(n, d, T)
with pytest.raises(ValueError):
n = [1, 1]
d = [1, 1, 1]
T = np.ones((2, 2))
fock_threshold_prob(n, d, T)
with pytest.raises(ValueError):
n = [1, 1]
d = [1, 1, 1]
T = np.ones((3, 2))
fock_threshold_prob(n, d, T)
def test_fock_prob_valueerror():
"""test that input checks are raised"""
with pytest.raises(ValueError):
n = [1, 1, 2, 1]
m = [1, 1, 1, 3]
U = np.eye((4))
fock_prob(n, m, U)
| [
"thewalrus.perm",
"numpy.sqrt",
"scipy.special.factorial",
"numpy.array",
"thewalrus._permanent.fock_threshold_prob",
"numpy.arange",
"scipy.stats.unitary_group.rvs",
"thewalrus._permanent.fock_prob",
"numpy.random.random",
"numpy.where",
"numpy.float64",
"itertools.product",
"numpy.ix_",
... | [((6470, 6520), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eta"""', '[0.2, 0.5, 0.9, 1]'], {}), "('eta', [0.2, 0.5, 0.9, 1])\n", (6493, 6520), False, 'import pytest\n'), ((7586, 7636), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eta"""', '[0.2, 0.5, 0.9, 1]'], {}), "('eta', [0.2, 0.5, 0.9, 1])\n", (7609, 7636), False, 'import pytest\n'), ((7911, 7974), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""d"""', '[[1, 1, 1], [1, 1, 0], [1, 0, 0]]'], {}), "('d', [[1, 1, 1], [1, 1, 0], [1, 0, 0]])\n", (7934, 7974), False, 'import pytest\n'), ((1051, 1073), 'thewalrus.perm', 'perm', (['x'], {'method': '"""bbfg"""'}), "(x, method='bbfg')\n", (1055, 1073), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((1104, 1126), 'thewalrus.perm', 'perm', (['x'], {'method': '"""bbfg"""'}), "(x, method='bbfg')\n", (1108, 1126), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3045, 3091), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float64]'], {}), "('dtype', [np.float64])\n", (3068, 3091), False, 'import pytest\n'), ((3939, 3988), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.complex128]'], {}), "('dtype', [np.complex128])\n", (3962, 3988), False, 'import pytest\n'), ((4422, 4468), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float64]'], {}), "('dtype', [np.float64])\n", (4445, 4468), False, 'import pytest\n'), ((5933, 5981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[6, 8, 10, 15, 20]'], {}), "('n', [6, 8, 10, 15, 20])\n", (5956, 5981), False, 'import pytest\n'), ((6889, 6917), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (6908, 6917), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6994, 7012), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7004, 7012), True, 
'import numpy as np\n'), ((7060, 7088), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (7079, 7088), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7098, 7116), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (7107, 7116), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7128, 7146), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7138, 7146), True, 'import numpy as np\n'), ((7190, 7218), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (7209, 7218), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7228, 7254), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[1, 1, 1]', 'U'], {}), '(n, [1, 1, 1], U)\n', (7237, 7254), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7267, 7285), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7277, 7285), True, 'import numpy as np\n'), ((7315, 7343), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (7334, 7343), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7353, 7379), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[3, 0, 0]', 'U'], {}), '(n, [3, 0, 0], U)\n', (7362, 7379), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7392, 7410), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7402, 7410), True, 'import numpy as np\n'), ((7458, 7486), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (7477, 7486), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7564, 7582), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7574, 7582), True, 'import numpy as np\n'), ((7847, 7875), 
'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (7866, 7875), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7888, 7907), 'numpy.allclose', 'np.allclose', (['p1', 'p2'], {}), '(p1, p2)\n', (7899, 7907), True, 'import numpy as np\n'), ((8128, 8147), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (8136, 8147), True, 'import numpy as np\n'), ((8156, 8167), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (8164, 8167), True, 'import numpy as np\n'), ((8344, 8354), 'thewalrus.ubrs', 'ubrs', (['U_dn'], {}), '(U_dn)\n', (8348, 8354), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((8453, 8465), 'thewalrus.brs', 'brs', (['U_dn', 'E'], {}), '(U_dn, E)\n', (8456, 8465), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((8478, 8497), 'numpy.allclose', 'np.allclose', (['b1', 'b2'], {}), '(b1, b2)\n', (8489, 8497), True, 'import numpy as np\n'), ((8633, 8654), 'numpy.ones', 'np.ones', (['M'], {'dtype': 'int'}), '(M, dtype=int)\n', (8640, 8654), True, 'import numpy as np\n'), ((8698, 8719), 'numpy.ones', 'np.ones', (['M'], {'dtype': 'int'}), '(M, dtype=int)\n', (8705, 8719), True, 'import numpy as np\n'), ((8770, 8789), 'numpy.random.random', 'np.random.random', (['M'], {}), '(M)\n', (8786, 8789), True, 'import numpy as np\n'), ((8805, 8824), 'numpy.random.random', 'np.random.random', (['M'], {}), '(M)\n', (8821, 8824), True, 'import numpy as np\n'), ((8833, 8853), 'scipy.stats.unitary_group.rvs', 'unitary_group.rvs', (['M'], {}), '(M)\n', (8850, 8853), False, 'from scipy.stats import unitary_group\n'), ((8913, 8941), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (8932, 8941), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((8951, 8969), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (8960, 8969), False, 
'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((8982, 9000), 'numpy.isclose', 'np.isclose', (['p1', 'p2'], {}), '(p1, p2)\n', (8992, 9000), True, 'import numpy as np\n'), ((9245, 9279), 'numpy.bincount', 'np.bincount', (['in_modes'], {'minlength': 'M'}), '(in_modes, minlength=M)\n', (9256, 9279), True, 'import numpy as np\n'), ((9295, 9314), 'numpy.random.random', 'np.random.random', (['M'], {}), '(M)\n', (9311, 9314), True, 'import numpy as np\n'), ((9330, 9349), 'numpy.random.random', 'np.random.random', (['M'], {}), '(M)\n', (9346, 9349), True, 'import numpy as np\n'), ((9358, 9378), 'scipy.stats.unitary_group.rvs', 'unitary_group.rvs', (['M'], {}), '(M)\n', (9375, 9378), False, 'from scipy.stats import unitary_group\n'), ((9468, 9493), 'itertools.product', 'product', (['[0, 1]'], {'repeat': 'M'}), '([0, 1], repeat=M)\n', (9475, 9493), False, 'from itertools import chain, product\n'), ((9579, 9601), 'numpy.isclose', 'np.isclose', (['p_total', '(1)'], {}), '(p_total, 1)\n', (9589, 9601), True, 'import numpy as np\n'), ((1454, 1470), 'numpy.zeros', 'np.zeros', (['[2, 3]'], {}), '([2, 3])\n', (1462, 1470), True, 'import numpy as np\n'), ((1620, 1651), 'numpy.array', 'np.array', (['[[2, 1], [1, np.nan]]'], {}), '([[2, 1], [1, np.nan]])\n', (1628, 1651), True, 'import numpy as np\n'), ((1793, 1809), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (1801, 1809), True, 'import numpy as np\n'), ((1822, 1845), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (1826, 1845), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((1909, 1931), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (1913, 1931), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((2090, 2113), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (2094, 2113), False, 'from thewalrus import perm, permanent_repeated, brs, 
ubrs\n'), ((2183, 2205), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (2187, 2205), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((2350, 2373), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (2354, 2373), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((2473, 2495), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (2477, 2495), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((2640, 2663), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (2644, 2663), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((2987, 3009), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (2991, 3009), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3291, 3314), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (3295, 3314), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3362, 3386), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (3373, 3386), True, 'import numpy as np\n'), ((3429, 3461), 'numpy.array', 'np.array', (['A'], {'dtype': 'np.complex128'}), '(A, dtype=np.complex128)\n', (3437, 3461), True, 'import numpy as np\n'), ((3474, 3497), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (3478, 3497), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3562, 3586), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (3573, 3586), True, 'import numpy as np\n'), ((3629, 3651), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (3633, 3651), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3704, 3728), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), 
'(p, expected)\n', (3715, 3728), True, 'import numpy as np\n'), ((3771, 3803), 'numpy.array', 'np.array', (['A'], {'dtype': 'np.complex128'}), '(A, dtype=np.complex128)\n', (3779, 3803), True, 'import numpy as np\n'), ((3816, 3838), 'thewalrus.perm', 'perm', (['A'], {'method': '"""bbfg"""'}), "(A, method='bbfg')\n", (3820, 3838), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3908, 3932), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (3919, 3932), True, 'import numpy as np\n'), ((4171, 4194), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (4175, 4194), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((4245, 4269), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (4256, 4269), True, 'import numpy as np\n'), ((4312, 4335), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (4316, 4335), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((4391, 4415), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (4402, 4415), True, 'import numpy as np\n'), ((4708, 4731), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (4712, 4731), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((4784, 4808), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (4795, 4808), True, 'import numpy as np\n'), ((4866, 4889), 'thewalrus.perm', 'perm', (['A'], {'method': '"""ryser"""'}), "(A, method='ryser')\n", (4870, 4889), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((4947, 4971), 'numpy.allclose', 'np.allclose', (['p', 'expected'], {}), '(p, expected)\n', (4958, 4971), True, 'import numpy as np\n'), ((5140, 5166), 'numpy.array', 'np.array', (['[[2, 1], [1, 3]]'], {}), '([[2, 1], [1, 3]])\n', (5148, 5166), True, 'import numpy as np\n'), ((5202, 5228), 
'thewalrus.permanent_repeated', 'permanent_repeated', (['A', 'rpt'], {}), '(A, rpt)\n', (5220, 5228), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((5370, 5400), 'thewalrus.permanent_repeated', 'permanent_repeated', (['A', '([1] * 2)'], {}), '(A, [1] * 2)\n', (5388, 5400), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((5416, 5469), 'numpy.allclose', 'np.allclose', (['p', '(A[0, 0] * A[1, 1] + A[1, 0] * A[0, 1])'], {}), '(p, A[0, 0] * A[1, 1] + A[1, 0] * A[0, 1])\n', (5427, 5469), True, 'import numpy as np\n'), ((5585, 5615), 'thewalrus.permanent_repeated', 'permanent_repeated', (['A', '([1] * 3)'], {}), '(A, [1] * 3)\n', (5603, 5615), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((5907, 5926), 'numpy.allclose', 'np.allclose', (['p', 'exp'], {}), '(p, exp)\n', (5918, 5926), True, 'import numpy as np\n'), ((6075, 6090), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (6083, 6090), True, 'import numpy as np\n'), ((6103, 6129), 'thewalrus.permanent_repeated', 'permanent_repeated', (['A', '[n]'], {}), '(A, [n])\n', (6121, 6129), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((6218, 6245), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (6226, 6245), True, 'import numpy as np\n'), ((6248, 6258), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6255, 6258), True, 'import numpy as np\n'), ((6313, 6341), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (6332, 6341), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6343, 6361), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (6352, 6361), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6417, 6445), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'U'], {}), '(n, d, U)\n', (6436, 6445), False, 'from 
thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6447, 6465), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'm', 'U'], {}), '(n, m, U)\n', (6456, 6465), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6587, 6603), 'numpy.sqrt', 'np.sqrt', (['(eta / 2)'], {}), '(eta / 2)\n', (6594, 6603), True, 'import numpy as np\n'), ((6606, 6633), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (6614, 6633), True, 'import numpy as np\n'), ((6688, 6706), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (6697, 6706), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6708, 6736), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (6727, 6736), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6831, 6841), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (6838, 6841), True, 'import numpy as np\n'), ((6927, 6953), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[1, 2, 0]', 'U'], {}), '(n, [1, 2, 0], U)\n', (6936, 6953), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((6956, 6982), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[2, 1, 0]', 'U'], {}), '(n, [2, 1, 0], U)\n', (6965, 6982), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7496, 7522), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[0, 2, 1]', 'U'], {}), '(n, [0, 2, 1], U)\n', (7505, 7522), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7525, 7551), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', '[0, 1, 2]', 'U'], {}), '(n, [0, 1, 2], U)\n', (7534, 7551), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((7757, 7767), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (7764, 7767), True, 'import numpy as np\n'), ((8108, 8118), 'numpy.sqrt', 
'np.sqrt', (['(3)'], {}), '(3)\n', (8115, 8118), True, 'import numpy as np\n'), ((8270, 8285), 'numpy.where', 'np.where', (['(d > 0)'], {}), '(d > 0)\n', (8278, 8285), True, 'import numpy as np\n'), ((8303, 8332), 'numpy.ix_', 'np.ix_', (['click_modes', 'in_modes'], {}), '(click_modes, in_modes)\n', (8309, 8332), True, 'import numpy as np\n'), ((8661, 8684), 'numpy.random.randint', 'np.random.randint', (['(0)', 'M'], {}), '(0, M)\n', (8678, 8684), True, 'import numpy as np\n'), ((8726, 8749), 'numpy.random.randint', 'np.random.randint', (['(0)', 'M'], {}), '(0, M)\n', (8743, 8749), True, 'import numpy as np\n'), ((8885, 8902), 'numpy.diag', 'np.diag', (['loss_out'], {}), '(loss_out)\n', (8892, 8902), True, 'import numpy as np\n'), ((9220, 9232), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (9229, 9232), True, 'import numpy as np\n'), ((9410, 9427), 'numpy.diag', 'np.diag', (['loss_out'], {}), '(loss_out)\n', (9417, 9427), True, 'import numpy as np\n'), ((9507, 9545), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'det_pattern', 'T'], {}), '(n, det_pattern, T)\n', (9526, 9545), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((9692, 9717), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9705, 9717), False, 'import pytest\n'), ((9753, 9768), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9760, 9768), True, 'import numpy as np\n'), ((9796, 9824), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (9815, 9824), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((9835, 9860), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9848, 9860), False, 'import pytest\n'), ((9915, 9930), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9922, 9930), True, 'import numpy as np\n'), ((9939, 9967), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 
'd', 'T'], {}), '(n, d, T)\n', (9958, 9967), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((9978, 10003), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9991, 10003), False, 'import pytest\n'), ((10058, 10073), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (10065, 10073), True, 'import numpy as np\n'), ((10082, 10110), 'thewalrus._permanent.fock_threshold_prob', 'fock_threshold_prob', (['n', 'd', 'T'], {}), '(n, d, T)\n', (10101, 10110), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((10199, 10224), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10212, 10224), False, 'import pytest\n'), ((10289, 10298), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (10295, 10298), True, 'import numpy as np\n'), ((10310, 10328), 'thewalrus._permanent.fock_prob', 'fock_prob', (['n', 'm', 'U'], {}), '(n, m, U)\n', (10319, 10328), False, 'from thewalrus._permanent import fock_prob, fock_threshold_prob\n'), ((1304, 1328), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1317, 1328), False, 'import pytest\n'), ((1342, 1349), 'thewalrus.perm', 'perm', (['(1)'], {}), '(1)\n', (1346, 1349), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((1484, 1509), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1497, 1509), False, 'import pytest\n'), ((1523, 1530), 'thewalrus.perm', 'perm', (['A'], {}), '(A)\n', (1527, 1530), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((1665, 1690), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1678, 1690), False, 'import pytest\n'), ((1704, 1711), 'thewalrus.perm', 'perm', (['A'], {}), '(A)\n', (1708, 1711), False, 'from thewalrus import perm, permanent_repeated, brs, ubrs\n'), ((3527, 3545), 'numpy.float64', 'np.float64', (['A.real'], {}), '(A.real)\n', (3537, 3545), True, 'import numpy as np\n'), ((3873, 
3891), 'numpy.float64', 'np.float64', (['A.real'], {}), '(A.real)\n', (3883, 3891), True, 'import numpy as np\n'), ((6160, 6166), 'scipy.special.factorial', 'fac', (['n'], {}), '(n)\n', (6163, 6166), True, 'from scipy.special import factorial as fac\n'), ((6818, 6827), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6824, 6827), True, 'import numpy as np\n'), ((7718, 7730), 'numpy.sqrt', 'np.sqrt', (['eta'], {}), '(eta)\n', (7725, 7730), True, 'import numpy as np\n'), ((8095, 8104), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8101, 8104), True, 'import numpy as np\n'), ((8862, 8878), 'numpy.diag', 'np.diag', (['loss_in'], {}), '(loss_in)\n', (8869, 8878), True, 'import numpy as np\n'), ((9387, 9403), 'numpy.diag', 'np.diag', (['loss_in'], {}), '(loss_in)\n', (9394, 9403), True, 'import numpy as np\n'), ((7744, 7753), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7750, 7753), True, 'import numpy as np\n'), ((8370, 8388), 'numpy.eye', 'np.eye', (['U.shape[1]'], {}), '(U.shape[1])\n', (8376, 8388), True, 'import numpy as np\n')] |
# @Time : 2019/5/21 19:24
# @Author : shakespere
# @FileName: baseline3.py
import sys, os, re, csv, codecs, numpy as np, pandas as pd
# =================Keras==============
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation
from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization,CuDNNGRU
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers, backend
# =================nltk===============
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
# from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn import svm,metrics
import tqdm
# Pin training to a single GPU (device index 3 on the host).
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
path = './data/'
BATCH_SIZE = 512
# Candidate pre-trained word-embedding files; only EMBEDDING_FILE1 (GloVe
# 840B, 300-d) is actually loaded further down.
EMBEDDING_FILE = f'{path}glove.6B.50d.txt'
EMBEDDING_FILE1 = f'{path}glove.840B.300d.txt'
EMBEDDING_FILE2 = f'{path}crawl-300d-2M.vec'
TRAIN_DATA_FILE = f'{path}train.csv'
TEST_DATA_FILE = f'{path}20190529_test.csv'
embed_size = 300 # how big is each word vector
max_features = 20000 # how many unique words to use (i.e num rows in embedding vector)
maxlen = 100 # max number of words in a comment to use
number_filters = 100 # the number of CNN filters
# lineterminator='\n' guards against stray '\r' characters inside quoted
# review text confusing the CSV parser.
train = pd.read_csv(TRAIN_DATA_FILE,lineterminator='\n')
test = pd.read_csv(TEST_DATA_FILE,lineterminator='\n')
#
# train_text = pd.read_csv(TRAIN_DATA_FILE,index_col='ID',lineterminator='\n')
# test_text = pd.read_csv(TEST_DATA_FILE,index_col='ID',lineterminator='\n')
# Missing reviews become the sentinel token "_na_"; labels are mapped to
# binary targets (Negative -> 0, Positive -> 1).
list_sentences_train = train["review"].fillna("_na_").values
y = train['label'].map({'Negative':0, 'Positive': 1})
list_sentences_test = test["review"].fillna("_na_").values
print(y[:10])
# Pre-compiled cleaners used by text_to_wordlist: drop anything that is not
# a lowercase letter / digit / space, and collapse digit runs.
special_character_removal = re.compile(r'[^a-z\d ]', re.IGNORECASE)
replace_numbers = re.compile(r'\d+', re.IGNORECASE)
# preprocess
#
# import re
#
# import random
#
#
# def set_seed(seed=0):
# random.seed(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
# np.random.seed(seed)
#
#
# set_seed(2411)
# SEED = 42
# import psutil
# from multiprocessing import Pool
# import multiprocessing
#
# num_partitions = 10 # number of partitions to split dataframe
# num_cores = psutil.cpu_count() # number of cores on your machine
#
# print('number of cores:', num_cores)
#
#
# def df_parallelize_run(df, func):
# df_split = np.array_split(df, num_partitions)
# pool = Pool(num_cores)
# df = pd.concat(pool.map(func, df_split))
# pool.close()
# pool.join()
#
# return df
# # remove space
# spaces = ['\u200b', '\u200e', '\u202a', '\u202c', '\ufeff', '\uf0d8', '\u2061', '\x10', '\x7f', '\x9d', '\xad', '\xa0']
#
#
# def remove_space(text):
# """
# remove extra spaces and ending space if any
# """
# for space in spaces:
# text = text.replace(space, ' ')
# text = text.strip()
# text = re.sub('\s+', ' ', text)
# return text
#
#
# # replace strange punctuations and raplace diacritics
# from unicodedata import category, name, normalize
#
#
# def remove_diacritics(s):
# return ''.join(
# c for c in normalize('NFKD', s.replace('ø', 'o').replace('Ø', 'O').replace('⁻', '-').replace('₋', '-'))
# if category(c) != 'Mn')
#
#
# special_punc_mappings = {"—": "-", "–": "-", "_": "-", '”': '"', "″": '"', '“': '"', '•': '.', '−': '-',
# "’": "'", "‘": "'", "´": "'", "`": "'", '\u200b': ' ', '\xa0': ' ', '،': '', '„': '',
# '…': ' ... ', '\ufeff': ''}
#
#
# def clean_special_punctuations(text):
# for punc in special_punc_mappings:
# if punc in text:
# text = text.replace(punc, special_punc_mappings[punc])
# text = remove_diacritics(text)
# return text
#
#
# # clean numbers
# def clean_number(text):
# text = re.sub(r'(\d+)([a-zA-Z])', '\g<1> \g<2>', text) # digits followed by a single alphabet...
# text = re.sub(r'(\d+) (th|st|nd|rd) ', '\g<1>\g<2> ', text) # 1st, 2nd, 3rd, 4th...
# text = re.sub(r'(\d+),(\d+)', '\g<1>\g<2>', text)
# return text
#
#
# import string
#
# regular_punct = list(string.punctuation)
# extra_punct = [
# ',', '.', '"', ':', ')', '(', '!', '?', '|', ';', "'", '$', '&',
# '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
# '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›',
# '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”',
# '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾',
# '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼',
# '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲',
# 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»',
# ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø',
# '¹', '≤', '‡', '√', '«', '»', '´', 'º', '¾', '¡', '§', '£', '₤',
# ':)', ': )', ':-)', '(:', '( :', '(-:', ':\')',
# ':D', ': D', ':-D', 'xD', 'x-D', 'XD', 'X-D',
# '<3', ':*',
# ';-)', ';)', ';-D', ';D', '(;', '(-;',
# ':-(', ': (', ':(', '\'):', ')-:',
# '-- :', '(', ':\'(', ':"(\'', ]
#
#
# def handle_emojis(text):
# # Smile -- :), : ), :-), (:, ( :, (-:, :')
# text = re.sub(r'(:\s?\)|:-\)|\(\s?:|\(-:|:\'\))', ' EMO_POS ', text)
# # Laugh -- :D, : D, :-D, xD, x-D, XD, X-D
# text = re.sub(r'(:\s?D|:-D|x-?D|X-?D)', ' EMO_POS ', text)
# # Love -- <3, :*
# text = re.sub(r'(<3|:\*)', ' EMO_POS ', text)
# # Wink -- ;-), ;), ;-D, ;D, (;, (-;
# text = re.sub(r'(;-?\)|;-?D|\(-?;)', ' EMO_POS ', text)
# # Sad -- :-(, : (, :(, ):, )-:
# text = re.sub(r'(:\s?\(|:-\(|\)\s?:|\)-:)', ' EMO_NEG ', text)
# # Cry -- :,(, :'(, :"(
# text = re.sub(r'(:,\(|:\'\(|:"\()', ' EMO_NEG ', text)
# return text
#
#
# def stop(text):
# from nltk.corpus import stopwords
#
# text = " ".join([w.lower() for w in text.split()])
# stop_words = stopwords.words('english')
#
# words = [w for w in text.split() if not w in stop_words]
# return " ".join(words)
#
#
# all_punct = list(set(regular_punct + extra_punct))
# # do not spacing - and .
# all_punct.remove('-')
# all_punct.remove('.')
#
#
# # clean repeated letters
# def clean_repeat_words(text):
# text = re.sub(r"(I|i)(I|i)+ng", "ing", text)
# text = re.sub(r"(L|l)(L|l)(L|l)+y", "lly", text)
# text = re.sub(r"(A|a)(A|a)(A|a)+", "a", text)
# text = re.sub(r"(C|c)(C|c)(C|c)+", "cc", text)
# text = re.sub(r"(D|d)(D|d)(D|d)+", "dd", text)
# text = re.sub(r"(E|e)(E|e)(E|e)+", "ee", text)
# text = re.sub(r"(F|f)(F|f)(F|f)+", "ff", text)
# text = re.sub(r"(G|g)(G|g)(G|g)+", "gg", text)
# text = re.sub(r"(I|i)(I|i)(I|i)+", "i", text)
# text = re.sub(r"(K|k)(K|k)(K|k)+", "k", text)
# text = re.sub(r"(L|l)(L|l)(L|l)+", "ll", text)
# text = re.sub(r"(M|m)(M|m)(M|m)+", "mm", text)
# text = re.sub(r"(N|n)(N|n)(N|n)+", "nn", text)
# text = re.sub(r"(O|o)(O|o)(O|o)+", "oo", text)
# text = re.sub(r"(P|p)(P|p)(P|p)+", "pp", text)
# text = re.sub(r"(Q|q)(Q|q)+", "q", text)
# text = re.sub(r"(R|r)(R|r)(R|r)+", "rr", text)
# text = re.sub(r"(S|s)(S|s)(S|s)+", "ss", text)
# text = re.sub(r"(T|t)(T|t)(T|t)+", "tt", text)
# text = re.sub(r"(V|v)(V|v)+", "v", text)
# text = re.sub(r"(Y|y)(Y|y)(Y|y)+", "y", text)
# text = re.sub(r"plzz+", "please", text)
# text = re.sub(r"(Z|z)(Z|z)(Z|z)+", "zz", text)
# text = re.sub(r"(-+|\.+)", " ", text) # new haha #this adds a space token so we need to remove xtra spaces
# return text
#
#
# def spacing_punctuation(text):
# """
# add space before and after punctuation and symbols
# """
# for punc in all_punct:
# if punc in text:
# text = text.replace(punc, f' {punc} ')
# return text
#
#
# def preprocess(text):
# """
# preprocess text main steps
# """
# text = remove_space(text)
# text = clean_special_punctuations(text)
# text = handle_emojis(text)
# text = clean_number(text)
# text = spacing_punctuation(text)
# text = clean_repeat_words(text)
# text = remove_space(text)
# # text = stop(text)# if changing this, then chnage the dims
# # (not to be done yet as its effecting the embeddings..,we might be
# # loosing words)...
# return text
#
#
# mispell_dict = {'😉': 'wink', '😂': 'joy', '😀': 'stuck out tongue', 'theguardian': 'the guardian',
# 'deplorables': 'deplorable', 'theglobeandmail': 'the globe and mail', 'justiciaries': 'justiciary',
# 'creditdation': 'Accreditation', 'doctrne': 'doctrine', 'fentayal': 'fentanyl',
# 'designation-': 'designation', 'CONartist': 'con-artist', 'Mutilitated': 'Mutilated',
# 'Obumblers': 'bumblers', 'negotiatiations': 'negotiations', 'dood-': 'dood', 'irakis': 'iraki',
# 'cooerate': 'cooperate', 'COx': 'cox', 'racistcomments': 'racist comments',
# 'envirnmetalists': 'environmentalists', }
# contraction_mapping = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because",
# "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not",
# "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
# "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did",
# "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would",
# "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am",
# "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will",
# "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
# "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is",
# "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have",
# "mightn't": "might not", "mightn't've": "might not have", "must've": "must have",
# "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not",
# "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not",
# "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not",
# "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have",
# "she'll": "she will", "she'll've": "she will have", "she's": "she is",
# "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have",
# "so've": "so have", "so's": "so as", "this's": "this is", "that'd": "that would",
# "that'd've": "that would have", "that's": "that is", "there'd": "there would",
# "there'd've": "there would have", "there's": "there is", "here's": "here is",
# "they'd": "they would", "they'd've": "they would have", "they'll": "they will",
# "they'll've": "they will have", "they're": "they are", "they've": "they have",
# "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have",
# "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have",
# "weren't": "were not", "what'll": "what will", "what'll've": "what will have",
# "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is",
# "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have",
# "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
# "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not",
# "won't've": "will not have", "would've": "would have", "wouldn't": "would not",
# "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would",
# "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have",
# "you'd": "you would", "you'd've": "you would have", "you'll": "you will",
# "you'll've": "you will have", "you're": "you are", "you've": "you have"}
#
#
# def correct_spelling(x, dic):
# for word in dic.keys():
# x = x.replace(word, dic[word])
# return x
#
#
# def correct_contraction(x, dic):
# for word in dic.keys():
# x = x.replace(word, dic[word])
# return x
#
#
# from tqdm import tqdm
#
# tqdm.pandas()
#
#
# def text_clean_wrapper(df):
# print(df)
# df["review"] = df["review"].transform(preprocess)
# df['review'] = df['review'].transform(lambda x: correct_spelling(x, mispell_dict))
# df['review'] = df['review'].transform(lambda x: correct_contraction(x, contraction_mapping))
# return df
#
# train = df_parallelize_run(train_text, text_clean_wrapper)
# test = df_parallelize_run(test_text, text_clean_wrapper)
# preprocess
def text_to_wordlist(text, remove_stopwords=True, stem_words=True):
    """Normalise one review into a cleaned, space-joined token string.

    Steps: strip special characters, collapse every digit run to the token
    ``n``, lowercase, optionally drop English stopwords, optionally stem
    with the Snowball stemmer.

    Bug fixed vs. the original: when ``remove_stopwords`` was False the
    intermediate value stayed a *list*, so the later ``text.split()`` raised
    ``AttributeError``, and with both flags False a list (not a string) was
    returned.  This version always operates on a token list and always
    returns a string.

    :param text: raw review text.
    :param remove_stopwords: drop NLTK English stopwords when True.
    :param stem_words: apply SnowballStemmer('english') when True.
    :returns: cleaned text as a single space-separated string.
    """
    # Remove special characters, then replace digit runs with 'n'.
    cleaned = special_character_removal.sub('', text)
    cleaned = replace_numbers.sub('n', cleaned)
    tokens = cleaned.lower().split()
    if remove_stopwords:
        # set() gives O(1) membership tests instead of scanning a list.
        stop_set = set(stopwords.words("english"))
        tokens = [tok for tok in tokens if tok not in stop_set]
    if stem_words:
        stemmer = SnowballStemmer('english')
        tokens = [stemmer.stem(tok) for tok in tokens]
    return " ".join(tokens)
# Pre-clean every review with text_to_wordlist.  NOTE(review): the tokenizer
# below is fit on the raw 'review' columns, so these cleaned lists appear to
# be kept for reference only — confirm before deleting.
comments = [text_to_wordlist(text) for text in list_sentences_train]
test_comments = [text_to_wordlist(text) for text in list_sentences_test]
# Fit a word-level tokenizer on train + test text, map each review to a
# sequence of integer word indices, and pad/truncate to a fixed length.
tokenizer = Tokenizer(num_words=max_features, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\'', lower=True)
# tokenizer = Tokenizer(num_words=max_features)
# Fitting on both splits keeps the word index consistent across train/test.
tokenizer.fit_on_texts(list(train['review']) + list(test['review']))
comments_sequence = tokenizer.texts_to_sequences(list(train['review']))
test_comments_sequence = tokenizer.texts_to_sequences(list(test['review']))
word_index = tokenizer.word_index
# Left-pad (Keras default) each sequence to exactly `maxlen` tokens.
X_t = pad_sequences(comments_sequence, maxlen=maxlen)
X_te = pad_sequences(test_comments_sequence, maxlen=maxlen)
# X_t = X_t.reshape((X_t.shape[0], 1, X_t.shape[1]))
# X_te = X_te.reshape((X_te.shape[0], 1, X_te.shape[1]))
def get_coefs(word, *arr):
    """Split one whitespace-separated embedding-file row into its token and
    a float32 numpy vector of the remaining fields."""
    return word, np.asarray(arr, dtype='float32')
# embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))
def load_embeddings(embed_dir=EMBEDDING_FILE):
    """Load a GloVe/fastText text-format embedding file.

    Each line is a token followed by its vector components; rows are parsed
    by ``get_coefs``.

    Fix vs. the original: the file handle was opened inline and never
    closed (a resource leak); ``with`` now guarantees it is closed.

    :param embed_dir: path to the embedding file.
    :returns: dict mapping token -> float32 numpy vector.
    """
    with open(embed_dir) as fh:
        return dict(get_coefs(*line.strip().split()) for line in fh)
def build_embedding_matrix(word_index, embeddings_index, max_features, lower=True, verbose=True):
    """Build a (max_features, 300) matrix whose row ``i`` is the embedding
    of the word with tokenizer index ``i``.

    Words absent from ``embeddings_index`` fall back to the vector stored
    under the ``"unknown"`` key; indices >= ``max_features`` are skipped and
    their rows stay zero.

    Fixes vs. the original:
    - the file does ``import tqdm`` (the module), so calling ``tqdm(...)``
      raised ``TypeError: 'module' object is not callable``; the progress
      bar class is ``tqdm.tqdm``.
    - the bare ``except:`` is narrowed to ``KeyError`` so genuine errors
      (e.g. shape mismatches) are no longer silently swallowed.

    :param word_index: token -> integer index from the fitted tokenizer.
    :param embeddings_index: token -> embedding vector.
    :param max_features: number of rows in the returned matrix.
    :param lower: lowercase tokens before lookup when True.
    :param verbose: show a tqdm progress bar when True.
    :returns: np.ndarray of shape (max_features, 300).
    """
    embedding_matrix = np.zeros((max_features, 300))
    for word, i in tqdm.tqdm(word_index.items(), disable=not verbose):
        if lower:
            word = word.lower()
        if i >= max_features:
            continue
        try:
            embedding_vector = embeddings_index[word]
        except KeyError:
            # Out-of-vocabulary word: use the reserved "unknown" vector.
            embedding_vector = embeddings_index["unknown"]
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
def build_matrix(word_index, embeddings_index, embed_size=300):
    """Assemble an embedding matrix aligned with a tokenizer word index.

    Row 0 is left all-zero (Keras reserves index 0 for padding); row ``i``
    holds the vector of the word whose tokenizer index is ``i``.  Words
    missing from ``embeddings_index`` use the vector stored under the
    ``"unknown"`` key.

    Fixes vs. the original: the bare ``except:`` (which also hid shape
    mismatches and keyboard interrupts) is narrowed to ``KeyError``, and
    the vector dimensionality is a keyword parameter instead of a module
    global (default 300 matches the file's ``embed_size``).

    :param word_index: token -> integer index (1-based).
    :param embeddings_index: token -> embedding vector.
    :param embed_size: dimensionality of the embedding vectors.
    :returns: np.ndarray of shape (len(word_index) + 1, embed_size).
    """
    embedding_matrix = np.zeros((len(word_index) + 1, embed_size))
    for word, i in word_index.items():
        try:
            embedding_matrix[i] = embeddings_index[word]
        except KeyError:
            embedding_matrix[i] = embeddings_index["unknown"]
    return embedding_matrix
# all_embs = np.stack(embeddings_index.values())
# emb_mean, emb_std = all_embs.mean(), all_embs.std()
#
#
# word_index = tokenizer.word_index
# nb_words = min(max_features, len(word_index))
# # embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
#
# embedding_matrix = l
# for word, i in word_index.items():
# if i >= max_features: continue
# embedding_vector = embeddings_index.get(word)
# if embedding_vector is not None: embedding_matrix[i] = embedding_vector
# Load the GloVe 840B 300-d vectors and align them with the tokenizer's word
# index (row 0 stays zero for the padding token).
embedding_index = load_embeddings(embed_dir=EMBEDDING_FILE1)
embedding_matrix = build_matrix(word_index=word_index,embeddings_index=embedding_index)
def model_text_cnn():
    """Build a multi-branch TextCNN binary classifier.

    Architecture: an Embedding layer feeds five parallel Conv2D branches
    (kernel heights 3..7 over the full embedding width, channels_first),
    each followed by BatchNorm, an activation, MaxPooling and Flatten; the
    branch outputs are concatenated into a single sigmoid unit.

    Refactor vs. the original: the five copy-pasted, hand-unrolled branch
    blocks are replaced by one loop over (kernel_height, activation) pairs.
    The layer sequence, hyper-parameters and the original relu/elu
    alternation are preserved exactly.

    :returns: a compiled Keras ``Model`` (binary_crossentropy / adam).
    """
    inp = Input(shape=(1, maxlen,))
    x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
    branches = []
    # Activations alternate relu/elu exactly as in the hand-unrolled version.
    for kernel_height, act in zip((3, 4, 5, 6, 7), ('relu', 'elu', 'relu', 'elu', 'relu')):
        b = Conv2D(number_filters, (kernel_height, embed_size), data_format='channels_first')(x)
        b = BatchNormalization()(b)
        b = Activation(act)(b)
        # Pool over two-thirds of the conv output height (height depends on
        # the kernel size, hence it is read from the branch tensor).
        b = MaxPooling2D((int(int(b.shape[2]) / 1.5), 1), data_format='channels_first')(b)
        b = Flatten()(b)
        branches.append(b)
    x = concatenate(branches)
    x = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers,regularizers,constraints,optimizers,layers
class Attention(Layer):
    """Additive (Bahdanau-style) attention pooling over the time axis.

    Given a 3-D input ``(batch, step_dim, features)`` it learns a scoring
    vector ``W`` (and optional per-step bias ``b``), computes softmax-like
    weights over the ``step_dim`` axis and returns the weighted sum of the
    timesteps, i.e. a 2-D tensor ``(batch, features)``.
    """
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        # Masks are consumed in call() rather than propagated downstream.
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        # Filled in by build() once the input feature size is known.
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the scoring weights once the input shape is known."""
        assert len(input_shape) == 3
        # W: one scalar score weight per feature dimension.
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # b: one bias per timestep (length step_dim == input_shape[1]).
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # The mask is applied inside call(); do not pass it to later layers.
        return None
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # eij = x . W reshaped back to (batch, step_dim): one score per step.
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                                K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # Manual softmax over the step axis (exp then normalise) so that a
        # mask can zero out padded steps before the normalisation.
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        # K.epsilon() guards against division by zero when all steps are masked.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        # Weighted sum over time: (batch, step, feat) -> (batch, feat).
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
def model_gru_attn():
    """Build a Bi-GRU + attention binary classifier over padded sequences.

    A frozen pre-trained Embedding feeds a bidirectional CuDNNGRU(64);
    the custom Attention layer pools the timesteps into one vector, and a
    single sigmoid unit produces the probability.

    :returns: a compiled Keras ``Model`` (binary_crossentropy / adam).
    """
    tokens = Input(shape=(maxlen,))
    embedded = Embedding(len(word_index) + 1, embed_size,
                         weights=[embedding_matrix], trainable=False)(tokens)
    recurrent = Bidirectional(CuDNNGRU(64, return_sequences=True))(embedded)
    pooled = Attention(maxlen)(recurrent)
    probability = Dense(1, activation='sigmoid')(pooled)
    net = Model(inputs=tokens, outputs=probability)
    net.compile(loss='binary_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
# folds = StratifiedKFold(n_splits=10, shuffle=False, random_state=2019)
# Accumulators: `predictions` sums the per-fold test-set predictions (averaged
# at the end) and `oof_preds` collects out-of-fold predictions for scoring.
predictions = np.zeros(X_te.shape[0])
aucs = []
oof_preds = np.zeros(X_t.shape[0])
# for fold_, (train_index, test_index) in enumerate(folds.split(X_t, y)):
#     print("Fold :{}".format(fold_ + 1))
#     cv_train_data, cv_train_label= X_t[train_index], y[train_index]
#     cv_test_data, cv_test_label = X_t[test_index], y[test_index]
#
#     model.fit(cv_train_data, cv_train_label)
#     auc = metrics.roc_auc_score(cv_test_label, model.predict([cv_test_data], batch_size=1024, verbose=1))
#     preds = model.predict([X_te], batch_size=1024, verbose=1) / folds.n_splits
#     print(preds[:10])
#     print(predictions[:10])
#
#     aucs.append(auc)
#     print("auc score: %.5f" % auc)
# Standard (unstratified) 5-fold CV; splits are materialised up front.
n_splits = 5
splits = list(KFold(n_splits=n_splits,random_state=2019).split(X_t,y))
# skf = StratifiedKFold(y, n_folds=n_splits, shuffle=True,random_state=2019)
for fold_ in range(n_splits):
    train_index,test_index = splits[fold_]
    # for fold_, (train_index, test_index) in enumerate(skf):
    print("Fold :{}".format(fold_ + 1))
    # Reset the TF graph so each fold trains a fresh model.
    backend.clear_session()
    cv_train_data, cv_train_label= X_t[train_index], y[train_index]
    cv_test_data, cv_test_label = X_t[test_index], y[test_index]
    model = model_gru_attn()
    # `label > 0.5` converts the 0/1 targets to booleans for binary training.
    model.fit(cv_train_data, cv_train_label>0.5,batch_size=BATCH_SIZE,epochs=30,validation_data=(cv_test_data,cv_test_label>0.5))
    # Keep the held-out fold predictions and add this fold's test predictions.
    oof_preds[test_index] += model.predict(cv_test_data)[:,0]
    predictions += model.predict(X_te)[:,0]
    # auc = metrics.roc_auc_score(cv_test_label, model.predict([cv_test_data], batch_size=1024, verbose=1)[:,0])
    # preds = model.predict([X_te], batch_size=1024, verbose=1) / n_splits
    # print(preds[:10])
    # print(predictions[:10])
    #
    # aucs.append(auc)
    # print("auc score: %.5f" % auc)
# Average the summed per-fold test predictions.
predictions /= n_splits
# Overall CV score: AUC of the out-of-fold predictions against the labels.
auc = metrics.roc_auc_score(y,oof_preds)
print('Mean auc', auc)
# Build the submission frame: 1-based row IDs alongside averaged predictions.
predictions = pd.DataFrame(predictions)
# NOTE(review): `id` shadows the builtin of the same name; harmless here but
# worth renaming if this script grows.
id = pd.DataFrame(np.arange(1, len(predictions) + 1))
data = pd.concat([id, predictions], axis=1)
# File name embeds the CV AUC so runs can be compared at a glance.
data.to_csv('./data/{}_predictions.csv'.format(auc), header=['ID', 'Pred'], index=False)
# model.fit(X_t, y, batch_size=1280, epochs=3)
#
# y_test = model.predict([X_te], batch_size=1024, verbose=1)
# sample_submission = pd.read_csv(f'{path}{comp}sample_submission.csv')
# sample_submission[list_classes] = y_test
# sample_submission.to_csv('submission_textcnn.csv', index=False)
"keras.layers.Conv2D",
"keras.backend.sum",
"pandas.read_csv",
"re.compile",
"keras.backend.reshape",
"keras.layers.CuDNNGRU",
"keras.backend.floatx",
"sklearn.metrics.roc_auc_score",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_... | [((1436, 1485), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA_FILE'], {'lineterminator': '"""\n"""'}), "(TRAIN_DATA_FILE, lineterminator='\\n')\n", (1447, 1485), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((1493, 1541), 'pandas.read_csv', 'pd.read_csv', (['TEST_DATA_FILE'], {'lineterminator': '"""\n"""'}), "(TEST_DATA_FILE, lineterminator='\\n')\n", (1504, 1541), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((1927, 1966), 're.compile', 're.compile', (['"""[^a-z\\\\d ]"""', 're.IGNORECASE'], {}), "('[^a-z\\\\d ]', re.IGNORECASE)\n", (1937, 1966), False, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((1986, 2019), 're.compile', 're.compile', (['"""\\\\d+"""', 're.IGNORECASE'], {}), "('\\\\d+', re.IGNORECASE)\n", (1996, 2019), False, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((15012, 15113), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'max_features', 'filters': '"""!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\t\n\'"""', 'lower': '(True)'}), '(num_words=max_features, filters=\n """!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\t\n\'""", lower=True)\n', (15021, 15113), False, 'from keras.preprocessing.text import Tokenizer\n'), ((15419, 15466), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['comments_sequence'], {'maxlen': 'maxlen'}), '(comments_sequence, maxlen=maxlen)\n', (15432, 15466), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((15475, 15527), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_comments_sequence'], {'maxlen': 'maxlen'}), '(test_comments_sequence, maxlen=maxlen)\n', (15488, 15527), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((22502, 22525), 'numpy.zeros', 'np.zeros', (['X_te.shape[0]'], {}), '(X_te.shape[0])\n', (22510, 22525), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((22550, 22572), 'numpy.zeros', 'np.zeros', 
(['X_t.shape[0]'], {}), '(X_t.shape[0])\n', (22558, 22572), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((24322, 24357), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y', 'oof_preds'], {}), '(y, oof_preds)\n', (24343, 24357), False, 'from sklearn import svm, metrics\n'), ((24396, 24421), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (24408, 24421), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((24485, 24521), 'pandas.concat', 'pd.concat', (['[id, predictions]'], {'axis': '(1)'}), '([id, predictions], axis=1)\n', (24494, 24521), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((16093, 16122), 'numpy.zeros', 'np.zeros', (['(max_features, 300)'], {}), '((max_features, 300))\n', (16101, 16122), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((17660, 17684), 'keras.layers.Input', 'Input', ([], {'shape': '(1, maxlen)'}), '(shape=(1, maxlen))\n', (17665, 17684), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((19113, 19146), 'keras.layers.concatenate', 'concatenate', (['[x1, x2, x3, x4, x5]'], {}), '([x1, x2, x3, x4, x5])\n', (19124, 19146), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((19205, 19233), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x'}), '(inputs=inp, outputs=x)\n', (19210, 19233), False, 'from keras.models import Model\n'), ((21881, 21903), 'keras.layers.Input', 'Input', ([], {'shape': '(maxlen,)'}), '(shape=(maxlen,))\n', (21886, 21903), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((22282, 22310), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x'}), '(inputs=inp, outputs=x)\n', (22287, 22310), False, 'from keras.models import Model\n'), ((23541, 23564), 'keras.backend.clear_session', 
'backend.clear_session', ([], {}), '()\n', (23562, 23564), False, 'from keras import initializers, regularizers, constraints, optimizers, layers, backend\n'), ((14611, 14637), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (14626, 14637), False, 'from nltk.stem import SnowballStemmer\n'), ((15687, 15719), 'numpy.asarray', 'np.asarray', (['arr'], {'dtype': '"""float32"""'}), "(arr, dtype='float32')\n", (15697, 15719), True, 'import sys, os, re, csv, codecs, numpy as np, pandas as pd\n'), ((17695, 17758), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embed_size'], {'weights': '[embedding_matrix]'}), '(max_features, embed_size, weights=[embedding_matrix])\n', (17704, 17758), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((17774, 17843), 'keras.layers.Conv2D', 'Conv2D', (['number_filters', '(3, embed_size)'], {'data_format': '"""channels_first"""'}), "(number_filters, (3, embed_size), data_format='channels_first')\n", (17780, 17843), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((17857, 17877), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (17875, 17877), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((17892, 17910), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17902, 17910), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18016, 18025), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (18023, 18025), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18042, 18111), 'keras.layers.Conv2D', 'Conv2D', (['number_filters', '(4, embed_size)'], {'data_format': '"""channels_first"""'}), "(number_filters, (4, embed_size), 
data_format='channels_first')\n", (18048, 18111), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18125, 18145), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18143, 18145), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18160, 18177), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (18170, 18177), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18283, 18292), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (18290, 18292), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18309, 18378), 'keras.layers.Conv2D', 'Conv2D', (['number_filters', '(5, embed_size)'], {'data_format': '"""channels_first"""'}), "(number_filters, (5, embed_size), data_format='channels_first')\n", (18315, 18378), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18392, 18412), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18410, 18412), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18427, 18445), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18437, 18445), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18551, 18560), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (18558, 18560), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18577, 18646), 'keras.layers.Conv2D', 'Conv2D', (['number_filters', '(6, embed_size)'], {'data_format': '"""channels_first"""'}), "(number_filters, (6, embed_size), 
data_format='channels_first')\n", (18583, 18646), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18660, 18680), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18678, 18680), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18695, 18712), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (18705, 18712), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18818, 18827), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (18825, 18827), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18844, 18913), 'keras.layers.Conv2D', 'Conv2D', (['number_filters', '(7, embed_size)'], {'data_format': '"""channels_first"""'}), "(number_filters, (7, embed_size), data_format='channels_first')\n", (18850, 18913), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((18927, 18947), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18945, 18947), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((18962, 18980), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18972, 18980), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((19086, 19095), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (19093, 19095), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((19158, 19188), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (19163, 19188), False, 'from keras.layers import Dense, Input, Conv2D, 
Embedding, Dropout, Activation\n'), ((19761, 19795), 'keras.initializers.get', 'initializers.get', (['"""glorot_uniform"""'], {}), "('glorot_uniform')\n", (19777, 19795), False, 'from keras import initializers, regularizers, constraints, optimizers, layers\n'), ((19828, 19859), 'keras.regularizers.get', 'regularizers.get', (['W_regularizer'], {}), '(W_regularizer)\n', (19844, 19859), False, 'from keras import initializers, regularizers, constraints, optimizers, layers\n'), ((19890, 19921), 'keras.regularizers.get', 'regularizers.get', (['b_regularizer'], {}), '(b_regularizer)\n', (19906, 19921), False, 'from keras import initializers, regularizers, constraints, optimizers, layers\n'), ((19953, 19982), 'keras.constraints.get', 'constraints.get', (['W_constraint'], {}), '(W_constraint)\n', (19968, 19982), False, 'from keras import initializers, regularizers, constraints, optimizers, layers\n'), ((20012, 20041), 'keras.constraints.get', 'constraints.get', (['b_constraint'], {}), '(b_constraint)\n', (20027, 20041), False, 'from keras import initializers, regularizers, constraints, optimizers, layers\n'), ((21437, 21448), 'keras.backend.tanh', 'K.tanh', (['eij'], {}), '(eij)\n', (21443, 21448), True, 'from keras import backend as K\n'), ((21464, 21474), 'keras.backend.exp', 'K.exp', (['eij'], {}), '(eij)\n', (21469, 21474), True, 'from keras import backend as K\n'), ((21647, 21663), 'keras.backend.expand_dims', 'K.expand_dims', (['a'], {}), '(a)\n', (21660, 21663), True, 'from keras import backend as K\n'), ((21712, 21741), 'keras.backend.sum', 'K.sum', (['weighted_input'], {'axis': '(1)'}), '(weighted_input, axis=1)\n', (21717, 21741), True, 'from keras import backend as K\n'), ((22236, 22266), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (22241, 22266), False, 'from keras.layers import Dense, Input, Conv2D, Embedding, Dropout, Activation\n'), ((14382, 14408), 'nltk.corpus.stopwords.words', 'stopwords.words', 
(['"""english"""'], {}), "('english')\n", (14397, 14408), False, 'from nltk.corpus import stopwords\n'), ((21620, 21630), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (21628, 21630), True, 'from keras import backend as K\n'), ((22158, 22193), 'keras.layers.CuDNNGRU', 'CuDNNGRU', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (22166, 22193), False, 'from keras.layers import Bidirectional, MaxPooling1D, MaxPooling2D, Reshape, Flatten, concatenate, BatchNormalization, CuDNNGRU\n'), ((23224, 23267), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'random_state': '(2019)'}), '(n_splits=n_splits, random_state=2019)\n', (23229, 23267), False, 'from sklearn.model_selection import KFold\n'), ((21254, 21286), 'keras.backend.reshape', 'K.reshape', (['x', '(-1, features_dim)'], {}), '(x, (-1, features_dim))\n', (21263, 21286), True, 'from keras import backend as K\n'), ((21313, 21349), 'keras.backend.reshape', 'K.reshape', (['self.W', '(features_dim, 1)'], {}), '(self.W, (features_dim, 1))\n', (21322, 21349), True, 'from keras import backend as K\n'), ((21538, 21548), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (21546, 21548), True, 'from keras import backend as K\n'), ((21573, 21604), 'keras.backend.sum', 'K.sum', (['a'], {'axis': '(1)', 'keepdims': '(True)'}), '(a, axis=1, keepdims=True)\n', (21578, 21604), True, 'from keras import backend as K\n'), ((21607, 21618), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (21616, 21618), True, 'from keras import backend as K\n')] |
import bagel
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Sequence, Tuple, Dict, Optional
class AutoencoderLayer(tf.keras.layers.Layer):
def __init__(self, hidden_dims: Sequence[int], output_dim: int):
super().__init__()
self._hidden = tf.keras.Sequential()
for hidden_dim in hidden_dims:
self._hidden.add(
tf.keras.layers.Dense(hidden_dim, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(0.001))
)
self._mean = tf.keras.layers.Dense(output_dim, kernel_regularizer=tf.keras.regularizers.L2(0.001))
self._std = tf.keras.layers.Dense(output_dim, kernel_regularizer=tf.keras.regularizers.L2(0.001))
def call(self, inputs, **kwargs):
x = self._hidden(inputs)
mean = self._mean(x)
std = tf.math.softplus(self._std(x)) + 1e-6
return mean, std
class ConditionalVariationalAutoencoder(tf.keras.Model):
def __init__(self, encoder: AutoencoderLayer, decoder: AutoencoderLayer):
super().__init__()
self._encoder = encoder
self._decoder = decoder
def call(self, inputs, **kwargs):
x, y = tuple(inputs)
n_samples = kwargs.get('n_samples', 1)
concatted = tf.keras.layers.Concatenate()([x, y])
z_mean, z_std = self._encoder(concatted)
q_zx = tfp.distributions.Normal(z_mean, z_std)
p_z = tfp.distributions.Normal(tf.zeros(z_mean.shape), tf.ones(z_std.shape))
z = p_z.sample((n_samples,)) * tf.expand_dims(z_std, 0) + tf.expand_dims(z_mean, 0)
y = tf.broadcast_to(y, [n_samples, y.shape[0], y.shape[1]])
concatted = tf.keras.layers.Concatenate()([z, y])
x_mean, x_std = self._decoder(concatted)
p_xz = tfp.distributions.Normal(x_mean, x_std)
return q_zx, p_xz, z
def get_config(self):
return super().get_config()
class Bagel:
def __init__(self,
window_size: int = 120,
time_feature: Optional[str] = 'MHw',
hidden_dims: Sequence = (100, 100),
latent_dim: int = 8,
learning_rate: float = 1e-3,
dropout_rate: float = 0.1):
self._window_size = window_size
self._time_feature = time_feature
self._dropout_rate = dropout_rate
self._model = ConditionalVariationalAutoencoder(
encoder=AutoencoderLayer(hidden_dims, latent_dim),
decoder=AutoencoderLayer(list(reversed(hidden_dims)), self._window_size),
)
self._p_z = tfp.distributions.Normal(tf.zeros(latent_dim), tf.ones(latent_dim))
lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=learning_rate,
decay_steps=10000,
decay_rate=0.75,
staircase=True
)
self._optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler, clipnorm=10.)
self._checkpoint = tf.train.Checkpoint(model=self._model, optimizer=self._optimizer)
@staticmethod
def _m_elbo(x: tf.Tensor,
z: tf.Tensor,
normal: tf.Tensor,
q_zx: tfp.distributions.Normal,
p_z: tfp.distributions.Normal,
p_xz: tfp.distributions.Normal) -> tf.Tensor:
x = tf.expand_dims(x, 0)
normal = tf.expand_dims(normal, 0)
log_p_xz = p_xz.log_prob(x)
log_q_zx = tf.math.reduce_sum(q_zx.log_prob(z), axis=-1)
log_p_z = tf.math.reduce_sum(p_z.log_prob(z), axis=-1)
ratio = (tf.math.reduce_sum(normal, axis=-1) / float(normal.shape[-1]))
return tf.math.reduce_mean(tf.math.reduce_sum(log_p_xz * normal, axis=-1) + log_p_z * ratio - log_q_zx)
def _missing_imputation(self, x: tf.Tensor, y: tf.Tensor, normal: tf.Tensor, steps: int = 10) -> tf.Tensor:
cond = tf.cast(normal, 'bool')
for _ in range(steps):
_, p_xz, _ = self._model([x, y])
reconstruction = p_xz.sample()[0]
x = tf.where(cond, x, reconstruction)
return x
@tf.function
def _train_step(self, x: tf.Tensor, y: tf.Tensor, normal: tf.Tensor) -> tf.Tensor:
with tf.GradientTape() as tape:
y = tf.keras.layers.Dropout(self._dropout_rate)(y)
q_zx, p_xz, z = self._model([x, y])
loss = -self._m_elbo(x, z, normal, q_zx, self._p_z, p_xz)
loss += tf.math.add_n(self._model.losses)
self._optimizer.minimize(loss, self._model.trainable_weights, tape=tape)
return loss
@tf.function
def _validation_step(self, x: tf.Tensor, y: tf.Tensor, normal: tf.Tensor) -> tf.Tensor:
q_zx, p_xz, z = self._model([x, y])
val_loss = -self._m_elbo(x, z, normal, q_zx, self._p_z, p_xz)
val_loss += tf.math.add_n(self._model.losses)
return val_loss
@tf.function
def _test_step(self, x: tf.Tensor, y: tf.Tensor, normal: tf.Tensor) -> Tuple[tf.Tensor, np.ndarray]:
x = self._missing_imputation(x, y, normal)
q_zx, p_xz, z = self._model([x, y], n_samples=128)
test_loss = -self._m_elbo(x, z, normal, q_zx, self._p_z, p_xz)
log_p_xz = p_xz.log_prob(x)
return test_loss, log_p_xz
def fit(self,
kpi: bagel.data.KPI,
epochs: int,
validation_kpi: Optional[bagel.data.KPI] = None,
batch_size: int = 256,
verbose: int = 1) -> Dict:
dataset = bagel.data.KPIDataset(kpi,
window_size=self._window_size,
time_feature=self._time_feature,
missing_injection_rate=0.01).to_tensorflow()
dataset = dataset.shuffle(len(dataset)).batch(batch_size, drop_remainder=True)
validation_dataset = None
if validation_kpi is not None:
validation_dataset = bagel.data.KPIDataset(validation_kpi,
window_size=self._window_size,
time_feature=self._time_feature).to_tensorflow()
validation_dataset = validation_dataset.shuffle(len(validation_dataset)).batch(batch_size)
losses = []
val_losses = []
history = {}
progbar = None
if verbose == 1:
print('Training Epochs')
progbar = tf.keras.utils.Progbar(epochs,
interval=0.5,
stateful_metrics=['loss', 'val_loss'],
unit_name='epoch')
for epoch in range(epochs):
epoch_losses = []
epoch_val_losses = []
epoch_val_loss = np.nan
if verbose == 2:
print(f'Training Epoch {epoch + 1}/{epochs}')
progbar = tf.keras.utils.Progbar(
target=len(dataset) + (0 if validation_kpi is None else len(validation_dataset)),
interval=0.5
)
for batch in dataset:
loss = self._train_step(*batch)
epoch_losses.append(loss)
if verbose == 2:
progbar.add(1, values=[('loss', loss)])
epoch_loss = tf.math.reduce_mean(epoch_losses).numpy()
losses.append(epoch_loss)
if validation_kpi is not None:
for batch in validation_dataset:
val_loss = self._validation_step(*batch)
epoch_val_losses.append(val_loss)
if verbose == 2:
progbar.add(1, values=[('val_loss', val_loss)])
epoch_val_loss = tf.math.reduce_mean(epoch_val_losses).numpy()
val_losses.append(epoch_val_loss)
if verbose == 1:
values = []
if not np.isnan(epoch_loss):
values.append(('loss', epoch_loss))
if not np.isnan(epoch_val_loss):
values.append(('val_loss', epoch_val_loss))
progbar.add(1, values=values)
history['loss'] = losses
if len(val_losses) > 0:
history['val_loss'] = val_losses
return history
def predict(self, kpi: bagel.data.KPI, batch_size: int = 256, verbose: int = 1) -> np.ndarray:
kpi = kpi.no_labels()
dataset = bagel.data.KPIDataset(kpi,
window_size=self._window_size,
time_feature=self._time_feature).to_tensorflow()
dataset = dataset.batch(batch_size)
progbar = None
if verbose == 1:
print('Testing Epoch')
progbar = tf.keras.utils.Progbar(len(dataset), interval=0.5)
anomaly_scores = []
for batch in dataset:
test_loss, log_p_xz = self._test_step(*batch)
anomaly_scores.extend(-np.mean(log_p_xz[:, :, -1], axis=0))
if verbose == 1:
progbar.add(1, values=[('test_loss', test_loss)])
anomaly_scores = np.asarray(anomaly_scores, dtype=np.float32)
anomaly_scores = np.concatenate([np.ones(self._window_size - 1) * np.min(anomaly_scores), anomaly_scores])
return anomaly_scores
def save(self, prefix: str):
self._checkpoint.write(prefix)
def load(self, prefix: str):
self._checkpoint.read(prefix).expect_partial()
| [
"tensorflow.train.Checkpoint",
"tensorflow.math.add_n",
"bagel.data.KPIDataset",
"tensorflow.GradientTape",
"tensorflow.cast",
"numpy.mean",
"tensorflow.keras.Sequential",
"numpy.asarray",
"tensorflow.math.reduce_mean",
"numpy.min",
"tensorflow.zeros",
"tensorflow_probability.distributions.Nor... | [((314, 335), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (333, 335), True, 'import tensorflow as tf\n'), ((1397, 1436), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', (['z_mean', 'z_std'], {}), '(z_mean, z_std)\n', (1421, 1436), True, 'import tensorflow_probability as tfp\n'), ((1626, 1681), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['y', '[n_samples, y.shape[0], y.shape[1]]'], {}), '(y, [n_samples, y.shape[0], y.shape[1]])\n', (1641, 1681), True, 'import tensorflow as tf\n'), ((1804, 1843), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', (['x_mean', 'x_std'], {}), '(x_mean, x_std)\n', (1828, 1843), True, 'import tensorflow_probability as tfp\n'), ((2703, 2843), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'learning_rate', 'decay_steps': '(10000)', 'decay_rate': '(0.75)', 'staircase': '(True)'}), '(initial_learning_rate=\n learning_rate, decay_steps=10000, decay_rate=0.75, staircase=True)\n', (2749, 2843), True, 'import tensorflow as tf\n'), ((2923, 2990), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_scheduler', 'clipnorm': '(10.0)'}), '(learning_rate=lr_scheduler, clipnorm=10.0)\n', (2947, 2990), True, 'import tensorflow as tf\n'), ((3017, 3082), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'self._model', 'optimizer': 'self._optimizer'}), '(model=self._model, optimizer=self._optimizer)\n', (3036, 3082), True, 'import tensorflow as tf\n'), ((3366, 3386), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (3380, 3386), True, 'import tensorflow as tf\n'), ((3404, 3429), 'tensorflow.expand_dims', 'tf.expand_dims', (['normal', '(0)'], {}), '(normal, 0)\n', (3418, 3429), True, 'import tensorflow as tf\n'), ((3914, 3937), 'tensorflow.cast', 'tf.cast', 
(['normal', '"""bool"""'], {}), "(normal, 'bool')\n", (3921, 3937), True, 'import tensorflow as tf\n'), ((4852, 4885), 'tensorflow.math.add_n', 'tf.math.add_n', (['self._model.losses'], {}), '(self._model.losses)\n', (4865, 4885), True, 'import tensorflow as tf\n'), ((9202, 9246), 'numpy.asarray', 'np.asarray', (['anomaly_scores'], {'dtype': 'np.float32'}), '(anomaly_scores, dtype=np.float32)\n', (9212, 9246), True, 'import numpy as np\n'), ((1295, 1324), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (1322, 1324), True, 'import tensorflow as tf\n'), ((1476, 1498), 'tensorflow.zeros', 'tf.zeros', (['z_mean.shape'], {}), '(z_mean.shape)\n', (1484, 1498), True, 'import tensorflow as tf\n'), ((1500, 1520), 'tensorflow.ones', 'tf.ones', (['z_std.shape'], {}), '(z_std.shape)\n', (1507, 1520), True, 'import tensorflow as tf\n'), ((1588, 1613), 'tensorflow.expand_dims', 'tf.expand_dims', (['z_mean', '(0)'], {}), '(z_mean, 0)\n', (1602, 1613), True, 'import tensorflow as tf\n'), ((1702, 1731), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (1729, 1731), True, 'import tensorflow as tf\n'), ((2637, 2657), 'tensorflow.zeros', 'tf.zeros', (['latent_dim'], {}), '(latent_dim)\n', (2645, 2657), True, 'import tensorflow as tf\n'), ((2659, 2678), 'tensorflow.ones', 'tf.ones', (['latent_dim'], {}), '(latent_dim)\n', (2666, 2678), True, 'import tensorflow as tf\n'), ((3611, 3646), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['normal'], {'axis': '(-1)'}), '(normal, axis=-1)\n', (3629, 3646), True, 'import tensorflow as tf\n'), ((4076, 4109), 'tensorflow.where', 'tf.where', (['cond', 'x', 'reconstruction'], {}), '(cond, x, reconstruction)\n', (4084, 4109), True, 'import tensorflow as tf\n'), ((4245, 4262), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4260, 4262), True, 'import tensorflow as tf\n'), ((4473, 4506), 'tensorflow.math.add_n', 'tf.math.add_n', 
(['self._model.losses'], {}), '(self._model.losses)\n', (4486, 4506), True, 'import tensorflow as tf\n'), ((6468, 6574), 'tensorflow.keras.utils.Progbar', 'tf.keras.utils.Progbar', (['epochs'], {'interval': '(0.5)', 'stateful_metrics': "['loss', 'val_loss']", 'unit_name': '"""epoch"""'}), "(epochs, interval=0.5, stateful_metrics=['loss',\n 'val_loss'], unit_name='epoch')\n", (6490, 6574), True, 'import tensorflow as tf\n'), ((614, 645), 'tensorflow.keras.regularizers.L2', 'tf.keras.regularizers.L2', (['(0.001)'], {}), '(0.001)\n', (638, 645), True, 'import tensorflow as tf\n'), ((720, 751), 'tensorflow.keras.regularizers.L2', 'tf.keras.regularizers.L2', (['(0.001)'], {}), '(0.001)\n', (744, 751), True, 'import tensorflow as tf\n'), ((1561, 1585), 'tensorflow.expand_dims', 'tf.expand_dims', (['z_std', '(0)'], {}), '(z_std, 0)\n', (1575, 1585), True, 'import tensorflow as tf\n'), ((4288, 4331), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['self._dropout_rate'], {}), '(self._dropout_rate)\n', (4311, 4331), True, 'import tensorflow as tf\n'), ((5515, 5639), 'bagel.data.KPIDataset', 'bagel.data.KPIDataset', (['kpi'], {'window_size': 'self._window_size', 'time_feature': 'self._time_feature', 'missing_injection_rate': '(0.01)'}), '(kpi, window_size=self._window_size, time_feature=self\n ._time_feature, missing_injection_rate=0.01)\n', (5536, 5639), False, 'import bagel\n'), ((8507, 8602), 'bagel.data.KPIDataset', 'bagel.data.KPIDataset', (['kpi'], {'window_size': 'self._window_size', 'time_feature': 'self._time_feature'}), '(kpi, window_size=self._window_size, time_feature=self\n ._time_feature)\n', (8528, 8602), False, 'import bagel\n'), ((3709, 3755), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(log_p_xz * normal)'], {'axis': '(-1)'}), '(log_p_xz * normal, axis=-1)\n', (3727, 3755), True, 'import tensorflow as tf\n'), ((5964, 6069), 'bagel.data.KPIDataset', 'bagel.data.KPIDataset', (['validation_kpi'], {'window_size': 'self._window_size', 
'time_feature': 'self._time_feature'}), '(validation_kpi, window_size=self._window_size,\n time_feature=self._time_feature)\n', (5985, 6069), False, 'import bagel\n'), ((7381, 7414), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['epoch_losses'], {}), '(epoch_losses)\n', (7400, 7414), True, 'import tensorflow as tf\n'), ((7988, 8008), 'numpy.isnan', 'np.isnan', (['epoch_loss'], {}), '(epoch_loss)\n', (7996, 8008), True, 'import numpy as np\n'), ((8089, 8113), 'numpy.isnan', 'np.isnan', (['epoch_val_loss'], {}), '(epoch_val_loss)\n', (8097, 8113), True, 'import numpy as np\n'), ((9045, 9080), 'numpy.mean', 'np.mean', (['log_p_xz[:, :, -1]'], {'axis': '(0)'}), '(log_p_xz[:, :, -1], axis=0)\n', (9052, 9080), True, 'import numpy as np\n'), ((9288, 9318), 'numpy.ones', 'np.ones', (['(self._window_size - 1)'], {}), '(self._window_size - 1)\n', (9295, 9318), True, 'import numpy as np\n'), ((9321, 9343), 'numpy.min', 'np.min', (['anomaly_scores'], {}), '(anomaly_scores)\n', (9327, 9343), True, 'import numpy as np\n'), ((493, 524), 'tensorflow.keras.regularizers.L2', 'tf.keras.regularizers.L2', (['(0.001)'], {}), '(0.001)\n', (517, 524), True, 'import tensorflow as tf\n'), ((7811, 7848), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['epoch_val_losses'], {}), '(epoch_val_losses)\n', (7830, 7848), True, 'import tensorflow as tf\n')] |
import os
import sys
import json
import copy
import numpy as np
import pandas as pd
import random
import tensorflow as tf
# import PIL
# Pin every RNG source (hash seed, python, numpy, tensorflow) BEFORE keras is
# imported, so weight initialization and shuffling are reproducible.
seed_value = 123
os.environ['PYTHONHASSHEED'.replace('SSH', 'SH')]=str(seed_value) if False else None or str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
tf.set_random_seed(seed_value)
from keras.utils import to_categorical
import keras.backend as k
# TF1-style session config: allow op placement fallback and grow GPU memory
# on demand instead of pre-allocating the whole device.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
k.set_session(tf.Session(config=config))
# Make the current working directory importable so the package below resolves.
sys.path.append('/'.join(os.getcwd().split('/')))
from ornstein_auto_encoder import logging_daily
from ornstein_auto_encoder import configuration
from ornstein_auto_encoder import readers
from ornstein_auto_encoder import samplers
from ornstein_auto_encoder import build_network
from ornstein_auto_encoder.utils import argv_parse
# The FID implementation differs between TF 1.15 and earlier 1.x releases.
if '1.15' in tf.__version__:
    from ornstein_auto_encoder.fid_v1_15 import get_fid as _get_fid
else:
    from ornstein_auto_encoder.fid import get_fid as _get_fid
from ornstein_auto_encoder.inception_score import get_inception_score as _get_inception_score
#####################################################################################################
def get_fid(images1, images2):
    """Compute the FID between two batches of images.

    Both inputs are NHWC arrays whose pixel values are assumed to lie in
    [-1, 1] (the ``* 0.5 + 0.5`` rescaling maps that range to [0, 1]);
    they are converted to NCHW with values clamped to [0, 255] before
    being handed to the backend FID implementation.
    """
    def _to_nchw_pixel_range(batch):
        # NHWC -> NCHW, then undo the [-1, 1] normalization and clamp.
        rescaled = 255 * (batch.transpose([0, 3, 1, 2]) * 0.5 + 0.5)
        return np.clip(rescaled, 0, 255)

    return _get_fid(_to_nchw_pixel_range(images1), _to_nchw_pixel_range(images2))
def get_is(images, size=100):
    """Compute the Inception Score of an image batch.

    ``images`` is an NHWC array assumed scaled to [-1, 1]; it is converted
    to NCHW with values clamped to [0, 255] for the backend scorer.

    Note: ``size`` is accepted for interface compatibility but is not used
    by this implementation.
    """
    rescaled = 255 * (images.transpose([0, 3, 1, 2]) * 0.5 + 0.5)
    pixel_batch = np.clip(rescaled, 0, 255)
    scores = _get_inception_score(pixel_batch, splits=1)
    return scores[0]
if __name__=='__main__':
    # Parse command-line arguments into a dict of option -> list of values.
    argdict = argv_parse(sys.argv)

    # Daily-rotating logger configured from the given log-info file.
    logger = logging_daily.logging_daily(argdict['log_info'][0])
    logger.reset_logging()
    log = logger.get_logging()
    log.setLevel(logging_daily.logging.INFO)
    log.info('-----------------------------------------------------------------------------------')
    log.info('Evaluate the performance measures for VGGFace2')
    log.info('-----------------------------------------------------------------------------------')

    model_path = argdict['model_path'][0].strip()
    # Fall back to the last path component when no model alias was supplied.
    # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
    try:
        model_aka = argdict['model_aka'][0].strip()
    except Exception:
        model_aka = model_path.split('/')[-1]
    feature_b = True

    path_info_config = argdict['path_info'][0]
    network_info_config = argdict['network_info'][0]
    ##############################################################################################
    # Set hyper-parameter for testing
    config_data = configuration.Configurator(path_info_config, log, verbose=False)
    config_data.set_config_map(config_data.get_section_map())
    config_network = configuration.Configurator(network_info_config, log, verbose=False)
    config_network.set_config_map(config_network.get_section_map())

    path_info = config_data.get_config_map()
    network_info = config_network.get_config_map()
    path_info['model_info']['model_dir'] = model_path
    # Evaluate the fixed-B variant with the generic HSIC_GAN network class.
    # BUG FIX: the original line used `==` (a no-op comparison) where an
    # assignment was clearly intended.
    if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
        network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
        if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
    # Evaluation runs must not warm-start or augment, regardless of config.
    if network_info['training_info']['warm_start'] == 'True':
        network_info['training_info']['warm_start'] = 'False'
        network_info['training_info']['warm_start_model'] = ''
    if network_info['model_info']['augment'] == 'True':
        network_info['model_info']['augment'] = 'False'
    ##############################################################################################
    # Reader: loads the data split used to compute the evaluation statistics.
    reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
    reader = reader_class(log, path_info, network_info, mode='train', verbose=True)
def get_numerics(model_path, model_aka,
                 path_info_config = "configurations/vggface2/psoae_path_info.cfg",
                 network_info_config = "configurations/vggface2/psoae_network_total_info.cfg",
                 unknown=False, feature_b=False):
    """Evaluate generative quality measures for a trained model and log them.

    Repeats the evaluation 10 times on randomly drawn subject subsets and logs
    the mean/std of sharpness, Inception Score (IS) and FID for the real
    images, the generated images and (for OAE models) one-shot generations.

    Args:
        model_path: directory containing the trained model files.
        model_aka: human-readable model name used in the log output.
        path_info_config: path-configuration file for the data reader.
        network_info_config: network-configuration file for the model.
        unknown: if True, evaluate on held-out subjects never seen in training.
        feature_b: if True, the product-space encoder consumes an extra
            feature tensor (must match how the model was trained).

    Returns:
        None. All results are written to the module-level logger.
    """
    # Load configurations and normalize them for evaluation mode.
    # (Fixed: a second, duplicate config load from the global `argdict`
    # previously overwrote these and silently ignored the function's
    # `path_info_config` / `network_info_config` parameters.)
    config_data = configuration.Configurator(path_info_config, log, verbose=False)
    config_data.set_config_map(config_data.get_section_map())
    config_network = configuration.Configurator(network_info_config, log, verbose=False)
    config_network.set_config_map(config_network.get_section_map())
    path_info = config_data.get_config_map()
    network_info = config_network.get_config_map()
    path_info['model_info']['model_dir'] = model_path
    if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
        # Fixed: was a no-op comparison ('==') where an assignment was intended.
        network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
        if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
    # Evaluation mode: disable warm start and training-time augmentation.
    if network_info['training_info']['warm_start'] == 'True':
        network_info['training_info']['warm_start'] = 'False'
        network_info['training_info']['warm_start_model'] = ''
    if network_info['model_info']['augment'] == 'True':
        network_info['model_info']['augment'] = 'False'
    log.info('-----------------------------------------------------------------')
    log.info('%s: unknown=%s' % (model_aka, unknown))
    log.info('-----------------------------------------------------------------')
    ### Bulid network ####################################################################################
    log.info('-----------------------------------------------------------------')
    # Strip the 'FixedB' tag so the generic class from build_network is used.
    network_class = getattr(build_network, ''.join(network_info['model_info']['network_class'].strip().split('FixedB')))
    network = network_class(log, path_info, network_info, n_label=reader.get_n_label())
    network.build_model('./%s/%s' % (model_path, path_info['model_info']['model_architecture']), verbose=0)
    network.load(model_path)
    log.info('-----------------------------------------------------------------')
    # Indices of the evaluation samples held out during training.
    test_tot_idxs_path = os.path.join(model_path, path_info['model_info']['test_tot_idxs'])
    test_idx = np.load(test_tot_idxs_path)
    if unknown:
        # Real test data sampler (subjects never seen during training).
        new_network_info = copy.deepcopy(network_info)
        new_path_info = copy.deepcopy(path_info)
        new_reader = reader_class(log, new_path_info, new_network_info, mode='test', verbose=False)
        test_idx = np.arange(new_reader.get_label().shape[0])
    log.info('Construct test data sampler')
    validation_sampler_class = getattr(samplers, network_info['validation_info']['sampler_class'].strip())
    if unknown:
        test_sampler = validation_sampler_class(log, test_idx, new_reader, network_info['validation_info'], verbose=False)
    else:
        test_sampler = validation_sampler_class(log, test_idx, reader, network_info['validation_info'], verbose=False)
    # Accumulators over the 10 evaluation repetitions.
    tot_sharpness_original = []
    tot_is_original = []
    tot_gen_fid = []
    tot_gen_is = []
    tot_sharpness_gen = []
    tot_one_shot_gen_fid = []
    tot_one_shot_gen_is = []
    tot_one_shot_sharpness_gen = []
    for nrepeat in range(10):
        log.info('-%d------------------------------------------------' % nrepeat)
        nunit = 30           # subjects per repetition
        nobservations = 300  # images sampled per subject
        picked_y_class = np.random.choice(test_sampler.y_class, nunit, replace=False)
        test_idxs = []
        picked_one_shot_idxs = []
        for yc in picked_y_class:
            # NOTE(review): np.random.choice defaults to replace=True, so the
            # except branch looks unreachable for a size mismatch — verify.
            try: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations)
            except Exception: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations, replace=True)
            test_idxs.append(chosen_observations)
            picked_one_shot_idxs.append(np.random.choice(np.arange(nobservations), 1)[0])
        test_idxs = np.array(test_idxs).flatten()
        picked_one_shot_idxs = np.array(picked_one_shot_idxs)
        x, y = test_sampler.reader.get_batch(test_idxs)
        y_table = pd.Series(y)
        y_class = y_table.unique()
        y_counts = y_table.value_counts()
        log.info('-------------------------------------------------')
        log.info('Images per Class')
        log.info('\n%s', y_counts)
        log.info('-------------------------------------------------')
        log.info('Summary')
        log.info('\n%s', y_counts.describe())
        log.info('-------------------------------------------------')
        repeated = 300  # generated samples per subject
        gen_y_class = np.repeat(y_class, repeated, axis=0)
        # assumes x is either an image tensor (4-D) or a tuple whose first
        # element is the image tensor — TODO confirm against the reader.
        try:
            if len(x.shape) == 4: real_img = x
        except Exception: real_img = x[0]
        if 'randomintercept' in network_info['model_info']['network_class'].lower():
            b_sd = float(network_info['model_info']['b_sd'])
            estimate_b, fake_noise = network.encoder_model.predict(x, batch_size=100)
            # NOTE(review): new_b is never used afterwards, but it consumes
            # the seeded RNG stream; kept so reported numbers are unchanged.
            new_b = np.random.multivariate_normal(np.zeros(network.get_z_dim()),
                                                  b_sd**2.*np.identity(network.get_z_dim()),
                                                  y_class.shape[0]).astype(np.float32)
            # Per-class intercept: mean of 5 random per-class encoder estimates.
            b = np.array([np.mean(estimate_b[np.random.choice(np.where(y==cls)[0],5)], axis=0) for cls in y_class])
            picked_one_shot_idxs_per_class = np.array([np.where(y==cls)[0][picked_one_shot_idxs[i]] for i, cls in enumerate(y_class)])
            one_shot_b = np.array([estimate_b[picked_one_shot_idxs_per_class[i]] for i, cls in enumerate(y_class)])
        elif 'productspace' in network_info['model_info']['network_class'].lower():
            wx, wy = network.main_sampler(x,y)
            if feature_b: img, feature, clss, b_noise = wx
            else: img, clss, b_noise = wx
            b_sd = float(network_info['model_info']['b_sd'])
            if feature_b:
                sample_b, b_given_x, estimate_b = network.encoder_b_model.predict_on_batch([feature, clss])
            else:
                sample_b, b_given_x, estimate_b = network.encoder_b_model.predict_on_batch([img, clss])
            b = np.array([np.mean(estimate_b[np.random.choice(np.where(y==cls)[0],5)], axis=0) for cls in y_class])
            fake_noise = network.encoder_e_model.predict(wx[:-1], batch_size=100)
            # NOTE(review): new_b is never used afterwards, but it consumes
            # the seeded RNG stream; kept so reported numbers are unchanged.
            new_b = np.random.multivariate_normal(np.zeros(b.shape[-1]),
                                                  b_sd**2.*np.identity(b.shape[-1]),
                                                  y_class.shape[0]).astype(np.float32)
            picked_one_shot_idxs_per_class = np.array([np.where(y==cls)[0][picked_one_shot_idxs[i]] for i, cls in enumerate(y_class)])
            one_shot_b = np.array([estimate_b[picked_one_shot_idxs_per_class[i]] for i, cls in enumerate(y_class)])
        else:
            fake_latent = network.encoder_model.predict(real_img, batch_size=100)
            fake_noise = fake_latent
        # Gaussian noise in the latent (noise) space, one draw per generation.
        mean = np.zeros(fake_noise.shape[-1])
        cov = float(network_info['model_info']['e_sd'])**2.*np.identity(fake_noise.shape[-1])
        noise = np.random.multivariate_normal(mean,cov,y_class.shape[0]*repeated).astype(np.float32)
        if 'randomintercept' in network_info['model_info']['network_class'].lower():
            generated_images = network.decoder_model.predict(noise + np.repeat(b, repeated, axis=0), batch_size=100)
            one_shot_generated_images = network.decoder_model.predict(noise + np.repeat(one_shot_b, repeated, axis=0), batch_size=100)
        elif 'productspace' in network_info['model_info']['network_class'].lower():
            generated_images = network.decoder_model.predict(np.concatenate([np.repeat(b, repeated, axis=0), noise], axis=-1),
                                                             batch_size=100)
            one_shot_generated_images = network.decoder_model.predict(np.concatenate([np.repeat(one_shot_b, repeated, axis=0), noise], axis=-1),
                                                                      batch_size=100)
        elif 'conditional' in network_info['model_info']['network_class'].lower():
            generated_images = network.decoder_model.predict([noise, to_categorical(gen_y_class, reader.get_n_label())], batch_size=100)
        else:
            generated_images = network.decoder_model.predict(noise, batch_size=100)
        # Collect all measures for this repetition.
        numeric_dict = {}
        origin_sharpness = np.min(network.blurr_model.predict(real_img,batch_size=100))
        gen_sharpness = np.min(network.blurr_model.predict(generated_images,batch_size=100))
        numeric_dict['sharpness_original'] = origin_sharpness
        numeric_dict['original_is'] = get_is(real_img)
        numeric_dict['sharpness_gen'] = gen_sharpness
        numeric_dict['gen_fid'] = get_fid(real_img, generated_images)
        numeric_dict['gen_is'] = get_is(generated_images)
        if 'oae' in network_info['model_info']['network_class'].lower():
            one_shot_gen_sharpness = np.min(network.blurr_model.predict(one_shot_generated_images, batch_size=100))
            numeric_dict['sharpness_one_shot_gen'] = one_shot_gen_sharpness
            numeric_dict['one_shot_gen_fid'] = get_fid(real_img, one_shot_generated_images)
            numeric_dict['one_shot_gen_is'] = get_is(one_shot_generated_images)
        log.info(numeric_dict)
        tot_sharpness_original.append(numeric_dict['sharpness_original'])
        tot_gen_fid.append(numeric_dict['gen_fid'])
        tot_gen_is.append(numeric_dict['gen_is'])
        tot_is_original.append(numeric_dict['original_is'])
        tot_sharpness_gen.append(numeric_dict['sharpness_gen'])
        if 'oae' in network_info['model_info']['network_class'].lower():
            tot_one_shot_gen_fid.append(numeric_dict['one_shot_gen_fid'])
            tot_one_shot_gen_is.append(numeric_dict['one_shot_gen_is'])
            tot_one_shot_sharpness_gen.append(numeric_dict['sharpness_one_shot_gen'])
    # Summary over the 10 repetitions.
    log.info('-----------------------------------------------------------------')
    log.info('Results of %s: unknown=%s' % (model_aka, unknown))
    log.info('Original IS: %.3f (\pm %.3f)' % (np.mean(tot_is_original), np.std(tot_is_original)))
    log.info('Original_sharpness: %.3f (\pm %.3f)' % (np.mean(tot_sharpness_original), np.std(tot_sharpness_original)))
    log.info('FID: %.3f (\pm %.3f)' % (np.mean(tot_gen_fid), np.std(tot_gen_fid)))
    log.info('IS: %.3f (\pm %.3f)' % (np.mean(tot_gen_is), np.std(tot_gen_is)))
    log.info('Sharpness: %.3f (\pm %.3f)' % (np.mean(tot_sharpness_gen), np.std(tot_sharpness_gen)))
    if 'oae' in network_info['model_info']['network_class'].lower():
        log.info('One-shot FID: %.3f (\pm %.3f)' % (np.mean(tot_one_shot_gen_fid), np.std(tot_one_shot_gen_fid)))
        log.info('One-shot IS: %.3f (\pm %.3f)' % (np.mean(tot_one_shot_gen_is), np.std(tot_one_shot_gen_is)))
        log.info('One-shot Sharpness: %.3f (\pm %.3f)' % (np.mean(tot_one_shot_sharpness_gen), np.std(tot_one_shot_sharpness_gen)))
    log.info('-----------------------------------------------------------------------------------')
##############################################################################################
log.info('-----------------------------------------------------------------------------------')
# Evaluate once on subjects seen during training ...
get_numerics(model_path, model_aka, path_info_config, network_info_config, unknown=False, feature_b=feature_b)
# ... and once on held-out subjects never seen during training.
get_numerics(model_path, model_aka, path_info_config, network_info_config, unknown=True, feature_b=feature_b)
log.info('-----------------------------------------------------------------------------------')
log.info('Finished')
log.info('-----------------------------------------------------------------------------------') | [
"pandas.Index",
"numpy.array",
"copy.deepcopy",
"tensorflow.set_random_seed",
"numpy.arange",
"ornstein_auto_encoder.fid.get_fid",
"numpy.mean",
"numpy.repeat",
"numpy.where",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.ConfigProto",
"numpy.identity",
"ornstein_auto_encoder.ince... | [((198, 221), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (209, 221), False, 'import random\n'), ((222, 248), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (236, 248), True, 'import numpy as np\n'), ((249, 279), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed_value'], {}), '(seed_value)\n', (267, 279), True, 'import tensorflow as tf\n'), ((356, 397), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (370, 397), True, 'import tensorflow as tf\n'), ((449, 474), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (459, 474), True, 'import tensorflow as tf\n'), ((1401, 1423), 'ornstein_auto_encoder.fid.get_fid', '_get_fid', (['imgs1', 'imgs2'], {}), '(imgs1, imgs2)\n', (1409, 1423), True, 'from ornstein_auto_encoder.fid import get_fid as _get_fid\n'), ((1637, 1657), 'ornstein_auto_encoder.utils.argv_parse', 'argv_parse', (['sys.argv'], {}), '(sys.argv)\n', (1647, 1657), False, 'from ornstein_auto_encoder.utils import argv_parse\n'), ((1671, 1722), 'ornstein_auto_encoder.logging_daily.logging_daily', 'logging_daily.logging_daily', (["argdict['log_info'][0]"], {}), "(argdict['log_info'][0])\n", (1698, 1722), False, 'from ornstein_auto_encoder import logging_daily\n'), ((2546, 2610), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (['path_info_config', 'log'], {'verbose': '(False)'}), '(path_info_config, log, verbose=False)\n', (2572, 2610), False, 'from ornstein_auto_encoder import configuration\n'), ((2694, 2761), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (['network_info_config', 'log'], {'verbose': '(False)'}), '(network_info_config, log, verbose=False)\n', (2720, 2761), False, 'from ornstein_auto_encoder import configuration\n'), ((1557, 1593), 
'ornstein_auto_encoder.inception_score.get_inception_score', '_get_inception_score', (['imgs'], {'splits': '(1)'}), '(imgs, splits=1)\n', (1577, 1593), True, 'from ornstein_auto_encoder.inception_score import get_inception_score as _get_inception_score\n'), ((4192, 4256), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (['path_info_config', 'log'], {'verbose': '(False)'}), '(path_info_config, log, verbose=False)\n', (4218, 4256), False, 'from ornstein_auto_encoder import configuration\n'), ((4348, 4415), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (['network_info_config', 'log'], {'verbose': '(False)'}), '(network_info_config, log, verbose=False)\n', (4374, 4415), False, 'from ornstein_auto_encoder import configuration\n'), ((5537, 5608), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (["argdict['path_info'][0]", 'log'], {'verbose': '(False)'}), "(argdict['path_info'][0], log, verbose=False)\n", (5563, 5608), False, 'from ornstein_auto_encoder import configuration\n'), ((5700, 5774), 'ornstein_auto_encoder.configuration.Configurator', 'configuration.Configurator', (["argdict['network_info'][0]", 'log'], {'verbose': '(False)'}), "(argdict['network_info'][0], log, verbose=False)\n", (5726, 5774), False, 'from ornstein_auto_encoder import configuration\n'), ((7358, 7424), 'os.path.join', 'os.path.join', (['model_path', "path_info['model_info']['test_tot_idxs']"], {}), "(model_path, path_info['model_info']['test_tot_idxs'])\n", (7370, 7424), False, 'import os\n'), ((7444, 7471), 'numpy.load', 'np.load', (['test_tot_idxs_path'], {}), '(test_tot_idxs_path)\n', (7451, 7471), True, 'import numpy as np\n'), ((7583, 7610), 'copy.deepcopy', 'copy.deepcopy', (['network_info'], {}), '(network_info)\n', (7596, 7610), False, 'import copy\n'), ((7639, 7663), 'copy.deepcopy', 'copy.deepcopy', (['path_info'], {}), '(path_info)\n', (7652, 7663), False, 'import copy\n'), ((8816, 
8876), 'numpy.random.choice', 'np.random.choice', (['test_sampler.y_class', 'nunit'], {'replace': '(False)'}), '(test_sampler.y_class, nunit, replace=False)\n', (8832, 8876), True, 'import numpy as np\n'), ((9500, 9530), 'numpy.array', 'np.array', (['picked_one_shot_idxs'], {}), '(picked_one_shot_idxs)\n', (9508, 9530), True, 'import numpy as np\n'), ((9615, 9627), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (9624, 9627), True, 'import pandas as pd\n'), ((9650, 9661), 'pandas.Index', 'pd.Index', (['y'], {}), '(y)\n', (9658, 9661), True, 'import pandas as pd\n'), ((9723, 9735), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (9732, 9735), True, 'import pandas as pd\n'), ((10241, 10277), 'numpy.repeat', 'np.repeat', (['y_class', 'repeated'], {'axis': '(0)'}), '(y_class, repeated, axis=0)\n', (10250, 10277), True, 'import numpy as np\n'), ((12814, 12844), 'numpy.zeros', 'np.zeros', (['fake_noise.shape[-1]'], {}), '(fake_noise.shape[-1])\n', (12822, 12844), True, 'import numpy as np\n'), ((502, 513), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (511, 513), False, 'import os\n'), ((12909, 12942), 'numpy.identity', 'np.identity', (['fake_noise.shape[-1]'], {}), '(fake_noise.shape[-1])\n', (12920, 12942), True, 'import numpy as np\n'), ((9435, 9454), 'numpy.array', 'np.array', (['test_idxs'], {}), '(test_idxs)\n', (9443, 9454), True, 'import numpy as np\n'), ((12963, 13032), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(y_class.shape[0] * repeated)'], {}), '(mean, cov, y_class.shape[0] * repeated)\n', (12992, 13032), True, 'import numpy as np\n'), ((16156, 16180), 'numpy.mean', 'np.mean', (['tot_is_original'], {}), '(tot_is_original)\n', (16163, 16180), True, 'import numpy as np\n'), ((16182, 16205), 'numpy.std', 'np.std', (['tot_is_original'], {}), '(tot_is_original)\n', (16188, 16205), True, 'import numpy as np\n'), ((16266, 16297), 'numpy.mean', 'np.mean', (['tot_sharpness_original'], {}), 
'(tot_sharpness_original)\n', (16273, 16297), True, 'import numpy as np\n'), ((16299, 16329), 'numpy.std', 'np.std', (['tot_sharpness_original'], {}), '(tot_sharpness_original)\n', (16305, 16329), True, 'import numpy as np\n'), ((16376, 16396), 'numpy.mean', 'np.mean', (['tot_gen_fid'], {}), '(tot_gen_fid)\n', (16383, 16396), True, 'import numpy as np\n'), ((16398, 16417), 'numpy.std', 'np.std', (['tot_gen_fid'], {}), '(tot_gen_fid)\n', (16404, 16417), True, 'import numpy as np\n'), ((16462, 16481), 'numpy.mean', 'np.mean', (['tot_gen_is'], {}), '(tot_gen_is)\n', (16469, 16481), True, 'import numpy as np\n'), ((16483, 16501), 'numpy.std', 'np.std', (['tot_gen_is'], {}), '(tot_gen_is)\n', (16489, 16501), True, 'import numpy as np\n'), ((16553, 16579), 'numpy.mean', 'np.mean', (['tot_sharpness_gen'], {}), '(tot_sharpness_gen)\n', (16560, 16579), True, 'import numpy as np\n'), ((16581, 16606), 'numpy.std', 'np.std', (['tot_sharpness_gen'], {}), '(tot_sharpness_gen)\n', (16587, 16606), True, 'import numpy as np\n'), ((13211, 13241), 'numpy.repeat', 'np.repeat', (['b', 'repeated'], {'axis': '(0)'}), '(b, repeated, axis=0)\n', (13220, 13241), True, 'import numpy as np\n'), ((13342, 13381), 'numpy.repeat', 'np.repeat', (['one_shot_b', 'repeated'], {'axis': '(0)'}), '(one_shot_b, repeated, axis=0)\n', (13351, 13381), True, 'import numpy as np\n'), ((16739, 16768), 'numpy.mean', 'np.mean', (['tot_one_shot_gen_fid'], {}), '(tot_one_shot_gen_fid)\n', (16746, 16768), True, 'import numpy as np\n'), ((16770, 16798), 'numpy.std', 'np.std', (['tot_one_shot_gen_fid'], {}), '(tot_one_shot_gen_fid)\n', (16776, 16798), True, 'import numpy as np\n'), ((16856, 16884), 'numpy.mean', 'np.mean', (['tot_one_shot_gen_is'], {}), '(tot_one_shot_gen_is)\n', (16863, 16884), True, 'import numpy as np\n'), ((16886, 16913), 'numpy.std', 'np.std', (['tot_one_shot_gen_is'], {}), '(tot_one_shot_gen_is)\n', (16892, 16913), True, 'import numpy as np\n'), ((16978, 17013), 'numpy.mean', 'np.mean', 
(['tot_one_shot_sharpness_gen'], {}), '(tot_one_shot_sharpness_gen)\n', (16985, 17013), True, 'import numpy as np\n'), ((17015, 17049), 'numpy.std', 'np.std', (['tot_one_shot_sharpness_gen'], {}), '(tot_one_shot_sharpness_gen)\n', (17021, 17049), True, 'import numpy as np\n'), ((9378, 9402), 'numpy.arange', 'np.arange', (['nobservations'], {}), '(nobservations)\n', (9387, 9402), True, 'import numpy as np\n'), ((11082, 11100), 'numpy.where', 'np.where', (['(y == cls)'], {}), '(y == cls)\n', (11090, 11100), True, 'import numpy as np\n'), ((12185, 12206), 'numpy.zeros', 'np.zeros', (['b.shape[-1]'], {}), '(b.shape[-1])\n', (12193, 12206), True, 'import numpy as np\n'), ((13568, 13598), 'numpy.repeat', 'np.repeat', (['b', 'repeated'], {'axis': '(0)'}), '(b, repeated, axis=0)\n', (13577, 13598), True, 'import numpy as np\n'), ((13789, 13828), 'numpy.repeat', 'np.repeat', (['one_shot_b', 'repeated'], {'axis': '(0)'}), '(one_shot_b, repeated, axis=0)\n', (13798, 13828), True, 'import numpy as np\n'), ((12272, 12296), 'numpy.identity', 'np.identity', (['b.shape[-1]'], {}), '(b.shape[-1])\n', (12283, 12296), True, 'import numpy as np\n'), ((12449, 12467), 'numpy.where', 'np.where', (['(y == cls)'], {}), '(y == cls)\n', (12457, 12467), True, 'import numpy as np\n'), ((10969, 10987), 'numpy.where', 'np.where', (['(y == cls)'], {}), '(y == cls)\n', (10977, 10987), True, 'import numpy as np\n'), ((11991, 12009), 'numpy.where', 'np.where', (['(y == cls)'], {}), '(y == cls)\n', (11999, 12009), True, 'import numpy as np\n')] |
import numpy as np
import pickle
from copy import deepcopy
from det3d.core import box_np_ops
from det3d.datasets.custom import PointCloudDataset
from det3d.datasets.registry import DATASETS
from .eval import get_lyft_eval_result
@DATASETS.register_module
class LyftDataset(PointCloudDataset):
    """Lyft Level-5 perception dataset wrapper.

    Loads pre-generated info pickles, class-balances the training split by
    resampling per-class infos, and converts ground truth / detections into
    the annotation format expected by the Lyft evaluation code.
    """

    NumPointFeatures = 4  # number of features stored per lidar point
    DatasetName = "LyftDataset"

    def __init__(
        self,
        root_path,
        info_path,
        cfg=None,
        pipeline=None,
        test_mode=False,
        **kwargs
    ):
        """Inits LyftDataset.

        Args:
            root_path: dataset root directory.
            info_path: pickle file with per-sample info dicts.
            cfg: unused, kept for interface compatibility.
            pipeline: data-processing pipeline config.
            test_mode: if True, skip the class-balanced resampling.
        """
        super(LyftDataset, self).__init__(
            root_path, info_path, pipeline, test_mode=test_mode
        )
        self._info_path = info_path
        self._class_names = ["car", "pedestrian", "motorcycle", "bicycle",
                             "other_vehicle", "bus", "truck"]
        self.load_infos(self._info_path)
        self._num_point_features = __class__.NumPointFeatures
        # Bidirectional mapping between class names and integer labels.
        self._cls2label = {}
        self._label2cls = {}
        for i in range(len(self._class_names)):
            self._cls2label[self._class_names[i]] = i
            self._label2cls[i] = self._class_names[i]

    def load_infos(self, info_path):
        """Load the info pickle; in training mode, rebalance classes.

        Training mode resamples each class's infos so that every class
        contributes roughly an equal fraction of samples.
        """
        with open(self._info_path, "rb") as f:
            _lyft_infos_all = pickle.load(f)

        if not self.test_mode:  # if training
            self.frac = int(len(_lyft_infos_all) * 0.25)

            # Collect, per class, every info that contains that class.
            _cls_infos = {name: [] for name in self._class_names}
            for info in _lyft_infos_all:
                for name in set(info["gt_names"]):
                    if name in self._class_names:
                        _cls_infos[name].append(info)

            duplicated_samples = sum([len(v) for _, v in _cls_infos.items()])
            _cls_dist = {k: len(v) / duplicated_samples for k, v in _cls_infos.items()}

            # Resample so each class approaches a 1/num_classes share.
            self._lyft_infos = []
            frac = 1.0 / len(self._class_names)
            ratios = [frac / v for v in _cls_dist.values()]

            for cls_infos, ratio in zip(list(_cls_infos.values()), ratios):
                self._lyft_infos += np.random.choice(
                    cls_infos, int(len(cls_infos) * ratio)
                ).tolist()

            # Recompute the (now approximately uniform) class distribution.
            _cls_infos = {name: [] for name in self._class_names}
            for info in self._lyft_infos:
                for name in set(info["gt_names"]):
                    if name in self._class_names:
                        _cls_infos[name].append(info)

            _cls_dist = {
                k: len(v) / len(self._lyft_infos) for k, v in _cls_infos.items()
            }
        else:
            if isinstance(_lyft_infos_all, dict):
                self._lyft_infos = []
                for v in _lyft_infos_all.values():
                    self._lyft_infos.extend(v)
            else:
                self._lyft_infos = _lyft_infos_all

    def __len__(self):
        # NOTE(review): reloading the pickle here overwrites the
        # class-balanced resampling done in load_infos on every len() call;
        # looks unintentional but is kept to preserve existing behavior —
        # verify before changing.
        with open(self._info_path, "rb") as f:
            self._lyft_infos = pickle.load(f)

        return len(self._lyft_infos)

    @property
    def num_point_features(self):
        return self._num_point_features

    @property
    def ground_truth_annotations(self):
        """Ground-truth boxes converted to the Lyft evaluation format."""
        annos = []
        for i in range(len(self._lyft_infos)):
            info = self._lyft_infos[i]
            token = info["token"]
            anno = {}
            # Keep only boxes whose class is one of the evaluated classes.
            gt_mask = np.where(
                np.array(
                    [1 if cls in self._cls2label else 0 for cls in info["gt_names"]]
                )
                == 1
            )[0]
            gt_boxes = info["gt_boxes"][gt_mask]
            box_num = gt_boxes.shape[0]
            anno["bbox"] = np.zeros((box_num, 4))  # 2D bbox placeholder
            anno["alpha"] = np.zeros(box_num, dtype=np.float32)
            anno["location"] = gt_boxes[:, :3]
            anno["dimensions"] = gt_boxes[:, 3:6]
            anno["rotation_y"] = gt_boxes[:, -1]
            anno["name"] = info["gt_names"][gt_mask].tolist()
            anno["gt_labels"] = np.array([self._cls2label[cls] for cls in anno["name"]])
            annos.append(anno)
        return annos

    def get_sensor_data(self, idx):
        """Run the pipeline on sample `idx` and return the processed data."""
        info = self._lyft_infos[idx]

        res = {
            "lidar": {"type": "lidar", "points": None,},
            "metadata": {
                "image_prefix": self._root_path,
                "num_point_features": self._num_point_features,
                "token": info["token"],
            },
            "calib": None,
            "cam": {},
            "mode": "val" if self.test_mode else "train",
        }

        data, _ = self.pipeline(res, info)

        return data

    def __getitem__(self, idx):
        return self.get_sensor_data(idx)

    def convert_detection_to_lyft_annos(self, dt_annos):
        """Convert raw detector output tensors to Lyft-format annotations."""
        annos = []
        for token, dt_anno in dt_annos.items():
            anno = {}
            dt_boxes = dt_anno["box3d_lidar"].cpu().numpy()
            box_num = dt_boxes.shape[0]
            labels = dt_anno["label_preds"].cpu().numpy()
            scores = dt_anno["scores"].cpu().numpy()
            anno["score"] = scores
            anno["bbox"] = np.zeros((box_num, 4))  # 2D bbox placeholder
            anno["alpha"] = np.zeros(box_num, dtype=np.float32)
            anno["dimensions"] = dt_boxes[:, 3:6]
            anno["location"] = dt_boxes[:, :3]
            anno["rotation_y"] = dt_boxes[:, -1]
            anno["name"] = [self._label2cls[label] for label in labels]
            annos.append(anno)
        return annos

    def evaluation(self, detections, output_dir=None):
        """Evaluate `detections` against ground truth; return Lyft metrics."""
        gt_annos = self.ground_truth_annotations
        dt_annos = self.convert_detection_to_lyft_annos(detections)
        result_lyft = get_lyft_eval_result(gt_annos, dt_annos, self._class_names)
        # Fixed: previously returned the undefined name `result_lyft_dict`,
        # which raised NameError on every call.
        return result_lyft
| [
"numpy.array",
"numpy.zeros",
"pickle.load"
] | [((1242, 1256), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1253, 1256), False, 'import pickle\n'), ((2909, 2923), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2920, 2923), False, 'import pickle\n'), ((3581, 3603), 'numpy.zeros', 'np.zeros', (['(box_num, 4)'], {}), '((box_num, 4))\n', (3589, 3603), True, 'import numpy as np\n'), ((3632, 3667), 'numpy.zeros', 'np.zeros', (['box_num'], {'dtype': 'np.float32'}), '(box_num, dtype=np.float32)\n', (3640, 3667), True, 'import numpy as np\n'), ((3908, 3964), 'numpy.array', 'np.array', (["[self._cls2label[cls] for cls in anno['name']]"], {}), "([self._cls2label[cls] for cls in anno['name']])\n", (3916, 3964), True, 'import numpy as np\n'), ((5037, 5059), 'numpy.zeros', 'np.zeros', (['(box_num, 4)'], {}), '((box_num, 4))\n', (5045, 5059), True, 'import numpy as np\n'), ((5088, 5123), 'numpy.zeros', 'np.zeros', (['box_num'], {'dtype': 'np.float32'}), '(box_num, dtype=np.float32)\n', (5096, 5123), True, 'import numpy as np\n'), ((3314, 3390), 'numpy.array', 'np.array', (["[(1 if cls in self._cls2label else 0) for cls in info['gt_names']]"], {}), "([(1 if cls in self._cls2label else 0) for cls in info['gt_names']])\n", (3322, 3390), True, 'import numpy as np\n')] |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, TYPE_CHECKING
import warnings
import pandas as pd
import sympy
from matplotlib import pyplot as plt
import numpy as np
from cirq import circuits, ops, study, value, _import
from cirq._compat import proper_repr
if TYPE_CHECKING:
import cirq
# We initialize optimize lazily, otherwise it slows global import speed.
optimize = _import.LazyLoader("optimize", globals(), "scipy.optimize")
def t1_decay(
    sampler: 'cirq.Sampler',
    *,
    qubit: 'cirq.Qid',
    num_points: int,
    max_delay: 'cirq.DURATION_LIKE',
    min_delay: 'cirq.DURATION_LIKE' = None,
    repetitions: int = 1000,
) -> 'cirq.experiments.T1DecayResult':
    """Runs a t1 decay experiment.

    Initializes a qubit into the |1⟩ state, waits for a variable amount of time,
    and measures the qubit. Plots how often the |1⟩ state is observed for each
    amount of waiting.

    Args:
        sampler: The quantum engine or simulator to run the circuits.
        qubit: The qubit under test.
        num_points: The number of evenly spaced delays to test.
        max_delay: The largest delay to test.
        min_delay: The smallest delay to test. Defaults to no delay.
        repetitions: The number of repetitions of the circuit for each delay.

    Returns:
        A T1DecayResult object that stores and can plot the data.

    Raises:
        ValueError: If `repetitions` is not positive, if `max_delay` is
            smaller than `min_delay`, or if `min_delay` is negative.
    """
    min_delay_dur = value.Duration(min_delay)
    max_delay_dur = value.Duration(max_delay)

    if repetitions <= 0:
        raise ValueError('repetitions <= 0')
    if max_delay_dur < min_delay_dur:
        raise ValueError('max_delay < min_delay')
    if min_delay_dur < 0:
        raise ValueError('min_delay < 0')

    var = sympy.Symbol('delay_ns')
    # Sweep the wait duration linearly from min_delay to max_delay, in ns.
    sweep = study.Linspace(
        var,
        start=min_delay_dur.total_nanos(),
        stop=max_delay_dur.total_nanos(),
        length=num_points,
    )
    # X prepares |1⟩; wait(var) idles; then measure the survival outcome.
    circuit = circuits.Circuit(
        ops.X(qubit),
        ops.wait(qubit, nanos=var),
        ops.measure(qubit, key='output'),
    )
    results = sampler.sample(circuit, params=sweep, repetitions=repetitions)

    # Cross tabulate into a delay_ns, false_count, true_count table.
    tab = pd.crosstab(results.delay_ns, results.output)
    tab.rename_axis(None, axis="columns", inplace=True)
    tab = tab.rename(columns={0: 'false_count', 1: 'true_count'}).reset_index()
    # Guarantee both outcome columns exist even if one outcome never occurred.
    for col_index, name in [(1, 'false_count'), (2, 'true_count')]:
        if name not in tab:
            tab.insert(col_index, name, [0] * tab.shape[0])

    return T1DecayResult(tab)
class T1DecayResult:
    """Results from a t1 decay experiment.

    Holds the (delay, outcome-count) table and derives the t1 constant by
    fitting an exponential decay to the excited-state probabilities.
    (Fixed: the docstring previously said "Rabi oscillation experiment",
    a copy-paste error.)
    """

    def __init__(self, data: pd.DataFrame):
        """Inits T1DecayResult.

        Args:
            data: A data frame with three columns:
                delay_ns, false_count, true_count.
        """
        assert list(data.columns) == ['delay_ns', 'false_count', 'true_count']
        self._data = data

    @property
    def data(self) -> pd.DataFrame:
        """A data frame with delay_ns, false_count, true_count columns."""
        return self._data

    @property
    def constant(self) -> float:
        """The t1 decay constant, or NaN if the curve fit fails."""

        def exp_decay(x, t1):
            return np.exp(-x / t1)

        xs = self._data['delay_ns']
        ts = self._data['true_count']
        fs = self._data['false_count']

        probs = ts / (fs + ts)

        # Find the point closest to probability of 1/e as the initial guess.
        guess_index = np.argmin(np.abs(probs - 1.0 / np.e))
        t1_guess = xs[guess_index]

        # Fit to exponential decay to find the t1 constant.
        try:
            popt, _ = optimize.curve_fit(exp_decay, xs, probs, p0=[t1_guess])
            t1 = popt[0]
            return t1
        except RuntimeError:
            warnings.warn("Optimal parameters could not be found for curve fit", RuntimeWarning)
            return np.nan

    def plot(
        self, ax: Optional[plt.Axes] = None, include_fit: bool = False, **plot_kwargs: Any
    ) -> plt.Axes:
        """Plots the excited state probability vs the amount of delay.

        Args:
            ax: the plt.Axes to plot on. If not given, a new figure is created,
                plotted on, and shown.
            include_fit: boolean to include exponential decay fit on graph
            **plot_kwargs: Arguments to be passed to 'plt.Axes.plot'.

        Returns:
            The plt.Axes containing the plot.
        """
        show_plot = not ax
        if show_plot:
            fig, ax = plt.subplots(1, 1, figsize=(8, 8))
        assert ax is not None
        ax.set_ylim(ymin=0, ymax=1)
        xs = self._data['delay_ns']
        ts = self._data['true_count']
        fs = self._data['false_count']
        ax.plot(xs, ts / (fs + ts), 'ro-', **plot_kwargs)

        if include_fit and not np.isnan(self.constant):
            ax.plot(xs, np.exp(-xs / self.constant), label='curve fit')
            # Fixed: attach the legend to the axes being drawn on rather than
            # the global "current" axes (plt.legend), which may be a different
            # axes when the caller supplies their own `ax`.
            ax.legend()

        ax.set_xlabel(r"Delay between initialization and measurement (nanoseconds)")
        ax.set_ylabel('Excited State Probability')
        ax.set_title('T1 Decay Experiment Data')
        if show_plot:
            fig.show()
        return ax

    def __str__(self) -> str:
        return f'T1DecayResult with data:\n{self.data}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.data.equals(other.data)

    def __ne__(self, other) -> bool:
        return not self == other

    def __repr__(self) -> str:
        return f'cirq.experiments.T1DecayResult(data={proper_repr(self.data)})'

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Text output in Jupyter."""
        if cycle:
            # There should never be a cycle. This is just in case.
            p.text('T1DecayResult(...)')
        else:
            p.text(str(self))
| [
"cirq.value.Duration",
"sympy.Symbol",
"numpy.abs",
"cirq.ops.X",
"pandas.crosstab",
"numpy.exp",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"cirq._compat.proper_repr",
"warnings.warn",
"cirq.ops.wait",
"cirq.ops.measure",
"matplotlib.pyplot.legend"
] | [((2040, 2065), 'cirq.value.Duration', 'value.Duration', (['min_delay'], {}), '(min_delay)\n', (2054, 2065), False, 'from cirq import circuits, ops, study, value, _import\n'), ((2086, 2111), 'cirq.value.Duration', 'value.Duration', (['max_delay'], {}), '(max_delay)\n', (2100, 2111), False, 'from cirq import circuits, ops, study, value, _import\n'), ((2349, 2373), 'sympy.Symbol', 'sympy.Symbol', (['"""delay_ns"""'], {}), "('delay_ns')\n", (2361, 2373), False, 'import sympy\n'), ((2831, 2876), 'pandas.crosstab', 'pd.crosstab', (['results.delay_ns', 'results.output'], {}), '(results.delay_ns, results.output)\n', (2842, 2876), True, 'import pandas as pd\n'), ((2575, 2587), 'cirq.ops.X', 'ops.X', (['qubit'], {}), '(qubit)\n', (2580, 2587), False, 'from cirq import circuits, ops, study, value, _import\n'), ((2597, 2623), 'cirq.ops.wait', 'ops.wait', (['qubit'], {'nanos': 'var'}), '(qubit, nanos=var)\n', (2605, 2623), False, 'from cirq import circuits, ops, study, value, _import\n'), ((2633, 2665), 'cirq.ops.measure', 'ops.measure', (['qubit'], {'key': '"""output"""'}), "(qubit, key='output')\n", (2644, 2665), False, 'from cirq import circuits, ops, study, value, _import\n'), ((3911, 3926), 'numpy.exp', 'np.exp', (['(-x / t1)'], {}), '(-x / t1)\n', (3917, 3926), True, 'import numpy as np\n'), ((4160, 4186), 'numpy.abs', 'np.abs', (['(probs - 1.0 / np.e)'], {}), '(probs - 1.0 / np.e)\n', (4166, 4186), True, 'import numpy as np\n'), ((5195, 5229), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 8)'}), '(1, 1, figsize=(8, 8))\n', (5207, 5229), True, 'from matplotlib import pyplot as plt\n'), ((5610, 5622), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5620, 5622), True, 'from matplotlib import pyplot as plt\n'), ((4462, 4550), 'warnings.warn', 'warnings.warn', (['"""Optimal parameters could not be found for curve fit"""', 'RuntimeWarning'], {}), "('Optimal parameters could not be found for curve fit',\n RuntimeWarning)\n", 
(4475, 4550), False, 'import warnings\n'), ((5501, 5524), 'numpy.isnan', 'np.isnan', (['self.constant'], {}), '(self.constant)\n', (5509, 5524), True, 'import numpy as np\n'), ((5550, 5577), 'numpy.exp', 'np.exp', (['(-xs / self.constant)'], {}), '(-xs / self.constant)\n', (5556, 5577), True, 'import numpy as np\n'), ((6278, 6300), 'cirq._compat.proper_repr', 'proper_repr', (['self.data'], {}), '(self.data)\n', (6289, 6300), False, 'from cirq._compat import proper_repr\n')] |
import numpy as np
import torch
import torch.optim as optim
from collections import OrderedDict
import copy
import lifelong_rl.torch.pytorch_util as ptu
from lifelong_rl.envs.env_utils import get_dim
from lifelong_rl.util.eval_util import create_stats_ordered_dict
from lifelong_rl.core.rl_algorithms.torch_rl_algorithm import TorchTrainer
import lifelong_rl.samplers.utils.path_functions as path_functions
import lifelong_rl.util.pythonplusplus as ppp
class PGTrainer(TorchTrainer):
    """
    Encapsulating base trainer for policy gradient methods trained from trajectories.
    By itself, trains using vanilla policy gradient with some tricks (GAE, early stopping, etc).
    """
    def __init__(
            self,
            env,                            # Associated environment
            policy,                         # Associated policy
            value_func,                     # Associated value function V(s)
            discount=0.99,                  # Discount factor
            gae_lambda=0.95,                # Lambda to use for GAE for value estimation
            policy_lr=1e-3,                 # Learning rate for policy
            value_lr=1e-3,                  # Learning rate for value function
            target_kl=0.01,                 # Can do early termination if KL is reached
            entropy_coeff=0.,               # Coefficient of entropy bonus
            num_epochs=10,                  # Number of epochs for training per train call
            num_policy_epochs=None,         # Number of epochs for policy (can be < num_epochs)
            policy_batch_size=1024,         # Batch size for policy training
            value_batch_size=1024,          # Batch size for value function training
            normalize_advantages=True,      # Optionally, can normalize advantages
            input_normalization=True,       # Whether or not to normalize the inputs to policy & value
            max_grad_norm=0.5,              # Gradient norm clipping
            action_dim=None,                # Override for action dimension (default: from env)
    ):
        super().__init__()
        self.env = env
        self.obs_dim = get_dim(self.env.observation_space)
        # Goal-conditioned envs concatenate the desired goal onto the observation.
        if hasattr(self.env, "use_desired_goal") and self.env.use_desired_goal:
            self.obs_dim += self.env.observation_space["desired_goal"].shape[0]
        self.action_dim = self.env.action_space.shape[0] if action_dim is None else action_dim
        self.policy = policy
        self.value_func = value_func
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.target_kl = target_kl
        self.entropy_coeff = entropy_coeff
        self.num_epochs = num_epochs
        self.num_policy_epochs = num_policy_epochs if num_policy_epochs is not None else num_epochs
        self.policy_batch_size = policy_batch_size
        self.value_batch_size = value_batch_size
        self.normalize_advantages = normalize_advantages
        self.input_normalization = input_normalization
        self.max_grad_norm = max_grad_norm
        # policy_lr=None lets a subclass manage its own policy optimizer.
        if policy_lr is not None:
            self.policy_optim = optim.Adam(self.policy.parameters(), lr=policy_lr)
        self.value_optim = optim.Adam(self.value_func.parameters(), lr=value_lr)
        # Running reward scale used to normalize rewards; refreshed in fit_input_stats.
        self._reward_std = 1
        self._need_to_update_eval_statistics = True
        self.eval_statistics = OrderedDict()
    def train_from_paths(self, paths, mode=None):
        """
        Train policy and value function from a batch of sampled trajectories.
        Path preprocessing; have to copy so we don't modify when paths are used elsewhere
        """
        paths = copy.deepcopy(paths)
        for path in paths:
            # Other places like to have an extra dimension so that all arrays are 2D
            path['rewards'] = np.squeeze(path['rewards'], axis=-1)
            path['terminals'] = np.squeeze(path['terminals'], axis=-1)
            if mode is None:
                # Reward normalization; divide by std of reward in replay buffer
                path['rewards'] = np.clip(path['rewards'] / (self._reward_std + 1e-3), -10, 10)
        obs, actions = [], []
        for path in paths:
            obs.append(path['observations'])
            actions.append(path['actions'])
        obs = np.concatenate(obs, axis=0)
        actions = np.concatenate(actions, axis=0)
        obs_tensor, act_tensor = ptu.from_numpy(obs), ptu.from_numpy(actions)
        """
        Policy training loop
        """
        # Snapshot the pre-update policy for KL-based early stopping.
        old_policy = copy.deepcopy(self.policy)
        with torch.no_grad():
            log_probs_old = old_policy.get_log_probs(obs_tensor, act_tensor).squeeze(dim=-1)
        rem_value_epochs = self.num_epochs
        num_p = 0
        num_v = 0
        for epoch in range(self.num_policy_epochs):
            """
            Recompute advantages at the beginning of each epoch. This allows for advantages
            to utilize the latest value function.
            Note: while this is not present in most implementations, it is recommended
            by Andrychowicz et al. 2020.
            """
            path_functions.calculate_baselines(paths, self.value_func)
            path_functions.calculate_returns(paths, self.discount)
            path_functions.calculate_advantages(
                paths, self.discount, self.gae_lambda, self.normalize_advantages,
            )
            advantages, returns, baselines = [], [], []
            for path in paths:
                advantages = np.append(advantages, path['advantages'])
                returns = np.append(returns, path['returns'])
            # Record the value loss before any updates, for diagnostics.
            if epoch == 0 and self._need_to_update_eval_statistics:
                with torch.no_grad():
                    values = torch.squeeze(self.value_func(obs_tensor), dim=-1)
                    values_np = ptu.get_numpy(values)
                first_val_loss = ((returns - values_np) ** 2).mean()
            # Keep pre-epoch parameters so we can roll back if KL diverges.
            old_params = self.policy.get_param_values()
            num_policy_steps = len(advantages) // self.policy_batch_size
            for _ in range(num_policy_steps):
                if num_policy_steps == 1:
                    batch = dict(
                        observations=obs,
                        actions=actions,
                        advantages=advantages,
                    )
                else:
                    batch = ppp.sample_batch(
                        self.policy_batch_size,
                        observations=obs,
                        actions=actions,
                        advantages=advantages,
                    )
                num_p += 1
                policy_loss, kl = self.train_policy(batch, old_policy)
            # Measure KL of the updated policy against the pre-update snapshot.
            with torch.no_grad():
                log_probs = self.policy.get_log_probs(obs_tensor, act_tensor).squeeze(dim=-1)
                kl = (log_probs_old - log_probs).mean()
            # Originally this compared kl directly (not abs(kl)); abs() was added
            # so that drifting too far in the negative direction also stops training.
            if (self.target_kl is not None and abs(kl) > 1.5 * self.target_kl) or (kl != kl):
                if epoch > 0 or kl != kl:  # nan check
                    self.policy.set_param_values(old_params)
                break
            num_value_steps = len(advantages) // self.value_batch_size
            for i in range(num_value_steps):
                batch = ppp.sample_batch(
                    self.value_batch_size,
                    observations=obs,
                    targets=returns,
                )
                value_loss = self.train_value(batch)
                num_v += 1
            rem_value_epochs -= 1
        # Ensure the value function is always updated for the maximum number
        # of epochs, regardless of if the policy wants to terminate early.
        for _ in range(rem_value_epochs):
            num_value_steps = len(advantages) // self.value_batch_size
            for i in range(num_value_steps):
                batch = ppp.sample_batch(
                    self.value_batch_size,
                    observations=obs,
                    targets=returns,
                )
                value_loss = self.train_value(batch)
        if self._need_to_update_eval_statistics:
            with torch.no_grad():
                _, _, _, log_pi, *_ = self.policy(obs_tensor, return_log_prob=True)
                values = torch.squeeze(self.value_func(obs_tensor), dim=-1)
                values_np = ptu.get_numpy(values)
            errors = returns - values_np
            explained_variance = 1 - (np.var(errors) / np.var(returns))
            value_loss = errors ** 2
            self.eval_statistics['Num Epochs'] = epoch + 1
            # NOTE(review): debug prints left in; consider routing through a logger.
            print("Policy Loss:", ptu.get_numpy(policy_loss).mean())
            print(ptu.get_numpy(policy_loss))
            self.eval_statistics['Policy Loss'] = ptu.get_numpy(policy_loss).mean()
            self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl).mean()
            self.eval_statistics.update(create_stats_ordered_dict(
                'Log Pis',
                ptu.get_numpy(log_pi),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Advantages',
                advantages,
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Returns',
                returns,
            ))
            self.eval_statistics['Value Loss'] = value_loss.mean()
            self.eval_statistics['First Value Loss'] = first_val_loss
            self.eval_statistics['Value Explained Variance'] = explained_variance
            self.eval_statistics.update(create_stats_ordered_dict(
                'Values',
                ptu.get_numpy(values),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Value Squared Errors',
                value_loss,
            ))
    def fit_input_stats(self, replay_buffer):
        """Refresh input normalizers and the reward scale from the replay buffer."""
        if self.input_normalization:
            transitions = replay_buffer.get_transitions()
            obs = transitions[:,:self.obs_dim]
            self.policy.fit_input_stats(obs)
            self.value_func.fit_input_stats(obs)
            # Reward column assumed at index -(obs_dim+2), i.e. just before
            # next_obs and terminal — TODO confirm buffer layout.
            self._reward_std = transitions[:,-(self.obs_dim+2)].std()
            # Fall back to the max reward magnitude when rewards are near-constant.
            if self._reward_std < 0.01:
                self._reward_std = transitions[:,-(self.obs_dim+2)].max()
    def policy_objective(self, obs, actions, advantages, old_policy):
        """Vanilla policy-gradient objective and KL vs. the old policy."""
        log_probs = torch.squeeze(self.policy.get_log_probs(obs, actions), dim=-1)
        log_probs_old = torch.squeeze(old_policy.get_log_probs(obs, actions), dim=-1)
        objective = (log_probs * advantages).mean()
        kl = (log_probs_old - log_probs).mean()
        return objective, kl
    def train_policy(self, batch, old_policy):
        """One gradient step on the policy; returns (loss, kl)."""
        obs = ptu.from_numpy(batch['observations'])
        actions = ptu.from_numpy(batch['actions'])
        advantages = ptu.from_numpy(batch['advantages'])
        objective, kl = self.policy_objective(obs, actions, advantages, old_policy)
        policy_loss = -objective
        self.policy_optim.zero_grad()
        policy_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
        self.policy_optim.step()
        return policy_loss, kl
    def train_value(self, batch):
        """One gradient step on the value function (MSE to Monte-Carlo returns)."""
        obs = ptu.from_numpy(batch['observations'])
        targets = ptu.from_numpy(batch['targets'])
        value_preds = torch.squeeze(self.value_func(obs), dim=-1)
        value_loss = 0.5 * ((value_preds - targets) ** 2).mean()
        self.value_optim.zero_grad()
        value_loss.backward()
        self.value_optim.step()
        return value_loss
    def get_diagnostics(self):
        """Latest training statistics, keyed by metric name."""
        return self.eval_statistics
    def end_epoch(self, epoch):
        # Re-enable statistics collection for the next training epoch.
        self._need_to_update_eval_statistics = True
    @property
    def networks(self):
        """All trainable networks (used e.g. for moving to device / train mode)."""
        return [
            self.policy,
            self.value_func,
        ]
    def get_snapshot(self):
        """Objects to persist when checkpointing."""
        return dict(
            policy=self.policy,
            value_func=self.value_func,
        )
| [
"numpy.clip",
"lifelong_rl.samplers.utils.path_functions.calculate_advantages",
"collections.OrderedDict",
"lifelong_rl.torch.pytorch_util.get_numpy",
"lifelong_rl.samplers.utils.path_functions.calculate_baselines",
"lifelong_rl.torch.pytorch_util.from_numpy",
"lifelong_rl.samplers.utils.path_functions.... | [((2050, 2085), 'lifelong_rl.envs.env_utils.get_dim', 'get_dim', (['self.env.observation_space'], {}), '(self.env.observation_space)\n', (2057, 2085), False, 'from lifelong_rl.envs.env_utils import get_dim\n'), ((3261, 3274), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3272, 3274), False, 'from collections import OrderedDict\n'), ((3458, 3478), 'copy.deepcopy', 'copy.deepcopy', (['paths'], {}), '(paths)\n', (3471, 3478), False, 'import copy\n'), ((4095, 4122), 'numpy.concatenate', 'np.concatenate', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (4109, 4122), True, 'import numpy as np\n'), ((4141, 4172), 'numpy.concatenate', 'np.concatenate', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (4155, 4172), True, 'import numpy as np\n'), ((4327, 4353), 'copy.deepcopy', 'copy.deepcopy', (['self.policy'], {}), '(self.policy)\n', (4340, 4353), False, 'import copy\n'), ((10552, 10589), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (["batch['observations']"], {}), "(batch['observations'])\n", (10566, 10589), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((10608, 10640), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (["batch['actions']"], {}), "(batch['actions'])\n", (10622, 10640), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((10662, 10697), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (["batch['advantages']"], {}), "(batch['advantages'])\n", (10676, 10697), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((11084, 11121), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (["batch['observations']"], {}), "(batch['observations'])\n", (11098, 11121), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((11140, 11172), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (["batch['targets']"], {}), "(batch['targets'])\n", (11154, 11172), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), 
((3621, 3657), 'numpy.squeeze', 'np.squeeze', (["path['rewards']"], {'axis': '(-1)'}), "(path['rewards'], axis=-1)\n", (3631, 3657), True, 'import numpy as np\n'), ((3690, 3728), 'numpy.squeeze', 'np.squeeze', (["path['terminals']"], {'axis': '(-1)'}), "(path['terminals'], axis=-1)\n", (3700, 3728), True, 'import numpy as np\n'), ((4207, 4226), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['obs'], {}), '(obs)\n', (4221, 4226), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((4228, 4251), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['actions'], {}), '(actions)\n', (4242, 4251), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((4368, 4383), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4381, 4383), False, 'import torch\n'), ((4937, 4995), 'lifelong_rl.samplers.utils.path_functions.calculate_baselines', 'path_functions.calculate_baselines', (['paths', 'self.value_func'], {}), '(paths, self.value_func)\n', (4971, 4995), True, 'import lifelong_rl.samplers.utils.path_functions as path_functions\n'), ((5008, 5062), 'lifelong_rl.samplers.utils.path_functions.calculate_returns', 'path_functions.calculate_returns', (['paths', 'self.discount'], {}), '(paths, self.discount)\n', (5040, 5062), True, 'import lifelong_rl.samplers.utils.path_functions as path_functions\n'), ((5075, 5180), 'lifelong_rl.samplers.utils.path_functions.calculate_advantages', 'path_functions.calculate_advantages', (['paths', 'self.discount', 'self.gae_lambda', 'self.normalize_advantages'], {}), '(paths, self.discount, self.gae_lambda,\n self.normalize_advantages)\n', (5110, 5180), True, 'import lifelong_rl.samplers.utils.path_functions as path_functions\n'), ((3873, 3935), 'numpy.clip', 'np.clip', (["(path['rewards'] / (self._reward_std + 0.001))", '(-10)', '(10)'], {}), "(path['rewards'] / (self._reward_std + 0.001), -10, 10)\n", (3880, 3935), True, 'import numpy as np\n'), ((5325, 5366), 'numpy.append', 'np.append', (['advantages', 
"path['advantages']"], {}), "(advantages, path['advantages'])\n", (5334, 5366), True, 'import numpy as np\n'), ((5393, 5428), 'numpy.append', 'np.append', (['returns', "path['returns']"], {}), "(returns, path['returns'])\n", (5402, 5428), True, 'import numpy as np\n'), ((6529, 6544), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6542, 6544), False, 'import torch\n'), ((7122, 7196), 'lifelong_rl.util.pythonplusplus.sample_batch', 'ppp.sample_batch', (['self.value_batch_size'], {'observations': 'obs', 'targets': 'returns'}), '(self.value_batch_size, observations=obs, targets=returns)\n', (7138, 7196), True, 'import lifelong_rl.util.pythonplusplus as ppp\n'), ((7725, 7799), 'lifelong_rl.util.pythonplusplus.sample_batch', 'ppp.sample_batch', (['self.value_batch_size'], {'observations': 'obs', 'targets': 'returns'}), '(self.value_batch_size, observations=obs, targets=returns)\n', (7741, 7799), True, 'import lifelong_rl.util.pythonplusplus as ppp\n'), ((7999, 8014), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8012, 8014), False, 'import torch\n'), ((8204, 8225), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['values'], {}), '(values)\n', (8217, 8225), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((8525, 8551), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (8538, 8551), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((8903, 8954), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Advantages"""', 'advantages'], {}), "('Advantages', advantages)\n", (8928, 8954), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((9043, 9088), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Returns"""', 'returns'], {}), "('Returns', returns)\n", (9068, 9088), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((9544, 9605), 
'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Value Squared Errors"""', 'value_loss'], {}), "('Value Squared Errors', value_loss)\n", (9569, 9605), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((5519, 5534), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5532, 5534), False, 'import torch\n'), ((5648, 5669), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['values'], {}), '(values)\n', (5661, 5669), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((6194, 6296), 'lifelong_rl.util.pythonplusplus.sample_batch', 'ppp.sample_batch', (['self.policy_batch_size'], {'observations': 'obs', 'actions': 'actions', 'advantages': 'advantages'}), '(self.policy_batch_size, observations=obs, actions=actions,\n advantages=advantages)\n', (6210, 6296), True, 'import lifelong_rl.util.pythonplusplus as ppp\n'), ((8306, 8320), 'numpy.var', 'np.var', (['errors'], {}), '(errors)\n', (8312, 8320), True, 'import numpy as np\n'), ((8323, 8338), 'numpy.var', 'np.var', (['returns'], {}), '(returns)\n', (8329, 8338), True, 'import numpy as np\n'), ((8604, 8630), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (8617, 8630), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((8690, 8707), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['kl'], {}), '(kl)\n', (8703, 8707), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((8825, 8846), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (8838, 8846), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((9466, 9487), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['values'], {}), '(values)\n', (9479, 9487), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((8472, 8498), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (8485, 8498), True, 
'import lifelong_rl.torch.pytorch_util as ptu\n')] |
# -*- coding: utf-8 -*-
'''
Author: <NAME> <<EMAIL>>
Date: 2012-08-25
This example file implements 5 variations of the negative binomial regression
model for count data: NB-P, NB-1, NB-2, geometric and left-truncated.
The NBin class inherits from the statsmodels GenericLikelihoodModel class
which provides automatic numerical differentiation for the score and hessian.
NB-1, NB-2 and geometric are implemented as special cases of the NB-P model
described in Greene (2008) Functional forms for the negative binomial model for
count data. Economics Letters, v99n3.
Tests are included to check how NB-1, NB-2 and geometric coefficient estimates
compare to equivalent models in R. Results usually agree up to the 4th digit.
The NB-P and left-truncated model results have not been compared to other
implementations. Note that NB-P appears to only have been implemented in the
LIMDEP software.
'''
import numpy as np
from numpy.testing import assert_almost_equal
from scipy.special import digamma
from scipy.stats import nbinom
import pandas
import patsy
from statsmodels.compat.python import urlopen
from statsmodels.base.model import GenericLikelihoodModel
from statsmodels.base.model import GenericLikelihoodModelResults
#### Negative Binomial Log-likelihoods ####
def _ll_nbp(y, X, beta, alph, Q):
    r'''
    Negative Binomial log-likelihood of type P.

    With heterogeneity parameter :math:`\alpha` (notation as in Greene, 2008):

    .. math::

        \lambda_i = exp(X\beta)\\
        \theta = 1 / \alpha \\
        g_i = \theta \lambda_i^Q \\
        r_i = \theta / (\theta+\lambda_i) \\
        ln \mathcal{L}_i = ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)

    References:
    Greene, W. 2008. "Functional forms for the negative binomial model
        for count data". Economics Letters. Volume 99, Number 3, pp.585-590.
    Hilbe, J. 2011. "Negative binomial regression". Cambridge University Press.
    '''
    lam = np.exp(np.dot(X, beta))
    # scipy's nbinom is parameterized by (size, prob); size = theta * lambda^Q.
    size = 1/alph*lam**Q
    prob = size/(size+lam)
    return nbinom.logpmf(y, size, prob)
def _ll_nb1(y, X, beta, alph):
    '''Negative Binomial regression, type-1 likelihood (NB-P with Q fixed at 1).'''
    return _ll_nbp(y, X, beta, alph, Q=1)
def _ll_nb2(y, X, beta, alph):
    '''Negative Binomial regression, type-2 likelihood (NB-P with Q fixed at 0).'''
    return _ll_nbp(y, X, beta, alph, Q=0)
def _ll_geom(y, X, beta):
    '''Geometric regression (the NB-2 likelihood with dispersion alpha fixed at 1).'''
    return _ll_nbp(y, X, beta, alph=1, Q=0)
def _ll_nbt(y, X, beta, alph, C=0):
    r'''
    Left-truncated Negative Binomial (type 2) log-likelihood.

    Truncated densities for count models (Cameron & Trivedi, 2005, 680):

    .. math::

        f(y|\beta, y \geq C+1) = \frac{f(y|\beta)}{1-F(C|\beta)}
    '''
    Q = 0
    mu = np.exp(np.dot(X, beta))
    size = 1/alph*mu**Q
    prob = size/(size+mu)
    # Untruncated NB-2 log-density, renormalized by the mass above the cut-point C.
    untruncated = nbinom.logpmf(y, size, prob)
    renorm = np.log(1 - nbinom.cdf(C, size, prob))
    return untruncated - renorm
#### Model Classes ####
class NBin(GenericLikelihoodModel):
    '''
    Negative Binomial regression
    Parameters
    ----------
    endog : array-like
        1-d array of the response variable.
    exog : array-like
        `exog` is an n x p array where n is the number of observations and p
        is the number of regressors including the intercept if one is
        included in the data.
    ll_type: string
        log-likelihood type
        `nb2`: Negative Binomial type-2 (most common)
        `nb1`: Negative Binomial type-1
        `nbp`: Negative Binomial type-P (Greene, 2008)
        `nbt`: Left-truncated Negative Binomial (type-2)
        `geom`: Geometric regression model
    C: integer
        Cut-point for `nbt` model
    '''
    def __init__(self, endog, exog, ll_type='nb2', C=0, **kwds):
        """Store data, validate ll_type, and choose starting values plus the
        matching log-likelihood function."""
        self.exog = np.array(exog)
        self.endog = np.array(endog)
        self.C = C
        super(NBin, self).__init__(endog, exog, **kwds)
        # Check user input
        # NOTE(review): ValueError would be the conventional exception type here.
        if ll_type not in ['nb2', 'nb1', 'nbp', 'nbt', 'geom']:
            raise NameError('Valid ll_type are: nb2, nb1, nbp, nbt, geom')
        self.ll_type = ll_type
        # Starting values (assumes first column of exog is constant)
        if ll_type == 'geom':
            # Geometric model has no free dispersion parameter.
            self.start_params_default = np.zeros(self.exog.shape[1])
        elif ll_type == 'nbp':
            # Greene recommends starting NB-P at NB-2
            start_mod = NBin(endog, exog, 'nb2')
            start_res = start_mod.fit(disp=False)
            self.start_params_default = np.append(start_res.params, 0)
        else:
            # Betas start at zero; dispersion alpha starts at 0.5.
            self.start_params_default = np.append(np.zeros(self.exog.shape[1]), .5)
        # Intercept starts at log of the mean count.
        self.start_params_default[0] = np.log(self.endog.mean())
        # Define loglik based on ll_type argument
        if ll_type == 'nb1':
            self.ll_func = _ll_nb1
        elif ll_type == 'nb2':
            self.ll_func = _ll_nb2
        elif ll_type == 'geom':
            self.ll_func = _ll_geom
        elif ll_type == 'nbp':
            self.ll_func = _ll_nbp
        elif ll_type == 'nbt':
            self.ll_func = _ll_nbt
    def nloglikeobs(self, params):
        """Negative log-likelihood per observation (minimized by the fitter).

        Parameter layout: betas first, then (for NB-P) Q, then alpha last.
        """
        alph = params[-1]
        beta = params[:self.exog.shape[1]]
        if self.ll_type == 'geom':
            return -self.ll_func(self.endog, self.exog, beta)
        elif self.ll_type == 'nbt':
            return -self.ll_func(self.endog, self.exog, beta, alph, self.C)
        elif self.ll_type == 'nbp':
            Q = params[-2]
            return -self.ll_func(self.endog, self.exog, beta, alph, Q)
        else:
            return -self.ll_func(self.endog, self.exog, beta, alph)
    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        """Fit the model by maximum likelihood; wraps the result in CountResults."""
        if start_params is None:
            countfit = super(NBin, self).fit(start_params=self.start_params_default,
                                             maxiter=maxiter, maxfun=maxfun, **kwds)
        else:
            countfit = super(NBin, self).fit(start_params=start_params,
                                             maxiter=maxiter, maxfun=maxfun, **kwds)
        countfit = CountResults(self, countfit)
        return countfit
class CountResults(GenericLikelihoodModelResults):
    """Results wrapper for NBin fits that adds a text summary table."""
    def __init__(self, model, mlefit):
        # Adopt all attributes of the generic MLE fit result.
        self.model = model
        self.__dict__.update(mlefit.__dict__)
    def summary(self, yname=None, xname=None, title=None, alpha=.05,
                yname_list=None):
        """Build and return a statsmodels Summary of the fit.

        alpha is the significance level for the parameter confidence intervals.
        """
        top_left = [('Dep. Variable:', None),
                    ('Model:', [self.model.__class__.__name__]),
                    ('Method:', ['MLE']),
                    ('Date:', None),
                    ('Time:', None),
                    ('Converged:', ["%s" % self.mle_retvals['converged']])]
        top_right = [('No. Observations:', None),
                     ('Log-Likelihood:', None),
                     ]
        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Regression Results"
        #boiler plate
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        # for top of table
        smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
                             yname=yname, xname=xname, title=title)
        # for parameters, etc
        smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
                              use_t=True)
        return smry
#### Score function for NB-P ####
def _score_nbp(y, X, beta, thet, Q):
    r'''
    Negative Binomial Score -- type P likelihood from Greene (2007)
    .. math::

        \lambda_i = exp(X\beta)\\
        g_i = \theta \lambda_i^Q \\
        w_i = g_i/(g_i + \lambda_i) \\
        r_i = \theta / (\theta+\lambda_i) \\
        A_i = \left [ \Psi(y_i+g_i) - \Psi(g_i) + ln w_i \right ] \\
        B_i = \left [ g_i (1-w_i) - y_iw_i \right ] \\
        \partial ln \mathcal{L}_i / \partial
        \begin{pmatrix} \lambda_i \\ \theta \\ Q \end{pmatrix}=
        [A_i+B_i]
        \begin{pmatrix} Q/\lambda_i \\ 1/\theta \\ ln(\lambda_i) \end{pmatrix}
        -B_i
        \begin{pmatrix} 1/\lambda_i\\ 0 \\ 0 \end{pmatrix} \\
        \frac{\partial \lambda}{\partial \beta} = \lambda_i \mathbf{x}_i \\
        \frac{\partial \mathcal{L}_i}{\partial \beta} =
        \left (\frac{\partial\mathcal{L}_i}{\partial \lambda_i} \right )
        \frac{\partial \lambda_i}{\partial \beta}

    Returns the score vector ordered as (beta..., theta, Q).
    '''
    # Conditional mean lambda_i and the intermediate quantities from the docstring.
    lamb = np.exp(np.dot(X, beta))
    g = thet * lamb**Q
    w = g / (g + lamb)
    r = thet / (thet+lamb)
    A = digamma(y+g) - digamma(g) + np.log(w)
    B = g*(1-w) - y*w
    # Per-observation derivatives w.r.t. lambda, theta and Q.
    dl = (A+B) * Q/lamb - B * 1/lamb
    dt = (A+B) * 1/thet
    dq = (A+B) * np.log(lamb)
    # Chain rule: d lambda / d beta = lambda_i * x_i.
    db = X * (dl * lamb)[:,np.newaxis]
    sc = np.array([dt.sum(), dq.sum()])
    sc = np.concatenate([db.sum(axis=0), sc])
    return sc
#### Tests ####
# NOTE: these downloads require network access; the fits below are checked
# against R (MASS / COUNT) reference output quoted in the comment blocks.
medpar = pandas.read_csv(urlopen('https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv'))
mdvis = pandas.read_csv(urlopen('https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/mdvis.csv'))
# NB-2
'''
# R v2.15.1
library(MASS)
library(COUNT)
data(medpar)
f <- los~factor(type)+hmo+white
mod <- glm.nb(f, medpar)
summary(mod)
Call:
glm.nb(formula = f, data = medpar, init.theta = 2.243376203,
link = log)
Deviance Residuals:
Min 1Q Median 3Q Max
-2.4671 -0.9090 -0.2693 0.4320 3.8668
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) 2.31028 0.06745 34.253 < 2e-16 ***
factor(type)2 0.22125 0.05046 4.385 1.16e-05 ***
factor(type)3 0.70616 0.07600 9.292 < 2e-16 ***
hmo -0.06796 0.05321 -1.277 0.202
white -0.12907 0.06836 -1.888 0.059 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
(Dispersion parameter for Negative Binomial(2.2434) family taken to be 1)
Null deviance: 1691.1 on 1494 degrees of freedom
Residual deviance: 1568.1 on 1490 degrees of freedom
AIC: 9607
Number of Fisher Scoring iterations: 1
Theta: 2.2434
Std. Err.: 0.0997
2 x log-likelihood: -9594.9530
'''
def test_nb2():
    """Check NB-2 estimates against R's MASS::glm.nb fit on the medpar data."""
    y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
    y = np.array(y)[:,0]
    expected = [2.31027893349935, 0.221248978197356, 0.706158824346228,
                -0.067955221930748, -0.129065442248951, 0.4457567]
    fitted = NBin(y, X, 'nb2').fit(maxiter=10000, maxfun=5000)
    assert_almost_equal(fitted.params, expected, decimal=2)
# NB-1
'''
# R v2.15.1
# COUNT v1.2.3
library(COUNT)
data(medpar)
f <- los~factor(type)+hmo+white
ml.nb1(f, medpar)
Estimate SE Z LCL UCL
(Intercept) 2.34918407 0.06023641 38.9994023 2.23112070 2.46724744
factor(type)2 0.16175471 0.04585569 3.5274735 0.07187757 0.25163186
factor(type)3 0.41879257 0.06553258 6.3906006 0.29034871 0.54723643
hmo -0.04533566 0.05004714 -0.9058592 -0.14342805 0.05275673
white -0.12951295 0.06071130 -2.1332593 -0.24850710 -0.01051880
alpha 4.57898241 0.22015968 20.7984603 4.14746943 5.01049539
'''
#def test_nb1():
#y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
#y = np.array(y)[:,0]
## TODO: Test fails with some of the other optimization methods
#nb1 = NBin(y,X,'nb1').fit(method='ncg', maxiter=10000, maxfun=5000)
#assert_almost_equal(nb1.params,
#[2.34918407014186, 0.161754714412848, 0.418792569970658,
# -0.0453356614650342, -0.129512952033423, 4.57898241219275],
#decimal=2)
# NB-Geometric
'''
MASS v7.3-20
R v2.15.1
library(MASS)
data(medpar)
f <- los~factor(type)+hmo+white
mod <- glm(f, family=negative.binomial(1), data=medpar)
summary(mod)
Call:
glm(formula = f, family = negative.binomial(1), data = medpar)
Deviance Residuals:
Min 1Q Median 3Q Max
-1.7942 -0.6545 -0.1896 0.3044 2.6844
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 2.30849 0.07071 32.649 < 2e-16 ***
factor(type)2 0.22121 0.05283 4.187 2.99e-05 ***
factor(type)3 0.70599 0.08092 8.724 < 2e-16 ***
hmo -0.06779 0.05521 -1.228 0.2197
white -0.12709 0.07169 -1.773 0.0765 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
(Dispersion parameter for Negative Binomial(1) family taken to be 0.5409721)
Null deviance: 872.29 on 1494 degrees of freedom
Residual deviance: 811.95 on 1490 degrees of freedom
AIC: 9927.3
Number of Fisher Scoring iterations: 5
'''
#def test_geom():
#y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
#y = np.array(y)[:,0]
## TODO: remove alph from geom params
#geom = NBin(y,X,'geom').fit(maxiter=10000, maxfun=5000)
#assert_almost_equal(geom.params,
#[2.3084850946241, 0.221206159108742, 0.705986369841159,
# -0.0677871843613577, -0.127088772164963],
#decimal=4)
test_nb2()
| [
"scipy.special.digamma",
"statsmodels.compat.python.urlopen",
"numpy.log",
"numpy.append",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"scipy.stats.nbinom.logpmf",
"numpy.array",
"numpy.zeros",
"scipy.stats.nbinom.cdf",
"patsy.dmatrices",
"statsmodels.iolib.summary.Summary"
] | [((2112, 2140), 'scipy.stats.nbinom.logpmf', 'nbinom.logpmf', (['y', 'size', 'prob'], {}), '(y, size, prob)\n', (2125, 2140), False, 'from scipy.stats import nbinom\n'), ((8878, 8982), 'statsmodels.compat.python.urlopen', 'urlopen', (['"""https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv'\n )\n", (8885, 8982), False, 'from statsmodels.compat.python import urlopen\n'), ((8998, 9101), 'statsmodels.compat.python.urlopen', 'urlopen', (['"""https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/mdvis.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/mdvis.csv'\n )\n", (9005, 9101), False, 'from statsmodels.compat.python import urlopen\n'), ((10185, 10239), 'patsy.dmatrices', 'patsy.dmatrices', (['"""los ~ C(type) + hmo + white"""', 'medpar'], {}), "('los ~ C(type) + hmo + white', medpar)\n", (10200, 10239), False, 'import patsy\n'), ((10327, 10487), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nb2.params', '[2.31027893349935, 0.221248978197356, 0.706158824346228, -0.067955221930748,\n -0.129065442248951, 0.4457567]'], {'decimal': '(2)'}), '(nb2.params, [2.31027893349935, 0.221248978197356, \n 0.706158824346228, -0.067955221930748, -0.129065442248951, 0.4457567],\n decimal=2)\n', (10346, 10487), False, 'from numpy.testing import assert_almost_equal\n'), ((2036, 2051), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2042, 2051), True, 'import numpy as np\n'), ((2832, 2847), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2838, 2847), True, 'import numpy as np\n'), ((2908, 2936), 'scipy.stats.nbinom.logpmf', 'nbinom.logpmf', (['y', 'size', 'prob'], {}), '(y, size, prob)\n', (2921, 2936), False, 'from scipy.stats import nbinom\n'), ((3830, 3844), 'numpy.array', 'np.array', (['exog'], {}), '(exog)\n', (3838, 3844), True, 'import numpy 
as np\n'), ((3866, 3881), 'numpy.array', 'np.array', (['endog'], {}), '(endog)\n', (3874, 3881), True, 'import numpy as np\n'), ((7065, 7074), 'statsmodels.iolib.summary.Summary', 'Summary', ([], {}), '()\n', (7072, 7074), False, 'from statsmodels.iolib.summary import Summary\n'), ((8447, 8462), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (8453, 8462), True, 'import numpy as np\n'), ((8573, 8582), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (8579, 8582), True, 'import numpy as np\n'), ((8683, 8695), 'numpy.log', 'np.log', (['lamb'], {}), '(lamb)\n', (8689, 8695), True, 'import numpy as np\n'), ((10248, 10259), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (10256, 10259), True, 'import numpy as np\n'), ((4294, 4322), 'numpy.zeros', 'np.zeros', (['self.exog.shape[1]'], {}), '(self.exog.shape[1])\n', (4302, 4322), True, 'import numpy as np\n'), ((8545, 8559), 'scipy.special.digamma', 'digamma', (['(y + g)'], {}), '(y + g)\n', (8552, 8559), False, 'from scipy.special import digamma\n'), ((8560, 8570), 'scipy.special.digamma', 'digamma', (['g'], {}), '(g)\n', (8567, 8570), False, 'from scipy.special import digamma\n'), ((2950, 2975), 'scipy.stats.nbinom.cdf', 'nbinom.cdf', (['C', 'size', 'prob'], {}), '(C, size, prob)\n', (2960, 2975), False, 'from scipy.stats import nbinom\n'), ((4547, 4577), 'numpy.append', 'np.append', (['start_res.params', '(0)'], {}), '(start_res.params, 0)\n', (4556, 4577), True, 'import numpy as np\n'), ((4642, 4670), 'numpy.zeros', 'np.zeros', (['self.exog.shape[1]'], {}), '(self.exog.shape[1])\n', (4650, 4670), True, 'import numpy as np\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.memlet import Memlet
import dace.libraries.mpi as mpi
import numpy as np
import pytest
###############################################################################
def make_sdfg(dtype):
    """Build an SDFG with a single MPI Scatter library node.

    The graph scatters an ``n * p`` element input buffer from rank ``root``
    into an ``n``-element output buffer on every rank.
    """
    n = dace.symbol("n")
    p = dace.symbol("p")
    graph = dace.SDFG("mpi_scatter")
    df_state = graph.add_state("dataflow")
    # Global (non-transient) data containers.
    graph.add_array("inbuf", [n * p], dtype, transient=False)
    graph.add_array("outbuf", [n], dtype, transient=False)
    graph.add_array("root", [1], dace.dtypes.int32, transient=False)
    src_node = df_state.add_access("inbuf")
    dst_node = df_state.add_access("outbuf")
    root_node = df_state.add_access("root")
    scatter = mpi.nodes.scatter.Scatter("scatter")
    # Wire the full send buffer and root rank into the node, and the
    # per-rank receive buffer out of it.
    df_state.add_memlet_path(src_node,
                             scatter,
                             dst_conn="_inbuffer",
                             memlet=Memlet.simple(src_node, "0:n*p", num_accesses=n))
    df_state.add_memlet_path(root_node,
                             scatter,
                             dst_conn="_root",
                             memlet=Memlet.simple(root_node, "0:1", num_accesses=1))
    df_state.add_memlet_path(scatter,
                             dst_node,
                             src_conn="_outbuffer",
                             memlet=Memlet.simple(dst_node, "0:n", num_accesses=1))
    return graph
###############################################################################
@pytest.mark.parametrize("implementation, dtype", [
    pytest.param("MPI", dace.float32, marks=pytest.mark.mpi),
    pytest.param("MPI", dace.float64, marks=pytest.mark.mpi)
])
def test_mpi(implementation, dtype):
    """Compile and run the Scatter SDFG under MPI and verify the result.

    Must be launched with an MPI runtime providing at least two ranks.
    """
    from mpi4py import MPI as MPI4PY
    np_dtype = getattr(np, dtype.to_string())
    comm = MPI4PY.COMM_WORLD
    rank = comm.Get_rank()
    commsize = comm.Get_size()
    mpi_sdfg = None
    if commsize < 2:
        raise ValueError(
            "This test is supposed to be run with at least two processes!")
    # Compile one rank at a time (barrier-serialized) so ranks do not race
    # on the shared build directory.
    for r in range(0, commsize):
        if r == rank:
            sdfg = make_sdfg(dtype)
            mpi_sdfg = sdfg.compile()
        comm.Barrier()

    size = 128
    A = np.full(size * commsize, 7, dtype=np_dtype)
    B = np.full(size, 42, dtype=np_dtype)
    root = np.array([0], dtype=np.int32)
    mpi_sdfg(inbuf=A, outbuf=B, root=root, n=size, p=commsize)

    # now B should be an array of length `size`, containing 7 (scattered from A)
    if not np.allclose(B, np.full(size, 7, dtype=np_dtype)):
        raise (ValueError("The received values are not what I expected."))
###############################################################################
# Symbolic sizes: N = per-rank chunk length, P = number of ranks.
N = dace.symbol('N', dtype=dace.int64)
P = dace.symbol('P', dtype=dace.int64)


@dace.program
def dace_scatter_gather(A: dace.float32[N * P]):
    # Scatter A from rank 0 into per-rank chunks, overwrite each chunk
    # with pi, then gather the chunks back into A on rank 0.
    tmp = np.empty_like(A, shape=[N])
    dace.comm.Scatter(A, tmp, root=0)
    tmp[:] = np.pi
    dace.comm.Gather(tmp, A, root=0)
@pytest.mark.mpi
def test_dace_scatter_gather():
    """Run the scatter/gather DaCe program across all MPI ranks."""
    from mpi4py import MPI as MPI4PY
    world = MPI4PY.COMM_WORLD
    my_rank = world.Get_rank()
    nranks = world.Get_size()
    compiled = None
    if nranks < 2:
        raise ValueError(
            "This test is supposed to be run with at least two processes!")
    # Compile the program on one rank at a time to avoid build-dir races.
    for turn in range(0, nranks):
        if turn == my_rank:
            compiled = dace_scatter_gather.compile()
        world.Barrier()

    length = 128
    if my_rank == 0:
        A = np.full([length * nranks], np.pi, dtype=np.float32)
    else:
        A = np.random.randn(length * nranks).astype(np.float32)

    compiled(A=A, N=length, P=nranks)

    # Only rank 0 gathers the result; every element should then equal pi.
    if my_rank == 0:
        expected = np.full([length * nranks], np.pi, dtype=np.float32)
        assert np.allclose(A, expected)
    else:
        assert True
###############################################################################
if __name__ == "__main__":
    # Run the scatter tests directly (requires mpirun/mpiexec with >= 2 ranks).
    test_mpi("MPI", dace.float32)
    test_mpi("MPI", dace.float64)
    test_dace_scatter_gather()
###############################################################################
| [
"dace.memlet.Memlet.simple",
"dace.symbol",
"pytest.param",
"numpy.array",
"dace.libraries.mpi.nodes.scatter.Scatter",
"dace.SDFG",
"numpy.empty_like",
"numpy.full",
"dace.comm.Scatter",
"numpy.random.randn",
"dace.comm.Gather"
] | [((2641, 2675), 'dace.symbol', 'dace.symbol', (['"""N"""'], {'dtype': 'dace.int64'}), "('N', dtype=dace.int64)\n", (2652, 2675), False, 'import dace\n'), ((2680, 2714), 'dace.symbol', 'dace.symbol', (['"""P"""'], {'dtype': 'dace.int64'}), "('P', dtype=dace.int64)\n", (2691, 2714), False, 'import dace\n'), ((299, 315), 'dace.symbol', 'dace.symbol', (['"""n"""'], {}), "('n')\n", (310, 315), False, 'import dace\n'), ((324, 340), 'dace.symbol', 'dace.symbol', (['"""p"""'], {}), "('p')\n", (335, 340), False, 'import dace\n'), ((353, 377), 'dace.SDFG', 'dace.SDFG', (['"""mpi_scatter"""'], {}), "('mpi_scatter')\n", (362, 377), False, 'import dace\n'), ((738, 774), 'dace.libraries.mpi.nodes.scatter.Scatter', 'mpi.nodes.scatter.Scatter', (['"""scatter"""'], {}), "('scatter')\n", (763, 774), True, 'import dace.libraries.mpi as mpi\n'), ((2175, 2218), 'numpy.full', 'np.full', (['(size * commsize)', '(7)'], {'dtype': 'np_dtype'}), '(size * commsize, 7, dtype=np_dtype)\n', (2182, 2218), True, 'import numpy as np\n'), ((2227, 2260), 'numpy.full', 'np.full', (['size', '(42)'], {'dtype': 'np_dtype'}), '(size, 42, dtype=np_dtype)\n', (2234, 2260), True, 'import numpy as np\n'), ((2272, 2301), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (2280, 2301), True, 'import numpy as np\n'), ((2790, 2817), 'numpy.empty_like', 'np.empty_like', (['A'], {'shape': '[N]'}), '(A, shape=[N])\n', (2803, 2817), True, 'import numpy as np\n'), ((2822, 2855), 'dace.comm.Scatter', 'dace.comm.Scatter', (['A', 'tmp'], {'root': '(0)'}), '(A, tmp, root=0)\n', (2839, 2855), False, 'import dace\n'), ((2879, 2911), 'dace.comm.Gather', 'dace.comm.Gather', (['tmp', 'A'], {'root': '(0)'}), '(tmp, A, root=0)\n', (2895, 2911), False, 'import dace\n'), ((1529, 1585), 'pytest.param', 'pytest.param', (['"""MPI"""', 'dace.float32'], {'marks': 'pytest.mark.mpi'}), "('MPI', dace.float32, marks=pytest.mark.mpi)\n", (1541, 1585), False, 'import pytest\n'), ((1591, 1647), 
'pytest.param', 'pytest.param', (['"""MPI"""', 'dace.float64'], {'marks': 'pytest.mark.mpi'}), "('MPI', dace.float64, marks=pytest.mark.mpi)\n", (1603, 1647), False, 'import pytest\n'), ((3409, 3462), 'numpy.full', 'np.full', (['[length * commsize]', 'np.pi'], {'dtype': 'np.float32'}), '([length * commsize], np.pi, dtype=np.float32)\n', (3416, 3462), True, 'import numpy as np\n'), ((930, 975), 'dace.memlet.Memlet.simple', 'Memlet.simple', (['inbuf', '"""0:n*p"""'], {'num_accesses': 'n'}), "(inbuf, '0:n*p', num_accesses=n)\n", (943, 975), False, 'from dace.memlet import Memlet\n'), ((1126, 1168), 'dace.memlet.Memlet.simple', 'Memlet.simple', (['root', '"""0:1"""'], {'num_accesses': '(1)'}), "(root, '0:1', num_accesses=1)\n", (1139, 1168), False, 'from dace.memlet import Memlet\n'), ((1326, 1370), 'dace.memlet.Memlet.simple', 'Memlet.simple', (['outbuf', '"""0:n"""'], {'num_accesses': '(1)'}), "(outbuf, '0:n', num_accesses=1)\n", (1339, 1370), False, 'from dace.memlet import Memlet\n'), ((2444, 2476), 'numpy.full', 'np.full', (['size', '(7)'], {'dtype': 'np_dtype'}), '(size, 7, dtype=np_dtype)\n', (2451, 2476), True, 'import numpy as np\n'), ((3643, 3696), 'numpy.full', 'np.full', (['[length * commsize]', 'np.pi'], {'dtype': 'np.float32'}), '([length * commsize], np.pi, dtype=np.float32)\n', (3650, 3696), True, 'import numpy as np\n'), ((3485, 3519), 'numpy.random.randn', 'np.random.randn', (['(length * commsize)'], {}), '(length * commsize)\n', (3500, 3519), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
def stride_times(Accel, fs, plot=False):
    """Detect step peaks in an acceleration signal and compute stride times.

    The signal is split into non-overlapping 12.8 s windows (see "Activity
    recognition using a single accelerometer placed at the wrist or ankle");
    peaks are then detected per window with thresholds tuned to the walking
    speed (slow / normal / fast) inferred from the window's mean value.

    Parameters
    ----------
    Accel : array-like
        Total acceleration magnitude (assumed in units of g).
    fs : int or float
        Sampling frequency in Hz.
    plot : bool, optional
        If True, plot the stride-time series (requires matplotlib).

    Returns
    -------
    tuple
        ``(number_of_steps, stride_times)`` where stride times are in
        seconds. A stride spans two consecutive steps of the same foot,
        i.e. every second detected peak.
    """
    window_size = int(12.8 * fs)

    def _windows_nonoverlap(signal, m):
        # Non-overlapping windows of length m; the last one may be shorter
        # (or empty) when len(signal) is not a multiple of m. Equivalent to
        # the original accumulate-and-slice loop.
        return [signal[j * m:(j + 1) * m] for j in range(len(signal) // m + 1)]

    acc_windows = _windows_nonoverlap(Accel, window_size)

    # Detect peaks in each window; detection parameters depend on walking
    # velocity (slow / normal / fast), inferred from the mean acceleration.
    peaks = []
    for window in acc_windows:
        if np.mean(window) <= 1.7:
            window_peaks, _ = find_peaks(window, distance=14, prominence=0.2)
        elif np.mean(window) <= 2.2:
            window_peaks, _ = find_peaks(window, distance=12, prominence=0.25)
        else:
            window_peaks, _ = find_peaks(window, distance=10, prominence=0.35)
        peaks.append(window_peaks)

    # Shift per-window peak indices so they index frames of the full signal.
    offset = 0
    for p in range(len(peaks)):
        peaks[p] = peaks[p] + offset
        offset += len(acc_windows[p])

    # Cleaner than the original empty_like-prefix-then-slice trick; result
    # is identical.
    steps = np.concatenate(peaks)

    # Stride time = time between every second peak (same-foot strikes).
    # The original even/odd branches both reduce to this single range.
    stride_times = [(steps[f + 2] - steps[f]) / fs
                    for f in range(0, len(steps) - 2, 2)]

    if plot:
        plt.plot(np.array(range(len(stride_times))), np.array(stride_times))
        plt.title("Stride Time Variability")
        plt.xlabel("Stride Number")
        plt.ylabel("Stride Time (s)")
        plt.show()
    return len(steps), stride_times
if __name__ == "__main__":
    # Demo: load pilot accelerometer data (time + 3-axis acceleration in
    # m/s^2) and print the detected step count and stride-time series.
    df = pd.read_csv(r"C:\Users\Σπύρος\Documents\ΣΠΥΡΟΣ\Pycharm Projects\Parkinson dfa app\Android-Sensor-Stride\code\python scripts\pilot data acc spyros.csv",sep = ",",usecols = [0,1,2,3],nrows = 8721)
    time = df["time"].values
    Ax = df["ax (m/s^2)"].values
    Ay = df["ay (m/s^2)"].values
    Az = df["az (m/s^2)"].values
    # Magnitude of the acceleration vector, converted from m/s^2 to g.
    Atotal = (np.sqrt(Ax**2 + Ay**2 + Az**2))/9.81 #Atotal unit = 1g = 9.81
    print(stride_times(Atotal,fs=208,plot=True))
| [
"numpy.mean",
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.empty_like",
"numpy.concatenate",
"scipy.signal.find_peaks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1537, 1560), 'numpy.empty_like', 'np.empty_like', (['peaks[0]'], {}), '(peaks[0])\n', (1550, 1560), True, 'import numpy as np\n'), ((2380, 2593), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Σπύρος\\\\Documents\\\\ΣΠΥΡΟΣ\\\\Pycharm Projects\\\\Parkinson dfa app\\\\Android-Sensor-Stride\\\\code\\\\python scripts\\\\pilot data acc spyros.csv"""'], {'sep': '""","""', 'usecols': '[0, 1, 2, 3]', 'nrows': '(8721)'}), "(\n 'C:\\\\Users\\\\Σπύρος\\\\Documents\\\\ΣΠΥΡΟΣ\\\\Pycharm Projects\\\\Parkinson dfa app\\\\Android-Sensor-Stride\\\\code\\\\python scripts\\\\pilot data acc spyros.csv'\n , sep=',', usecols=[0, 1, 2, 3], nrows=8721)\n", (2391, 2593), True, 'import pandas as pd\n'), ((1627, 1667), 'numpy.concatenate', 'np.concatenate', (['(concat_peaks, peaks[p])'], {}), '((concat_peaks, peaks[p]))\n', (1641, 1667), True, 'import numpy as np\n'), ((2148, 2184), 'matplotlib.pyplot.title', 'plt.title', (['"""Stride Time Variability"""'], {}), "('Stride Time Variability')\n", (2157, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2220), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stride Number"""'], {}), "('Stride Number')\n", (2203, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2258), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stride Time (s)"""'], {}), "('Stride Time (s)')\n", (2239, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2267, 2277), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2275, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2753), 'numpy.sqrt', 'np.sqrt', (['(Ax ** 2 + Ay ** 2 + Az ** 2)'], {}), '(Ax ** 2 + Ay ** 2 + Az ** 2)\n', (2724, 2753), True, 'import numpy as np\n'), ((747, 762), 'numpy.mean', 'np.mean', (['window'], {}), '(window)\n', (754, 762), True, 'import numpy as np\n'), ((800, 847), 'scipy.signal.find_peaks', 'find_peaks', (['window'], {'distance': '(14)', 'prominence': '(0.2)'}), '(window, distance=14, prominence=0.2)\n', (810, 847), False, 'from scipy.signal 
import find_peaks\n'), ((2116, 2138), 'numpy.array', 'np.array', (['stride_times'], {}), '(stride_times)\n', (2124, 2138), True, 'import numpy as np\n'), ((910, 925), 'numpy.mean', 'np.mean', (['window'], {}), '(window)\n', (917, 925), True, 'import numpy as np\n'), ((964, 1012), 'scipy.signal.find_peaks', 'find_peaks', (['window'], {'distance': '(12)', 'prominence': '(0.25)'}), '(window, distance=12, prominence=0.25)\n', (974, 1012), False, 'from scipy.signal import find_peaks\n'), ((1099, 1147), 'scipy.signal.find_peaks', 'find_peaks', (['window'], {'distance': '(10)', 'prominence': '(0.35)'}), '(window, distance=10, prominence=0.35)\n', (1109, 1147), False, 'from scipy.signal import find_peaks\n')] |
"""
mpld3 Logo Idea
===============
This example shows how mpld3 can be used to generate relatively intricate
vector graphics in the browser. This is an adaptation of a logo proposal by
github user debjan, in turn based on both the matplotlib and D3js logos.
"""
# Author: <NAME>
import matplotlib.pyplot as plt
from matplotlib import image, patches, colors
from matplotlib.colors import colorConverter
import numpy as np
import mpld3
# Canvas geometry: pixel size of the logo and the polar-grid parameters.
imsize = np.array([319, 217])
center = [108.5, 108.5]
max_radius = 108.5
radii = np.linspace(16, max_radius, 5)
angles = np.arange(0, 360, 45)

fig = plt.figure(figsize=imsize / 50.)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])

# Create a clip path for the elements
clip_path = patches.Rectangle((0, 0), imsize[0], imsize[1],
                              transform=ax.transData)

# Create the background gradient
x = np.array([0, 104, 196, 300])
y = np.linspace(150, 450, 86)[:, None]
c = np.cos(-np.pi / 4)
s = np.sin(-np.pi / 4)
X, Y = (c * x - s * y) - 116, (s * x + c * y)
C = np.arange(255).reshape((3, 85)).T
C = C[::-1, :]

cmap = colors.LinearSegmentedColormap.from_list("mpld3",
                                              [[0.97, 0.6, 0.29],
                                               [0.97, 0.59, 0.27],
                                               [0.97, 0.58, 0.25],
                                               [0.95, 0.44, 0.34],
                                               [0.92, 0.51, 0.29],
                                               [0.68, 0.21, 0.20]])
# Bug fix: pcolormesh has no 'gourand' shading mode; the intended smooth
# interpolation is spelled 'gouraud' (newer matplotlib raises on the typo).
mesh = ax.pcolormesh(X, Y, C, cmap=cmap, shading='gouraud', zorder=0)
mesh.set_clip_path(clip_path)

# cut-off the background to form the "D" and "3" using white patches
# (this could also be done with a clip path)
kwargs = dict(fc='white', ec='none', zorder=1)
ax.add_patch(patches.Rectangle([0, 0], center[0], imsize[1], **kwargs))
ax.add_patch(patches.Circle(center, radii[2], **kwargs))
ax.add_patch(patches.Wedge(center, 127, -90, 90, width=18.5, **kwargs))

ax.add_patch(patches.Circle((252, 66), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 48], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 66), 101, -90, 40.1, width=35, **kwargs))

ax.add_patch(patches.Circle((252, 151), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 133], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 151), 101, -40.1, 90, width=35, **kwargs))

# Mask everything outside the logo rectangle with white borders.
ax.add_patch(patches.Rectangle([-200, -200], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 200, 617, **kwargs))
ax.add_patch(patches.Rectangle([-200, imsize[1]], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([imsize[0], -200], 200, 617, **kwargs))

# plot circles and lines
for radius in radii:
    ax.add_patch(patches.Circle(center, radius, lw=0.5,
                                ec='gray', fc='none', zorder=2))
for angle in angles:
    dx, dy = np.sin(np.radians(angle)), np.cos(np.radians(angle))
    ax.plot([max_radius * (1 - dx), max_radius * (1 + dx)],
            [max_radius * (1 - dy), max_radius * (1 + dy)],
            '-', color='gray', lw=0.5, zorder=2)

# plot wedges within the graph: (radius, theta1, theta2, facecolor)
wedges = [(98, 231, 258, '#FF6600'),
          (85, 170, 205, '#FFC500'),
          (60, 80, 103, '#7DFF78'),
          (96, 45, 58, '#FD7C1A'),
          (73, 291, 308, '#CCFF28'),
          (47, 146, 155, '#28FFCC'),
          (25, 340, 360, '#004AFF')]

for (radius, theta1, theta2, color) in wedges:
    ax.add_patch(patches.Wedge(center, radius, theta1, theta2,
                                fc=color, ec='black', alpha=0.6, zorder=3))

# Clip every patch to the logo rectangle so nothing spills outside.
for patch in ax.patches:
    patch.set_clip_path(clip_path)

ax.set_xlim(0, imsize[0])
ax.set_ylim(imsize[1], 0)

#plt.savefig('mpld3.png')
mpld3.show()
| [
"numpy.radians",
"matplotlib.patches.Rectangle",
"matplotlib.patches.Wedge",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"mpld3.show",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.patches.Circle",
"numpy.arange"
] | [((446, 466), 'numpy.array', 'np.array', (['[319, 217]'], {}), '([319, 217])\n', (454, 466), True, 'import numpy as np\n'), ((518, 548), 'numpy.linspace', 'np.linspace', (['(16)', 'max_radius', '(5)'], {}), '(16, max_radius, 5)\n', (529, 548), True, 'import numpy as np\n'), ((558, 579), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(45)'], {}), '(0, 360, 45)\n', (567, 579), True, 'import numpy as np\n'), ((588, 621), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(imsize / 50.0)'}), '(figsize=imsize / 50.0)\n', (598, 621), True, 'import matplotlib.pyplot as plt\n'), ((741, 812), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(0, 0)', 'imsize[0]', 'imsize[1]'], {'transform': 'ax.transData'}), '((0, 0), imsize[0], imsize[1], transform=ax.transData)\n', (758, 812), False, 'from matplotlib import image, patches, colors\n'), ((881, 909), 'numpy.array', 'np.array', (['[0, 104, 196, 300]'], {}), '([0, 104, 196, 300])\n', (889, 909), True, 'import numpy as np\n'), ((954, 972), 'numpy.cos', 'np.cos', (['(-np.pi / 4)'], {}), '(-np.pi / 4)\n', (960, 972), True, 'import numpy as np\n'), ((977, 995), 'numpy.sin', 'np.sin', (['(-np.pi / 4)'], {}), '(-np.pi / 4)\n', (983, 995), True, 'import numpy as np\n'), ((1102, 1279), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""mpld3"""', '[[0.97, 0.6, 0.29], [0.97, 0.59, 0.27], [0.97, 0.58, 0.25], [0.95, 0.44, \n 0.34], [0.92, 0.51, 0.29], [0.68, 0.21, 0.2]]'], {}), "('mpld3', [[0.97, 0.6, 0.29], [0.97,\n 0.59, 0.27], [0.97, 0.58, 0.25], [0.95, 0.44, 0.34], [0.92, 0.51, 0.29],\n [0.68, 0.21, 0.2]])\n", (1142, 1279), False, 'from matplotlib import image, patches, colors\n'), ((3729, 3741), 'mpld3.show', 'mpld3.show', ([], {}), '()\n', (3739, 3741), False, 'import mpld3\n'), ((914, 939), 'numpy.linspace', 'np.linspace', (['(150)', '(450)', '(86)'], {}), '(150, 450, 86)\n', (925, 939), True, 'import numpy as np\n'), ((1841, 1898), 
'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[0, 0]', 'center[0]', 'imsize[1]'], {}), '([0, 0], center[0], imsize[1], **kwargs)\n', (1858, 1898), False, 'from matplotlib import image, patches, colors\n'), ((1914, 1956), 'matplotlib.patches.Circle', 'patches.Circle', (['center', 'radii[2]'], {}), '(center, radii[2], **kwargs)\n', (1928, 1956), False, 'from matplotlib import image, patches, colors\n'), ((1971, 2028), 'matplotlib.patches.Wedge', 'patches.Wedge', (['center', '(127)', '(-90)', '(90)'], {'width': '(18.5)'}), '(center, 127, -90, 90, width=18.5, **kwargs)\n', (1984, 2028), False, 'from matplotlib import image, patches, colors\n'), ((2044, 2083), 'matplotlib.patches.Circle', 'patches.Circle', (['(252, 66)', '(18)'], {}), '((252, 66), 18, **kwargs)\n', (2058, 2083), False, 'from matplotlib import image, patches, colors\n'), ((2098, 2144), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[216, 48]', '(36)', '(36)'], {}), '([216, 48], 36, 36, **kwargs)\n', (2115, 2144), False, 'from matplotlib import image, patches, colors\n'), ((2159, 2219), 'matplotlib.patches.Wedge', 'patches.Wedge', (['(252, 66)', '(101)', '(-90)', '(40.1)'], {'width': '(35)'}), '((252, 66), 101, -90, 40.1, width=35, **kwargs)\n', (2172, 2219), False, 'from matplotlib import image, patches, colors\n'), ((2235, 2275), 'matplotlib.patches.Circle', 'patches.Circle', (['(252, 151)', '(18)'], {}), '((252, 151), 18, **kwargs)\n', (2249, 2275), False, 'from matplotlib import image, patches, colors\n'), ((2290, 2337), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[216, 133]', '(36)', '(36)'], {}), '([216, 133], 36, 36, **kwargs)\n', (2307, 2337), False, 'from matplotlib import image, patches, colors\n'), ((2352, 2413), 'matplotlib.patches.Wedge', 'patches.Wedge', (['(252, 151)', '(101)', '(-40.1)', '(90)'], {'width': '(35)'}), '((252, 151), 101, -40.1, 90, width=35, **kwargs)\n', (2365, 2413), False, 'from matplotlib import image, patches, colors\n'), ((2429, 2480), 
'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[-200, -200]', '(719)', '(200)'], {}), '([-200, -200], 719, 200, **kwargs)\n', (2446, 2480), False, 'from matplotlib import image, patches, colors\n'), ((2495, 2546), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[-200, -200]', '(200)', '(617)'], {}), '([-200, -200], 200, 617, **kwargs)\n', (2512, 2546), False, 'from matplotlib import image, patches, colors\n'), ((2561, 2617), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[-200, imsize[1]]', '(719)', '(200)'], {}), '([-200, imsize[1]], 719, 200, **kwargs)\n', (2578, 2617), False, 'from matplotlib import image, patches, colors\n'), ((2632, 2688), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['[imsize[0], -200]', '(200)', '(617)'], {}), '([imsize[0], -200], 200, 617, **kwargs)\n', (2649, 2688), False, 'from matplotlib import image, patches, colors\n'), ((2754, 2824), 'matplotlib.patches.Circle', 'patches.Circle', (['center', 'radius'], {'lw': '(0.5)', 'ec': '"""gray"""', 'fc': '"""none"""', 'zorder': '(2)'}), "(center, radius, lw=0.5, ec='gray', fc='none', zorder=2)\n", (2768, 2824), False, 'from matplotlib import image, patches, colors\n'), ((3467, 3560), 'matplotlib.patches.Wedge', 'patches.Wedge', (['center', 'radius', 'theta1', 'theta2'], {'fc': 'color', 'ec': '"""black"""', 'alpha': '(0.6)', 'zorder': '(3)'}), "(center, radius, theta1, theta2, fc=color, ec='black', alpha=\n 0.6, zorder=3)\n", (3480, 3560), False, 'from matplotlib import image, patches, colors\n'), ((1046, 1060), 'numpy.arange', 'np.arange', (['(255)'], {}), '(255)\n', (1055, 1060), True, 'import numpy as np\n'), ((2899, 2916), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2909, 2916), True, 'import numpy as np\n'), ((2926, 2943), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2936, 2943), True, 'import numpy as np\n')] |
import myutil as mu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset # 텐서데이터셋
from torch.utils.data import DataLoader # 데이터로더
from torch.utils.data import Dataset
################################################################################
# - 로지스틱 회귀(Logistic Regression)
# - 일상 속 풀고자하는 많은 문제 중에서는 두 개의 선택지 중에서 정답을 고르는 문제가 많습니다.
# - 예를 들어 시험을 봤는데 이 시험 점수가 합격인지 불합격인지가 궁금할 수도 있고, 어떤 메일을 받았을 때 이게 정상 메일인지 스팸 메일인지를 분류하는 문제도 그렇습니다. 이렇게 둘 중 하나를 결정하는 문제를 이진 분류(Binary Classification)라고 합니다. 그리고 이진 분류를 풀기 위한 대표적인 알고리즘으로 로지스틱 회귀(Logistic Regression)가 있습니다.
# - 로지스틱 회귀는 알고리즘의 이름은 회귀이지만 실제로는 분류(Classification) 작업에 사용할 수 있습니다.
################################################################################
# - 시그모이드 함수(Sigmoid function)
# - 위와 같이 S자 형태로 그래프를 그려주는 시그모이드 함수의 방정식은 아래와 같습니다.
# 
# - 선형 회귀에서는 최적의 W와 b를 찾는 것이 목표였습니다.
# - 여기서도 마찬가지입니다.
# - 선형 회귀에서는 W가 직선의 기울기, b가 y절편을 의미했습니다.
# - 그렇다면 여기에서는 W와 b가 함수의 그래프에 어떤 영향을 주는지 직접 그래프를 그려서 알아보겠습니다.
# %matplotlib inline
import numpy as np # 넘파이 사용
import matplotlib.pyplot as plt # 맷플롯립사용
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-x))
# Plot the sigmoid over [-5, 5) to visualize its S-shape; W and b would
# shift/scale this curve in a logistic-regression model.
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)

plt.plot(x, y, "g")
plt.plot([0, 0], [1, 0], ":")  # dotted vertical guide at x = 0
plt.title("sigmoid function")
plt.show()
################################################################################
# - 비용 함수(Cost function)
# - y 의 실제값이 1일 때 −logH(x) 그래프를 사용하고
# - y의 실제값이 0일 때 −log(1−H(X)) 그래프를 사용해야 합니다.
# - 이는 다음과 같이 하나의 식으로 통합할 수 있습니다.
#  = -\frac{1}{n} \sum_{i=1}^{n} [y^{(i)}logH(x^{(i)}) %2B (1-y^{(i)})log(1-H(x^{(i)}))])
# Reproducible demo: binary logistic regression "by hand" on a toy dataset.
torch.manual_seed(1)

x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)

# Model parameters, initialized to zero (so every prediction starts at 0.5).
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

# Sigmoid written out explicitly ...
hypothesis = 1 / (1 + torch.exp(-(x_train.matmul(W) + b)))
mu.log("hypothesis", hypothesis)
mu.log("y_train", y_train)

# ... and via torch.sigmoid (identical result).
hypothesis = torch.sigmoid(x_train.matmul(W) + b)
mu.log("hypothesis", hypothesis)
mu.log("y_train", y_train)

# Element-wise binary cross-entropy: -[y*log(h) + (1-y)*log(1-h)].
# Bug fix: the original closed the negation after the first term only,
# i.e. -(y*log(h)) + (1-y)*log(1-h), which is not BCE and disagrees both
# with the training loop below and with F.binary_cross_entropy.
losses = -(y_train * torch.log(hypothesis) + (1 - y_train) * torch.log(1 - hypothesis))
cost = losses.mean()
mu.log("losses", losses)
mu.log("cost", cost)

# Same cost via the built-in helper; should match `cost` above.
loss = F.binary_cross_entropy(hypothesis, y_train)
mu.log("loss.item()", loss.item())
################################################################################
# 모델의 훈련 과정까지 추가한 전체 코드는 아래와 같습니다.
# Full training run: logistic regression trained with plain SGD (lr=1)
# on the same toy dataset for 1000 epochs.
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([W, b], lr=1)
nb_epoches = 1000
mu.plt_init()

for epoch in range(nb_epoches + 1):
    hypothesis = torch.sigmoid(x_train.matmul(W) + b)
    # Binary cross-entropy cost; here the negation correctly spans both terms.
    cost = -(y_train * torch.log(hypothesis) + (1 - y_train) * torch.log(1 - hypothesis)).mean()
    accuracy = mu.get_regression_accuracy(hypothesis, y_train)
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()

    if epoch % 100 == 0:
        mu.log_epoch(epoch, nb_epoches, cost, accuracy)

mu.plt_show()
mu.log("W", W)
mu.log("b", b)
# Classify with a 0.5 decision threshold on the final predictions.
prediction = hypothesis >= torch.FloatTensor([0.5])
mu.log("prediction", prediction)
mu.log("y_data", y_data)
| [
"torch.manual_seed",
"torch.optim.SGD",
"torch.log",
"myutil.plt_init",
"myutil.log_epoch",
"myutil.plt_show",
"matplotlib.pyplot.plot",
"torch.nn.functional.binary_cross_entropy",
"myutil.get_regression_accuracy",
"numpy.exp",
"torch.zeros",
"myutil.log",
"matplotlib.pyplot.title",
"torch... | [((1436, 1461), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (1445, 1461), True, 'import numpy as np\n'), ((1478, 1497), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g"""'], {}), "(x, y, 'g')\n", (1486, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1527), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[1, 0]', '""":"""'], {}), "([0, 0], [1, 0], ':')\n", (1506, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1557), 'matplotlib.pyplot.title', 'plt.title', (['"""sigmoid function"""'], {}), "('sigmoid function')\n", (1537, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1566, 1568), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1975), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (1972, 1975), False, 'import torch\n'), ((2085, 2110), 'torch.FloatTensor', 'torch.FloatTensor', (['x_data'], {}), '(x_data)\n', (2102, 2110), False, 'import torch\n'), ((2121, 2146), 'torch.FloatTensor', 'torch.FloatTensor', (['y_data'], {}), '(y_data)\n', (2138, 2146), False, 'import torch\n'), ((2152, 2191), 'torch.zeros', 'torch.zeros', (['(2, 1)'], {'requires_grad': '(True)'}), '((2, 1), requires_grad=True)\n', (2163, 2191), False, 'import torch\n'), ((2196, 2230), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (2207, 2230), False, 'import torch\n'), ((2290, 2322), 'myutil.log', 'mu.log', (['"""hypothesis"""', 'hypothesis'], {}), "('hypothesis', hypothesis)\n", (2296, 2322), True, 'import myutil as mu\n'), ((2323, 2349), 'myutil.log', 'mu.log', (['"""y_train"""', 'y_train'], {}), "('y_train', y_train)\n", (2329, 2349), True, 'import myutil as mu\n'), ((2401, 2433), 'myutil.log', 'mu.log', (['"""hypothesis"""', 'hypothesis'], {}), "('hypothesis', hypothesis)\n", (2407, 2433), True, 'import myutil as mu\n'), ((2434, 2460), 'myutil.log', 'mu.log', 
(['"""y_train"""', 'y_train'], {}), "('y_train', y_train)\n", (2440, 2460), True, 'import myutil as mu\n'), ((2571, 2595), 'myutil.log', 'mu.log', (['"""losses"""', 'losses'], {}), "('losses', losses)\n", (2577, 2595), True, 'import myutil as mu\n'), ((2596, 2616), 'myutil.log', 'mu.log', (['"""cost"""', 'cost'], {}), "('cost', cost)\n", (2602, 2616), True, 'import myutil as mu\n'), ((2625, 2668), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['hypothesis', 'y_train'], {}), '(hypothesis, y_train)\n', (2647, 2668), True, 'import torch.nn.functional as F\n'), ((2930, 2955), 'torch.FloatTensor', 'torch.FloatTensor', (['x_data'], {}), '(x_data)\n', (2947, 2955), False, 'import torch\n'), ((2966, 2991), 'torch.FloatTensor', 'torch.FloatTensor', (['y_data'], {}), '(y_data)\n', (2983, 2991), False, 'import torch\n'), ((2997, 3036), 'torch.zeros', 'torch.zeros', (['(2, 1)'], {'requires_grad': '(True)'}), '((2, 1), requires_grad=True)\n', (3008, 3036), False, 'import torch\n'), ((3041, 3075), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (3052, 3075), False, 'import torch\n'), ((3088, 3111), 'torch.optim.SGD', 'optim.SGD', (['[W, b]'], {'lr': '(1)'}), '([W, b], lr=1)\n', (3097, 3111), True, 'import torch.optim as optim\n'), ((3130, 3143), 'myutil.plt_init', 'mu.plt_init', ([], {}), '()\n', (3141, 3143), True, 'import myutil as mu\n'), ((3545, 3558), 'myutil.plt_show', 'mu.plt_show', ([], {}), '()\n', (3556, 3558), True, 'import myutil as mu\n'), ((3559, 3573), 'myutil.log', 'mu.log', (['"""W"""', 'W'], {}), "('W', W)\n", (3565, 3573), True, 'import myutil as mu\n'), ((3574, 3588), 'myutil.log', 'mu.log', (['"""b"""', 'b'], {}), "('b', b)\n", (3580, 3588), True, 'import myutil as mu\n'), ((3642, 3674), 'myutil.log', 'mu.log', (['"""prediction"""', 'prediction'], {}), "('prediction', prediction)\n", (3648, 3674), True, 'import myutil as mu\n'), ((3675, 3699), 'myutil.log', 'mu.log', 
(['"""y_data"""', 'y_data'], {}), "('y_data', y_data)\n", (3681, 3699), True, 'import myutil as mu\n'), ((3347, 3394), 'myutil.get_regression_accuracy', 'mu.get_regression_accuracy', (['hypothesis', 'y_train'], {}), '(hypothesis, y_train)\n', (3373, 3394), True, 'import myutil as mu\n'), ((3617, 3641), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.5]'], {}), '([0.5])\n', (3634, 3641), False, 'import torch\n'), ((2524, 2549), 'torch.log', 'torch.log', (['(1 - hypothesis)'], {}), '(1 - hypothesis)\n', (2533, 2549), False, 'import torch\n'), ((3496, 3543), 'myutil.log_epoch', 'mu.log_epoch', (['epoch', 'nb_epoches', 'cost', 'accuracy'], {}), '(epoch, nb_epoches, cost, accuracy)\n', (3508, 3543), True, 'import myutil as mu\n'), ((1403, 1413), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1409, 1413), True, 'import numpy as np\n'), ((2483, 2504), 'torch.log', 'torch.log', (['hypothesis'], {}), '(hypothesis)\n', (2492, 2504), False, 'import torch\n'), ((3258, 3279), 'torch.log', 'torch.log', (['hypothesis'], {}), '(hypothesis)\n', (3267, 3279), False, 'import torch\n'), ((3298, 3323), 'torch.log', 'torch.log', (['(1 - hypothesis)'], {}), '(1 - hypothesis)\n', (3307, 3323), False, 'import torch\n')] |
from __future__ import print_function
import argparse
import os
import numpy as np
import torch
from torchvision import transforms
import dataset
from darknet import Darknet
from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou
# etc parameters
use_cuda = True  # may be overridden in main() based on torch.cuda availability
seed = 22222  # RNG seed for torch / torch.cuda
eps = 1e-5  # NOTE(review): unused in the visible part of this file
FLAGS = None  # populated from argparse in the __main__ block
def main():
    """Build the model and data loader from FLAGS and evaluate each checkpoint.

    Side effects: assigns the module-level globals `model` and `use_cuda`
    (read by `test()`), and may set CUDA_VISIBLE_DEVICES.
    """
    # Validation parameters
    conf_thresh = FLAGS.conf_threshold
    nms_thresh = FLAGS.nms_threshold
    iou_thresh = FLAGS.iou_threshold

    # output file
    out_path = FLAGS.out_path

    # Training settings
    datacfg = FLAGS.data
    cfgfile = FLAGS.config

    data_options = read_data_cfg(datacfg)
    file_list = data_options['valid']
    gpus = data_options['gpus']  # e.g. 0,1,2,3
    ngpus = len(gpus.split(','))
    num_workers = int(data_options['num_workers'])
    # for testing, batch_size is set to 1 (one)
    batch_size = FLAGS.batch_size

    global use_cuda
    use_cuda = torch.cuda.is_available() and use_cuda

    ###############
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)
    global model
    model = Darknet(cfgfile)
    # model.print_network()

    init_width = model.width
    init_height = model.height

    kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
    # Validation loader: fixed order, no jitter; validate=True presumably makes
    # the dataset also yield image paths and original sizes -- TODO confirm
    # against dataset.listDataset.
    val_loader = torch.utils.data.DataLoader(
        dataset.listDataset(file_list, shape=(init_width, init_height),
                            shuffle=False, jitter=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]), validate=True),
        batch_size=batch_size, shuffle=False, **kwargs)

    if use_cuda:
        if ngpus > 1:
            # Unwrap immediately: DataParallel is only used transiently here.
            model = torch.nn.DataParallel(model)
            model = model.module
    model = model.to(torch.device("cuda" if use_cuda else "cpu"))

    # Evaluate every checkpoint listed on the command line.
    for w in FLAGS.weights:
        # model.load_weights(w)
        checkpoint = torch.load(w)
        model.load_state_dict(checkpoint['model_state_dict'])
        logging('evaluating ... %s' % (w))
        test(val_loader, conf_thresh, nms_thresh, iou_thresh, out_path, batch_size)
test(val_loader, conf_thresh, nms_thresh, iou_thresh, out_path, batch_size)
def test(val_loader, conf_thresh, nms_thresh, iou_thresh, out_path, batch_size):
    """Run detection over `val_loader` and print the mean IoU score.

    For every image, the image path, ground-truth boxes and surviving
    detections are saved as one object array (.npy) under `out_path`.

    Relies on the module globals `model` and `use_cuda` (set up by `main`).
    `iou_thresh` is accepted for interface compatibility but is unused here.
    """
    model.eval()
    num_classes = model.num_classes
    device = torch.device("cuda" if use_cuda else "cpu")
    if model.net_name() == 'region':  # region_layer
        shape = (0, 0)
    else:
        shape = (model.width, model.height)
    iou_scores = []  # renamed from `map` to stop shadowing the builtin
    for i, (imgpath, data, target, org_w, org_h) in enumerate(val_loader):
        print('Computing boxes for batch', i, 'of size', batch_size, '. Number computed is:', i * batch_size)
        data = data.to(device)
        output = model(data)
        all_boxes, det_confs = get_all_boxes(output, shape, conf_thresh, num_classes, use_cuda=use_cuda,
                                             output_confidence=True)
        for k in range(len(all_boxes)):
            boxes = np.array(all_boxes[k])
            if boxes.size:
                boxes = boxes[boxes[:, 4] > conf_thresh]
                boxes = nms(boxes, nms_thresh)
            # Guard the empty case: np.stack raises ValueError on an empty
            # sequence, which previously crashed on images with no detections.
            boxes = np.stack(boxes) if len(boxes) > 0 else np.zeros((0, 5))
            boxes_true = target.cpu().numpy().reshape(-1, 5)
            assert len(boxes_true) % 50 == 0, 'max_boxes in image.py "fill_truth_detection" is different from 50 with ' \
                                              'shape {}'.format(boxes_true.shape)
            # Each image owns a fixed-size slot of 50 ground-truth rows.
            boxes_true = boxes_true[50*k:50*(k+1)]
            boxes_true = boxes_true[boxes_true.max(1) > 0, 1:5]  # drop all-zero rows, keep coords
            out_boxes = np.array([imgpath[k], boxes_true, boxes], dtype=object)
            np.save(out_path + str(i*len(imgpath) + k), out_boxes)
            boxes_pred = boxes[:, :4].copy()
            scores = boxes[:, 4].copy()
            keep = scores > 0.03  # low-score floor before IoU scoring
            boxes_pred = boxes_pred[keep]
            scores = scores[keep]
            if len(scores) > 0:
                iou_scores.append(map_iou(boxes_true, boxes_pred, scores))
    mean_iou = np.array(iou_scores).mean()
    print('Validation output saved at ' + out_path)
    print('The mAP IoU is: {}'.format(mean_iou))
if __name__ == '__main__':
    # Command-line entry point.  Example invocation:
    #   python validate.py -c cfg/chexdet.cfg -w backup/15.pt -d cfg/chexdet.data \
    #       --conf_threshold 0.001 -o data/out/ -b 1 --nms_threshold 0.01
    cli = argparse.ArgumentParser()
    cli.add_argument('--data', '-d', type=str, default='cfg/sketch.data',
                     help='data definition file, will validate over "valid" file')
    cli.add_argument('--config', '-c', type=str, default='cfg/sketch.cfg',
                     help='network configuration file')
    cli.add_argument('--weights', '-w', type=str, nargs='+', default=['backup/15.pt'],
                     help='weights')
    cli.add_argument('--conf_threshold', type=float, default=0.25,
                     help='confidence threshold')
    cli.add_argument('--nms_threshold', type=float, default=0.4,
                     help='nms threshold')
    cli.add_argument('--iou_threshold', type=float, default=0.5,
                     help='IOU threshold for metrics')
    cli.add_argument('--out_path', '-o', type=str,
                     help='path to write box predictions in the shape (num_batches, batch_size)'
                          ' where each of these contains img paths, gt bb and predicted bb')
    cli.add_argument('--batch_size', '-b', type=int, default=16)
    FLAGS, _ = cli.parse_known_args()
    main()
| [
"utils.map_iou",
"torch.manual_seed",
"darknet.Darknet",
"argparse.ArgumentParser",
"utils.get_all_boxes",
"torch.load",
"torch.nn.DataParallel",
"utils.logging",
"numpy.array",
"numpy.stack",
"torch.cuda.is_available",
"utils.read_data_cfg",
"utils.nms",
"torch.cuda.manual_seed",
"torch... | [((617, 639), 'utils.read_data_cfg', 'read_data_cfg', (['datacfg'], {}), '(datacfg)\n', (630, 639), False, 'from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou\n'), ((993, 1016), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1010, 1016), False, 'import torch\n'), ((1151, 1167), 'darknet.Darknet', 'Darknet', (['cfgfile'], {}), '(cfgfile)\n', (1158, 1167), False, 'from darknet import Darknet\n'), ((2492, 2535), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2504, 2535), False, 'import torch\n'), ((4423, 4448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4446, 4448), False, 'import argparse\n'), ((929, 954), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (952, 954), False, 'import torch\n'), ((1092, 1120), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1114, 1120), False, 'import torch\n'), ((1877, 1920), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1889, 1920), False, 'import torch\n'), ((2003, 2016), 'torch.load', 'torch.load', (['w'], {}), '(w)\n', (2013, 2016), False, 'import torch\n'), ((2087, 2119), 'utils.logging', 'logging', (["('evaluating ... %s' % w)"], {}), "('evaluating ... 
%s' % w)\n", (2094, 2119), False, 'from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou\n'), ((2956, 3057), 'utils.get_all_boxes', 'get_all_boxes', (['output', 'shape', 'conf_thresh', 'num_classes'], {'use_cuda': 'use_cuda', 'output_confidence': '(True)'}), '(output, shape, conf_thresh, num_classes, use_cuda=use_cuda,\n output_confidence=True)\n', (2969, 3057), False, 'from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou\n'), ((1794, 1822), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1815, 1822), False, 'import torch\n'), ((3160, 3182), 'numpy.array', 'np.array', (['all_boxes[k]'], {}), '(all_boxes[k])\n', (3168, 3182), True, 'import numpy as np\n'), ((3256, 3278), 'utils.nms', 'nms', (['boxes', 'nms_thresh'], {}), '(boxes, nms_thresh)\n', (3259, 3278), False, 'from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou\n'), ((3299, 3314), 'numpy.stack', 'np.stack', (['boxes'], {}), '(boxes)\n', (3307, 3314), True, 'import numpy as np\n'), ((3715, 3770), 'numpy.array', 'np.array', (['[imgpath[k], boxes_true, boxes]'], {'dtype': 'object'}), '([imgpath[k], boxes_true, boxes], dtype=object)\n', (3723, 3770), True, 'import numpy as np\n'), ((4122, 4135), 'numpy.array', 'np.array', (['map'], {}), '(map)\n', (4130, 4135), True, 'import numpy as np\n'), ((4041, 4080), 'utils.map_iou', 'map_iou', (['boxes_true', 'boxes_pred', 'scores'], {}), '(boxes_true, boxes_pred, scores)\n', (4048, 4080), False, 'from utils import get_all_boxes, nms, read_data_cfg, logging, map_iou\n'), ((1607, 1628), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1626, 1628), False, 'from torchvision import transforms\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, French National Center for Scientific Research (CNRS)
# Distributed under the (new) BSD License. See LICENSE for more info.
import threading, time, logging
from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner
from teleprox.log import RPCLogHandler, set_process_name, set_thread_name, start_log_server
import numpy as np
from check_qt import requires_qt, qt_available
# Set up nice logging for tests:
# remote processes forward logs to this process
logger = logging.getLogger()
#logger.level = logging.DEBUG
start_log_server(logger)
# local log messages are time-sorted and colored
handler = RPCLogHandler()
logger.addHandler(handler)
# messages originating locally can be easily identified
set_process_name('main_process')
set_thread_name('main_thread')
if qt_available:
    # NOTE(review): `pg` is not imported anywhere in this module, so this line
    # raises NameError whenever Qt is available -- probably a missing
    # `import pyqtgraph as pg` (mkQApp is a pyqtgraph helper).
    qapp = pg.mkQApp()
def test_rpc():
    """End-to-end exercise of the teleprox RPC machinery.

    Covers: proxy retrieval and identity, sync/async/fire-and-forget calls,
    return-by-proxy, deferred getattr, remote object lifetime, timeouts,
    result ordering, bulk transfer, remote imports, proxy sharing between
    two servers, a JSON-serialized client, and reentrant calls.

    The steps are order-dependent (e.g. instance-count assertions assume the
    exact objects created earlier), so do not reorder them.
    """
    previous_level = logger.level
    #logger.level = logging.DEBUG
    # Service class published on the server; `count` tracks live instances so
    # the remote create/delete tests below can assert on object lifetime.
    class TestClass(object):
        count = 0
        def __init__(self, name):
            self.name = name
            TestClass.count += 1
        def __del__(self):
            TestClass.count -= 1
        def add(self, x, y):
            return x + y
        def array(self):
            return np.arange(20).astype('int64')
        def sleep(self, t):
            time.sleep(t)
        def get_list(self):
            return [0, 'x', 7]
        def test(self, obj):
            return self.name, obj.name, obj.add(5, 7), obj.array(), obj.get_list()
        def types(self):
            return {'int': 7, 'float': 0.5, 'str': 'xxx', 'bytes': bytes('xxx', 'utf8'),
                    'ndarray': np.arange(10), 'dict': {}, 'list': [],
                    'ObjectProxy': self}
        def type(self, x):
            return type(x).__name__
    server1 = RPCServer()
    server1['test_class'] = TestClass
    server1['my_object'] = TestClass('obj1')
    serve_thread = threading.Thread(target=server1.run_forever, daemon=True)
    serve_thread.start()
    client = RPCClient.get_client(server1.address)
    # test clients are cached
    assert client == RPCClient.get_client(server1.address)
    try:
        # can't manually create client for the same address
        RPCClient(server1.address)
        assert False, "Should have raised KeyError."
    except KeyError:
        pass
    # get proxy to TestClass instance
    obj = client['my_object']
    assert isinstance(obj, ObjectProxy)
    # check equality with duplicate proxy
    obj2 = client['my_object']
    assert obj == obj2
    assert obj._obj_id == obj2._obj_id
    assert obj._ref_id != obj2._ref_id
    # check hashability
    assert obj in {obj2: None}
    assert obj in set([obj2])
    logger.info("-- Test call with sync return --")
    add = obj.add
    assert isinstance(add, ObjectProxy)
    assert add(7, 5) == 12
    # test return types
    for k, v in obj.types().items():
        assert type(v).__name__ == k
        if k != 'ObjectProxy':
            assert obj.type(v) == k
    # NOTE: msgpack converts list to tuple.
    # See: https://github.com/msgpack/msgpack-python/issues/98
    assert obj.get_list() == [0, 'x', 7]
    logger.info("-- Test async return --")
    fut = obj.sleep(0.1, _sync='async')
    assert not fut.done()
    assert fut.result() is None
    logger.info("-- Test no return --")
    assert obj.add(1, 2, _sync='off') is None
    logger.info("-- Test return by proxy --")
    list_prox = obj.get_list(_return_type='proxy')
    assert isinstance(list_prox, ObjectProxy)
    assert list_prox._type_str == "<class 'list'>"
    assert len(list_prox) == 3
    assert list_prox[2] == 7
    logger.info("-- Test proxy access to server --")
    srv = client['self']
    assert srv.address == server1.address
    logger.info("-- Test remote exception raising --")
    try:
        obj.add(7, 'x')
    except RemoteCallException as err:
        if err.type_str != 'TypeError':
            raise
    else:
        raise AssertionError('should have raised TypeError')
    try:
        client.asdffhgk
        raise AssertionError('should have raised AttributeError')
    except AttributeError:
        pass
    logger.info("-- Test deferred getattr --")
    arr = obj.array(_return_type='proxy')
    dt1 = arr.dtype.name._get_value()
    assert isinstance(dt1, str)
    arr._set_proxy_options(defer_getattr=True)
    dt2 = arr.dtype.name
    assert isinstance(dt2, ObjectProxy)
    assert dt2._obj_id == arr._obj_id
    assert dt2._attributes == ('dtype', 'name')
    dt3 = dt2._undefer()
    assert dt3 == dt2
    logger.info("-- Test remote object creation / deletion --")
    class_proxy = client['test_class']
    obj2 = class_proxy('obj2')
    assert class_proxy.count == 2
    assert obj2.add(3, 4) == 7
    obj2._delete()
    handler.flush_records() # log records might have refs to the object
    assert class_proxy.count._get_value() == 1
    try:
        obj2.array()
        assert False, "Should have raised RemoteCallException"
    except RemoteCallException:
        pass
    logger.info("-- Test proxy auto-delete --")
    obj2 = class_proxy('obj2')
    obj2._set_proxy_options(auto_delete=True)
    assert class_proxy.count == 2
    del obj2
    handler.flush_records() # log records might have refs to the object
    assert class_proxy.count._get_value() == 1
    logger.info("-- Test timeouts --")
    try:
        obj.sleep(0.2, _timeout=0.01)
    except TimeoutError:
        pass
    else:
        raise AssertionError('should have raised TimeoutError')
    obj.sleep(0.2, _timeout=0.5)
    logger.info("-- Test result order --")
    a = obj.add(1, 2, _sync='async')
    b = obj.add(3, 4, _sync='async')
    assert b.result() == 7
    assert a.result() == 3
    logger.info("-- Test transfer --")
    arr = np.ones(10, dtype='float32')
    arr_prox = client.transfer(arr)
    assert arr_prox.dtype.name == 'float32'
    print(arr_prox, arr_prox.shape)
    assert arr_prox.shape._get_value() == [10]
    logger.info("-- Test import --")
    import os.path as osp
    rosp = client._import('os.path')
    assert osp.abspath(osp.dirname(__file__)) == rosp.abspath(rosp.dirname(__file__))
    logger.info("-- Test proxy sharing between servers --")
    obj._set_proxy_options(defer_getattr=True)
    r1 = obj.test(obj)
    server2 = RPCServer()
    server2['test_class'] = TestClass
    serve_thread2 = threading.Thread(target=server2.run_forever, daemon=True)
    serve_thread2.start()
    client2 = RPCClient(server2.address)
    client2.default_proxy_options['defer_getattr'] = True
    obj3 = client2['test_class']('obj3')
    # send proxy from server1 to server2
    r2 = obj3.test(obj)
    # check that we have a new client between the two servers
    assert (serve_thread2.ident, server1.address) in RPCClient.clients_by_thread
    # check all communication worked correctly
    assert r1[0] == 'obj1'
    assert r2[0] == 'obj3'
    assert r1[1] == r2[1] == 'obj1'
    assert r1[2] == r2[2] == 12
    assert np.all(r1[3] == r2[3])
    assert r1[4] == r2[4]
    logger.info("-- Test publishing objects --")
    arr = np.arange(5, 10)
    client['arr'] = arr # publish to server1
    s2rpc = client2._import('teleprox')
    s2cli = s2rpc.RPCClient.get_client(client.address) # server2's client for server1
    assert np.all(s2cli['arr'] == arr) # retrieve via server2
    logger.info("-- Test JSON client --")
    # Start a JSON client in a remote process
    cli_proc = ProcessSpawner()
    cli = cli_proc.client._import('teleprox').RPCClient(server2.address, serializer='json')
    # Check everything is ok..
    assert cli.serializer.type._get_value() == 'json'
    assert cli['test_class']('json-tester').add(3, 4) == 7
    cli_proc.kill()
    logger.info("-- Setup reentrant communication test.. --")
    class PingPong(object):
        def set_other(self, o):
            self.other = o
        def pingpong(self, depth=0):
            if depth > 6:
                return "reentrant!"
            return self.other.pingpong(depth+1)
    server1['pp1'] = PingPong()
    server2['pp2'] = PingPong()
    pp1 = client['pp1']
    pp2 = client2['pp2']
    pp1.set_other(pp2)
    pp2.set_other(pp1)
    logger.info("-- Test reentrant communication --")
    assert pp1.pingpong() == 'reentrant!'
    logger.info("-- Shut down servers --")
    client2.close_server()
    serve_thread2.join()
    client.close_server()
    client.close()
    serve_thread.join()
    logger.level = previous_level
@requires_qt
def test_qt_rpc():
    """Start a Qt-integrated RPC server and drive it from a worker thread.

    The worker creates a QLabel remotely (widget construction happens in the
    main GUI thread; the worker only holds proxies), then the main thread
    pumps the Qt event loop until the worker flags completion or 5 s elapse.

    Relies on the module-level `qapp` created under `if qt_available:` above.
    """
    previous_level = logger.level
    #logger.level = logging.DEBUG
    server = QtRPCServer(quit_on_close=False)
    server.run_forever()
    # Start a thread that will remotely request a widget to be created in the
    # GUI thread.
    class TestThread(threading.Thread):
        def __init__(self, addr):
            threading.Thread.__init__(self, daemon=True)
            self.addr = addr
            self.done = False
            # protects `done`, which is polled from the main thread
            self.lock = threading.Lock()
        def run(self):
            client = RPCClient(self.addr)
            qt = client._import('pyqtgraph.Qt')
            # widget creation happens in main GUI thread; we are working with
            # proxies from here.
            self.l = qt.QtGui.QLabel('remote-controlled label')
            self.l.show()
            time.sleep(0.3)
            self.l.hide()
            with self.lock:
                self.done = True
    thread = TestThread(server.address)
    thread.start()
    start = time.time()
    while True:
        with thread.lock:
            if thread.done:
                break
        assert time.time() < start + 5.0, "Thread did not finish within 5 sec."
        time.sleep(0.01)
        qapp.processEvents()
    assert 'QLabel' in thread.l._type_str
    server.close()
    logger.level = previous_level
def test_disconnect():
    """Check client/server disconnect handling in four scenarios:

    1. graceful server shutdown notifies clients;
    2. clients notice closure even when the server exits without closing;
    3. a dead server produces a TimeoutError rather than a hang;
    4. a server with only dead clients still shuts down promptly.
    """
    #~ logger.level = logging.DEBUG
    # Clients receive notification when server disconnects gracefully
    server_proc = ProcessSpawner()
    client_proc = ProcessSpawner()
    cli = client_proc.client._import('teleprox').RPCClient(server_proc.client.address)
    cli.close_server()
    assert cli.disconnected() is True
    assert server_proc.client.disconnected() is True
    try:
        print(server_proc.client.ping())
        assert False, "Expected RuntimeError"
    except RuntimeError:
        pass
    # add by Sam: force the end of process
    server_proc.kill()
    # Clients receive closure messages even if the server exits without closing
    server_proc = ProcessSpawner()
    server_proc.client['self']._closed = 'sabotage!'
    time.sleep(0.1)
    assert server_proc.client.disconnected() is True
    # add by Sam: force the end of process
    server_proc.kill()
    # Clients gracefully handle sudden death of server (with timeout)
    server_proc = ProcessSpawner()
    server_proc.kill()
    try:
        server_proc.client.ping(timeout=1)
        assert False, "Expected TimeoutError"
    except TimeoutError:
        pass
    # server doesn't hang up if clients are not available to receive disconnect
    # message
    server_proc = ProcessSpawner()
    for i in range(4):
        # create a bunch of dead clients
        cp = ProcessSpawner()
        cli = cp.client._import('teleprox').RPCClient(server_proc.client.address)
        cp.kill()
    start = time.time()
    server_proc.client.close_server()
    assert time.time() - start < 1.0
    assert server_proc.client.disconnected() == True
    # add by Sam: force the end of process
    server_proc.kill()
if __name__ == '__main__':
    # Run a single test manually when executed as a script (normally a test
    # runner collects these functions instead).
    #~ test_rpc()
    test_qt_rpc()
    #~ test_disconnect()
| [
"logging.getLogger",
"teleprox.RPCClient",
"time.sleep",
"teleprox.ProcessSpawner",
"teleprox.QtRPCServer",
"numpy.arange",
"threading.Thread.__init__",
"threading.Lock",
"teleprox.log.set_process_name",
"numpy.ones",
"teleprox.log.start_log_server",
"teleprox.log.RPCLogHandler",
"os.path.di... | [((558, 577), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (575, 577), False, 'import threading, time, logging\n'), ((608, 632), 'teleprox.log.start_log_server', 'start_log_server', (['logger'], {}), '(logger)\n', (624, 632), False, 'from teleprox.log import RPCLogHandler, set_process_name, set_thread_name, start_log_server\n'), ((692, 707), 'teleprox.log.RPCLogHandler', 'RPCLogHandler', ([], {}), '()\n', (705, 707), False, 'from teleprox.log import RPCLogHandler, set_process_name, set_thread_name, start_log_server\n'), ((791, 823), 'teleprox.log.set_process_name', 'set_process_name', (['"""main_process"""'], {}), "('main_process')\n", (807, 823), False, 'from teleprox.log import RPCLogHandler, set_process_name, set_thread_name, start_log_server\n'), ((824, 854), 'teleprox.log.set_thread_name', 'set_thread_name', (['"""main_thread"""'], {}), "('main_thread')\n", (839, 854), False, 'from teleprox.log import RPCLogHandler, set_process_name, set_thread_name, start_log_server\n'), ((1907, 1918), 'teleprox.RPCServer', 'RPCServer', ([], {}), '()\n', (1916, 1918), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((2021, 2078), 'threading.Thread', 'threading.Thread', ([], {'target': 'server1.run_forever', 'daemon': '(True)'}), '(target=server1.run_forever, daemon=True)\n', (2037, 2078), False, 'import threading, time, logging\n'), ((2122, 2159), 'teleprox.RPCClient.get_client', 'RPCClient.get_client', (['server1.address'], {}), '(server1.address)\n', (2142, 2159), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((5933, 5961), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': '"""float32"""'}), "(10, dtype='float32')\n", (5940, 5961), True, 'import numpy as np\n'), ((6459, 6470), 'teleprox.RPCServer', 'RPCServer', ([], {}), '()\n', (6468, 6470), False, 'from teleprox import RPCClient, RemoteCallException, 
RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((6529, 6586), 'threading.Thread', 'threading.Thread', ([], {'target': 'server2.run_forever', 'daemon': '(True)'}), '(target=server2.run_forever, daemon=True)\n', (6545, 6586), False, 'import threading, time, logging\n'), ((6632, 6658), 'teleprox.RPCClient', 'RPCClient', (['server2.address'], {}), '(server2.address)\n', (6641, 6658), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((7147, 7169), 'numpy.all', 'np.all', (['(r1[3] == r2[3])'], {}), '(r1[3] == r2[3])\n', (7153, 7169), True, 'import numpy as np\n'), ((7260, 7276), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (7269, 7276), True, 'import numpy as np\n'), ((7461, 7488), 'numpy.all', 'np.all', (["(s2cli['arr'] == arr)"], {}), "(s2cli['arr'] == arr)\n", (7467, 7488), True, 'import numpy as np\n'), ((7617, 7633), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (7631, 7633), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((8792, 8824), 'teleprox.QtRPCServer', 'QtRPCServer', ([], {'quit_on_close': '(False)'}), '(quit_on_close=False)\n', (8803, 8824), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((9702, 9713), 'time.time', 'time.time', ([], {}), '()\n', (9711, 9713), False, 'import threading, time, logging\n'), ((10190, 10206), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (10204, 10206), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((10230, 10246), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (10244, 10246), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((10771, 10787), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', 
(10785, 10787), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((10845, 10860), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10855, 10860), False, 'import threading, time, logging\n'), ((11078, 11094), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (11092, 11094), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((11373, 11389), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (11387, 11389), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((11601, 11612), 'time.time', 'time.time', ([], {}), '()\n', (11610, 11612), False, 'import threading, time, logging\n'), ((2216, 2253), 'teleprox.RPCClient.get_client', 'RPCClient.get_client', (['server1.address'], {}), '(server1.address)\n', (2236, 2253), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((2331, 2357), 'teleprox.RPCClient', 'RPCClient', (['server1.address'], {}), '(server1.address)\n', (2340, 2357), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((9894, 9910), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (9904, 9910), False, 'import threading, time, logging\n'), ((11467, 11483), 'teleprox.ProcessSpawner', 'ProcessSpawner', ([], {}), '()\n', (11481, 11483), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((1382, 1395), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (1392, 1395), False, 'import threading, time, logging\n'), ((6250, 6271), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (6261, 6271), True, 'import os.path as osp\n'), ((9038, 9082), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], 
{'daemon': '(True)'}), '(self, daemon=True)\n', (9063, 9082), False, 'import threading, time, logging\n'), ((9166, 9182), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (9180, 9182), False, 'import threading, time, logging\n'), ((9236, 9256), 'teleprox.RPCClient', 'RPCClient', (['self.addr'], {}), '(self.addr)\n', (9245, 9256), False, 'from teleprox import RPCClient, RemoteCallException, RPCServer, QtRPCServer, ObjectProxy, ProcessSpawner\n'), ((9518, 9533), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (9528, 9533), False, 'import threading, time, logging\n'), ((9821, 9832), 'time.time', 'time.time', ([], {}), '()\n', (9830, 9832), False, 'import threading, time, logging\n'), ((11662, 11673), 'time.time', 'time.time', ([], {}), '()\n', (11671, 11673), False, 'import threading, time, logging\n'), ((1735, 1748), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1744, 1748), True, 'import numpy as np\n'), ((1308, 1321), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1317, 1321), True, 'import numpy as np\n')] |
import os,sys
#Please specify the mask_rcnn directory [todo: using json for specify the folder]
#github:https://github.com/matterport/Mask_RCNN
#MASKRCNN_DIR="/home/kiru/common_ws/Mask_RCNN_Mod/"
#sys.path.append(MASKRCNN_DIR)
#sys.path.append(".")
#sys.path.append("./bop_toolkit")
import numpy as np
from mrcnn.config import Config
from mrcnn import utils
from mrcnn.model import log
import skimage
class BopDetectConfig(Config):
    """Mask R-CNN configuration used when training a BOP detector.

    Class-level attributes override the mrcnn ``Config`` defaults; the
    per-dataset values (name, class count, image size) are filled in by
    ``__init__`` before the base class derives its computed attributes.
    """
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)  # anchor side in pixels
    TRAIN_ROIS_PER_IMAGE = 32
    STEPS_PER_EPOCH = 200000 / IMAGES_PER_GPU
    VALIDATION_STEPS = 5
    DETECTION_MIN_CONFIDENCE = 0.5

    def __init__(self, dataset, num_classes, im_width, im_height):
        self.NAME = dataset
        self.NUM_CLASSES = num_classes
        long_side = max(im_width, im_height)
        short_side = min(im_width, im_height)
        self.IMAGE_MAX_DIM = min(long_side, 1024)
        self.IMAGE_MIN_DIM = max(short_side, 480)
        # Snap the max dimension up to the next multiple of 64, as expected
        # by the mrcnn image-resizing pipeline.
        if self.IMAGE_MAX_DIM % 64:
            self.IMAGE_MAX_DIM = (self.IMAGE_MAX_DIM // 64 + 1) * 64
        self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM])
        super().__init__()
class BopInferenceConfig(Config):
    """Mask R-CNN configuration used at inference time on BOP images.

    Uses a very low confidence floor and a permissive NMS threshold so that
    nearly all candidate detections are kept for downstream processing.
    """
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)  # anchor side in pixels
    TRAIN_ROIS_PER_IMAGE = 1
    VALIDATION_STEPS = 5
    DETECTION_MIN_CONFIDENCE = 0.00001
    DETECTION_MAX_INSTANCES = 100
    DETECTION_NMS_THRESHOLD = 0.7

    def __init__(self, dataset, num_classes, im_width, im_height):
        self.NAME = dataset
        self.NUM_CLASSES = num_classes
        longer, shorter = max(im_width, im_height), min(im_width, im_height)
        self.IMAGE_MAX_DIM = min(longer, 1024)
        self.IMAGE_MIN_DIM = max(shorter, 480)
        # Round the max dimension up to a multiple of 64 (expected by the
        # mrcnn image-resizing pipeline).
        if self.IMAGE_MAX_DIM % 64:
            self.IMAGE_MAX_DIM = (self.IMAGE_MAX_DIM // 64 + 1) * 64
        self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM])
        super().__init__()
class BopDataset(utils.Dataset):
    """mrcnn Dataset backed by a BOP-style training directory.

    Expects ``train_dir`` to contain RGB ``*.png`` images and, under
    ``train_dir/mask/``, one ``*.npy`` instance mask per image (same stem).
    """

    def set_dataset(self, dataset, model_ids, train_dir):
        """Register the dataset name, object model ids and image directory.

        One mrcnn class is added per BOP object model; class ids start at 1
        (0 is reserved for background).
        """
        self.dataset = dataset
        self.model_ids = model_ids
        self.train_dir = train_dir
        self.n_class = self.model_ids.shape[0]
        for i in range(self.model_ids.shape[0]):
            self.add_class(self.dataset, i + 1, "{:02d}".format(self.model_ids[i]))

    def load_dataset(self):
        """Scan ``train_dir`` and register every image with its mask file."""
        self.class_map = self.model_ids
        self.gts = []
        self.mask_fns = []
        files = sorted(os.listdir(self.train_dir))
        n_img = 0
        for file in files:
            if file.endswith(".png"):
                img_path = os.path.join(self.train_dir, file)
                mask_fn = file.replace(".png", ".npy")
                mask_path = os.path.join(self.train_dir + "/mask/", mask_fn)
                self.add_image(self.dataset, image_id=n_img, path=img_path)
                self.mask_fns.append(mask_path)
                n_img += 1
        self.n_real = len(self.mask_fns)

    def load_image(self, image_id):
        """Load the specified image and return a [H, W, 3] numpy array."""
        return skimage.io.imread(self.image_info[image_id]['path'])

    def image_reference(self, image_id):
        """Return the reference string of an image, deferring to the base
        class for images from other sources."""
        info = self.image_info[image_id]
        if info["source"] == self.dataset:
            return info[self.dataset]
        # Fixed: the previous unbound-super call
        # (`super(self.__class__).image_reference(self, image_id)`) raised
        # AttributeError and dropped the return value.
        return super().image_reference(image_id)

    def load_mask(self, image_id):
        """Generate instance masks and class ids for the given image.

        The stored mask encodes background as 0 and instance i as i+1;
        after the ``mask - 1`` shift, instance index modulo ``n_class``
        gives the 0-based class, stored 1-based for mrcnn.
        """
        mask = np.load(self.mask_fns[image_id])
        n_inst = 0
        # ``bool`` / ``np.int32`` replace the ``np.bool`` alias removed in
        # NumPy 1.24 (deprecated since 1.20).
        mask_gt = np.zeros((mask.shape[0], mask.shape[1], np.max(mask) + 1), dtype=bool)
        class_ids = np.zeros(np.max(mask) + 1, dtype=np.int32)
        mask = mask - 1  # -1: background; 0..N-1: instance, instance % n_class = class index
        for i in np.arange(0, np.max(mask) + 1):
            mask_temp = (mask == i)
            if np.sum(mask_temp) > 0:
                mask_gt[mask_temp, n_inst] = 1
                class_ids[n_inst] = (i % self.n_class) + 1
                n_inst += 1
        mask_gt = mask_gt[:, :, :n_inst]
        class_ids = class_ids[:n_inst]
        return mask_gt.astype(bool), class_ids.astype(np.int32)
| [
"os.listdir",
"os.path.join",
"numpy.max",
"numpy.array",
"skimage.io.imread",
"numpy.sum",
"numpy.load"
] | [((1306, 1356), 'numpy.array', 'np.array', (['[self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM]'], {}), '([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM])\n', (1314, 1356), True, 'import numpy as np\n'), ((2320, 2370), 'numpy.array', 'np.array', (['[self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM]'], {}), '([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM])\n', (2328, 2370), True, 'import numpy as np\n'), ((3624, 3676), 'skimage.io.imread', 'skimage.io.imread', (["self.image_info[image_id]['path']"], {}), "(self.image_info[image_id]['path'])\n", (3641, 3676), False, 'import skimage\n'), ((4115, 4131), 'numpy.load', 'np.load', (['mask_fn'], {}), '(mask_fn)\n', (4122, 4131), True, 'import numpy as np\n'), ((2914, 2940), 'os.listdir', 'os.listdir', (['self.train_dir'], {}), '(self.train_dir)\n', (2924, 2940), False, 'import os, sys\n'), ((3063, 3097), 'os.path.join', 'os.path.join', (['self.train_dir', 'file'], {}), '(self.train_dir, file)\n', (3075, 3097), False, 'import os, sys\n'), ((3179, 3227), 'os.path.join', 'os.path.join', (["(self.train_dir + '/mask/')", 'mask_fn'], {}), "(self.train_dir + '/mask/', mask_fn)\n", (3191, 3227), False, 'import os, sys\n'), ((4260, 4272), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (4266, 4272), True, 'import numpy as np\n'), ((4652, 4664), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (4658, 4664), True, 'import numpy as np\n'), ((4719, 4736), 'numpy.sum', 'np.sum', (['mask_temp'], {}), '(mask_temp)\n', (4725, 4736), True, 'import numpy as np\n'), ((4205, 4217), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (4211, 4217), True, 'import numpy as np\n')] |
#
# cmap.py -- color maps for fits viewing
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import absolute_import, print_function
from .util import six
import warnings
import numpy as np
__all__ = ['ColorMap', 'add_cmap', 'get_cmap', 'has_cmap', 'get_names',
'matplotlib_to_ginga_cmap', 'add_matplotlib_cmap',
'add_matplotlib_cmaps']
# Some built in colormaps
cmap_soss = (
(0.000000, 0.000000, 0.000000),
(0.003922, 0.003007, 0.000000),
(0.007843, 0.006013, 0.000000),
(0.011765, 0.009020, 0.000000),
(0.015686, 0.012026, 0.000000),
(0.019608, 0.015033, 0.000000),
(0.023529, 0.018039, 0.000000),
(0.027451, 0.021046, 0.000000),
(0.031373, 0.024052, 0.000000),
(0.035294, 0.027059, 0.000000),
(0.039216, 0.030065, 0.000000),
(0.043137, 0.033072, 0.000000),
(0.047059, 0.036078, 0.000000),
(0.050980, 0.039085, 0.000000),
(0.054902, 0.042092, 0.000000),
(0.058824, 0.045098, 0.000000),
(0.062745, 0.048105, 0.000000),
(0.066667, 0.051111, 0.000000),
(0.070588, 0.054118, 0.000000),
(0.074510, 0.057124, 0.000000),
(0.078431, 0.060131, 0.000000),
(0.082353, 0.063137, 0.000000),
(0.086275, 0.066144, 0.000000),
(0.090196, 0.069150, 0.000000),
(0.094118, 0.072157, 0.000000),
(0.098039, 0.075163, 0.000000),
(0.101961, 0.078170, 0.000000),
(0.105882, 0.081176, 0.000000),
(0.109804, 0.084183, 0.000000),
(0.113725, 0.087190, 0.000000),
(0.117647, 0.090196, 0.000000),
(0.121569, 0.093203, 0.000000),
(0.125490, 0.096209, 0.000000),
(0.129412, 0.099216, 0.000000),
(0.133333, 0.102222, 0.000000),
(0.137255, 0.105229, 0.000000),
(0.141176, 0.108235, 0.000000),
(0.145098, 0.111242, 0.000000),
(0.149020, 0.114248, 0.000000),
(0.152941, 0.117255, 0.000000),
(0.156863, 0.120261, 0.000000),
(0.160784, 0.123268, 0.000000),
(0.164706, 0.126275, 0.000000),
(0.168627, 0.129281, 0.000000),
(0.172549, 0.132288, 0.000000),
(0.176471, 0.135294, 0.000000),
(0.180392, 0.138301, 0.000000),
(0.184314, 0.141307, 0.000000),
(0.188235, 0.144314, 0.000000),
(0.192157, 0.147320, 0.000000),
(0.196078, 0.150327, 0.000000),
(0.200000, 0.153333, 0.000000),
(0.203922, 0.156340, 0.000000),
(0.207843, 0.159346, 0.000000),
(0.211765, 0.162353, 0.000000),
(0.215686, 0.165359, 0.000000),
(0.219608, 0.168366, 0.000000),
(0.223529, 0.171373, 0.000000),
(0.227451, 0.174379, 0.000000),
(0.231373, 0.177386, 0.000000),
(0.235294, 0.180392, 0.000000),
(0.239216, 0.183399, 0.000000),
(0.243137, 0.186405, 0.000000),
(0.247059, 0.189412, 0.000000),
(0.250980, 0.192418, 0.000000),
(0.254902, 0.195425, 0.000000),
(0.258824, 0.198431, 0.000000),
(0.262745, 0.201438, 0.000000),
(0.266667, 0.204444, 0.000000),
(0.270588, 0.207451, 0.000000),
(0.274510, 0.210458, 0.000000),
(0.278431, 0.213464, 0.000000),
(0.282353, 0.216471, 0.000000),
(0.286275, 0.219477, 0.000000),
(0.290196, 0.222484, 0.000000),
(0.294118, 0.225490, 0.000000),
(0.298039, 0.228497, 0.000000),
(0.301961, 0.231503, 0.000000),
(0.305882, 0.234510, 0.000000),
(0.309804, 0.237516, 0.000000),
(0.313725, 0.240523, 0.000000),
(0.317647, 0.243529, 0.000000),
(0.321569, 0.246536, 0.000000),
(0.325490, 0.249542, 0.000000),
(0.329412, 0.252549, 0.000000),
(0.333333, 0.255556, 0.000000),
(0.337255, 0.258562, 0.000000),
(0.341176, 0.261569, 0.000000),
(0.345098, 0.264575, 0.000000),
(0.349020, 0.267582, 0.000000),
(0.352941, 0.270588, 0.000000),
(0.356863, 0.273595, 0.000000),
(0.360784, 0.276601, 0.000000),
(0.364706, 0.279608, 0.000000),
(0.368627, 0.282614, 0.000000),
(0.372549, 0.285621, 0.000000),
(0.376471, 0.288627, 0.000000),
(0.380392, 0.291634, 0.000000),
(0.384314, 0.294641, 0.000000),
(0.388235, 0.297647, 0.000000),
(0.392157, 0.300654, 0.000000),
(0.396078, 0.303660, 0.000000),
(0.400000, 0.306667, 0.000000),
(0.403922, 0.309673, 0.000000),
(0.407843, 0.312680, 0.000000),
(0.411765, 0.315686, 0.000000),
(0.415686, 0.318693, 0.000000),
(0.419608, 0.321699, 0.000000),
(0.423529, 0.324706, 0.000000),
(0.427451, 0.327712, 0.000000),
(0.431373, 0.330719, 0.000000),
(0.435294, 0.333725, 0.000000),
(0.439216, 0.336732, 0.000000),
(0.443137, 0.339739, 0.000000),
(0.447059, 0.342745, 0.000000),
(0.450980, 0.345752, 0.000000),
(0.454902, 0.348758, 0.000000),
(0.458824, 0.351765, 0.000000),
(0.462745, 0.354771, 0.000000),
(0.466667, 0.357778, 0.000000),
(0.470588, 0.360784, 0.000000),
(0.474510, 0.363791, 0.000000),
(0.478431, 0.366797, 0.000000),
(0.482353, 0.369804, 0.000000),
(0.486275, 0.372810, 0.000000),
(0.490196, 0.375817, 0.000000),
(0.494118, 0.378824, 0.000000),
(0.498039, 0.381830, 0.000000),
(0.501961, 0.384837, 0.000000),
(0.505882, 0.387843, 0.000000),
(0.509804, 0.390850, 0.000000),
(0.513725, 0.393856, 0.000000),
(0.517647, 0.396863, 0.000000),
(0.521569, 0.399869, 0.000000),
(0.525490, 0.402876, 0.000000),
(0.529412, 0.405882, 0.000000),
(0.533333, 0.408889, 0.000000),
(0.537255, 0.411895, 0.000000),
(0.541176, 0.414902, 0.000000),
(0.545098, 0.417908, 0.000000),
(0.549020, 0.420915, 0.000000),
(0.552941, 0.423922, 0.000000),
(0.556863, 0.426928, 0.000000),
(0.560784, 0.429935, 0.000000),
(0.564706, 0.432941, 0.000000),
(0.568627, 0.435948, 0.000000),
(0.572549, 0.438954, 0.000000),
(0.576471, 0.441961, 0.000000),
(0.580392, 0.444967, 0.000000),
(0.584314, 0.447974, 0.000000),
(0.588235, 0.450980, 0.000000),
(0.592157, 0.453987, 0.000000),
(0.596078, 0.456993, 0.000000),
(0.600000, 0.460000, 0.000000),
(0.603922, 0.463007, 0.000000),
(0.607843, 0.466013, 0.000000),
(0.611765, 0.469020, 0.000000),
(0.615686, 0.472026, 0.000000),
(0.619608, 0.475033, 0.000000),
(0.623529, 0.478039, 0.000000),
(0.627451, 0.481046, 0.000000),
(0.631373, 0.484052, 0.000000),
(0.635294, 0.487059, 0.000000),
(0.639216, 0.490065, 0.000000),
(0.643137, 0.493072, 0.000000),
(0.647059, 0.496078, 0.000000),
(0.650980, 0.499085, 0.000000),
(0.654902, 0.502092, 0.000000),
(0.658824, 0.505098, 0.000000),
(0.662745, 0.508105, 0.000000),
(0.666667, 0.511111, 0.000000),
(0.670588, 0.514118, 0.000000),
(0.674510, 0.517124, 0.000000),
(0.678431, 0.520131, 0.000000),
(0.682353, 0.523137, 0.000000),
(0.686275, 0.526144, 0.000000),
(0.690196, 0.529150, 0.000000),
(0.694118, 0.532157, 0.000000),
(0.698039, 0.535163, 0.000000),
(0.701961, 0.538170, 0.000000),
(0.705882, 0.541176, 0.000000),
(0.709804, 0.544183, 0.000000),
(0.713725, 0.547190, 0.000000),
(0.717647, 0.550196, 0.000000),
(0.721569, 0.553203, 0.000000),
(0.725490, 0.556209, 0.000000),
(0.729412, 0.559216, 0.000000),
(0.733333, 0.562222, 0.000000),
(0.737255, 0.565229, 0.000000),
(0.741176, 0.568235, 0.000000),
(0.745098, 0.571242, 0.000000),
(0.749020, 0.574248, 0.000000),
(0.752941, 0.577255, 0.000000),
(0.756863, 0.580261, 0.000000),
(0.760784, 0.583268, 0.000000),
(0.764706, 0.586275, 0.000000),
(0.768627, 0.589281, 0.000000),
(0.772549, 0.592288, 0.000000),
(0.776471, 0.595294, 0.000000),
(0.780392, 0.598301, 0.000000),
(0.784314, 0.601307, 0.000000),
(0.788235, 0.604314, 0.000000),
(0.792157, 0.607320, 0.000000),
(0.796078, 0.610327, 0.000000),
(0.800000, 0.613333, 0.000000),
(0.803922, 0.616340, 0.000000),
(0.807843, 0.619346, 0.000000),
(0.811765, 0.622353, 0.000000),
(0.815686, 0.625359, 0.000000),
(0.819608, 0.628366, 0.000000),
(0.823529, 0.631373, 0.000000),
(0.827451, 0.634379, 0.000000),
(0.831373, 0.637386, 0.000000),
(0.835294, 0.640392, 0.000000),
(0.839216, 0.643399, 0.000000),
(0.843137, 0.646405, 0.000000),
(0.847059, 0.649412, 0.000000),
(0.850980, 0.652418, 0.000000),
(0.854902, 0.655425, 0.000000),
(0.858824, 0.658431, 0.000000),
(0.862745, 0.661438, 0.000000),
(0.866667, 0.664444, 0.000000),
(0.870588, 0.677451, 0.000000),
(0.874510, 0.680458, 0.000000),
(0.878431, 0.693464, 0.000000),
(0.882353, 0.706471, 0.000000),
(0.886275, 0.719477, 0.000000),
(0.890196, 0.722484, 0.000000),
(0.894118, 0.735490, 0.000000),
(0.898039, 0.748497, 0.000000),
(0.901961, 0.751503, 0.000000),
(0.905882, 0.764510, 0.000000),
(0.909804, 0.777516, 0.000000),
(0.913725, 0.780523, 0.000000),
(0.917647, 0.793529, 0.000000),
(0.921569, 0.806536, 0.000000),
(0.925490, 0.819542, 0.000000),
(0.929412, 0.822549, 0.000000),
(0.933333, 0.835556, 0.000000),
(0.937255, 0.848562, 0.000000),
(0.941176, 0.851569, 0.000000),
(0.945098, 0.864575, 0.000000),
(0.949020, 0.877582, 0.000000),
(0.952941, 0.880588, 0.000000),
(0.956863, 0.893595, 0.000000),
(0.960784, 0.906601, 0.000000),
(0.964706, 0.919608, 0.000000),
(0.968627, 0.922614, 0.000000),
(0.972549, 0.935621, 0.000000),
(0.976471, 0.948627, 0.000000),
(0.980392, 0.951634, 0.000000),
(0.984314, 0.964641, 0.000000),
(0.988235, 0.977647, 0.000000),
(0.992157, 0.980654, 0.000000),
(0.996078, 0.993660, 0.000000),
(1.00000, 1.00000, 1.00000),
)
cmap_idl2 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.14118, 0.00000),
(0.00000, 0.28235, 0.00000),
(0.00000, 0.29412, 0.00000),
(0.00000, 0.30980, 0.00000),
(0.00000, 0.32157, 0.00000),
(0.00000, 0.33725, 0.00000),
(0.00000, 0.35294, 0.00000),
(0.00000, 0.36471, 0.00000),
(0.00000, 0.38039, 0.00000),
(0.00000, 0.39216, 0.00000),
(0.00000, 0.40784, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.45882, 0.00000),
(0.00000, 0.49412, 0.00000),
(0.00000, 0.52941, 0.00000),
(0.00000, 0.56471, 0.00000),
(0.00000, 0.60000, 0.00000),
(0.00000, 0.63529, 0.00000),
(0.00000, 0.67059, 0.00000),
(0.00000, 0.70588, 0.00000),
(0.00000, 0.74118, 0.00000),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.81176, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.02353, 0.97647, 0.00000),
(0.04706, 0.96471, 0.00000),
(0.07059, 0.95294, 0.00000),
(0.09412, 0.94118, 0.00000),
(0.11765, 0.91765, 0.00000),
(0.14118, 0.89412, 0.00000),
(0.16471, 0.87059, 0.00000),
(0.18824, 0.84706, 0.00000),
(0.21176, 0.82353, 0.00000),
(0.23529, 0.80000, 0.00000),
(0.25882, 0.77647, 0.00000),
(0.28235, 0.75294, 0.00000),
(0.30588, 0.72941, 0.00000),
(0.32941, 0.70588, 0.00000),
(0.35294, 0.68235, 0.00000),
(0.37647, 0.65882, 0.00000),
(0.40000, 0.63529, 0.00000),
(0.42353, 0.61176, 0.00000),
(0.44706, 0.58824, 0.00000),
(0.47059, 0.56471, 0.00000),
(0.49412, 0.54118, 0.00000),
(0.51765, 0.51765, 0.00000),
(0.54118, 0.49412, 0.00000),
(0.56471, 0.47059, 0.00000),
(0.58824, 0.44706, 0.00000),
(0.61176, 0.42353, 0.00000),
(0.63529, 0.40000, 0.00000),
(0.65882, 0.37647, 0.00000),
(0.68235, 0.35294, 0.00000),
(0.70588, 0.32941, 0.00000),
(0.72941, 0.30588, 0.00000),
(0.75294, 0.28235, 0.00000),
(0.77647, 0.25882, 0.00000),
(0.80000, 0.23529, 0.00000),
(0.82353, 0.21176, 0.00000),
(0.84706, 0.18824, 0.00000),
(0.87059, 0.16471, 0.00000),
(0.89412, 0.14118, 0.00000),
(0.91765, 0.11765, 0.00000),
(0.94118, 0.09412, 0.00000),
(0.95294, 0.07059, 0.00000),
(0.96471, 0.04706, 0.00000),
(0.97647, 0.02353, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00392),
(0.98431, 0.00000, 0.01176),
(0.98039, 0.00000, 0.01961),
(0.97647, 0.00000, 0.02745),
(0.97255, 0.00000, 0.03529),
(0.97255, 0.00000, 0.03922),
(0.97255, 0.00000, 0.04706),
(0.97255, 0.00000, 0.05490),
(0.97255, 0.00000, 0.06275),
(0.96863, 0.00000, 0.07059),
(0.96471, 0.00000, 0.07843),
(0.96078, 0.00000, 0.08627),
(0.95686, 0.00000, 0.09804),
(0.95294, 0.00000, 0.10588),
(0.94902, 0.00000, 0.11373),
(0.94510, 0.00000, 0.12157),
(0.94118, 0.00000, 0.13333),
(0.94118, 0.00000, 0.13725),
(0.93725, 0.00000, 0.14510),
(0.93333, 0.00000, 0.15294),
(0.92941, 0.00000, 0.16078),
(0.92549, 0.00000, 0.16863),
(0.92549, 0.00000, 0.17647),
(0.92549, 0.00000, 0.18431),
(0.92549, 0.00000, 0.19608),
(0.92157, 0.00000, 0.20392),
(0.91765, 0.00000, 0.21176),
(0.91373, 0.00000, 0.21961),
(0.90980, 0.00000, 0.23137),
(0.90588, 0.00000, 0.23922),
(0.90196, 0.00000, 0.24706),
(0.89804, 0.00000, 0.25490),
(0.89412, 0.00000, 0.26275),
(0.89412, 0.00000, 0.26667),
(0.89412, 0.00000, 0.27451),
(0.89412, 0.00000, 0.28235),
(0.89412, 0.00000, 0.29020),
(0.89020, 0.00000, 0.29804),
(0.88627, 0.00000, 0.30588),
(0.88235, 0.00000, 0.31373),
(0.87843, 0.00000, 0.32549),
(0.87451, 0.00000, 0.33333),
(0.87059, 0.00000, 0.34118),
(0.86667, 0.00000, 0.34902),
(0.86275, 0.00392, 0.36078),
(0.85882, 0.00392, 0.36863),
(0.85490, 0.00392, 0.37647),
(0.85098, 0.00392, 0.38431),
(0.84706, 0.00000, 0.39608),
(0.84706, 0.00000, 0.40000),
(0.84706, 0.00000, 0.40784),
(0.84706, 0.00000, 0.41569),
(0.84706, 0.00000, 0.42353),
(0.84314, 0.00000, 0.43137),
(0.83922, 0.00000, 0.43922),
(0.83529, 0.00000, 0.44706),
(0.83137, 0.00000, 0.45490),
(0.82745, 0.00000, 0.46275),
(0.82353, 0.00000, 0.47059),
(0.81961, 0.00000, 0.47843),
(0.81569, 0.00000, 0.49020),
(0.81176, 0.00000, 0.49804),
(0.80784, 0.00000, 0.50588),
(0.80392, 0.00000, 0.51373),
(0.80000, 0.00000, 0.52549),
(0.80000, 0.00000, 0.52941),
(0.80000, 0.00000, 0.53725),
(0.80000, 0.00000, 0.54510),
(0.80000, 0.00000, 0.55294),
(0.79608, 0.00000, 0.56078),
(0.79216, 0.00000, 0.56863),
(0.78824, 0.00000, 0.57647),
(0.78431, 0.00000, 0.58824),
(0.78039, 0.00000, 0.59608),
(0.77647, 0.00000, 0.60392),
(0.77255, 0.00000, 0.61176),
(0.76863, 0.00000, 0.62353),
(0.76863, 0.00000, 0.62745),
(0.76863, 0.00000, 0.63529),
(0.76863, 0.00000, 0.63922),
(0.76863, 0.00000, 0.64706),
(0.76471, 0.00000, 0.65490),
(0.76078, 0.00000, 0.66275),
(0.75686, 0.00000, 0.67059),
(0.75294, 0.00000, 0.68235),
(0.74902, 0.00000, 0.69020),
(0.74510, 0.00000, 0.69804),
(0.74118, 0.00000, 0.70588),
(0.73725, 0.00000, 0.71765),
(0.73333, 0.00000, 0.72549),
(0.72941, 0.00000, 0.73333),
(0.72549, 0.00000, 0.74118),
(0.72157, 0.00000, 0.75294),
(0.72157, 0.00000, 0.75686),
(0.72157, 0.00000, 0.76471),
(0.72157, 0.00000, 0.77255),
(0.72157, 0.00000, 0.78039),
(0.71765, 0.00000, 0.78824),
(0.71373, 0.00000, 0.79608),
(0.70980, 0.00000, 0.80392),
(0.70588, 0.00000, 0.81569),
(0.70196, 0.00000, 0.82353),
(0.69804, 0.00000, 0.83137),
(0.69412, 0.00000, 0.83922),
(0.69020, 0.00000, 0.84706),
(0.69020, 0.00000, 0.85098),
(0.69020, 0.00000, 0.85882),
(0.69020, 0.00000, 0.86667),
(0.69020, 0.00000, 0.87451),
(0.68627, 0.00000, 0.88235),
(0.68235, 0.00000, 0.89020),
(0.67843, 0.00000, 0.89804),
(0.67451, 0.00000, 0.90980),
(0.67059, 0.00000, 0.91765),
(0.66667, 0.00000, 0.92549),
(0.66275, 0.00000, 0.93333),
(0.65882, 0.00000, 0.94510),
(0.65490, 0.00000, 0.95294),
(0.65098, 0.00000, 0.96078),
(0.64706, 0.00000, 0.96863),
(0.64314, 0.00000, 0.98039),
(0.64314, 0.00000, 0.98431),
(0.64314, 0.00000, 0.98824),
(0.64314, 0.00000, 0.99216),
(0.64314, 0.00000, 1.00000),
(0.63922, 0.00000, 1.00000),
(0.63529, 0.00000, 1.00000),
(0.63137, 0.00000, 1.00000),
(0.62745, 0.00000, 1.00000),
(0.62353, 0.00000, 1.00000),
(0.61961, 0.00000, 1.00000),
(0.61569, 0.00000, 1.00000),
(0.61176, 0.00000, 1.00000),
(0.60784, 0.00000, 1.00000),
(0.60392, 0.00000, 1.00000),
(0.60000, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59216, 0.00000, 1.00000),
(0.58824, 0.00000, 1.00000),
(0.58431, 0.00000, 1.00000),
(0.58039, 0.00000, 1.00000),
(0.59216, 0.03137, 1.00000),
(0.60392, 0.06275, 1.00000),
(0.61569, 0.09412, 1.00000),
(0.62745, 0.12549, 1.00000),
(0.63922, 0.15686, 1.00000),
(0.65098, 0.18824, 1.00000),
(0.66275, 0.21961, 1.00000),
(0.67451, 0.25098, 1.00000),
(0.69020, 0.28235, 1.00000),
(0.70588, 0.31373, 1.00000),
(0.72157, 0.34510, 1.00000),
(0.73725, 0.37647, 1.00000),
(0.74902, 0.40784, 1.00000),
(0.76078, 0.43922, 1.00000),
(0.77255, 0.47059, 1.00000),
(0.78431, 0.50196, 1.00000),
(0.79608, 0.52941, 1.00000),
(0.80784, 0.55686, 1.00000),
(0.81961, 0.58431, 1.00000),
(0.83137, 0.61176, 1.00000),
(0.84314, 0.64314, 1.00000),
(0.85490, 0.67451, 1.00000),
(0.86667, 0.70588, 1.00000),
(0.87843, 0.73725, 1.00000),
(0.89412, 0.76863, 1.00000),
(0.90980, 0.80000, 1.00000),
(0.92549, 0.83137, 1.00000),
(0.94118, 0.86275, 1.00000),
(0.95294, 0.89412, 1.00000),
(0.96471, 0.92549, 1.00000),
(0.97647, 0.95686, 1.00000),
(0.98824, 0.98824, 1.00000),
(0.99216, 0.99216, 1.00000),
(0.99608, 0.99608, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow4 = (
(0.00000, 0.00000, 0.01176),
(0.00000, 0.00000, 0.02745),
(0.00000, 0.00000, 0.04314),
(0.00000, 0.00000, 0.05882),
(0.00000, 0.00000, 0.07451),
(0.00000, 0.00000, 0.09020),
(0.00000, 0.00000, 0.10588),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.15294),
(0.00000, 0.00000, 0.16863),
(0.00000, 0.00000, 0.18431),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.21176),
(0.00000, 0.00000, 0.22745),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.25882),
(0.00000, 0.00000, 0.27451),
(0.00000, 0.00000, 0.29020),
(0.00000, 0.00000, 0.30588),
(0.00000, 0.00000, 0.32157),
(0.00000, 0.00000, 0.33725),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.36863),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.41176),
(0.00000, 0.00000, 0.42745),
(0.00000, 0.00000, 0.44314),
(0.00000, 0.00000, 0.45882),
(0.00000, 0.00000, 0.47451),
(0.00000, 0.00000, 0.49020),
(0.00000, 0.00000, 0.50588),
(0.00000, 0.00000, 0.52157),
(0.00000, 0.00000, 0.53725),
(0.00000, 0.00000, 0.55294),
(0.00000, 0.00000, 0.56863),
(0.00000, 0.00000, 0.58431),
(0.00000, 0.00000, 0.60000),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.62745),
(0.00000, 0.00000, 0.64314),
(0.00000, 0.00000, 0.65882),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.69020),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.72157),
(0.00000, 0.00000, 0.73725),
(0.00000, 0.00000, 0.75294),
(0.00000, 0.00000, 0.76863),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.80000),
(0.00000, 0.00000, 0.81176),
(0.00000, 0.00000, 0.82745),
(0.00000, 0.00000, 0.84314),
(0.00000, 0.00000, 0.85882),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.89020),
(0.00000, 0.00000, 0.90588),
(0.00000, 0.00000, 0.92157),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.95294),
(0.00000, 0.00000, 0.96863),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03529, 1.00000),
(0.00000, 0.07059, 1.00000),
(0.00000, 0.10980, 1.00000),
(0.00000, 0.14510, 1.00000),
(0.00000, 0.18039, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.29412, 1.00000),
(0.00000, 0.32941, 1.00000),
(0.00000, 0.36471, 1.00000),
(0.00000, 0.40392, 1.00000),
(0.00000, 0.43922, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.50196, 1.00000),
(0.00000, 0.52549, 1.00000),
(0.00000, 0.54902, 1.00000),
(0.00000, 0.57255, 1.00000),
(0.00000, 0.59608, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.64314, 1.00000),
(0.00000, 0.66667, 1.00000),
(0.00000, 0.69020, 1.00000),
(0.00000, 0.71373, 1.00000),
(0.00000, 0.73725, 1.00000),
(0.00000, 0.76078, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.80000, 1.00000),
(0.00000, 0.81569, 1.00000),
(0.00000, 0.83137, 1.00000),
(0.00000, 0.84706, 1.00000),
(0.00000, 0.86667, 1.00000),
(0.00000, 0.88235, 1.00000),
(0.00000, 0.89804, 1.00000),
(0.00000, 0.91373, 1.00000),
(0.00000, 0.93333, 1.00000),
(0.00000, 0.94902, 1.00000),
(0.00000, 0.96471, 1.00000),
(0.00000, 0.98039, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.97647),
(0.00000, 1.00000, 0.95294),
(0.00000, 1.00000, 0.92941),
(0.00000, 1.00000, 0.90588),
(0.00000, 1.00000, 0.88627),
(0.00000, 1.00000, 0.86275),
(0.00000, 1.00000, 0.83922),
(0.00000, 1.00000, 0.81569),
(0.00000, 1.00000, 0.79608),
(0.00000, 1.00000, 0.77255),
(0.00000, 1.00000, 0.74902),
(0.00000, 1.00000, 0.72549),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65098),
(0.00000, 1.00000, 0.59608),
(0.00000, 1.00000, 0.54118),
(0.00000, 1.00000, 0.48627),
(0.00000, 1.00000, 0.43137),
(0.00000, 1.00000, 0.37647),
(0.00000, 1.00000, 0.32549),
(0.00000, 1.00000, 0.27059),
(0.00000, 1.00000, 0.21569),
(0.00000, 1.00000, 0.16078),
(0.00000, 1.00000, 0.10588),
(0.00000, 1.00000, 0.05098),
(0.00000, 1.00000, 0.00000),
(0.05098, 1.00000, 0.00000),
(0.10588, 1.00000, 0.00000),
(0.16078, 1.00000, 0.00000),
(0.21569, 1.00000, 0.00000),
(0.27059, 1.00000, 0.00000),
(0.32549, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.43137, 1.00000, 0.00000),
(0.48627, 1.00000, 0.00000),
(0.54118, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.65098, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.72549, 1.00000, 0.00000),
(0.74902, 1.00000, 0.00000),
(0.77255, 1.00000, 0.00000),
(0.79608, 1.00000, 0.00000),
(0.81569, 1.00000, 0.00000),
(0.83922, 1.00000, 0.00000),
(0.86275, 1.00000, 0.00000),
(0.88627, 1.00000, 0.00000),
(0.90588, 1.00000, 0.00000),
(0.92941, 1.00000, 0.00000),
(0.95294, 1.00000, 0.00000),
(0.97647, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.97647, 0.00000),
(0.99608, 0.95686, 0.00000),
(0.99608, 0.93333, 0.00000),
(0.99608, 0.91373, 0.00000),
(0.99216, 0.89412, 0.00000),
(0.99216, 0.87059, 0.00000),
(0.99216, 0.85098, 0.00000),
(0.99216, 0.82745, 0.00000),
(0.98824, 0.80784, 0.00000),
(0.98824, 0.78824, 0.00000),
(0.98824, 0.76471, 0.00000),
(0.98824, 0.74510, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.70588, 0.00000),
(0.98824, 0.68627, 0.00000),
(0.98824, 0.66667, 0.00000),
(0.98824, 0.64706, 0.00000),
(0.99216, 0.62745, 0.00000),
(0.99216, 0.60784, 0.00000),
(0.99216, 0.58824, 0.00000),
(0.99216, 0.56863, 0.00000),
(0.99608, 0.54902, 0.00000),
(0.99608, 0.52941, 0.00000),
(0.99608, 0.50980, 0.00000),
(0.99608, 0.49020, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.43137, 0.00000),
(1.00000, 0.39608, 0.00000),
(1.00000, 0.36078, 0.00000),
(1.00000, 0.32549, 0.00000),
(1.00000, 0.28627, 0.00000),
(1.00000, 0.25098, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.18039, 0.00000),
(1.00000, 0.14118, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.07059, 0.00000),
(1.00000, 0.03529, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.05098),
(1.00000, 0.00000, 0.10588),
(1.00000, 0.00000, 0.16078),
(1.00000, 0.00000, 0.21569),
(1.00000, 0.00000, 0.27059),
(1.00000, 0.00000, 0.32549),
(1.00000, 0.00000, 0.37647),
(1.00000, 0.00000, 0.43137),
(1.00000, 0.00000, 0.48627),
(1.00000, 0.00000, 0.54118),
(1.00000, 0.00000, 0.59608),
(1.00000, 0.00000, 0.65098),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.72549),
(1.00000, 0.00000, 0.74902),
(1.00000, 0.00000, 0.77255),
(1.00000, 0.00000, 0.79608),
(1.00000, 0.00000, 0.81569),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.88627),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.92941),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.97647),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.03529, 1.00000),
(1.00000, 0.07059, 1.00000),
(1.00000, 0.10588, 1.00000),
(1.00000, 0.14118, 1.00000),
(1.00000, 0.18039, 1.00000),
(1.00000, 0.21569, 1.00000),
(1.00000, 0.25098, 1.00000),
(1.00000, 0.28627, 1.00000),
(1.00000, 0.32549, 1.00000),
(1.00000, 0.36078, 1.00000),
(1.00000, 0.39608, 1.00000),
(1.00000, 0.43137, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.48627, 1.00000),
(1.00000, 0.50588, 1.00000),
(1.00000, 0.52157, 1.00000),
(1.00000, 0.54118, 1.00000),
(1.00000, 0.56078, 1.00000),
(1.00000, 0.57647, 1.00000),
(1.00000, 0.59608, 1.00000),
(1.00000, 0.61176, 1.00000),
(1.00000, 0.63137, 1.00000),
(1.00000, 0.65098, 1.00000),
(1.00000, 0.66667, 1.00000),
(1.00000, 0.68627, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.74510, 1.00000),
(1.00000, 0.78824, 1.00000),
(1.00000, 0.83137, 1.00000),
(1.00000, 0.87059, 1.00000),
(1.00000, 0.91373, 1.00000),
(1.00000, 0.95686, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_idl4 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00784),
(0.00000, 0.00000, 0.01569),
(0.00000, 0.00000, 0.02353),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.04706),
(0.00000, 0.00000, 0.05490),
(0.00000, 0.00000, 0.06275),
(0.00000, 0.00000, 0.07059),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.08627),
(0.00000, 0.00000, 0.09804),
(0.00000, 0.00000, 0.10588),
(0.00000, 0.00000, 0.11373),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.12941),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.14510),
(0.00000, 0.00000, 0.15294),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.16863),
(0.00000, 0.00000, 0.17647),
(0.00000, 0.00000, 0.18431),
(0.00000, 0.00000, 0.19608),
(0.00000, 0.00000, 0.20392),
(0.00000, 0.00000, 0.21176),
(0.00000, 0.00000, 0.21961),
(0.00000, 0.00000, 0.22745),
(0.00000, 0.00000, 0.23529),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.25098),
(0.00000, 0.00000, 0.25882),
(0.00000, 0.01176, 0.26667),
(0.00000, 0.02353, 0.27451),
(0.00000, 0.03529, 0.28235),
(0.00000, 0.04706, 0.29412),
(0.00000, 0.05882, 0.30196),
(0.00000, 0.07059, 0.30980),
(0.00000, 0.08235, 0.31765),
(0.00000, 0.09804, 0.32549),
(0.00000, 0.10980, 0.33333),
(0.00000, 0.12157, 0.34118),
(0.00000, 0.13333, 0.34902),
(0.00000, 0.14510, 0.35686),
(0.00000, 0.15686, 0.36471),
(0.00000, 0.16863, 0.37255),
(0.00000, 0.18039, 0.38039),
(0.00000, 0.19608, 0.39216),
(0.00000, 0.20784, 0.39216),
(0.00000, 0.21961, 0.39216),
(0.00000, 0.23137, 0.39216),
(0.00000, 0.24314, 0.39216),
(0.00000, 0.25490, 0.39216),
(0.00000, 0.26667, 0.39216),
(0.00000, 0.27843, 0.39216),
(0.00000, 0.29412, 0.39216),
(0.00000, 0.30588, 0.39216),
(0.00000, 0.31765, 0.39216),
(0.00000, 0.32941, 0.39216),
(0.00000, 0.34118, 0.39216),
(0.00000, 0.35294, 0.39216),
(0.00000, 0.36471, 0.39216),
(0.00000, 0.37647, 0.39216),
(0.00000, 0.39216, 0.39216),
(0.00000, 0.40392, 0.39216),
(0.00000, 0.41569, 0.39216),
(0.00000, 0.42745, 0.39216),
(0.00000, 0.43922, 0.39216),
(0.00000, 0.45098, 0.39216),
(0.00000, 0.46275, 0.39216),
(0.00000, 0.47451, 0.39216),
(0.00000, 0.49020, 0.39216),
(0.00000, 0.50196, 0.39216),
(0.00000, 0.51373, 0.39216),
(0.00000, 0.52549, 0.39216),
(0.00000, 0.53725, 0.39216),
(0.00000, 0.54902, 0.39216),
(0.00000, 0.56078, 0.39216),
(0.00000, 0.57255, 0.39216),
(0.00000, 0.58824, 0.39216),
(0.00000, 0.58824, 0.37647),
(0.00000, 0.58824, 0.36471),
(0.00000, 0.58824, 0.35294),
(0.00000, 0.58824, 0.34118),
(0.00000, 0.58824, 0.32941),
(0.00000, 0.58824, 0.31765),
(0.00000, 0.58824, 0.30588),
(0.00000, 0.58824, 0.29412),
(0.00000, 0.58824, 0.27843),
(0.00000, 0.58824, 0.26667),
(0.00000, 0.58824, 0.25490),
(0.00000, 0.58824, 0.24314),
(0.00000, 0.58824, 0.23137),
(0.00000, 0.58824, 0.21961),
(0.00000, 0.58824, 0.20784),
(0.00000, 0.58824, 0.19608),
(0.00000, 0.58431, 0.18039),
(0.00000, 0.58039, 0.16863),
(0.00000, 0.58039, 0.15686),
(0.00000, 0.57647, 0.14510),
(0.00000, 0.57255, 0.13333),
(0.00000, 0.57255, 0.12157),
(0.00000, 0.56863, 0.10980),
(0.00000, 0.56863, 0.09804),
(0.00000, 0.56471, 0.08235),
(0.00000, 0.56078, 0.07059),
(0.00000, 0.56078, 0.05882),
(0.00000, 0.55686, 0.04706),
(0.00000, 0.55294, 0.03529),
(0.00000, 0.55294, 0.02353),
(0.00000, 0.54902, 0.01176),
(0.00000, 0.54902, 0.00000),
(0.02745, 0.53725, 0.00000),
(0.05882, 0.52941, 0.00000),
(0.08627, 0.51765, 0.00000),
(0.11765, 0.50980, 0.00000),
(0.14510, 0.49804, 0.00000),
(0.17647, 0.49020, 0.00000),
(0.20392, 0.47843, 0.00000),
(0.23529, 0.47059, 0.00000),
(0.26275, 0.45882, 0.00000),
(0.29412, 0.45098, 0.00000),
(0.32157, 0.43922, 0.00000),
(0.35294, 0.43137, 0.00000),
(0.38039, 0.41961, 0.00000),
(0.41176, 0.41176, 0.00000),
(0.43922, 0.40000, 0.00000),
(0.47059, 0.39216, 0.00000),
(0.49020, 0.36471, 0.00000),
(0.50980, 0.34118, 0.00000),
(0.52941, 0.31765, 0.00000),
(0.54902, 0.29412, 0.00000),
(0.56863, 0.26667, 0.00000),
(0.58824, 0.24314, 0.00000),
(0.60784, 0.21961, 0.00000),
(0.62745, 0.19608, 0.00000),
(0.64706, 0.16863, 0.00000),
(0.66667, 0.14510, 0.00000),
(0.68627, 0.12157, 0.00000),
(0.70588, 0.09804, 0.00000),
(0.72549, 0.07059, 0.00000),
(0.74510, 0.04706, 0.00000),
(0.76471, 0.02353, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00784, 0.00000),
(0.78824, 0.01569, 0.00000),
(0.78824, 0.02353, 0.00000),
(0.79216, 0.03529, 0.00000),
(0.79216, 0.04314, 0.00000),
(0.79608, 0.05098, 0.00000),
(0.79608, 0.06275, 0.00000),
(0.80000, 0.07059, 0.00000),
(0.80000, 0.07843, 0.00000),
(0.80392, 0.09020, 0.00000),
(0.80392, 0.09804, 0.00000),
(0.80784, 0.10588, 0.00000),
(0.80784, 0.11373, 0.00000),
(0.81176, 0.12549, 0.00000),
(0.81176, 0.13333, 0.00000),
(0.81569, 0.14118, 0.00000),
(0.81569, 0.15294, 0.00000),
(0.81961, 0.16078, 0.00000),
(0.81961, 0.16863, 0.00000),
(0.82353, 0.18039, 0.00000),
(0.82353, 0.18824, 0.00000),
(0.82745, 0.19608, 0.00000),
(0.82745, 0.20784, 0.00000),
(0.83137, 0.21569, 0.00000),
(0.83137, 0.22353, 0.00000),
(0.83529, 0.23137, 0.00000),
(0.83529, 0.24314, 0.00000),
(0.83922, 0.25098, 0.00000),
(0.83922, 0.25882, 0.00000),
(0.84314, 0.27059, 0.00000),
(0.84314, 0.27843, 0.00000),
(0.84706, 0.28627, 0.00000),
(0.84706, 0.29804, 0.00000),
(0.85098, 0.30588, 0.00000),
(0.85098, 0.31373, 0.00000),
(0.85490, 0.32549, 0.00000),
(0.85490, 0.33333, 0.00000),
(0.85882, 0.34118, 0.00000),
(0.85882, 0.34902, 0.00000),
(0.86275, 0.36078, 0.00000),
(0.86275, 0.36863, 0.00000),
(0.86667, 0.37647, 0.00000),
(0.86667, 0.38824, 0.00000),
(0.87059, 0.39608, 0.00000),
(0.87059, 0.40392, 0.00000),
(0.87451, 0.41569, 0.00000),
(0.87451, 0.42353, 0.00000),
(0.87843, 0.43137, 0.00000),
(0.87843, 0.44314, 0.00000),
(0.88235, 0.45098, 0.00000),
(0.88235, 0.45882, 0.00000),
(0.88627, 0.46667, 0.00000),
(0.88627, 0.47843, 0.00000),
(0.89020, 0.48627, 0.00000),
(0.89020, 0.49412, 0.00000),
(0.89412, 0.50588, 0.00000),
(0.89412, 0.51373, 0.00000),
(0.89804, 0.52157, 0.00000),
(0.89804, 0.53333, 0.00000),
(0.90196, 0.54118, 0.00000),
(0.90196, 0.54902, 0.00000),
(0.90588, 0.55686, 0.00000),
(0.90588, 0.56863, 0.00000),
(0.90980, 0.57647, 0.00000),
(0.90980, 0.58431, 0.00000),
(0.91373, 0.59608, 0.00000),
(0.91373, 0.60392, 0.00000),
(0.91765, 0.61176, 0.00000),
(0.91765, 0.62353, 0.00000),
(0.92157, 0.63137, 0.00000),
(0.92157, 0.63922, 0.00000),
(0.92549, 0.65098, 0.00000),
(0.92549, 0.65882, 0.00000),
(0.92941, 0.66667, 0.00000),
(0.92941, 0.67451, 0.00000),
(0.93333, 0.68627, 0.00000),
(0.93333, 0.69412, 0.00000),
(0.93725, 0.70196, 0.00000),
(0.93725, 0.71373, 0.00000),
(0.94118, 0.72157, 0.00000),
(0.94118, 0.72941, 0.00000),
(0.94510, 0.74118, 0.00000),
(0.94510, 0.74902, 0.00000),
(0.94902, 0.75686, 0.00000),
(0.94902, 0.76863, 0.00000),
(0.95294, 0.77647, 0.00000),
(0.95294, 0.78431, 0.00000),
(0.95686, 0.79216, 0.00000),
(0.95686, 0.80392, 0.00000),
(0.96078, 0.81176, 0.00000),
(0.96078, 0.81961, 0.00000),
(0.96471, 0.83137, 0.00000),
(0.96471, 0.83922, 0.00000),
(0.96863, 0.84706, 0.00000),
(0.96863, 0.85882, 0.00000),
(0.97255, 0.86667, 0.00000),
(0.97255, 0.87451, 0.00000),
(0.97647, 0.88627, 0.00000),
(0.97647, 0.89412, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90980, 0.00000),
(0.98431, 0.92157, 0.00000),
(0.98431, 0.92941, 0.00000),
(0.98824, 0.93725, 0.00000),
(0.98824, 0.94902, 0.00000),
(0.99216, 0.95686, 0.00000),
(0.99216, 0.96471, 0.00000),
(0.99608, 0.97647, 0.00000),
(0.99608, 0.98431, 0.00000),
(1.00000, 0.99216, 0.00000),
(1.00000, 1.00000, 0.00000),
)
cmap_idl5 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.05882),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.10196),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.14118),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.18039),
(0.00000, 0.00000, 0.20392),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.26275),
(0.00000, 0.00000, 0.28235),
(0.00000, 0.00000, 0.30588),
(0.00000, 0.00000, 0.32549),
(0.00000, 0.00000, 0.34510),
(0.00000, 0.00000, 0.36471),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.40784),
(0.00000, 0.00000, 0.42745),
(0.00000, 0.00000, 0.44706),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.48627),
(0.00000, 0.00000, 0.50980),
(0.00000, 0.00000, 0.52941),
(0.00000, 0.00000, 0.54902),
(0.00000, 0.00000, 0.56863),
(0.00000, 0.00000, 0.58824),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.63137),
(0.00000, 0.00000, 0.65098),
(0.00000, 0.00000, 0.67059),
(0.00000, 0.00000, 0.69020),
(0.00000, 0.00000, 0.71373),
(0.00000, 0.00000, 0.73333),
(0.00000, 0.00000, 0.75294),
(0.00000, 0.00000, 0.77255),
(0.00000, 0.00000, 0.79216),
(0.00000, 0.00000, 0.81569),
(0.00000, 0.00000, 0.83529),
(0.00000, 0.00000, 0.85490),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.89412),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.95686),
(0.01569, 0.00000, 0.97647),
(0.03529, 0.00000, 1.00000),
(0.05490, 0.00000, 0.98039),
(0.07451, 0.00000, 0.96078),
(0.09020, 0.00000, 0.93725),
(0.10980, 0.00000, 0.91765),
(0.12941, 0.00000, 0.89412),
(0.14902, 0.00000, 0.87451),
(0.16471, 0.00000, 0.85490),
(0.18431, 0.00000, 0.83137),
(0.20392, 0.00000, 0.81176),
(0.22353, 0.00000, 0.78824),
(0.23922, 0.00000, 0.76863),
(0.25882, 0.00000, 0.74510),
(0.27843, 0.00000, 0.72549),
(0.29804, 0.00000, 0.70588),
(0.31765, 0.00000, 0.68235),
(0.31765, 0.00000, 0.66275),
(0.31765, 0.00000, 0.63922),
(0.31765, 0.00000, 0.61961),
(0.31765, 0.00000, 0.59608),
(0.31765, 0.00000, 0.57647),
(0.31765, 0.00000, 0.55686),
(0.31765, 0.00000, 0.53333),
(0.31373, 0.00000, 0.51373),
(0.31373, 0.00000, 0.49020),
(0.31373, 0.00000, 0.47059),
(0.31373, 0.00000, 0.44706),
(0.31373, 0.00000, 0.42745),
(0.31373, 0.00000, 0.40784),
(0.31373, 0.00000, 0.38431),
(0.30980, 0.00000, 0.36471),
(0.32941, 0.00000, 0.34118),
(0.34902, 0.00000, 0.32157),
(0.36863, 0.00000, 0.29804),
(0.38824, 0.00000, 0.27843),
(0.40784, 0.00000, 0.25882),
(0.42745, 0.00000, 0.23529),
(0.44706, 0.00000, 0.21569),
(0.46667, 0.00000, 0.19216),
(0.48627, 0.00000, 0.17255),
(0.50588, 0.00000, 0.14902),
(0.52549, 0.00000, 0.12941),
(0.54510, 0.00000, 0.10980),
(0.56471, 0.00000, 0.08627),
(0.58431, 0.00000, 0.06667),
(0.60392, 0.00000, 0.04314),
(0.62353, 0.00000, 0.02353),
(0.64314, 0.00000, 0.00000),
(0.66275, 0.00000, 0.00000),
(0.68235, 0.00000, 0.00000),
(0.70588, 0.00000, 0.00000),
(0.72549, 0.00000, 0.00000),
(0.74510, 0.00000, 0.00000),
(0.76863, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.80784, 0.00000, 0.00000),
(0.83137, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.87059, 0.00000, 0.00000),
(0.89412, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.01961, 0.00000),
(1.00000, 0.03922, 0.00000),
(1.00000, 0.06275, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.12549, 0.00000),
(1.00000, 0.14510, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.18824, 0.00000),
(1.00000, 0.21176, 0.00000),
(1.00000, 0.23137, 0.00000),
(1.00000, 0.25098, 0.00000),
(1.00000, 0.27451, 0.00000),
(1.00000, 0.29412, 0.00000),
(1.00000, 0.31765, 0.00000),
(1.00000, 0.33333, 0.01569),
(1.00000, 0.35294, 0.03529),
(1.00000, 0.37255, 0.05490),
(1.00000, 0.39216, 0.07451),
(1.00000, 0.41176, 0.09412),
(1.00000, 0.42745, 0.10980),
(1.00000, 0.44706, 0.12941),
(1.00000, 0.46667, 0.14902),
(1.00000, 0.48627, 0.16863),
(1.00000, 0.50588, 0.18824),
(1.00000, 0.52549, 0.20784),
(1.00000, 0.54118, 0.22353),
(1.00000, 0.56078, 0.24314),
(1.00000, 0.58039, 0.26275),
(1.00000, 0.60000, 0.28235),
(1.00000, 0.61961, 0.30196),
(1.00000, 0.63922, 0.32157),
(1.00000, 0.63922, 0.30196),
(1.00000, 0.63922, 0.27843),
(1.00000, 0.63922, 0.25490),
(1.00000, 0.63922, 0.23137),
(1.00000, 0.63922, 0.20784),
(1.00000, 0.63922, 0.18431),
(1.00000, 0.63922, 0.16078),
(1.00000, 0.63922, 0.14118),
(1.00000, 0.63922, 0.11765),
(1.00000, 0.63922, 0.09412),
(1.00000, 0.63922, 0.07059),
(1.00000, 0.63922, 0.04706),
(1.00000, 0.63922, 0.02353),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(0.97255, 0.63922, 0.00000),
(0.94118, 0.63922, 0.00000),
(0.90980, 0.63922, 0.00000),
(0.88235, 0.63922, 0.00000),
(0.85098, 0.63922, 0.00000),
(0.81961, 0.63922, 0.00000),
(0.79216, 0.63922, 0.00000),
(0.76078, 0.63922, 0.00000),
(0.72941, 0.63922, 0.00000),
(0.70196, 0.63922, 0.00000),
(0.67059, 0.63922, 0.00000),
(0.63922, 0.63922, 0.00000),
(0.65882, 0.63922, 0.00000),
(0.67843, 0.63922, 0.00000),
(0.69804, 0.66275, 0.01176),
(0.71765, 0.68627, 0.02353),
(0.73725, 0.70980, 0.03529),
(0.75686, 0.73333, 0.04706),
(0.77647, 0.75686, 0.06275),
(0.79608, 0.78039, 0.07451),
(0.81961, 0.80392, 0.08627),
(0.83922, 0.83137, 0.09804),
(0.85882, 0.85490, 0.11373),
(0.87843, 0.87843, 0.12549),
(0.89804, 0.90196, 0.13725),
(0.91765, 0.92549, 0.14902),
(0.93725, 0.94902, 0.16078),
(0.95686, 0.97255, 0.17647),
(0.97647, 1.00000, 0.18824),
(1.00000, 1.00000, 0.20000),
(1.00000, 1.00000, 0.21176),
(1.00000, 1.00000, 0.22745),
(1.00000, 1.00000, 0.23922),
(1.00000, 1.00000, 0.25098),
(1.00000, 1.00000, 0.26275),
(1.00000, 1.00000, 0.27843),
(1.00000, 1.00000, 0.29020),
(1.00000, 1.00000, 0.30196),
(1.00000, 1.00000, 0.31373),
(1.00000, 1.00000, 0.32549),
(1.00000, 1.00000, 0.34118),
(1.00000, 1.00000, 0.35294),
(1.00000, 1.00000, 0.36471),
(1.00000, 1.00000, 0.37647),
(1.00000, 1.00000, 0.39216),
(1.00000, 1.00000, 0.40392),
(1.00000, 1.00000, 0.41569),
(1.00000, 1.00000, 0.42745),
(1.00000, 1.00000, 0.43922),
(1.00000, 1.00000, 0.45490),
(1.00000, 1.00000, 0.46667),
(1.00000, 1.00000, 0.47843),
(1.00000, 1.00000, 0.49020),
(1.00000, 1.00000, 0.50588),
(1.00000, 1.00000, 0.51765),
(1.00000, 1.00000, 0.52941),
(1.00000, 1.00000, 0.54118),
(1.00000, 1.00000, 0.55686),
(1.00000, 1.00000, 0.56863),
(1.00000, 1.00000, 0.58039),
(1.00000, 1.00000, 0.59216),
(1.00000, 1.00000, 0.60392),
(1.00000, 1.00000, 0.61961),
(1.00000, 1.00000, 0.63137),
(1.00000, 1.00000, 0.64314),
(1.00000, 1.00000, 0.65490),
(1.00000, 1.00000, 0.67059),
(1.00000, 1.00000, 0.68235),
(1.00000, 1.00000, 0.69412),
(1.00000, 1.00000, 0.70588),
(1.00000, 1.00000, 0.71765),
(1.00000, 1.00000, 0.73333),
(1.00000, 1.00000, 0.74510),
(1.00000, 1.00000, 0.75686),
(1.00000, 1.00000, 0.76863),
(1.00000, 1.00000, 0.78431),
(1.00000, 1.00000, 0.79608),
(1.00000, 1.00000, 0.80784),
(1.00000, 1.00000, 0.81961),
(1.00000, 1.00000, 0.83529),
(1.00000, 1.00000, 0.84706),
(1.00000, 1.00000, 0.85882),
(1.00000, 1.00000, 0.87059),
(1.00000, 1.00000, 0.88235),
(1.00000, 1.00000, 0.89804),
(1.00000, 1.00000, 0.90980),
(1.00000, 1.00000, 0.92157),
(1.00000, 1.00000, 0.93333),
(1.00000, 1.00000, 0.94902),
(1.00000, 1.00000, 0.96078),
(1.00000, 1.00000, 0.97255),
(1.00000, 1.00000, 0.98431),
(1.00000, 1.00000, 1.00000),
)
cmap_idl6 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.01176, 0.00000, 0.00000),
(0.02745, 0.00000, 0.00000),
(0.04314, 0.00000, 0.00000),
(0.05882, 0.00000, 0.00000),
(0.07451, 0.00000, 0.00000),
(0.08627, 0.00000, 0.00000),
(0.10196, 0.00000, 0.00000),
(0.11765, 0.00000, 0.00000),
(0.13333, 0.00000, 0.00000),
(0.14902, 0.00000, 0.00000),
(0.16078, 0.00000, 0.00000),
(0.17647, 0.00000, 0.00000),
(0.19216, 0.00000, 0.00000),
(0.20784, 0.00000, 0.00000),
(0.22353, 0.00000, 0.00000),
(0.23529, 0.00000, 0.00000),
(0.25098, 0.00000, 0.00000),
(0.26667, 0.00000, 0.00000),
(0.28235, 0.00000, 0.00000),
(0.29804, 0.00000, 0.00000),
(0.30980, 0.00000, 0.00000),
(0.32549, 0.00000, 0.00000),
(0.34118, 0.00000, 0.00000),
(0.35686, 0.00000, 0.00000),
(0.37255, 0.00000, 0.00000),
(0.38431, 0.00000, 0.00000),
(0.40000, 0.00000, 0.00000),
(0.41569, 0.00000, 0.00000),
(0.43137, 0.00000, 0.00000),
(0.44706, 0.00000, 0.00000),
(0.45882, 0.00000, 0.00000),
(0.47451, 0.00000, 0.00000),
(0.49020, 0.00000, 0.00000),
(0.50588, 0.00000, 0.00000),
(0.52157, 0.00000, 0.00000),
(0.53725, 0.00000, 0.00000),
(0.54902, 0.00000, 0.00000),
(0.56471, 0.00000, 0.00000),
(0.58039, 0.00000, 0.00000),
(0.59608, 0.00000, 0.00000),
(0.61176, 0.00000, 0.00000),
(0.62353, 0.00000, 0.00000),
(0.63922, 0.00000, 0.00000),
(0.65490, 0.00000, 0.00000),
(0.67059, 0.00000, 0.00000),
(0.68627, 0.00000, 0.00000),
(0.69804, 0.00000, 0.00000),
(0.71373, 0.00000, 0.00000),
(0.72941, 0.00000, 0.00000),
(0.74510, 0.00000, 0.00000),
(0.76078, 0.00000, 0.00000),
(0.77255, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.80392, 0.00000, 0.00000),
(0.81961, 0.00000, 0.00000),
(0.83529, 0.00000, 0.00000),
(0.84706, 0.00000, 0.00000),
(0.86275, 0.00000, 0.00000),
(0.87843, 0.00000, 0.00000),
(0.89412, 0.00000, 0.00000),
(0.90980, 0.00000, 0.00000),
(0.92157, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.95294, 0.00000, 0.00000),
(0.96863, 0.01176, 0.00000),
(0.98431, 0.02745, 0.00000),
(1.00000, 0.04314, 0.00000),
(0.98431, 0.05882, 0.00000),
(0.96863, 0.07451, 0.00000),
(0.95294, 0.09020, 0.00000),
(0.93725, 0.10588, 0.00000),
(0.92157, 0.12157, 0.00000),
(0.90196, 0.13725, 0.00000),
(0.88627, 0.15294, 0.00000),
(0.87059, 0.16863, 0.00000),
(0.85490, 0.18431, 0.00000),
(0.83922, 0.20000, 0.00000),
(0.82353, 0.21569, 0.00000),
(0.80392, 0.23137, 0.00000),
(0.78824, 0.24706, 0.00000),
(0.77255, 0.26275, 0.00000),
(0.75686, 0.27843, 0.00000),
(0.74118, 0.29412, 0.00000),
(0.72157, 0.30980, 0.00000),
(0.70588, 0.32549, 0.00000),
(0.69020, 0.34118, 0.00000),
(0.67451, 0.35686, 0.00000),
(0.65882, 0.37255, 0.00000),
(0.64314, 0.38824, 0.00000),
(0.62353, 0.40392, 0.00000),
(0.60784, 0.41961, 0.00000),
(0.59216, 0.43529, 0.00000),
(0.57647, 0.45098, 0.00000),
(0.56078, 0.46667, 0.00000),
(0.54118, 0.48235, 0.00000),
(0.52549, 0.49804, 0.00000),
(0.50980, 0.51373, 0.00000),
(0.49412, 0.52941, 0.00000),
(0.47843, 0.54510, 0.00000),
(0.46275, 0.56078, 0.00000),
(0.44314, 0.57647, 0.00000),
(0.42745, 0.59216, 0.00000),
(0.41176, 0.60784, 0.00000),
(0.39608, 0.62353, 0.00000),
(0.38039, 0.63922, 0.00000),
(0.36078, 0.65490, 0.00000),
(0.34510, 0.67059, 0.00000),
(0.32941, 0.68627, 0.00000),
(0.31373, 0.70196, 0.00000),
(0.29804, 0.71765, 0.00000),
(0.28235, 0.73333, 0.00000),
(0.26275, 0.74902, 0.00000),
(0.24706, 0.76471, 0.00000),
(0.23137, 0.78039, 0.00000),
(0.21569, 0.79608, 0.00000),
(0.20000, 0.81176, 0.00000),
(0.18039, 0.82745, 0.00000),
(0.16471, 0.84314, 0.00000),
(0.14902, 0.85882, 0.00000),
(0.13333, 0.87451, 0.00000),
(0.11765, 0.89020, 0.00000),
(0.10196, 0.90588, 0.00000),
(0.08235, 0.92157, 0.00000),
(0.06667, 0.93725, 0.00000),
(0.05098, 0.95294, 0.00000),
(0.03529, 0.96863, 0.00000),
(0.01961, 0.98431, 0.01176),
(0.00000, 1.00000, 0.02745),
(0.00000, 0.98431, 0.04314),
(0.00000, 0.96863, 0.05882),
(0.00000, 0.95294, 0.07451),
(0.00000, 0.93725, 0.09020),
(0.00000, 0.92157, 0.10588),
(0.00000, 0.90588, 0.11765),
(0.00000, 0.89020, 0.13333),
(0.00000, 0.87451, 0.14902),
(0.00000, 0.85882, 0.16471),
(0.00000, 0.84314, 0.18039),
(0.00000, 0.82745, 0.19608),
(0.00000, 0.81176, 0.21176),
(0.00000, 0.79608, 0.22353),
(0.00000, 0.78039, 0.23922),
(0.00000, 0.76471, 0.25490),
(0.00000, 0.74902, 0.27059),
(0.00000, 0.73333, 0.28627),
(0.00000, 0.71765, 0.30196),
(0.00000, 0.70196, 0.31765),
(0.00000, 0.68627, 0.33333),
(0.00000, 0.66667, 0.34510),
(0.00000, 0.65098, 0.36078),
(0.00000, 0.63529, 0.37647),
(0.00000, 0.61961, 0.39216),
(0.00000, 0.60392, 0.40784),
(0.00000, 0.58824, 0.42353),
(0.00000, 0.57255, 0.43922),
(0.00000, 0.55686, 0.45098),
(0.00000, 0.54118, 0.46667),
(0.00000, 0.52549, 0.48235),
(0.00000, 0.50980, 0.49804),
(0.00000, 0.49412, 0.51373),
(0.00000, 0.47843, 0.52941),
(0.00000, 0.46275, 0.54510),
(0.00000, 0.44706, 0.55686),
(0.00000, 0.43137, 0.57255),
(0.00000, 0.41569, 0.58824),
(0.00000, 0.40000, 0.60392),
(0.00000, 0.38431, 0.61961),
(0.00000, 0.36863, 0.63529),
(0.00000, 0.35294, 0.65098),
(0.00000, 0.33333, 0.66667),
(0.00000, 0.31765, 0.67843),
(0.00000, 0.30196, 0.69412),
(0.00000, 0.28627, 0.70980),
(0.00000, 0.27059, 0.72549),
(0.00000, 0.25490, 0.74118),
(0.00000, 0.23922, 0.75686),
(0.00000, 0.22353, 0.77255),
(0.00000, 0.20784, 0.78431),
(0.00000, 0.19216, 0.80000),
(0.00000, 0.17647, 0.81569),
(0.00000, 0.16078, 0.83137),
(0.00000, 0.14510, 0.84706),
(0.00000, 0.12941, 0.86275),
(0.00000, 0.11373, 0.87843),
(0.00000, 0.09804, 0.89020),
(0.00000, 0.08235, 0.90588),
(0.00000, 0.06667, 0.92157),
(0.00000, 0.05098, 0.93725),
(0.00000, 0.03529, 0.95294),
(0.00000, 0.01961, 0.96863),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 0.96863),
(0.00000, 0.00000, 0.95294),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.92157),
(0.00000, 0.00000, 0.90588),
(0.00000, 0.00000, 0.89020),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.85882),
(0.00000, 0.00000, 0.84314),
(0.00000, 0.00000, 0.82745),
(0.00000, 0.00000, 0.81176),
(0.00000, 0.00000, 0.79608),
(0.00000, 0.00000, 0.78039),
(0.00000, 0.00000, 0.76471),
(0.00000, 0.00000, 0.74902),
(0.00000, 0.00000, 0.73333),
(0.00000, 0.00000, 0.71765),
(0.00000, 0.00000, 0.70196),
(0.00000, 0.00000, 0.68627),
(0.00000, 0.00000, 0.66667),
(0.00000, 0.00000, 0.65098),
(0.00000, 0.00000, 0.63529),
(0.00000, 0.00000, 0.61961),
(0.00000, 0.00000, 0.60392),
(0.00000, 0.00000, 0.58824),
(0.00000, 0.00000, 0.57255),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.54118),
(0.00000, 0.00000, 0.52549),
(0.00000, 0.00000, 0.50980),
(0.00000, 0.00000, 0.49412),
(0.00000, 0.00000, 0.47843),
(0.00000, 0.00000, 0.46275),
(0.00000, 0.00000, 0.44706),
(0.00000, 0.00000, 0.43137),
(0.00000, 0.00000, 0.41569),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.36863),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.30196),
(0.00000, 0.00000, 0.28627),
(0.00000, 0.00000, 0.27059),
(0.00000, 0.00000, 0.25490),
(0.00000, 0.00000, 0.23922),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.20784),
(0.00000, 0.00000, 0.19216),
(0.00000, 0.00000, 0.17647),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.14510),
(0.00000, 0.00000, 0.12941),
(0.00000, 0.00000, 0.11373),
(0.00000, 0.00000, 0.09804),
(0.00000, 0.00000, 0.08235),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.05098),
(0.00000, 0.00000, 0.03529),
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.00000),
)
cmap_smooth1 = (
(0.30980, 0.29020, 0.22353), # noqa
(0.32157, 0.30196, 0.23922),
(0.33333, 0.31765, 0.25490),
(0.34510, 0.32941, 0.27059),
(0.35686, 0.34510, 0.29020),
(0.36863, 0.36078, 0.30588),
(0.38039, 0.37647, 0.32549),
(0.39216, 0.38824, 0.34510),
(0.40392, 0.40392, 0.36471),
(0.41569, 0.41961, 0.38431),
(0.42745, 0.43529, 0.40392),
(0.43922, 0.45098, 0.42353),
(0.45098, 0.46667, 0.44314),
(0.46275, 0.48235, 0.46667),
(0.47451, 0.49804, 0.48627),
(0.49020, 0.51765, 0.50980),
(0.50196, 0.53333, 0.53333),
(0.51373, 0.54902, 0.55686),
(0.52549, 0.56863, 0.58039),
(0.54118, 0.58431, 0.60392),
(0.55294, 0.60000, 0.62745),
(0.56863, 0.61961, 0.65098),
(0.58039, 0.63529, 0.67843),
(0.59216, 0.65490, 0.70196),
(0.60784, 0.67059, 0.72941),
(0.61961, 0.69020, 0.75686),
(0.63529, 0.70980, 0.78431),
(0.64706, 0.72549, 0.75686),
(0.66275, 0.74510, 0.72941),
(0.67843, 0.76471, 0.70588),
(0.69020, 0.78431, 0.68235),
(0.70588, 0.80392, 0.65882),
(0.71765, 0.82353, 0.64314),
(0.73333, 0.80392, 0.62353),
(0.74902, 0.78824, 0.60392),
(0.76471, 0.77255, 0.58824),
(0.77647, 0.75686, 0.57255),
(0.79216, 0.74118, 0.55686),
(0.80784, 0.73333, 0.54118),
(0.82353, 0.71765, 0.52941),
(0.83922, 0.70588, 0.51373),
(0.85490, 0.69804, 0.50588),
(0.87059, 0.68235, 0.49412),
(0.85490, 0.67451, 0.48627),
(0.83922, 0.66667, 0.47843),
(0.82745, 0.65882, 0.47059),
(0.81569, 0.65098, 0.46275),
(0.80392, 0.63922, 0.45882),
(0.79216, 0.63529, 0.45490),
(0.78431, 0.62745, 0.45098),
(0.77255, 0.61961, 0.44706),
(0.76078, 0.61176, 0.44706),
(0.74902, 0.60784, 0.44706),
(0.74510, 0.60392, 0.44706),
(0.73333, 0.60000, 0.44706),
(0.72941, 0.59608, 0.45098),
(0.71765, 0.59216, 0.45490),
(0.70980, 0.58824, 0.45882),
(0.70588, 0.58824, 0.46275),
(0.69412, 0.58431, 0.47059),
(0.69020, 0.58039, 0.47843),
(0.68235, 0.58039, 0.48627),
(0.67843, 0.58039, 0.49412),
(0.67451, 0.57647, 0.50588),
(0.66667, 0.58039, 0.51373),
(0.66275, 0.57647, 0.52941),
(0.65490, 0.58039, 0.54118),
(0.65098, 0.58039, 0.55686),
(0.64706, 0.58039, 0.57255),
(0.64314, 0.58431, 0.58824),
(0.63529, 0.58824, 0.60392),
(0.63529, 0.58824, 0.62353),
(0.63137, 0.59216, 0.64314),
(0.62745, 0.59608, 0.65882),
(0.62745, 0.60000, 0.68235),
(0.62353, 0.60392, 0.70588),
(0.62353, 0.60784, 0.72941),
(0.61961, 0.61176, 0.75686),
(0.61961, 0.61961, 0.78431),
(0.61569, 0.62745, 0.75686),
(0.61569, 0.63529, 0.72941),
(0.61176, 0.63922, 0.70588),
(0.61176, 0.65098, 0.68235),
(0.61176, 0.65882, 0.65882),
(0.61176, 0.66667, 0.64314),
(0.61176, 0.67451, 0.62353),
(0.61176, 0.68235, 0.60392),
(0.61176, 0.69804, 0.58824),
(0.61176, 0.70588, 0.57255),
(0.61569, 0.71765, 0.55686),
(0.61569, 0.73333, 0.54118),
(0.61961, 0.74118, 0.52941),
(0.61961, 0.75686, 0.51373),
(0.62353, 0.77255, 0.50588),
(0.62353, 0.78824, 0.49412),
(0.62745, 0.80392, 0.48627),
(0.62745, 0.82353, 0.47843),
(0.63137, 0.80392, 0.47059),
(0.63529, 0.78824, 0.46275),
(0.63529, 0.77255, 0.45882),
(0.64314, 0.75686, 0.45490),
(0.64706, 0.74118, 0.45098),
(0.65098, 0.73333, 0.44706),
(0.65490, 0.71765, 0.44706),
(0.66275, 0.70588, 0.44706),
(0.66667, 0.69804, 0.44706),
(0.67451, 0.68235, 0.44706),
(0.67843, 0.67451, 0.45098),
(0.68235, 0.66667, 0.45490),
(0.69020, 0.65882, 0.45882),
(0.69412, 0.65098, 0.46275),
(0.70588, 0.63922, 0.47059),
(0.70980, 0.63529, 0.47843),
(0.71765, 0.62745, 0.48627),
(0.72941, 0.61961, 0.49412),
(0.73333, 0.61176, 0.50588),
(0.74510, 0.60784, 0.51373),
(0.74902, 0.60392, 0.52941),
(0.76078, 0.60000, 0.54118),
(0.77647, 0.59608, 0.55686),
(0.79216, 0.59216, 0.57255),
(0.80392, 0.58824, 0.58824),
(0.81961, 0.58824, 0.60392),
(0.83922, 0.58431, 0.62353),
(0.85490, 0.58039, 0.64314),
(0.87451, 0.58039, 0.65882),
(0.89804, 0.58039, 0.68235),
(0.92157, 0.57647, 0.70588),
(0.91373, 0.58039, 0.72941),
(0.90588, 0.57647, 0.75686),
(0.89804, 0.58039, 0.78431),
(0.89412, 0.58039, 0.75686),
(0.88627, 0.58039, 0.72941),
(0.88235, 0.58431, 0.70588),
(0.87843, 0.58824, 0.68235),
(0.87451, 0.58824, 0.65882),
(0.87059, 0.59216, 0.64314),
(0.86275, 0.59608, 0.62353),
(0.86275, 0.60000, 0.60392),
(0.85882, 0.60392, 0.58824),
(0.85882, 0.60784, 0.57255),
(0.85490, 0.61176, 0.55686),
(0.85098, 0.61961, 0.54118),
(0.85490, 0.62745, 0.52941),
(0.85098, 0.63529, 0.51373),
(0.85098, 0.63922, 0.50588),
(0.85098, 0.65490, 0.49412),
(0.85098, 0.67059, 0.48627),
(0.85490, 0.68235, 0.47843),
(0.85098, 0.69804, 0.47059),
(0.85490, 0.71373, 0.46275),
(0.85098, 0.74118, 0.45882),
(0.85490, 0.75686, 0.45490),
(0.85882, 0.78039, 0.45098),
(0.86275, 0.80392, 0.44706),
(0.86275, 0.82353, 0.44706),
(0.86667, 0.85098, 0.44706),
(0.87059, 0.87843, 0.44706),
(0.87451, 0.90980, 0.44706),
(0.87843, 0.93725, 0.45098),
(0.88235, 0.97255, 0.45490),
(0.88627, 0.96471, 0.45882),
(0.89020, 0.96078, 0.46275),
(0.89804, 0.95686, 0.47059),
(0.90196, 0.95294, 0.47843),
(0.90588, 0.94902, 0.48627),
(0.91373, 0.94902, 0.49412),
(0.91765, 0.94510, 0.50588),
(0.92549, 0.94118, 0.51373),
(0.92941, 0.94510, 0.52941),
(0.94118, 0.94118, 0.54118),
(0.94902, 0.94510, 0.55686),
(0.95294, 0.94510, 0.57255),
(0.96078, 0.94510, 0.58824),
(0.97255, 0.94902, 0.60392),
(0.98039, 0.94902, 0.62745),
(0.98824, 0.95294, 0.65098),
(0.99608, 0.95686, 0.67451),
(1.00000, 0.95686, 0.70588),
(1.00000, 0.96078, 0.73333),
(1.00000, 0.96863, 0.76863),
(1.00000, 0.97255, 0.80392),
(1.00000, 0.98039, 0.84314),
(1.00000, 0.98431, 0.82353),
(0.98824, 0.99216, 0.80784),
(0.96078, 0.99608, 0.79608),
(0.93333, 1.00000, 0.78431),
(0.90980, 1.00000, 0.77255),
(0.88235, 1.00000, 0.76471),
(0.85882, 1.00000, 0.75686),
(0.83137, 1.00000, 0.74902),
(0.80784, 1.00000, 0.74118),
(0.78039, 1.00000, 0.73725),
(0.75686, 1.00000, 0.73333),
(0.73333, 1.00000, 0.72941),
(0.70588, 1.00000, 0.72941),
(0.69020, 1.00000, 0.72941),
(0.66667, 0.97255, 0.72941),
(0.64314, 0.93725, 0.73333),
(0.62353, 0.90196, 0.73725),
(0.60392, 0.86667, 0.74118),
(0.58431, 0.83137, 0.74510),
(0.56078, 0.79608, 0.75294),
(0.54510, 0.76078, 0.75686),
(0.52941, 0.73333, 0.76863),
(0.51373, 0.69804, 0.77647),
(0.50196, 0.67059, 0.78824),
(0.48627, 0.63922, 0.80392),
(0.47059, 0.61176, 0.81569),
(0.45490, 0.58039, 0.83137),
(0.44314, 0.55294, 0.84706),
(0.43529, 0.52941, 0.86667),
(0.42353, 0.50588, 0.88235),
(0.41569, 0.47843, 0.90196),
(0.41176, 0.45490, 0.92549),
(0.40392, 0.43529, 0.87843),
(0.40000, 0.41569, 0.83922),
(0.39216, 0.39216, 0.79608),
(0.38824, 0.37647, 0.75686),
(0.38431, 0.35686, 0.72157),
(0.38039, 0.34118, 0.67843),
(0.38039, 0.32941, 0.65098),
(0.37255, 0.31765, 0.61569),
(0.37255, 0.30588, 0.58431),
(0.37255, 0.29804, 0.55686),
(0.37255, 0.29412, 0.52549),
(0.37647, 0.28627, 0.50196),
(0.37647, 0.28235, 0.47843),
(0.38039, 0.28235, 0.45882),
(0.38431, 0.27843, 0.43922),
(0.38824, 0.27843, 0.41961),
(0.39608, 0.27843, 0.40784),
(0.40000, 0.28235, 0.40000),
(0.40784, 0.28627, 0.38824),
(0.41569, 0.29020, 0.38431),
(0.42353, 0.29804, 0.38039),
(0.43529, 0.30588, 0.37647),
(0.44706, 0.31765, 0.37647),
(0.45882, 0.32549, 0.38039),
(0.47059, 0.34118, 0.38431),
(0.48235, 0.35294, 0.39216),
(0.50196, 0.37255, 0.40000),
(0.51765, 0.39216, 0.40784),
(0.53725, 0.41176, 0.42353),
(0.55686, 0.43922, 0.43529),
(0.57647, 0.46667, 0.45098),
(0.60000, 0.49804, 0.47451),
(0.62745, 0.52941, 0.49804),
(0.65490, 0.56078, 0.52549),
(0.68235, 0.59216, 0.54902),
(0.70980, 0.62745, 0.58431),
(0.74118, 0.66275, 0.61569),
(0.77255, 0.70196, 0.65098),
(0.80392, 0.74118, 0.69020),
(0.83529, 0.78039, 0.73333),
(0.87059, 0.82353, 0.78431),
)
cmap_smooth = (
(0.00000, 0.00000, 1.00000), # noqa
(0.01569, 0.00000, 0.98431),
(0.03529, 0.00000, 0.96471),
(0.05098, 0.00000, 0.94902),
(0.06667, 0.00000, 0.93333),
(0.08627, 0.00000, 0.91373),
(0.10196, 0.00000, 0.89804),
(0.11765, 0.00000, 0.88235),
(0.13725, 0.00000, 0.86275),
(0.15294, 0.00000, 0.84706),
(0.16863, 0.00000, 0.83137),
(0.18824, 0.00000, 0.81176),
(0.20392, 0.00000, 0.79608),
(0.21961, 0.00000, 0.78039),
(0.23922, 0.00000, 0.76078),
(0.25490, 0.00000, 0.74510),
(0.27059, 0.00000, 0.72941),
(0.28627, 0.00000, 0.71373),
(0.30588, 0.00000, 0.69412),
(0.32157, 0.00000, 0.67843),
(0.33725, 0.00000, 0.66275),
(0.35686, 0.00000, 0.64314),
(0.37255, 0.00000, 0.62745),
(0.38824, 0.00000, 0.61176),
(0.40784, 0.00000, 0.59216),
(0.42353, 0.00000, 0.57647),
(0.43922, 0.00000, 0.56078),
(0.45882, 0.00000, 0.54118),
(0.47451, 0.00000, 0.52549),
(0.49020, 0.00000, 0.50980),
(0.50980, 0.00000, 0.49020),
(0.52549, 0.00000, 0.47451),
(0.54118, 0.00000, 0.45882),
(0.56078, 0.00000, 0.43922),
(0.57647, 0.00000, 0.42353),
(0.59216, 0.00000, 0.40784),
(0.61176, 0.00000, 0.38824),
(0.62745, 0.00000, 0.37255),
(0.64314, 0.00000, 0.35686),
(0.66275, 0.00000, 0.33725),
(0.67843, 0.00000, 0.32157),
(0.69412, 0.00000, 0.30588),
(0.71373, 0.00000, 0.28627),
(0.72941, 0.00000, 0.27059),
(0.74510, 0.00000, 0.25490),
(0.76078, 0.00000, 0.23922),
(0.78039, 0.00000, 0.21961),
(0.79608, 0.00000, 0.20392),
(0.81176, 0.00000, 0.18824),
(0.83137, 0.00000, 0.16863),
(0.84706, 0.00000, 0.15294),
(0.86275, 0.00000, 0.13725),
(0.88235, 0.00000, 0.11765),
(0.89804, 0.00000, 0.10196),
(0.91373, 0.00000, 0.08627),
(0.93333, 0.00000, 0.06667),
(0.94902, 0.00000, 0.05098),
(0.96471, 0.00000, 0.03529),
(0.98431, 0.00000, 0.01569),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.01176, 0.00000),
(1.00000, 0.01961, 0.00000),
(1.00000, 0.03137, 0.00000),
(1.00000, 0.03922, 0.00000),
(1.00000, 0.05098, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.07059, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.10196, 0.00000),
(1.00000, 0.10980, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.12941, 0.00000),
(1.00000, 0.14118, 0.00000),
(0.99608, 0.15294, 0.00000),
(0.99608, 0.16078, 0.00000),
(0.99608, 0.17255, 0.00000),
(0.99608, 0.18039, 0.00000),
(0.99608, 0.19216, 0.00000),
(0.99608, 0.20392, 0.00000),
(0.99608, 0.21176, 0.00000),
(0.99608, 0.22353, 0.00000),
(0.99608, 0.23137, 0.00000),
(0.99608, 0.24314, 0.00000),
(0.99608, 0.25098, 0.00000),
(0.99608, 0.26275, 0.00000),
(0.99608, 0.27451, 0.00000),
(0.99608, 0.28235, 0.00000),
(0.99608, 0.29412, 0.00000),
(0.99608, 0.30196, 0.00000),
(0.99608, 0.31373, 0.00000),
(0.99608, 0.32157, 0.00000),
(0.99608, 0.33333, 0.00000),
(0.99608, 0.34510, 0.00000),
(0.99608, 0.35294, 0.00000),
(0.99608, 0.36471, 0.00000),
(0.99608, 0.37255, 0.00000),
(0.99608, 0.38431, 0.00000),
(0.99608, 0.39216, 0.00000),
(0.99608, 0.40392, 0.00000),
(0.99608, 0.41569, 0.00000),
(0.99608, 0.42353, 0.00000),
(0.99608, 0.43529, 0.00000),
(0.99608, 0.44314, 0.00000),
(0.99216, 0.45490, 0.00000),
(0.99216, 0.46667, 0.00000),
(0.99216, 0.47451, 0.00000),
(0.99216, 0.48627, 0.00000),
(0.99216, 0.49412, 0.00000),
(0.99216, 0.50588, 0.00000),
(0.99216, 0.51373, 0.00000),
(0.99216, 0.52549, 0.00000),
(0.99216, 0.53725, 0.00000),
(0.99216, 0.54510, 0.00000),
(0.99216, 0.55686, 0.00000),
(0.99216, 0.56471, 0.00000),
(0.99216, 0.57647, 0.00000),
(0.99216, 0.58431, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.60000, 0.00000),
(0.99216, 0.60784, 0.00000),
(0.99216, 0.61176, 0.00000),
(0.99216, 0.61569, 0.00000),
(0.99216, 0.61961, 0.00000),
(0.99216, 0.62745, 0.00000),
(0.99216, 0.63137, 0.00000),
(0.99216, 0.63529, 0.00000),
(0.99216, 0.64314, 0.00000),
(0.98824, 0.64706, 0.00000),
(0.98824, 0.65098, 0.00000),
(0.98824, 0.65882, 0.00000),
(0.98824, 0.66275, 0.00000),
(0.98824, 0.66667, 0.00000),
(0.98824, 0.67451, 0.00000),
(0.98824, 0.67843, 0.00000),
(0.98824, 0.68235, 0.00000),
(0.98824, 0.68627, 0.00000),
(0.98824, 0.69412, 0.00000),
(0.98824, 0.69804, 0.00000),
(0.98824, 0.70196, 0.00000),
(0.98824, 0.70980, 0.00000),
(0.98824, 0.71373, 0.00000),
(0.98824, 0.71765, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72941, 0.00000),
(0.98824, 0.73333, 0.00000),
(0.98824, 0.73725, 0.00000),
(0.98824, 0.74510, 0.00000),
(0.98824, 0.74902, 0.00000),
(0.98431, 0.75294, 0.00000),
(0.98431, 0.76078, 0.00000),
(0.98431, 0.76471, 0.00000),
(0.98431, 0.76863, 0.00000),
(0.98431, 0.77255, 0.00000),
(0.98431, 0.78039, 0.00000),
(0.98431, 0.78431, 0.00000),
(0.98431, 0.78824, 0.00000),
(0.98431, 0.79608, 0.00000),
(0.98431, 0.80000, 0.00000),
(0.98431, 0.80392, 0.00000),
(0.98431, 0.81176, 0.00000),
(0.98431, 0.81569, 0.00000),
(0.98431, 0.81961, 0.00000),
(0.98431, 0.82745, 0.00000),
(0.98431, 0.83137, 0.00000),
(0.98431, 0.83529, 0.00000),
(0.98431, 0.83922, 0.00000),
(0.98431, 0.84706, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98039, 0.85490, 0.00000),
(0.98039, 0.86275, 0.00000),
(0.98039, 0.86667, 0.00000),
(0.98039, 0.87059, 0.00000),
(0.98039, 0.87843, 0.00000),
(0.98039, 0.88235, 0.00000),
(0.98039, 0.88627, 0.00000),
(0.98039, 0.89020, 0.00000),
(0.98039, 0.89804, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.96471, 0.88627, 0.00000),
(0.94902, 0.87059, 0.00000),
(0.92941, 0.85490, 0.00000),
(0.91373, 0.83922, 0.00000),
(0.89804, 0.82745, 0.00000),
(0.88235, 0.81176, 0.00000),
(0.86275, 0.79608, 0.00000),
(0.84706, 0.78039, 0.00000),
(0.83137, 0.76471, 0.00000),
(0.81569, 0.74902, 0.00000),
(0.79608, 0.73333, 0.00000),
(0.78039, 0.71765, 0.00000),
(0.76471, 0.70196, 0.00000),
(0.74902, 0.68627, 0.00000),
(0.72941, 0.67451, 0.00000),
(0.71373, 0.65882, 0.00000),
(0.69804, 0.64314, 0.00000),
(0.68235, 0.62745, 0.00000),
(0.66275, 0.61176, 0.00000),
(0.64706, 0.59608, 0.00000),
(0.63137, 0.58039, 0.00000),
(0.61569, 0.56471, 0.00000),
(0.60000, 0.54902, 0.00000),
(0.58039, 0.53333, 0.00000),
(0.56471, 0.52157, 0.00000),
(0.54902, 0.50588, 0.00000),
(0.53333, 0.49020, 0.00000),
(0.51373, 0.47451, 0.00000),
(0.49804, 0.45882, 0.00000),
(0.48235, 0.44314, 0.00000),
(0.46667, 0.42745, 0.00000),
(0.44706, 0.41176, 0.00000),
(0.43137, 0.39608, 0.00000),
(0.41569, 0.38039, 0.00000),
(0.40000, 0.36863, 0.00000),
(0.38039, 0.35294, 0.00000),
(0.36471, 0.33725, 0.00000),
(0.34902, 0.32157, 0.00000),
(0.33333, 0.30588, 0.00000),
(0.31765, 0.29020, 0.00000),
(0.29804, 0.27451, 0.00000),
(0.28235, 0.25882, 0.00000),
(0.26667, 0.24314, 0.00000),
(0.25098, 0.22745, 0.00000),
(0.23137, 0.21569, 0.00000),
(0.21569, 0.20000, 0.00000),
(0.20000, 0.18431, 0.00000),
(0.18431, 0.16863, 0.00000),
(0.16471, 0.15294, 0.00000),
(0.14902, 0.13725, 0.00000),
(0.13333, 0.12157, 0.00000),
(0.11765, 0.10588, 0.00000),
(0.09804, 0.09020, 0.00000),
(0.08235, 0.07451, 0.00000),
(0.06667, 0.06275, 0.00000),
(0.05098, 0.04706, 0.00000),
(0.03137, 0.03137, 0.00000),
(0.01569, 0.01569, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
)
cmap_isophot = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.11765),
(0.00000, 0.00000, 0.15686),
(0.00000, 0.00000, 0.19608),
(0.00000, 0.00000, 0.23529),
(0.00000, 0.00000, 0.27843),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.35686),
(0.00000, 0.00000, 0.39608),
(0.00000, 0.00000, 0.43529),
(0.00000, 0.00000, 0.47451),
(0.00000, 0.00000, 0.51765),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.59608),
(0.00000, 0.00000, 0.63529),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.71765),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 0.00000, 0.87843),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.95686),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.28627, 1.00000),
(0.00000, 0.31765, 1.00000),
(0.00000, 0.34902, 1.00000),
(0.00000, 0.38039, 1.00000),
(0.00000, 0.41176, 1.00000),
(0.00000, 0.44314, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.49804, 1.00000),
(0.00000, 0.51765, 1.00000),
(0.00000, 0.53725, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.63922, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.67843, 1.00000),
(0.00000, 0.70196, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.74118, 1.00000),
(0.00000, 0.76078, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.79608, 1.00000),
(0.00000, 0.81176, 1.00000),
(0.00000, 0.82353, 1.00000),
(0.00000, 0.83922, 1.00000),
(0.00000, 0.85490, 1.00000),
(0.00000, 0.86667, 1.00000),
(0.00000, 0.88235, 1.00000),
(0.00000, 0.89412, 1.00000),
(0.00000, 0.90980, 1.00000),
(0.00000, 0.92549, 1.00000),
(0.00000, 0.93725, 1.00000),
(0.00000, 0.95294, 1.00000),
(0.00000, 0.96863, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.96078),
(0.00000, 1.00000, 0.94118),
(0.00000, 1.00000, 0.92157),
(0.00000, 1.00000, 0.90196),
(0.00000, 1.00000, 0.88235),
(0.00000, 1.00000, 0.86275),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.82353),
(0.00000, 1.00000, 0.80392),
(0.00000, 1.00000, 0.78431),
(0.00000, 1.00000, 0.76471),
(0.00000, 1.00000, 0.74510),
(0.00000, 1.00000, 0.72549),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.60784),
(0.00000, 1.00000, 0.56078),
(0.00000, 1.00000, 0.51373),
(0.00000, 1.00000, 0.46667),
(0.00000, 1.00000, 0.41961),
(0.00000, 1.00000, 0.37255),
(0.00000, 1.00000, 0.32549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.13725),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.04314),
(0.00000, 1.00000, 0.00000),
(0.04706, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.14118, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.23529, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.32941, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.42353, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.51765, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.61176, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.72549, 1.00000, 0.00000),
(0.74510, 1.00000, 0.00000),
(0.76471, 1.00000, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.84314, 1.00000, 0.00000),
(0.86275, 1.00000, 0.00000),
(0.88235, 1.00000, 0.00000),
(0.90196, 1.00000, 0.00000),
(0.92157, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.96078, 1.00000, 0.00000),
(0.98039, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.98039, 0.00000),
(0.99608, 0.96078, 0.00000),
(0.99608, 0.94118, 0.00000),
(0.99608, 0.92549, 0.00000),
(0.99216, 0.90588, 0.00000),
(0.99216, 0.88627, 0.00000),
(0.99216, 0.87059, 0.00000),
(0.99216, 0.85098, 0.00000),
(0.98824, 0.83137, 0.00000),
(0.98824, 0.81569, 0.00000),
(0.98824, 0.79608, 0.00000),
(0.98824, 0.77647, 0.00000),
(0.98824, 0.76078, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.98824, 0.69020, 0.00000),
(0.98824, 0.67059, 0.00000),
(0.98824, 0.65490, 0.00000),
(0.98824, 0.63922, 0.00000),
(0.98824, 0.61961, 0.00000),
(0.99216, 0.60392, 0.00000),
(0.99216, 0.58824, 0.00000),
(0.99216, 0.56863, 0.00000),
(0.99216, 0.55294, 0.00000),
(0.99608, 0.53725, 0.00000),
(0.99608, 0.51765, 0.00000),
(0.99608, 0.50196, 0.00000),
(0.99608, 0.48627, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.30980, 0.00000),
(1.00000, 0.27843, 0.00000),
(1.00000, 0.24706, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.04706),
(1.00000, 0.00000, 0.09412),
(1.00000, 0.00000, 0.14118),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.00000, 0.32941),
(1.00000, 0.00000, 0.37647),
(1.00000, 0.00000, 0.42353),
(1.00000, 0.00000, 0.47059),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.56471),
(1.00000, 0.00000, 0.61176),
(1.00000, 0.00000, 0.65882),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.72549),
(1.00000, 0.00000, 0.74902),
(1.00000, 0.00000, 0.77255),
(1.00000, 0.00000, 0.79608),
(1.00000, 0.00000, 0.81569),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.88627),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.92941),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.97647),
(1.00000, 0.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.14118, 1.00000),
(1.00000, 0.17647, 1.00000),
(1.00000, 0.21176, 1.00000),
(1.00000, 0.25098, 1.00000),
(1.00000, 0.28627, 1.00000),
(1.00000, 0.32157, 1.00000),
(1.00000, 0.36078, 1.00000),
(1.00000, 0.39608, 1.00000),
(1.00000, 0.43137, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.48627, 1.00000),
(1.00000, 0.50588, 1.00000),
(1.00000, 0.52157, 1.00000),
(1.00000, 0.54118, 1.00000),
(1.00000, 0.56078, 1.00000),
(1.00000, 0.57647, 1.00000),
(1.00000, 0.59608, 1.00000),
(1.00000, 0.61176, 1.00000),
(1.00000, 0.63137, 1.00000),
(1.00000, 0.65098, 1.00000),
(1.00000, 0.66667, 1.00000),
(1.00000, 0.68627, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.74510, 1.00000),
(1.00000, 0.78824, 1.00000),
(1.00000, 0.83137, 1.00000),
(1.00000, 0.87059, 1.00000),
(1.00000, 0.91373, 1.00000),
(1.00000, 0.95686, 1.00000),
(1.00000, 1.00000, 1.00000),
)
# 256-entry RGB lookup table; each row is an (r, g, b) triple of floats in
# [0, 1]. Stepped ("posterized") ramp: each colour is repeated for several
# consecutive entries, running black -> blue -> purple -> red -> orange ->
# yellow -> white.
cmap_smooth2 = (
    (0.00000, 0.00000, 0.00000), # noqa
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.06667),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.13333),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.26667),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.33333),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.46667),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.00000, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.06667, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.13333, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.20000, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.26667, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.33333, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.40000, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.46667, 0.00000, 0.53333),
    (0.53333, 0.00000, 0.53333),
    (0.53333, 0.00000, 0.53333),
    (0.53333, 0.00000, 0.53333),
    (0.53333, 0.00000, 0.53333),
    (0.53333, 0.00000, 0.46667),
    (0.53333, 0.00000, 0.46667),
    (0.53333, 0.00000, 0.46667),
    (0.53333, 0.00000, 0.46667),
    (0.60000, 0.00000, 0.40000),
    (0.60000, 0.00000, 0.40000),
    (0.60000, 0.00000, 0.40000),
    (0.60000, 0.00000, 0.40000),
    (0.60000, 0.00000, 0.33333),
    (0.60000, 0.00000, 0.33333),
    (0.60000, 0.00000, 0.33333),
    (0.60000, 0.00000, 0.33333),
    (0.66667, 0.00000, 0.26667),
    (0.66667, 0.00000, 0.26667),
    (0.66667, 0.00000, 0.26667),
    (0.66667, 0.00000, 0.26667),
    (0.66667, 0.00000, 0.20000),
    (0.66667, 0.00000, 0.20000),
    (0.66667, 0.00000, 0.20000),
    (0.66667, 0.00000, 0.20000),
    (0.73333, 0.00000, 0.13333),
    (0.73333, 0.00000, 0.13333),
    (0.73333, 0.00000, 0.13333),
    (0.73333, 0.00000, 0.13333),
    (0.73333, 0.00000, 0.06667),
    (0.73333, 0.00000, 0.06667),
    (0.73333, 0.00000, 0.06667),
    (0.73333, 0.00000, 0.06667),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.80000, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.86667, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (0.93333, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.06667, 0.00000),
    (1.00000, 0.06667, 0.00000),
    (1.00000, 0.13333, 0.00000),
    (1.00000, 0.13333, 0.00000),
    (1.00000, 0.20000, 0.00000),
    (1.00000, 0.20000, 0.00000),
    (1.00000, 0.26667, 0.00000),
    (1.00000, 0.26667, 0.00000),
    (1.00000, 0.33333, 0.00000),
    (1.00000, 0.33333, 0.00000),
    (1.00000, 0.40000, 0.00000),
    (1.00000, 0.40000, 0.00000),
    (1.00000, 0.46667, 0.00000),
    (1.00000, 0.46667, 0.00000),
    (1.00000, 0.53333, 0.00000),
    (1.00000, 0.53333, 0.00000),
    (1.00000, 0.60000, 0.00000),
    (1.00000, 0.60000, 0.00000),
    (1.00000, 0.66667, 0.00000),
    (1.00000, 0.66667, 0.00000),
    (1.00000, 0.73333, 0.00000),
    (1.00000, 0.73333, 0.00000),
    (1.00000, 0.80000, 0.00000),
    (1.00000, 0.80000, 0.00000),
    (1.00000, 0.86667, 0.00000),
    (1.00000, 0.86667, 0.00000),
    (1.00000, 0.93333, 0.00000),
    (1.00000, 0.93333, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.06667),
    (1.00000, 1.00000, 0.06667),
    (1.00000, 1.00000, 0.13333),
    (1.00000, 1.00000, 0.13333),
    (1.00000, 1.00000, 0.20000),
    (1.00000, 1.00000, 0.20000),
    (1.00000, 1.00000, 0.26667),
    (1.00000, 1.00000, 0.26667),
    (1.00000, 1.00000, 0.33333),
    (1.00000, 1.00000, 0.33333),
    (1.00000, 1.00000, 0.40000),
    (1.00000, 1.00000, 0.40000),
    (1.00000, 1.00000, 0.46667),
    (1.00000, 1.00000, 0.46667),
    (1.00000, 1.00000, 0.53333),
    (1.00000, 1.00000, 0.53333),
    (1.00000, 1.00000, 0.60000),
    (1.00000, 1.00000, 0.60000),
    (1.00000, 1.00000, 0.66667),
    (1.00000, 1.00000, 0.66667),
    (1.00000, 1.00000, 0.73333),
    (1.00000, 1.00000, 0.73333),
    (1.00000, 1.00000, 0.80000),
    (1.00000, 1.00000, 0.80000),
    (1.00000, 1.00000, 0.86667),
    (1.00000, 1.00000, 1.00000),
    )
# 256-entry RGB lookup table; each row is an (r, g, b) triple of floats in
# [0, 1]. Heat-style ramp from black through red and orange to white: red
# saturates first (entry 85), green and blue rise afterwards until all three
# channels reach 1.0 at the last entry.
cmap_heat = (
    (0.00000, 0.00000, 0.00000), # noqa
    (0.01176, 0.00392, 0.00000),
    (0.02353, 0.00784, 0.00000),
    (0.03529, 0.01176, 0.00000),
    (0.04706, 0.01569, 0.00000),
    (0.05882, 0.01961, 0.00000),
    (0.07059, 0.02353, 0.00000),
    (0.08235, 0.02745, 0.00000),
    (0.09412, 0.03137, 0.00000),
    (0.10588, 0.03529, 0.00000),
    (0.11765, 0.03922, 0.00000),
    (0.12941, 0.04314, 0.00000),
    (0.14118, 0.04706, 0.00000),
    (0.15294, 0.05098, 0.00000),
    (0.16471, 0.05490, 0.00000),
    (0.17647, 0.05882, 0.00000),
    (0.18824, 0.06275, 0.00000),
    (0.20000, 0.06667, 0.00000),
    (0.21176, 0.07059, 0.00000),
    (0.22353, 0.07451, 0.00000),
    (0.23529, 0.07843, 0.00000),
    (0.24706, 0.08235, 0.00000),
    (0.25882, 0.08627, 0.00000),
    (0.27059, 0.09020, 0.00000),
    (0.28235, 0.09412, 0.00000),
    (0.29412, 0.09804, 0.00000),
    (0.30588, 0.10196, 0.00000),
    (0.31765, 0.10588, 0.00000),
    (0.32941, 0.10980, 0.00000),
    (0.34118, 0.11373, 0.00000),
    (0.35294, 0.11765, 0.00000),
    (0.36471, 0.12157, 0.00000),
    (0.37647, 0.12549, 0.00000),
    (0.38824, 0.12941, 0.00000),
    (0.40000, 0.13333, 0.00000),
    (0.41176, 0.13725, 0.00000),
    (0.42353, 0.14118, 0.00000),
    (0.43529, 0.14510, 0.00000),
    (0.44706, 0.14902, 0.00000),
    (0.45882, 0.15294, 0.00000),
    (0.47059, 0.15686, 0.00000),
    (0.48235, 0.16078, 0.00000),
    (0.49412, 0.16471, 0.00000),
    (0.50588, 0.16863, 0.00000),
    (0.51765, 0.17255, 0.00000),
    (0.52941, 0.17647, 0.00000),
    (0.54118, 0.18039, 0.00000),
    (0.55294, 0.18431, 0.00000),
    (0.56471, 0.18824, 0.00000),
    (0.57647, 0.19216, 0.00000),
    (0.58824, 0.19608, 0.00000),
    (0.60000, 0.20000, 0.00000),
    (0.61176, 0.20392, 0.00000),
    (0.62353, 0.20784, 0.00000),
    (0.63529, 0.21176, 0.00000),
    (0.64706, 0.21569, 0.00000),
    (0.65882, 0.21961, 0.00000),
    (0.67059, 0.22353, 0.00000),
    (0.68235, 0.22745, 0.00000),
    (0.69412, 0.23137, 0.00000),
    (0.70588, 0.23529, 0.00000),
    (0.71765, 0.23922, 0.00000),
    (0.72941, 0.24314, 0.00000),
    (0.74118, 0.24706, 0.00000),
    (0.75294, 0.25098, 0.00000),
    (0.76471, 0.25490, 0.00000),
    (0.77647, 0.25882, 0.00000),
    (0.78824, 0.26275, 0.00000),
    (0.80000, 0.26667, 0.00000),
    (0.81176, 0.27059, 0.00000),
    (0.82353, 0.27451, 0.00000),
    (0.83529, 0.27843, 0.00000),
    (0.84706, 0.28235, 0.00000),
    (0.85882, 0.28627, 0.00000),
    (0.87059, 0.29020, 0.00000),
    (0.88235, 0.29412, 0.00000),
    (0.89412, 0.29804, 0.00000),
    (0.90588, 0.30196, 0.00000),
    (0.91765, 0.30588, 0.00000),
    (0.92941, 0.30980, 0.00000),
    (0.94118, 0.31373, 0.00000),
    (0.95294, 0.31765, 0.00000),
    (0.96471, 0.32157, 0.00000),
    (0.97647, 0.32549, 0.00000),
    (0.98824, 0.32941, 0.00000),
    (1.00000, 0.33333, 0.00000),
    (1.00000, 0.33725, 0.00000),
    (1.00000, 0.34118, 0.00000),
    (1.00000, 0.34510, 0.00000),
    (1.00000, 0.34902, 0.00000),
    (1.00000, 0.35294, 0.00000),
    (1.00000, 0.35686, 0.00000),
    (1.00000, 0.36078, 0.00000),
    (1.00000, 0.36471, 0.00000),
    (1.00000, 0.36863, 0.00000),
    (1.00000, 0.37255, 0.00000),
    (1.00000, 0.37647, 0.00000),
    (1.00000, 0.38039, 0.00000),
    (1.00000, 0.38431, 0.00000),
    (1.00000, 0.38824, 0.00000),
    (1.00000, 0.39216, 0.00000),
    (1.00000, 0.39608, 0.00000),
    (1.00000, 0.40000, 0.00000),
    (1.00000, 0.40392, 0.00000),
    (1.00000, 0.40784, 0.00000),
    (1.00000, 0.41176, 0.00000),
    (1.00000, 0.41569, 0.00000),
    (1.00000, 0.41961, 0.00000),
    (1.00000, 0.42353, 0.00000),
    (1.00000, 0.42745, 0.00000),
    (1.00000, 0.43137, 0.00000),
    (1.00000, 0.43529, 0.00000),
    (1.00000, 0.43922, 0.00000),
    (1.00000, 0.44314, 0.00000),
    (1.00000, 0.44706, 0.00000),
    (1.00000, 0.45098, 0.00000),
    (1.00000, 0.45490, 0.00000),
    (1.00000, 0.45882, 0.00000),
    (1.00000, 0.46275, 0.00000),
    (1.00000, 0.46667, 0.00000),
    (1.00000, 0.47059, 0.00000),
    (1.00000, 0.47451, 0.00000),
    (1.00000, 0.47843, 0.00000),
    (1.00000, 0.48235, 0.00000),
    (1.00000, 0.48627, 0.00000),
    (1.00000, 0.49020, 0.00000),
    (1.00000, 0.49412, 0.00000),
    (1.00000, 0.49804, 0.00000),
    (1.00000, 0.50196, 0.00000),
    (1.00000, 0.50588, 0.00000),
    (1.00000, 0.50980, 0.00000),
    (1.00000, 0.51373, 0.00000),
    (1.00000, 0.51765, 0.00000),
    (1.00000, 0.52157, 0.00000),
    (1.00000, 0.52549, 0.00000),
    (1.00000, 0.52941, 0.00000),
    (1.00000, 0.53333, 0.00000),
    (1.00000, 0.53725, 0.00000),
    (1.00000, 0.54118, 0.00000),
    (1.00000, 0.54510, 0.00000),
    (1.00000, 0.54902, 0.00000),
    (1.00000, 0.55294, 0.00000),
    (1.00000, 0.55686, 0.00000),
    (1.00000, 0.56078, 0.00000),
    (1.00000, 0.56471, 0.00000),
    (1.00000, 0.56863, 0.00000),
    (1.00000, 0.57255, 0.00000),
    (1.00000, 0.57647, 0.00000),
    (1.00000, 0.58039, 0.00000),
    (1.00000, 0.58431, 0.00000),
    (1.00000, 0.58824, 0.00000),
    (1.00000, 0.59216, 0.00000),
    (1.00000, 0.59608, 0.00000),
    (1.00000, 0.60000, 0.00000),
    (1.00000, 0.60392, 0.00000),
    (1.00000, 0.60784, 0.00000),
    (1.00000, 0.61176, 0.00000),
    (1.00000, 0.61569, 0.00000),
    (1.00000, 0.61961, 0.00000),
    (1.00000, 0.62353, 0.00000),
    (1.00000, 0.62745, 0.00000),
    (1.00000, 0.63137, 0.00000),
    (1.00000, 0.63529, 0.00000),
    (1.00000, 0.63922, 0.00000),
    (1.00000, 0.64314, 0.00000),
    (1.00000, 0.64706, 0.00000),
    (1.00000, 0.65098, 0.01176),
    (1.00000, 0.65490, 0.02353),
    (1.00000, 0.65882, 0.03529),
    (1.00000, 0.66275, 0.04706),
    (1.00000, 0.66667, 0.05882),
    (1.00000, 0.67059, 0.07059),
    (1.00000, 0.67451, 0.08235),
    (1.00000, 0.67843, 0.09412),
    (1.00000, 0.68235, 0.10588),
    (1.00000, 0.68627, 0.11765),
    (1.00000, 0.69020, 0.12941),
    (1.00000, 0.69412, 0.14118),
    (1.00000, 0.69804, 0.15294),
    (1.00000, 0.70196, 0.16471),
    (1.00000, 0.70588, 0.17647),
    (1.00000, 0.70980, 0.18824),
    (1.00000, 0.71373, 0.20000),
    (1.00000, 0.71765, 0.21176),
    (1.00000, 0.72157, 0.22353),
    (1.00000, 0.72549, 0.23529),
    (1.00000, 0.72941, 0.24706),
    (1.00000, 0.73333, 0.25882),
    (1.00000, 0.73725, 0.27059),
    (1.00000, 0.74118, 0.28235),
    (1.00000, 0.74510, 0.29412),
    (1.00000, 0.74902, 0.30588),
    (1.00000, 0.75294, 0.31765),
    (1.00000, 0.75686, 0.32941),
    (1.00000, 0.76078, 0.34118),
    (1.00000, 0.76471, 0.35294),
    (1.00000, 0.76863, 0.36471),
    (1.00000, 0.77255, 0.37647),
    (1.00000, 0.77647, 0.38824),
    (1.00000, 0.78039, 0.40000),
    (1.00000, 0.78431, 0.41176),
    (1.00000, 0.78824, 0.42353),
    (1.00000, 0.79216, 0.43529),
    (1.00000, 0.79608, 0.44706),
    (1.00000, 0.80000, 0.45882),
    (1.00000, 0.80392, 0.47059),
    (1.00000, 0.80784, 0.48235),
    (1.00000, 0.81176, 0.49412),
    (1.00000, 0.81569, 0.50588),
    (1.00000, 0.81961, 0.51765),
    (1.00000, 0.82353, 0.52941),
    (1.00000, 0.82745, 0.54118),
    (1.00000, 0.83137, 0.55294),
    (1.00000, 0.83529, 0.56471),
    (1.00000, 0.83922, 0.57647),
    (1.00000, 0.84314, 0.58824),
    (1.00000, 0.84706, 0.60000),
    (1.00000, 0.85098, 0.61176),
    (1.00000, 0.85490, 0.62353),
    (1.00000, 0.85882, 0.63529),
    (1.00000, 0.86275, 0.64706),
    (1.00000, 0.86667, 0.65882),
    (1.00000, 0.87059, 0.67059),
    (1.00000, 0.87451, 0.68235),
    (1.00000, 0.87843, 0.69412),
    (1.00000, 0.88235, 0.70588),
    (1.00000, 0.88627, 0.71765),
    (1.00000, 0.89020, 0.72941),
    (1.00000, 0.89412, 0.74118),
    (1.00000, 0.89804, 0.75294),
    (1.00000, 0.90196, 0.76471),
    (1.00000, 0.90588, 0.77647),
    (1.00000, 0.90980, 0.78824),
    (1.00000, 0.91373, 0.80000),
    (1.00000, 0.91765, 0.81176),
    (1.00000, 0.92157, 0.82353),
    (1.00000, 0.92549, 0.83529),
    (1.00000, 0.92941, 0.84706),
    (1.00000, 0.93333, 0.85882),
    (1.00000, 0.93725, 0.87059),
    (1.00000, 0.94118, 0.88235),
    (1.00000, 0.94510, 0.89412),
    (1.00000, 0.94902, 0.90588),
    (1.00000, 0.95294, 0.91765),
    (1.00000, 0.95686, 0.92941),
    (1.00000, 0.96078, 0.94118),
    (1.00000, 0.96471, 0.95294),
    (1.00000, 0.96863, 0.96471),
    (1.00000, 0.97255, 0.97647),
    (1.00000, 0.97647, 0.98824),
    (1.00000, 0.98039, 1.00000),
    (1.00000, 0.98431, 1.00000),
    (1.00000, 0.98824, 1.00000),
    (1.00000, 0.99216, 1.00000),
    (1.00000, 0.99608, 1.00000),
    (1.00000, 1.00000, 1.00000),
    )
# 256-entry RGB lookup table; each row is an (r, g, b) triple of floats in
# [0, 1]. Values appear hand-tuned rather than generated from a formula;
# note the abrupt colour jumps around entries 77-81 and the saturated
# yellow plateaus near the top of the table -- presumably intentional
# highlight bands (TODO confirm against the renderer that uses this map).
cmap_smooth3 = (
    (0.00000, 0.00000, 0.00784), # noqa
    (0.00000, 0.00000, 0.01795),
    (0.00000, 0.00000, 0.03087),
    (0.00000, 0.00000, 0.04434),
    (0.00000, 0.00000, 0.05781),
    (0.00000, 0.00000, 0.07128),
    (0.00000, 0.00000, 0.08475),
    (0.00000, 0.00000, 0.09822),
    (0.00000, 0.00000, 0.11170),
    (0.00000, 0.00000, 0.12231),
    (0.00000, 0.00000, 0.13472),
    (0.00000, 0.00000, 0.14819),
    (0.00000, 0.00000, 0.16166),
    (0.00000, 0.00000, 0.17513),
    (0.00000, 0.00000, 0.18851),
    (0.00000, 0.00000, 0.19862),
    (0.00000, 0.00000, 0.21163),
    (0.00000, 0.00000, 0.22510),
    (0.00000, 0.00000, 0.23857),
    (0.00000, 0.00000, 0.25080),
    (0.00000, 0.00000, 0.26228),
    (0.00000, 0.00000, 0.27885),
    (0.00000, 0.00000, 0.28895),
    (0.00000, 0.00000, 0.30201),
    (0.00000, 0.00000, 0.31308),
    (0.00000, 0.00000, 0.32503),
    (0.00000, 0.00000, 0.33850),
    (0.00000, 0.00000, 0.35197),
    (0.00000, 0.00000, 0.36526),
    (0.00000, 0.00000, 0.37536),
    (0.00000, 0.00000, 0.39146),
    (0.00000, 0.00000, 0.40341),
    (0.00000, 0.00000, 0.41541),
    (0.00000, 0.00000, 0.42754),
    (0.00000, 0.00000, 0.43922),
    (0.00000, 0.00000, 0.45559),
    (0.00000, 0.00000, 0.46570),
    (0.00000, 0.00000, 0.47885),
    (0.00000, 0.00000, 0.49232),
    (0.00000, 0.00000, 0.50579),
    (0.00000, 0.00000, 0.51788),
    (0.00000, 0.00000, 0.52881),
    (0.00000, 0.00000, 0.54228),
    (0.00000, 0.00000, 0.55576),
    (0.00000, 0.00000, 0.56923),
    (0.00000, 0.00000, 0.58016),
    (0.00397, 0.00000, 0.59225),
    (0.01356, 0.00000, 0.60572),
    (0.02791, 0.00000, 0.61919),
    (0.04507, 0.00000, 0.63234),
    (0.06528, 0.00000, 0.64245),
    (0.08235, 0.00000, 0.65569),
    (0.10178, 0.00000, 0.66916),
    (0.12198, 0.00000, 0.68263),
    (0.14219, 0.00000, 0.69462),
    (0.16240, 0.00000, 0.70565),
    (0.18261, 0.00000, 0.71912),
    (0.20281, 0.00000, 0.73259),
    (0.21984, 0.00000, 0.74607),
    (0.23668, 0.00000, 0.75954),
    (0.25559, 0.00000, 0.77093),
    (0.27580, 0.00000, 0.78256),
    (0.29504, 0.00000, 0.79603),
    (0.31063, 0.00000, 0.80909),
    (0.31737, 0.00000, 0.81919),
    (0.31765, 0.00000, 0.83575),
    (0.31765, 0.00000, 0.84992),
    (0.31765, 0.00000, 0.86339),
    (0.31765, 0.00000, 0.87686),
    (0.31765, 0.00000, 0.88932),
    (0.31719, 0.00000, 0.89942),
    (0.31382, 0.00000, 0.90953),
    (0.31373, 0.00000, 0.92291),
    (0.31373, 0.00000, 0.93638),
    (0.31373, 0.00000, 0.94985),
    (0.31373, 0.00000, 0.96332),
    (0.31373, 0.00000, 0.97573),
    (0.40250, 0.05175, 0.86307),
    (0.99189, 0.39528, 0.05813),
    (1.00000, 0.40000, 0.04706),
    (0.84776, 0.30312, 0.04983),
    (0.55402, 0.11438, 0.05319),
    (0.38644, 0.00000, 0.05988),
    (0.40664, 0.00000, 0.07442),
    (0.42685, 0.00000, 0.09799),
    (0.44706, 0.00000, 0.12157),
    (0.46727, 0.00000, 0.13841),
    (0.48466, 0.00000, 0.15806),
    (0.50376, 0.00000, 0.17827),
    (0.52397, 0.00000, 0.20018),
    (0.54417, 0.00000, 0.22261),
    (0.56438, 0.00000, 0.24281),
    (0.58459, 0.00000, 0.26302),
    (0.60480, 0.00000, 0.28323),
    (0.62501, 0.00000, 0.30344),
    (0.64521, 0.00000, 0.32364),
    (0.66542, 0.00000, 0.34385),
    (0.68563, 0.00000, 0.36406),
    (0.70584, 0.00000, 0.38491),
    (0.72604, 0.00000, 0.40840),
    (0.74625, 0.00000, 0.42860),
    (0.76937, 0.00000, 0.44881),
    (0.79059, 0.00000, 0.46667),
    (0.81260, 0.00000, 0.48711),
    (0.83493, 0.00000, 0.50944),
    (0.85513, 0.00000, 0.52964),
    (0.87576, 0.00000, 0.54985),
    (0.90607, 0.00000, 0.57006),
    (0.93933, 0.00000, 0.59027),
    (0.96821, 0.00000, 0.61047),
    (0.98777, 0.00000, 0.63068),
    (0.99737, 0.00000, 0.65089),
    (1.00000, 0.00000, 0.67110),
    (1.00000, 0.00092, 0.69149),
    (1.00000, 0.01776, 0.71506),
    (1.00000, 0.03760, 0.73564),
    (1.00000, 0.05781, 0.75585),
    (1.00000, 0.07802, 0.77606),
    (1.00000, 0.09822, 0.79626),
    (1.00000, 0.11922, 0.81647),
    (1.00000, 0.14279, 0.83668),
    (1.00000, 0.16637, 0.85689),
    (1.00000, 0.18690, 0.88014),
    (1.00000, 0.20960, 0.90122),
    (1.00000, 0.23124, 0.92143),
    (1.00000, 0.25144, 0.94164),
    (1.00000, 0.27165, 0.96185),
    (1.00000, 0.29214, 0.98178),
    (1.00000, 0.31571, 0.99862),
    (1.00000, 0.33310, 0.98454),
    (1.00000, 0.35248, 0.96517),
    (1.00000, 0.37269, 0.94496),
    (1.00000, 0.39290, 0.92332),
    (1.00000, 0.41223, 0.89974),
    (1.00000, 0.42907, 0.87649),
    (1.00000, 0.44591, 0.85628),
    (1.00000, 0.46588, 0.83294),
    (1.00000, 0.48609, 0.81195),
    (1.00000, 0.50630, 0.79174),
    (1.00000, 0.52503, 0.77006),
    (1.00000, 0.54279, 0.74740),
    (1.00000, 0.56263, 0.72720),
    (1.00000, 0.57947, 0.70699),
    (1.00000, 0.59949, 0.68360),
    (1.00000, 0.61707, 0.66265),
    (1.00000, 0.62976, 0.64037),
    (1.00000, 0.63682, 0.61831),
    (1.00000, 0.63922, 0.59811),
    (1.00000, 0.63922, 0.57790),
    (1.00000, 0.63922, 0.55769),
    (1.00000, 0.63922, 0.53425),
    (1.00000, 0.63922, 0.51335),
    (1.00000, 0.63922, 0.49102),
    (1.00000, 0.63922, 0.46902),
    (1.00000, 0.63922, 0.44881),
    (1.00000, 0.63922, 0.42860),
    (1.00000, 0.63922, 0.40839),
    (1.00000, 0.63922, 0.38491),
    (1.00000, 0.63922, 0.36406),
    (1.00000, 0.63922, 0.34168),
    (0.99838, 0.63922, 0.31972),
    (0.99077, 0.63922, 0.29845),
    (0.97241, 0.63922, 0.27589),
    (0.94546, 0.63922, 0.25905),
    (0.91520, 0.63922, 0.23557),
    (0.88489, 0.63922, 0.21476),
    (0.85457, 0.63922, 0.19234),
    (0.82426, 0.63922, 0.16876),
    (0.79395, 0.63922, 0.14629),
    (0.76309, 0.63922, 0.12664),
    (0.72941, 0.63922, 0.10980),
    (0.70583, 0.63922, 0.08622),
    (0.69070, 0.63922, 0.06265),
    (0.67949, 0.63922, 0.04133),
    (0.67793, 0.64092, 0.02113),
    (0.68582, 0.64545, 0.00554),
    (0.69772, 0.65278, 0.00000),
    (0.71793, 0.66307, 0.00000),
    (0.73814, 0.68665, 0.00000),
    (0.75834, 0.71022, 0.00000),
    (0.77855, 0.73380, 0.00000),
    (0.79876, 0.75738, 0.00000),
    (0.81897, 0.78095, 0.00000),
    (0.83982, 0.80517, 0.00000),
    (0.86330, 0.83202, 0.00000),
    (0.88351, 0.85560, 0.00000),
    (0.90372, 0.88208, 0.00000),
    (0.92393, 0.90667, 0.00000),
    (0.94413, 0.93025, 0.00000),
    (0.96434, 0.95382, 0.00000),
    (0.98386, 0.97740, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00305),
    (1.00000, 1.00000, 0.01638),
    (1.00000, 1.00000, 0.03322),
    (1.00000, 1.00000, 0.05006),
    (1.00000, 1.00000, 0.06773),
    (1.00000, 1.00000, 0.08794),
    (1.00000, 1.00000, 0.10815),
    (1.00000, 1.00000, 0.12526),
    (1.00000, 1.00000, 0.14464),
    (1.00000, 1.00000, 0.16485),
    (1.00000, 1.00000, 0.18506),
    (1.00000, 1.00000, 0.20439),
    (1.00000, 1.00000, 0.22155),
    (1.00000, 1.00000, 0.24176),
    (1.00000, 1.00000, 0.25883),
    (1.00000, 1.00000, 0.27567),
    (1.00000, 1.00000, 0.28642),
    (1.00000, 1.00000, 0.28872),
    (1.00000, 1.00000, 0.28350),
    (1.00000, 1.00000, 0.27229),
    (1.00000, 1.00000, 0.25208),
    (1.00000, 1.00000, 0.22869),
    (1.00000, 1.00000, 0.20511),
    (1.00000, 1.00000, 0.18154),
    (1.00000, 1.00000, 0.15796),
    (1.00000, 1.00000, 0.13536),
    (1.00000, 1.00000, 0.11473),
    (1.00000, 1.00000, 0.09116),
    (1.00000, 1.00000, 0.06435),
    (1.00000, 1.00000, 0.04008),
    (1.00000, 1.00000, 0.02076),
    (1.00000, 1.00000, 0.00706),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.09305),
    (1.00000, 1.00000, 0.33136),
    (1.00000, 1.00000, 0.60966),
    (1.00000, 1.00000, 0.83605),
    (0.96674, 1.00000, 0.96343),
    (0.75749, 1.00000, 1.00000),
    (0.00000, 1.00000, 1.00000),
    )
# 256-entry RGB lookup table; each row is an (r, g, b) triple of floats in
# [0, 1]. Rainbow-style sweep: dark indigo -> purple -> blue -> teal ->
# green -> yellow -> orange -> red, then desaturates to white over the
# final entries.
cmap_rainbow = (
    (0.00000, 0.00000, 0.16471), # noqa
    (0.02745, 0.00000, 0.18431),
    (0.05882, 0.00000, 0.20000),
    (0.08627, 0.00000, 0.21961),
    (0.11373, 0.00000, 0.23922),
    (0.14510, 0.00000, 0.25882),
    (0.17647, 0.00000, 0.27843),
    (0.20392, 0.00000, 0.29804),
    (0.23137, 0.00000, 0.31765),
    (0.26275, 0.00000, 0.33725),
    (0.29412, 0.00000, 0.35686),
    (0.32157, 0.00000, 0.37647),
    (0.35294, 0.00000, 0.39608),
    (0.38039, 0.00000, 0.41569),
    (0.41176, 0.00000, 0.43529),
    (0.43922, 0.00000, 0.45490),
    (0.47059, 0.00000, 0.47451),
    (0.49804, 0.00000, 0.49412),
    (0.52941, 0.00000, 0.51373),
    (0.55686, 0.00000, 0.53725),
    (0.58824, 0.00000, 0.55686),
    (0.55686, 0.00000, 0.57647),
    (0.52941, 0.00000, 0.59608),
    (0.49804, 0.00000, 0.61569),
    (0.47059, 0.00000, 0.63922),
    (0.43922, 0.00000, 0.65882),
    (0.41176, 0.00000, 0.67843),
    (0.38039, 0.00000, 0.70196),
    (0.35294, 0.00000, 0.72157),
    (0.32157, 0.00000, 0.74118),
    (0.29412, 0.00000, 0.76471),
    (0.26275, 0.00000, 0.78431),
    (0.23137, 0.00000, 0.80392),
    (0.20392, 0.00000, 0.82745),
    (0.17647, 0.00000, 0.84706),
    (0.14510, 0.00000, 0.87059),
    (0.11373, 0.00000, 0.89020),
    (0.08627, 0.00000, 0.91373),
    (0.05882, 0.00000, 0.93333),
    (0.02745, 0.00000, 0.95686),
    (0.00000, 0.00000, 0.97647),
    (0.00000, 0.00000, 1.00000),
    (0.00000, 0.02353, 0.97647),
    (0.00000, 0.04706, 0.95686),
    (0.00000, 0.06275, 0.93333),
    (0.00000, 0.08235, 0.91373),
    (0.00000, 0.09804, 0.89020),
    (0.00000, 0.11373, 0.87059),
    (0.00000, 0.12941, 0.84706),
    (0.00000, 0.14118, 0.82745),
    (0.00000, 0.15686, 0.80392),
    (0.00000, 0.16863, 0.78431),
    (0.00000, 0.18431, 0.76471),
    (0.00000, 0.19608, 0.74118),
    (0.00000, 0.21176, 0.72157),
    (0.00000, 0.22353, 0.70196),
    (0.00000, 0.23529, 0.67843),
    (0.00000, 0.25098, 0.65882),
    (0.00000, 0.26275, 0.63922),
    (0.00000, 0.27451, 0.61569),
    (0.00000, 0.28627, 0.59608),
    (0.00000, 0.29804, 0.57647),
    (0.00000, 0.30980, 0.55686),
    (0.00000, 0.32157, 0.53725),
    (0.00000, 0.33333, 0.51373),
    (0.00000, 0.34510, 0.49412),
    (0.00000, 0.35686, 0.47451),
    (0.00000, 0.36863, 0.45490),
    (0.00000, 0.38039, 0.43529),
    (0.00000, 0.39216, 0.41569),
    (0.00000, 0.40392, 0.39608),
    (0.00000, 0.41176, 0.37647),
    (0.00000, 0.42353, 0.35686),
    (0.00000, 0.43529, 0.33725),
    (0.00000, 0.44706, 0.31765),
    (0.00000, 0.45882, 0.29804),
    (0.00000, 0.46667, 0.27843),
    (0.00000, 0.47843, 0.25882),
    (0.00000, 0.49020, 0.23922),
    (0.00000, 0.49804, 0.21961),
    (0.00000, 0.50980, 0.20000),
    (0.00000, 0.52157, 0.18431),
    (0.00000, 0.52941, 0.16471),
    (0.00000, 0.54118, 0.14510),
    (0.00000, 0.55294, 0.12941),
    (0.00000, 0.56078, 0.10980),
    (0.00000, 0.57255, 0.09412),
    (0.00000, 0.58431, 0.07451),
    (0.00000, 0.59216, 0.05882),
    (0.00000, 0.60392, 0.04314),
    (0.00000, 0.61176, 0.02745),
    (0.00000, 0.62353, 0.01176),
    (0.00000, 0.63137, 0.00000),
    (0.00000, 0.64314, 0.00000),
    (0.00000, 0.65098, 0.00000),
    (0.00000, 0.66275, 0.00000),
    (0.00000, 0.67059, 0.00000),
    (0.00000, 0.68235, 0.00000),
    (0.00000, 0.69020, 0.00000),
    (0.00000, 0.70196, 0.00000),
    (0.00000, 0.70980, 0.00000),
    (0.00000, 0.72157, 0.00000),
    (0.00000, 0.72941, 0.00000),
    (0.00000, 0.74118, 0.00000),
    (0.00000, 0.74902, 0.00000),
    (0.00000, 0.76078, 0.00000),
    (0.00000, 0.76863, 0.00000),
    (0.00000, 0.77647, 0.00000),
    (0.00000, 0.78824, 0.00000),
    (0.00000, 0.79608, 0.00000),
    (0.00000, 0.80784, 0.00000),
    (0.00000, 0.81569, 0.00000),
    (0.00000, 0.82353, 0.00000),
    (0.00000, 0.83529, 0.00000),
    (0.00000, 0.84314, 0.00000),
    (0.00000, 0.85490, 0.00000),
    (0.00000, 0.86275, 0.00000),
    (0.00000, 0.87059, 0.00000),
    (0.00000, 0.88235, 0.00000),
    (0.00000, 0.89020, 0.00000),
    (0.00000, 0.89804, 0.00000),
    (0.00000, 0.90980, 0.00000),
    (0.00000, 0.91765, 0.00000),
    (0.00000, 0.92549, 0.00000),
    (0.00000, 0.93725, 0.00000),
    (0.00000, 0.94510, 0.00000),
    (0.00000, 0.95294, 0.00000),
    (0.00000, 0.96078, 0.00000),
    (0.00000, 0.97255, 0.00000),
    (0.00000, 0.98039, 0.00000),
    (0.00000, 0.98824, 0.00000),
    (0.00000, 1.00000, 0.00000),
    (0.00000, 0.98824, 0.00000),
    (0.00000, 0.98039, 0.00000),
    (0.00000, 0.97255, 0.00000),
    (0.00000, 0.96078, 0.00000),
    (0.00000, 0.95294, 0.00000),
    (0.00000, 0.94510, 0.00000),
    (0.00000, 0.93725, 0.00000),
    (0.00000, 0.92549, 0.00000),
    (0.00000, 0.91765, 0.00000),
    (0.00000, 0.90980, 0.00000),
    (0.00000, 0.89804, 0.00000),
    (0.00000, 0.89020, 0.00000),
    (0.00000, 0.88235, 0.00000),
    (0.00000, 0.87059, 0.00000),
    (0.00000, 0.86275, 0.00000),
    (0.00000, 0.85490, 0.00000),
    (0.00000, 0.84314, 0.00000),
    (0.00000, 0.83529, 0.00000),
    (0.00000, 0.82353, 0.00000),
    (0.00000, 0.81569, 0.00000),
    (0.00000, 0.80784, 0.00000),
    (0.00000, 0.79608, 0.00000),
    (0.00000, 0.78824, 0.00000),
    (0.00000, 0.77647, 0.00000),
    (0.00784, 0.76863, 0.00000),
    (0.03529, 0.77647, 0.00000),
    (0.06667, 0.78824, 0.00000),
    (0.09804, 0.80000, 0.00000),
    (0.12941, 0.81176, 0.00000),
    (0.16471, 0.82745, 0.00000),
    (0.20000, 0.84314, 0.00000),
    (0.23529, 0.85882, 0.00000),
    (0.26667, 0.87059, 0.00000),
    (0.30588, 0.89020, 0.00000),
    (0.34118, 0.90196, 0.00000),
    (0.37647, 0.92157, 0.00000),
    (0.41176, 0.93333, 0.00000),
    (0.44706, 0.95294, 0.00000),
    (0.48627, 0.96863, 0.00000),
    (0.52157, 0.98824, 0.00000),
    (0.56078, 1.00000, 0.00000),
    (0.59608, 1.00000, 0.00000),
    (0.63529, 1.00000, 0.00000),
    (0.67059, 1.00000, 0.00000),
    (0.70980, 1.00000, 0.00000),
    (0.74902, 1.00000, 0.00000),
    (0.78431, 1.00000, 0.00000),
    (0.82353, 1.00000, 0.00000),
    (0.85882, 1.00000, 0.00000),
    (0.89804, 1.00000, 0.00000),
    (0.93333, 1.00000, 0.00000),
    (0.97647, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (0.99608, 1.00000, 0.00000),
    (0.98039, 1.00000, 0.00000),
    (0.96078, 0.97647, 0.00000),
    (0.94510, 0.93725, 0.00000),
    (0.92549, 0.89804, 0.00000),
    (0.90980, 0.85882, 0.00000),
    (0.89412, 0.81961, 0.00000),
    (0.87451, 0.78039, 0.00000),
    (0.85882, 0.74118, 0.00000),
    (0.83922, 0.70196, 0.00000),
    (0.82353, 0.66275, 0.00000),
    (0.80392, 0.62353, 0.00000),
    (0.78824, 0.58431, 0.00000),
    (0.76863, 0.54510, 0.00000),
    (0.75686, 0.50980, 0.00000),
    (0.74118, 0.46667, 0.00000),
    (0.72549, 0.43137, 0.00000),
    (0.70980, 0.39216, 0.00000),
    (0.69412, 0.35294, 0.00000),
    (0.68235, 0.31765, 0.00000),
    (0.66275, 0.27451, 0.00000),
    (0.65098, 0.23922, 0.00000),
    (0.63529, 0.20000, 0.00000),
    (0.62745, 0.16863, 0.00000),
    (0.61569, 0.12941, 0.00000),
    (0.60784, 0.09804, 0.00000),
    (0.61961, 0.08235, 0.00000),
    (0.62745, 0.06275, 0.00000),
    (0.63922, 0.04706, 0.00000),
    (0.64706, 0.02353, 0.00000),
    (0.65882, 0.00000, 0.00000),
    (0.66667, 0.00000, 0.00000),
    (0.67843, 0.00000, 0.00000),
    (0.68627, 0.00000, 0.00000),
    (0.69804, 0.00000, 0.00000),
    (0.70980, 0.00000, 0.00000),
    (0.71765, 0.00000, 0.00000),
    (0.72941, 0.00000, 0.00000),
    (0.73725, 0.00000, 0.00000),
    (0.74902, 0.00000, 0.00000),
    (0.75686, 0.00000, 0.00000),
    (0.76863, 0.00000, 0.00000),
    (0.77647, 0.00000, 0.00000),
    (0.78824, 0.00000, 0.00000),
    (0.80000, 0.00784, 0.00784),
    (0.80784, 0.02745, 0.02745),
    (0.81961, 0.05098, 0.05098),
    (0.82745, 0.08235, 0.08235),
    (0.83922, 0.11373, 0.11373),
    (0.84706, 0.14902, 0.14902),
    (0.85882, 0.19216, 0.19216),
    (0.86667, 0.23137, 0.23137),
    (0.87843, 0.27843, 0.27843),
    (0.88627, 0.32549, 0.32549),
    (0.89804, 0.37647, 0.37647),
    (0.90980, 0.43137, 0.43137),
    (0.91765, 0.48627, 0.48627),
    (0.92941, 0.54118, 0.54118),
    (0.93725, 0.60000, 0.60000),
    (0.94902, 0.66275, 0.66275),
    (0.95686, 0.72549, 0.72549),
    (0.96863, 0.79216, 0.79216),
    (0.97647, 0.85882, 0.85882),
    (0.98824, 0.92941, 0.92941),
    (1.00000, 1.00000, 1.00000),
    )
cmap_manycol = (
(0.34902, 0.34902, 0.34902), # noqa
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
)
cmap_gray = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00392, 0.00392, 0.00392),
(0.00784, 0.00784, 0.00784),
(0.01176, 0.01176, 0.01176),
(0.01569, 0.01569, 0.01569),
(0.01961, 0.01961, 0.01961),
(0.02353, 0.02353, 0.02353),
(0.02745, 0.02745, 0.02745),
(0.03137, 0.03137, 0.03137),
(0.03529, 0.03529, 0.03529),
(0.03922, 0.03922, 0.03922),
(0.04314, 0.04314, 0.04314),
(0.04706, 0.04706, 0.04706),
(0.05098, 0.05098, 0.05098),
(0.05490, 0.05490, 0.05490),
(0.05882, 0.05882, 0.05882),
(0.06275, 0.06275, 0.06275),
(0.06667, 0.06667, 0.06667),
(0.07059, 0.07059, 0.07059),
(0.07451, 0.07451, 0.07451),
(0.07843, 0.07843, 0.07843),
(0.08235, 0.08235, 0.08235),
(0.08627, 0.08627, 0.08627),
(0.09020, 0.09020, 0.09020),
(0.09412, 0.09412, 0.09412),
(0.09804, 0.09804, 0.09804),
(0.10196, 0.10196, 0.10196),
(0.10588, 0.10588, 0.10588),
(0.10980, 0.10980, 0.10980),
(0.11373, 0.11373, 0.11373),
(0.11765, 0.11765, 0.11765),
(0.12157, 0.12157, 0.12157),
(0.12549, 0.12549, 0.12549),
(0.12941, 0.12941, 0.12941),
(0.13333, 0.13333, 0.13333),
(0.13725, 0.13725, 0.13725),
(0.14118, 0.14118, 0.14118),
(0.14510, 0.14510, 0.14510),
(0.14902, 0.14902, 0.14902),
(0.15294, 0.15294, 0.15294),
(0.15686, 0.15686, 0.15686),
(0.16078, 0.16078, 0.16078),
(0.16471, 0.16471, 0.16471),
(0.16863, 0.16863, 0.16863),
(0.17255, 0.17255, 0.17255),
(0.17647, 0.17647, 0.17647),
(0.18039, 0.18039, 0.18039),
(0.18431, 0.18431, 0.18431),
(0.18824, 0.18824, 0.18824),
(0.19216, 0.19216, 0.19216),
(0.19608, 0.19608, 0.19608),
(0.20000, 0.20000, 0.20000),
(0.20392, 0.20392, 0.20392),
(0.20784, 0.20784, 0.20784),
(0.21176, 0.21176, 0.21176),
(0.21569, 0.21569, 0.21569),
(0.21961, 0.21961, 0.21961),
(0.22353, 0.22353, 0.22353),
(0.22745, 0.22745, 0.22745),
(0.23137, 0.23137, 0.23137),
(0.23529, 0.23529, 0.23529),
(0.23922, 0.23922, 0.23922),
(0.24314, 0.24314, 0.24314),
(0.24706, 0.24706, 0.24706),
(0.25098, 0.25098, 0.25098),
(0.25490, 0.25490, 0.25490),
(0.25882, 0.25882, 0.25882),
(0.26275, 0.26275, 0.26275),
(0.26667, 0.26667, 0.26667),
(0.27059, 0.27059, 0.27059),
(0.27451, 0.27451, 0.27451),
(0.27843, 0.27843, 0.27843),
(0.28235, 0.28235, 0.28235),
(0.28627, 0.28627, 0.28627),
(0.29020, 0.29020, 0.29020),
(0.29412, 0.29412, 0.29412),
(0.29804, 0.29804, 0.29804),
(0.30196, 0.30196, 0.30196),
(0.30588, 0.30588, 0.30588),
(0.30980, 0.30980, 0.30980),
(0.31373, 0.31373, 0.31373),
(0.31765, 0.31765, 0.31765),
(0.32157, 0.32157, 0.32157),
(0.32549, 0.32549, 0.32549),
(0.32941, 0.32941, 0.32941),
(0.33333, 0.33333, 0.33333),
(0.33725, 0.33725, 0.33725),
(0.34118, 0.34118, 0.34118),
(0.34510, 0.34510, 0.34510),
(0.34902, 0.34902, 0.34902),
(0.35294, 0.35294, 0.35294),
(0.35686, 0.35686, 0.35686),
(0.36078, 0.36078, 0.36078),
(0.36471, 0.36471, 0.36471),
(0.36863, 0.36863, 0.36863),
(0.37255, 0.37255, 0.37255),
(0.37647, 0.37647, 0.37647),
(0.38039, 0.38039, 0.38039),
(0.38431, 0.38431, 0.38431),
(0.38824, 0.38824, 0.38824),
(0.39216, 0.39216, 0.39216),
(0.39608, 0.39608, 0.39608),
(0.40000, 0.40000, 0.40000),
(0.40392, 0.40392, 0.40392),
(0.40784, 0.40784, 0.40784),
(0.41176, 0.41176, 0.41176),
(0.41569, 0.41569, 0.41569),
(0.41961, 0.41961, 0.41961),
(0.42353, 0.42353, 0.42353),
(0.42745, 0.42745, 0.42745),
(0.43137, 0.43137, 0.43137),
(0.43529, 0.43529, 0.43529),
(0.43922, 0.43922, 0.43922),
(0.44314, 0.44314, 0.44314),
(0.44706, 0.44706, 0.44706),
(0.45098, 0.45098, 0.45098),
(0.45490, 0.45490, 0.45490),
(0.45882, 0.45882, 0.45882),
(0.46275, 0.46275, 0.46275),
(0.46667, 0.46667, 0.46667),
(0.47059, 0.47059, 0.47059),
(0.47451, 0.47451, 0.47451),
(0.47843, 0.47843, 0.47843),
(0.48235, 0.48235, 0.48235),
(0.48627, 0.48627, 0.48627),
(0.49020, 0.49020, 0.49020),
(0.49412, 0.49412, 0.49412),
(0.49804, 0.49804, 0.49804),
(0.50196, 0.50196, 0.50196),
(0.50588, 0.50588, 0.50588),
(0.50980, 0.50980, 0.50980),
(0.51373, 0.51373, 0.51373),
(0.51765, 0.51765, 0.51765),
(0.52157, 0.52157, 0.52157),
(0.52549, 0.52549, 0.52549),
(0.52941, 0.52941, 0.52941),
(0.53333, 0.53333, 0.53333),
(0.53725, 0.53725, 0.53725),
(0.54118, 0.54118, 0.54118),
(0.54510, 0.54510, 0.54510),
(0.54902, 0.54902, 0.54902),
(0.55294, 0.55294, 0.55294),
(0.55686, 0.55686, 0.55686),
(0.56078, 0.56078, 0.56078),
(0.56471, 0.56471, 0.56471),
(0.56863, 0.56863, 0.56863),
(0.57255, 0.57255, 0.57255),
(0.57647, 0.57647, 0.57647),
(0.58039, 0.58039, 0.58039),
(0.58431, 0.58431, 0.58431),
(0.58824, 0.58824, 0.58824),
(0.59608, 0.59608, 0.59608),
(0.60000, 0.60000, 0.60000),
(0.59608, 0.59608, 0.59608),
(0.60392, 0.60392, 0.60392),
(0.60784, 0.60784, 0.60784),
(0.61176, 0.61176, 0.61176),
(0.61569, 0.61569, 0.61569),
(0.61961, 0.61961, 0.61961),
(0.62353, 0.62353, 0.62353),
(0.62745, 0.62745, 0.62745),
(0.63137, 0.63137, 0.63137),
(0.63529, 0.63529, 0.63529),
(0.63922, 0.63922, 0.63922),
(0.64314, 0.64314, 0.64314),
(0.64706, 0.64706, 0.64706),
(0.65098, 0.65098, 0.65098),
(0.65490, 0.65490, 0.65490),
(0.65882, 0.65882, 0.65882),
(0.66275, 0.66275, 0.66275),
(0.66667, 0.66667, 0.66667),
(0.67059, 0.67059, 0.67059),
(0.67451, 0.67451, 0.67451),
(0.67843, 0.67843, 0.67843),
(0.68235, 0.68235, 0.68235),
(0.68627, 0.68627, 0.68627),
(0.69020, 0.69020, 0.69020),
(0.69412, 0.69412, 0.69412),
(0.69804, 0.69804, 0.69804),
(0.70196, 0.70196, 0.70196),
(0.70588, 0.70588, 0.70588),
(0.70980, 0.70980, 0.70980),
(0.71373, 0.71373, 0.71373),
(0.71765, 0.71765, 0.71765),
(0.72157, 0.72157, 0.72157),
(0.72549, 0.72549, 0.72549),
(0.72941, 0.72941, 0.72941),
(0.73333, 0.73333, 0.73333),
(0.73725, 0.73725, 0.73725),
(0.74118, 0.74118, 0.74118),
(0.74510, 0.74510, 0.74510),
(0.74902, 0.74902, 0.74902),
(0.75294, 0.75294, 0.75294),
(0.75686, 0.75686, 0.75686),
(0.76078, 0.76078, 0.76078),
(0.76471, 0.76471, 0.76471),
(0.76863, 0.76863, 0.76863),
(0.77255, 0.77255, 0.77255),
(0.77647, 0.77647, 0.77647),
(0.78039, 0.78039, 0.78039),
(0.78431, 0.78431, 0.78431),
(0.78824, 0.78824, 0.78824),
(0.79216, 0.79216, 0.79216),
(0.79608, 0.79608, 0.79608),
(0.80000, 0.80000, 0.80000),
(0.80392, 0.80392, 0.80392),
(0.80784, 0.80784, 0.80784),
(0.81176, 0.81176, 0.81176),
(0.81569, 0.81569, 0.81569),
(0.81961, 0.81961, 0.81961),
(0.82353, 0.82353, 0.82353),
(0.82745, 0.82745, 0.82745),
(0.83137, 0.83137, 0.83137),
(0.83529, 0.83529, 0.83529),
(0.83922, 0.83922, 0.83922),
(0.84314, 0.84314, 0.84314),
(0.84706, 0.84706, 0.84706),
(0.85098, 0.85098, 0.85098),
(0.85490, 0.85490, 0.85490),
(0.85882, 0.85882, 0.85882),
(0.86275, 0.86275, 0.86275),
(0.86667, 0.86667, 0.86667),
(0.87059, 0.87059, 0.87059),
(0.87451, 0.87451, 0.87451),
(0.87843, 0.87843, 0.87843),
(0.88235, 0.88235, 0.88235),
(0.88627, 0.88627, 0.88627),
(0.89020, 0.89020, 0.89020),
(0.89412, 0.89412, 0.89412),
(0.89804, 0.89804, 0.89804),
(0.90196, 0.90196, 0.90196),
(0.90588, 0.90588, 0.90588),
(0.90980, 0.90980, 0.90980),
(0.91373, 0.91373, 0.91373),
(0.91765, 0.91765, 0.91765),
(0.92157, 0.92157, 0.92157),
(0.92549, 0.92549, 0.92549),
(0.92941, 0.92941, 0.92941),
(0.93333, 0.93333, 0.93333),
(0.93725, 0.93725, 0.93725),
(0.94118, 0.94118, 0.94118),
(0.94510, 0.94510, 0.94510),
(0.94902, 0.94902, 0.94902),
(0.95294, 0.95294, 0.95294),
(0.95686, 0.95686, 0.95686),
(0.96078, 0.96078, 0.96078),
(0.96471, 0.96471, 0.96471),
(0.96863, 0.96863, 0.96863),
(0.97255, 0.97255, 0.97255),
(0.97647, 0.97647, 0.97647),
(0.98039, 0.98039, 0.98039),
(0.98431, 0.98431, 0.98431),
(0.98824, 0.98824, 0.98824),
(0.99216, 0.99216, 0.99216),
(0.99608, 0.99608, 0.99608),
(1.00000, 1.00000, 1.00000),
)
cmap_grayclip = (
# Like gray, but shows clipping of top and bottom 5%
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.01961, 0.01961, 0.01961),
(0.02353, 0.02353, 0.02353),
(0.02745, 0.02745, 0.02745),
(0.03137, 0.03137, 0.03137),
(0.03529, 0.03529, 0.03529),
(0.03922, 0.03922, 0.03922),
(0.04314, 0.04314, 0.04314),
(0.04706, 0.04706, 0.04706),
(0.05098, 0.05098, 0.05098),
(0.05490, 0.05490, 0.05490),
(0.05882, 0.05882, 0.05882),
(0.06275, 0.06275, 0.06275),
(0.06667, 0.06667, 0.06667),
(0.07059, 0.07059, 0.07059),
(0.07451, 0.07451, 0.07451),
(0.07843, 0.07843, 0.07843),
(0.08235, 0.08235, 0.08235),
(0.08627, 0.08627, 0.08627),
(0.09020, 0.09020, 0.09020),
(0.09412, 0.09412, 0.09412),
(0.09804, 0.09804, 0.09804),
(0.10196, 0.10196, 0.10196),
(0.10588, 0.10588, 0.10588),
(0.10980, 0.10980, 0.10980),
(0.11373, 0.11373, 0.11373),
(0.11765, 0.11765, 0.11765),
(0.12157, 0.12157, 0.12157),
(0.12549, 0.12549, 0.12549),
(0.12941, 0.12941, 0.12941),
(0.13333, 0.13333, 0.13333),
(0.13725, 0.13725, 0.13725),
(0.14118, 0.14118, 0.14118),
(0.14510, 0.14510, 0.14510),
(0.14902, 0.14902, 0.14902),
(0.15294, 0.15294, 0.15294),
(0.15686, 0.15686, 0.15686),
(0.16078, 0.16078, 0.16078),
(0.16471, 0.16471, 0.16471),
(0.16863, 0.16863, 0.16863),
(0.17255, 0.17255, 0.17255),
(0.17647, 0.17647, 0.17647),
(0.18039, 0.18039, 0.18039),
(0.18431, 0.18431, 0.18431),
(0.18824, 0.18824, 0.18824),
(0.19216, 0.19216, 0.19216),
(0.19608, 0.19608, 0.19608),
(0.20000, 0.20000, 0.20000),
(0.20392, 0.20392, 0.20392),
(0.20784, 0.20784, 0.20784),
(0.21176, 0.21176, 0.21176),
(0.21569, 0.21569, 0.21569),
(0.21961, 0.21961, 0.21961),
(0.22353, 0.22353, 0.22353),
(0.22745, 0.22745, 0.22745),
(0.23137, 0.23137, 0.23137),
(0.23529, 0.23529, 0.23529),
(0.23922, 0.23922, 0.23922),
(0.24314, 0.24314, 0.24314),
(0.24706, 0.24706, 0.24706),
(0.25098, 0.25098, 0.25098),
(0.25490, 0.25490, 0.25490),
(0.25882, 0.25882, 0.25882),
(0.26275, 0.26275, 0.26275),
(0.26667, 0.26667, 0.26667),
(0.27059, 0.27059, 0.27059),
(0.27451, 0.27451, 0.27451),
(0.27843, 0.27843, 0.27843),
(0.28235, 0.28235, 0.28235),
(0.28627, 0.28627, 0.28627),
(0.29020, 0.29020, 0.29020),
(0.29412, 0.29412, 0.29412),
(0.29804, 0.29804, 0.29804),
(0.30196, 0.30196, 0.30196),
(0.30588, 0.30588, 0.30588),
(0.30980, 0.30980, 0.30980),
(0.31373, 0.31373, 0.31373),
(0.31765, 0.31765, 0.31765),
(0.32157, 0.32157, 0.32157),
(0.32549, 0.32549, 0.32549),
(0.32941, 0.32941, 0.32941),
(0.33333, 0.33333, 0.33333),
(0.33725, 0.33725, 0.33725),
(0.34118, 0.34118, 0.34118),
(0.34510, 0.34510, 0.34510),
(0.34902, 0.34902, 0.34902),
(0.35294, 0.35294, 0.35294),
(0.35686, 0.35686, 0.35686),
(0.36078, 0.36078, 0.36078),
(0.36471, 0.36471, 0.36471),
(0.36863, 0.36863, 0.36863),
(0.37255, 0.37255, 0.37255),
(0.37647, 0.37647, 0.37647),
(0.38039, 0.38039, 0.38039),
(0.38431, 0.38431, 0.38431),
(0.38824, 0.38824, 0.38824),
(0.39216, 0.39216, 0.39216),
(0.39608, 0.39608, 0.39608),
(0.40000, 0.40000, 0.40000),
(0.40392, 0.40392, 0.40392),
(0.40784, 0.40784, 0.40784),
(0.41176, 0.41176, 0.41176),
(0.41569, 0.41569, 0.41569),
(0.41961, 0.41961, 0.41961),
(0.42353, 0.42353, 0.42353),
(0.42745, 0.42745, 0.42745),
(0.43137, 0.43137, 0.43137),
(0.43529, 0.43529, 0.43529),
(0.43922, 0.43922, 0.43922),
(0.44314, 0.44314, 0.44314),
(0.44706, 0.44706, 0.44706),
(0.45098, 0.45098, 0.45098),
(0.45490, 0.45490, 0.45490),
(0.45882, 0.45882, 0.45882),
(0.46275, 0.46275, 0.46275),
(0.46667, 0.46667, 0.46667),
(0.47059, 0.47059, 0.47059),
(0.47451, 0.47451, 0.47451),
(0.47843, 0.47843, 0.47843),
(0.48235, 0.48235, 0.48235),
(0.48627, 0.48627, 0.48627),
(0.49020, 0.49020, 0.49020),
(0.49412, 0.49412, 0.49412),
(0.49804, 0.49804, 0.49804),
(0.50196, 0.50196, 0.50196),
(0.50588, 0.50588, 0.50588),
(0.50980, 0.50980, 0.50980),
(0.51373, 0.51373, 0.51373),
(0.51765, 0.51765, 0.51765),
(0.52157, 0.52157, 0.52157),
(0.52549, 0.52549, 0.52549),
(0.52941, 0.52941, 0.52941),
(0.53333, 0.53333, 0.53333),
(0.53725, 0.53725, 0.53725),
(0.54118, 0.54118, 0.54118),
(0.54510, 0.54510, 0.54510),
(0.54902, 0.54902, 0.54902),
(0.55294, 0.55294, 0.55294),
(0.55686, 0.55686, 0.55686),
(0.56078, 0.56078, 0.56078),
(0.56471, 0.56471, 0.56471),
(0.56863, 0.56863, 0.56863),
(0.57255, 0.57255, 0.57255),
(0.57647, 0.57647, 0.57647),
(0.58039, 0.58039, 0.58039),
(0.58431, 0.58431, 0.58431),
(0.58824, 0.58824, 0.58824),
(0.59608, 0.59608, 0.59608),
(0.60000, 0.60000, 0.60000),
(0.59608, 0.59608, 0.59608),
(0.60392, 0.60392, 0.60392),
(0.60784, 0.60784, 0.60784),
(0.61176, 0.61176, 0.61176),
(0.61569, 0.61569, 0.61569),
(0.61961, 0.61961, 0.61961),
(0.62353, 0.62353, 0.62353),
(0.62745, 0.62745, 0.62745),
(0.63137, 0.63137, 0.63137),
(0.63529, 0.63529, 0.63529),
(0.63922, 0.63922, 0.63922),
(0.64314, 0.64314, 0.64314),
(0.64706, 0.64706, 0.64706),
(0.65098, 0.65098, 0.65098),
(0.65490, 0.65490, 0.65490),
(0.65882, 0.65882, 0.65882),
(0.66275, 0.66275, 0.66275),
(0.66667, 0.66667, 0.66667),
(0.67059, 0.67059, 0.67059),
(0.67451, 0.67451, 0.67451),
(0.67843, 0.67843, 0.67843),
(0.68235, 0.68235, 0.68235),
(0.68627, 0.68627, 0.68627),
(0.69020, 0.69020, 0.69020),
(0.69412, 0.69412, 0.69412),
(0.69804, 0.69804, 0.69804),
(0.70196, 0.70196, 0.70196),
(0.70588, 0.70588, 0.70588),
(0.70980, 0.70980, 0.70980),
(0.71373, 0.71373, 0.71373),
(0.71765, 0.71765, 0.71765),
(0.72157, 0.72157, 0.72157),
(0.72549, 0.72549, 0.72549),
(0.72941, 0.72941, 0.72941),
(0.73333, 0.73333, 0.73333),
(0.73725, 0.73725, 0.73725),
(0.74118, 0.74118, 0.74118),
(0.74510, 0.74510, 0.74510),
(0.74902, 0.74902, 0.74902),
(0.75294, 0.75294, 0.75294),
(0.75686, 0.75686, 0.75686),
(0.76078, 0.76078, 0.76078),
(0.76471, 0.76471, 0.76471),
(0.76863, 0.76863, 0.76863),
(0.77255, 0.77255, 0.77255),
(0.77647, 0.77647, 0.77647),
(0.78039, 0.78039, 0.78039),
(0.78431, 0.78431, 0.78431),
(0.78824, 0.78824, 0.78824),
(0.79216, 0.79216, 0.79216),
(0.79608, 0.79608, 0.79608),
(0.80000, 0.80000, 0.80000),
(0.80392, 0.80392, 0.80392),
(0.80784, 0.80784, 0.80784),
(0.81176, 0.81176, 0.81176),
(0.81569, 0.81569, 0.81569),
(0.81961, 0.81961, 0.81961),
(0.82353, 0.82353, 0.82353),
(0.82745, 0.82745, 0.82745),
(0.83137, 0.83137, 0.83137),
(0.83529, 0.83529, 0.83529),
(0.83922, 0.83922, 0.83922),
(0.84314, 0.84314, 0.84314),
(0.84706, 0.84706, 0.84706),
(0.85098, 0.85098, 0.85098),
(0.85490, 0.85490, 0.85490),
(0.85882, 0.85882, 0.85882),
(0.86275, 0.86275, 0.86275),
(0.86667, 0.86667, 0.86667),
(0.87059, 0.87059, 0.87059),
(0.87451, 0.87451, 0.87451),
(0.87843, 0.87843, 0.87843),
(0.88235, 0.88235, 0.88235),
(0.88627, 0.88627, 0.88627),
(0.89020, 0.89020, 0.89020),
(0.89412, 0.89412, 0.89412),
(0.89804, 0.89804, 0.89804),
(0.90196, 0.90196, 0.90196),
(0.90588, 0.90588, 0.90588),
(0.90980, 0.90980, 0.90980),
(0.91373, 0.91373, 0.91373),
(0.91765, 0.91765, 0.91765),
(0.92157, 0.92157, 0.92157),
(0.92549, 0.92549, 0.92549),
(0.92941, 0.92941, 0.92941),
(0.93333, 0.93333, 0.93333),
(0.93725, 0.93725, 0.93725),
(0.94118, 0.94118, 0.94118),
(0.94510, 0.94510, 0.94510),
(0.94902, 0.94902, 0.94902),
(0.95294, 0.95294, 0.95294),
(0.95686, 0.95686, 0.95686),
(0.96078, 0.96078, 0.96078),
(0.96471, 0.96471, 0.96471),
(0.96863, 0.96863, 0.96863),
(0.97255, 0.97255, 0.97255),
(0.97647, 0.97647, 0.97647),
(0.98039, 0.98039, 0.98039),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_pastel = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 1.00000),
(0.01961, 0.00000, 0.98039),
(0.05490, 0.00000, 0.94510),
(0.08627, 0.00392, 0.91373),
(0.10980, 0.00392, 0.89020),
(0.13725, 0.00392, 0.86275),
(0.15686, 0.00392, 0.84314),
(0.18039, 0.00392, 0.81961),
(0.20000, 0.00784, 0.80000),
(0.21569, 0.00784, 0.78431),
(0.23529, 0.00784, 0.76471),
(0.25098, 0.00784, 0.74902),
(0.26275, 0.01176, 0.73725),
(0.28235, 0.01176, 0.71765),
(0.29412, 0.01176, 0.70588),
(0.30588, 0.01176, 0.69412),
(0.31765, 0.01176, 0.68235),
(0.33333, 0.01569, 0.66667),
(0.34118, 0.01569, 0.65882),
(0.35294, 0.01569, 0.64706),
(0.36078, 0.01569, 0.63922),
(0.37255, 0.01961, 0.62745),
(0.38431, 0.01961, 0.61569),
(0.39216, 0.01961, 0.60784),
(0.40000, 0.01961, 0.60000),
(0.41176, 0.02353, 0.58824),
(0.41961, 0.02353, 0.58039),
(0.43137, 0.02353, 0.56863),
(0.43529, 0.02745, 0.56471),
(0.44314, 0.02745, 0.55686),
(0.45098, 0.02745, 0.54902),
(0.45882, 0.02745, 0.54118),
(0.46667, 0.03137, 0.53333),
(0.47059, 0.03137, 0.52941),
(0.48235, 0.03137, 0.51765),
(0.48627, 0.03529, 0.51373),
(0.49412, 0.03529, 0.50588),
(0.50196, 0.03529, 0.49804),
(0.50588, 0.03529, 0.49412),
(0.50980, 0.04314, 0.49020),
(0.51765, 0.04314, 0.48235),
(0.52157, 0.04314, 0.47843),
(0.53333, 0.04706, 0.46667),
(0.53725, 0.04706, 0.46275),
(0.54118, 0.04706, 0.45882),
(0.54902, 0.05098, 0.45098),
(0.55294, 0.05098, 0.44706),
(0.55686, 0.05098, 0.44314),
(0.56078, 0.05490, 0.43922),
(0.56471, 0.05490, 0.43529),
(0.57255, 0.05490, 0.42745),
(0.58039, 0.05882, 0.41961),
(0.58431, 0.05882, 0.41569),
(0.58824, 0.05882, 0.41176),
(0.59216, 0.06275, 0.40784),
(0.59608, 0.06275, 0.40392),
(0.60000, 0.06275, 0.40000),
(0.60392, 0.06667, 0.39608),
(0.60784, 0.06667, 0.39216),
(0.61176, 0.06667, 0.38824),
(0.61569, 0.07059, 0.38431),
(0.61961, 0.07059, 0.38039),
(0.62745, 0.07451, 0.37255),
(0.63137, 0.07451, 0.36863),
(0.63529, 0.07451, 0.36471),
(0.63922, 0.07843, 0.36078),
(0.64314, 0.07843, 0.35686),
(0.64706, 0.08235, 0.35294),
(0.65098, 0.08235, 0.34902),
(0.65490, 0.08235, 0.34510),
(0.65490, 0.08627, 0.34510),
(0.65882, 0.08627, 0.34118),
(0.66275, 0.09020, 0.33725),
(0.66667, 0.09020, 0.33333),
(0.67059, 0.09020, 0.32941),
(0.67843, 0.09412, 0.32157),
(0.67843, 0.09412, 0.32157),
(0.68235, 0.09804, 0.31765),
(0.68627, 0.09804, 0.31373),
(0.69020, 0.10196, 0.30980),
(0.69020, 0.10196, 0.30980),
(0.69412, 0.10588, 0.30588),
(0.69804, 0.10588, 0.30196),
(0.70196, 0.10980, 0.29804),
(0.70196, 0.10980, 0.29804),
(0.70588, 0.10980, 0.29412),
(0.70980, 0.11765, 0.29020),
(0.71373, 0.11765, 0.28627),
(0.71373, 0.12157, 0.28627),
(0.71765, 0.12157, 0.28235),
(0.72157, 0.12549, 0.27843),
(0.72157, 0.12549, 0.27843),
(0.72941, 0.12941, 0.27059),
(0.73333, 0.12941, 0.26667),
(0.73333, 0.13333, 0.26667),
(0.73725, 0.13725, 0.26275),
(0.74118, 0.13725, 0.25882),
(0.74118, 0.14118, 0.25882),
(0.74510, 0.14118, 0.25490),
(0.74510, 0.14510, 0.25490),
(0.74902, 0.14510, 0.25098),
(0.75294, 0.14902, 0.24706),
(0.75294, 0.14902, 0.24706),
(0.75686, 0.15294, 0.24314),
(0.76078, 0.15686, 0.23922),
(0.76078, 0.15686, 0.23922),
(0.76471, 0.16078, 0.23529),
(0.76471, 0.16078, 0.23529),
(0.76863, 0.16471, 0.23137),
(0.76863, 0.16863, 0.23137),
(0.77255, 0.16863, 0.22745),
(0.78039, 0.17255, 0.21961),
(0.78039, 0.17255, 0.21961),
(0.78431, 0.17647, 0.21569),
(0.78431, 0.18039, 0.21569),
(0.78824, 0.18039, 0.21176),
(0.78824, 0.18431, 0.21176),
(0.79216, 0.18824, 0.20784),
(0.79216, 0.18824, 0.20784),
(0.79608, 0.19608, 0.20392),
(0.79608, 0.20000, 0.20392),
(0.80000, 0.20000, 0.20000),
(0.80000, 0.20392, 0.20000),
(0.80392, 0.20784, 0.19608),
(0.80392, 0.20784, 0.19608),
(0.80784, 0.21176, 0.19216),
(0.80784, 0.21569, 0.19216),
(0.81176, 0.21961, 0.18824),
(0.81176, 0.21961, 0.18824),
(0.81569, 0.22353, 0.18431),
(0.81569, 0.22745, 0.18431),
(0.81961, 0.23137, 0.18039),
(0.81961, 0.23137, 0.18039),
(0.82745, 0.23529, 0.17255),
(0.82745, 0.23922, 0.17255),
(0.83137, 0.24314, 0.16863),
(0.83137, 0.24314, 0.16863),
(0.83529, 0.24706, 0.16471),
(0.83529, 0.25098, 0.16471),
(0.83922, 0.25490, 0.16078),
(0.83922, 0.25882, 0.16078),
(0.83922, 0.26275, 0.16078),
(0.84314, 0.26275, 0.15686),
(0.84314, 0.27059, 0.15686),
(0.84706, 0.27451, 0.15294),
(0.84706, 0.27843, 0.15294),
(0.85098, 0.28235, 0.14902),
(0.85098, 0.28627, 0.14902),
(0.85490, 0.29020, 0.14510),
(0.85490, 0.29412, 0.14510),
(0.85490, 0.29804, 0.14510),
(0.85882, 0.29804, 0.14118),
(0.85882, 0.30196, 0.14118),
(0.86275, 0.30588, 0.13725),
(0.86275, 0.30980, 0.13725),
(0.86275, 0.31373, 0.13725),
(0.86667, 0.31765, 0.13333),
(0.86667, 0.32157, 0.13333),
(0.87059, 0.32549, 0.12941),
(0.87059, 0.33333, 0.12941),
(0.87059, 0.33725, 0.12941),
(0.87843, 0.34118, 0.12157),
(0.87843, 0.34510, 0.12157),
(0.88235, 0.34902, 0.11765),
(0.88235, 0.35294, 0.11765),
(0.88235, 0.35686, 0.11765),
(0.88627, 0.36078, 0.11373),
(0.88627, 0.36471, 0.11373),
(0.89020, 0.37255, 0.10980),
(0.89020, 0.37647, 0.10980),
(0.89020, 0.38039, 0.10980),
(0.89412, 0.38431, 0.10588),
(0.89412, 0.38824, 0.10588),
(0.89412, 0.39216, 0.10588),
(0.89804, 0.40000, 0.10196),
(0.89804, 0.40392, 0.10196),
(0.89804, 0.40784, 0.10196),
(0.90196, 0.41176, 0.09804),
(0.90196, 0.41961, 0.09804),
(0.90588, 0.42353, 0.09412),
(0.90588, 0.42745, 0.09412),
(0.90588, 0.43529, 0.09412),
(0.90980, 0.43922, 0.09020),
(0.90980, 0.44314, 0.09020),
(0.90980, 0.45098, 0.09020),
(0.91373, 0.45490, 0.08627),
(0.91373, 0.45882, 0.08627),
(0.91373, 0.46667, 0.08627),
(0.91765, 0.47059, 0.08235),
(0.91765, 0.47843, 0.08235),
(0.91765, 0.48235, 0.08235),
(0.92157, 0.49020, 0.07843),
(0.92157, 0.49412, 0.07843),
(0.92157, 0.50196, 0.07843),
(0.92941, 0.50588, 0.07059),
(0.92941, 0.51373, 0.07059),
(0.92941, 0.51765, 0.07059),
(0.93333, 0.52549, 0.06667),
(0.93333, 0.52941, 0.06667),
(0.93333, 0.53725, 0.06667),
(0.93725, 0.54118, 0.06275),
(0.93725, 0.54902, 0.06275),
(0.93725, 0.55686, 0.06275),
(0.94118, 0.56078, 0.05882),
(0.94118, 0.56863, 0.05882),
(0.94118, 0.57647, 0.05882),
(0.94118, 0.58039, 0.05882),
(0.94510, 0.58824, 0.05490),
(0.94510, 0.59608, 0.05490),
(0.94510, 0.60000, 0.05490),
(0.94902, 0.60784, 0.05098),
(0.94902, 0.61569, 0.05098),
(0.94902, 0.62353, 0.05098),
(0.95294, 0.63137, 0.04706),
(0.95294, 0.63529, 0.04706),
(0.95294, 0.64314, 0.04706),
(0.95686, 0.65098, 0.04314),
(0.95686, 0.65882, 0.04314),
(0.95686, 0.66667, 0.04314),
(0.95686, 0.67451, 0.04314),
(0.96078, 0.68235, 0.03922),
(0.96078, 0.69020, 0.03922),
(0.96078, 0.69804, 0.03922),
(0.96471, 0.70588, 0.03529),
(0.96471, 0.71373, 0.03529),
(0.96471, 0.72157, 0.03529),
(0.96471, 0.72941, 0.03529),
(0.96863, 0.73725, 0.03137),
(0.96863, 0.74510, 0.03137),
(0.96863, 0.75294, 0.03137),
(0.97255, 0.76078, 0.02745),
(0.97255, 0.77255, 0.02745),
(0.97255, 0.78039, 0.02745),
(0.97255, 0.78824, 0.02745),
(0.98039, 0.79608, 0.01961),
(0.98039, 0.80392, 0.01961),
(0.98039, 0.81569, 0.01961),
(0.98039, 0.82353, 0.01961),
(0.98431, 0.83137, 0.01569),
(0.98431, 0.84314, 0.01569),
(0.98431, 0.85098, 0.01569),
(0.98824, 0.86275, 0.01176),
(0.98824, 0.87059, 0.01176),
(0.98824, 0.87843, 0.01176),
(0.98824, 0.89020, 0.01176),
(0.99216, 0.89804, 0.00784),
(0.99216, 0.90980, 0.00784),
(0.99216, 0.91765, 0.00784),
(0.99216, 0.92941, 0.00784),
(0.99608, 0.94118, 0.00392),
(0.99608, 0.94902, 0.00392),
(0.99608, 0.96078, 0.00392),
(0.99608, 0.97255, 0.00392),
(1.00000, 0.98039, 0.00000),
(1.00000, 0.99216, 0.00000),
)
cmap_light = (
(0.00000, 0.00392, 0.00000), # noqa
(0.00000, 0.00784, 0.01961),
(0.00000, 0.01176, 0.05490),
(0.00000, 0.01569, 0.08627),
(0.00000, 0.01961, 0.10980),
(0.00000, 0.02353, 0.13725),
(0.00000, 0.02745, 0.15686),
(0.00000, 0.03137, 0.18039),
(0.00000, 0.03529, 0.20000),
(0.00000, 0.03922, 0.21569),
(0.00000, 0.04314, 0.23529),
(0.00000, 0.04706, 0.25098),
(0.00000, 0.05098, 0.26275),
(0.00000, 0.05490, 0.28235),
(0.00000, 0.05882, 0.29412),
(0.00000, 0.06275, 0.30588),
(0.00000, 0.06667, 0.31765),
(0.00000, 0.07059, 0.33333),
(0.00000, 0.07451, 0.34118),
(0.00000, 0.07843, 0.35294),
(0.00000, 0.08235, 0.36078),
(0.00000, 0.08627, 0.37255),
(0.00000, 0.09020, 0.38431),
(0.00000, 0.09412, 0.39216),
(0.00392, 0.09804, 0.40000),
(0.00784, 0.10196, 0.41176),
(0.01176, 0.10588, 0.41961),
(0.01569, 0.10980, 0.43137),
(0.01569, 0.11373, 0.43529),
(0.01961, 0.11765, 0.44314),
(0.02353, 0.12157, 0.45098),
(0.02745, 0.12549, 0.45882),
(0.02745, 0.12941, 0.46667),
(0.03137, 0.13333, 0.47059),
(0.03529, 0.13725, 0.48235),
(0.04314, 0.14118, 0.48627),
(0.04706, 0.14510, 0.49412),
(0.05098, 0.14902, 0.50196),
(0.05882, 0.15294, 0.50588),
(0.06667, 0.15686, 0.50980),
(0.07451, 0.16078, 0.51765),
(0.08235, 0.16471, 0.52157),
(0.09020, 0.16863, 0.53333),
(0.09804, 0.17255, 0.53725),
(0.10588, 0.17647, 0.54118),
(0.11765, 0.18039, 0.54902),
(0.12941, 0.18431, 0.55294),
(0.14118, 0.18824, 0.55686),
(0.15294, 0.19216, 0.56078),
(0.16471, 0.19608, 0.56471),
(0.18039, 0.20000, 0.57255),
(0.18824, 0.20392, 0.58039),
(0.20000, 0.20784, 0.58431),
(0.21569, 0.21176, 0.58824),
(0.23137, 0.21569, 0.59216),
(0.24706, 0.21961, 0.59608),
(0.26275, 0.22353, 0.60000),
(0.27843, 0.22745, 0.60392),
(0.29412, 0.23137, 0.60784),
(0.30980, 0.23529, 0.61176),
(0.32941, 0.23922, 0.61569),
(0.34902, 0.24314, 0.61961),
(0.36863, 0.24706, 0.62745),
(0.38431, 0.25098, 0.63137),
(0.40392, 0.25490, 0.63529),
(0.41569, 0.25882, 0.63922),
(0.43529, 0.26275, 0.64314),
(0.45490, 0.26667, 0.64706),
(0.47059, 0.27059, 0.65098),
(0.48627, 0.27451, 0.65490),
(0.50196, 0.27843, 0.65490),
(0.51765, 0.28235, 0.65882),
(0.52941, 0.28627, 0.66275),
(0.54902, 0.29020, 0.66667),
(0.56471, 0.29412, 0.67059),
(0.58039, 0.29804, 0.67843),
(0.59216, 0.30196, 0.67843),
(0.60392, 0.30588, 0.68235),
(0.61961, 0.30980, 0.68627),
(0.63137, 0.31373, 0.69020),
(0.63922, 0.31765, 0.69020),
(0.65098, 0.32157, 0.69412),
(0.65882, 0.32549, 0.69804),
(0.67059, 0.32941, 0.70196),
(0.67843, 0.33333, 0.70196),
(0.69020, 0.33725, 0.70588),
(0.69804, 0.34118, 0.70980),
(0.70588, 0.34510, 0.71373),
(0.71373, 0.34902, 0.71373),
(0.72157, 0.35294, 0.71765),
(0.72941, 0.35686, 0.72157),
(0.73725, 0.36078, 0.72157),
(0.74510, 0.36471, 0.72941),
(0.75294, 0.36863, 0.73333),
(0.76078, 0.37255, 0.73333),
(0.76863, 0.37647, 0.73725),
(0.77647, 0.38039, 0.74118),
(0.78039, 0.38431, 0.74118),
(0.78824, 0.38824, 0.74510),
(0.79608, 0.39216, 0.74510),
(0.80392, 0.39608, 0.74902),
(0.80784, 0.40000, 0.75294),
(0.81176, 0.40392, 0.75294),
(0.81961, 0.40784, 0.75686),
(0.82353, 0.41176, 0.76078),
(0.82745, 0.41569, 0.76078),
(0.83529, 0.41961, 0.76471),
(0.83922, 0.42353, 0.76471),
(0.84314, 0.42745, 0.76863),
(0.84706, 0.43137, 0.76863),
(0.85098, 0.43529, 0.77255),
(0.85490, 0.43922, 0.78039),
(0.85882, 0.44314, 0.78039),
(0.86275, 0.44706, 0.78431),
(0.86667, 0.45098, 0.78431),
(0.87059, 0.45490, 0.78824),
(0.87451, 0.45882, 0.78824),
(0.87843, 0.46275, 0.79216),
(0.88235, 0.46667, 0.79216),
(0.88627, 0.47059, 0.79608),
(0.89020, 0.47451, 0.79608),
(0.89412, 0.47843, 0.80000),
(0.89804, 0.48235, 0.80000),
(0.89804, 0.48627, 0.80392),
(0.90196, 0.49020, 0.80392),
(0.90588, 0.49412, 0.80784),
(0.90980, 0.49804, 0.80784),
(0.91373, 0.50196, 0.81176),
(0.91373, 0.50588, 0.81176),
(0.91765, 0.50980, 0.81569),
(0.92157, 0.51373, 0.81569),
(0.92157, 0.51765, 0.81961),
(0.92549, 0.52157, 0.81961),
(0.92941, 0.52549, 0.82745),
(0.92941, 0.52941, 0.82745),
(0.93333, 0.53333, 0.83137),
(0.93725, 0.53725, 0.83137),
(0.93725, 0.54118, 0.83529),
(0.93725, 0.54510, 0.83529),
(0.94118, 0.54902, 0.83922),
(0.94118, 0.55294, 0.83922),
(0.94510, 0.55686, 0.83922),
(0.94510, 0.56078, 0.84314),
(0.94902, 0.56471, 0.84314),
(0.94902, 0.56863, 0.84706),
(0.95294, 0.57255, 0.84706),
(0.95294, 0.57647, 0.85098),
(0.95294, 0.58039, 0.85098),
(0.95686, 0.58431, 0.85490),
(0.95686, 0.58824, 0.85490),
(0.96078, 0.59216, 0.85490),
(0.96078, 0.59608, 0.85882),
(0.96078, 0.60000, 0.85882),
(0.96471, 0.60392, 0.86275),
(0.96471, 0.60784, 0.86275),
(0.96471, 0.61176, 0.86275),
(0.96863, 0.61569, 0.86667),
(0.96863, 0.61961, 0.86667),
(0.97255, 0.62353, 0.87059),
(0.97255, 0.62745, 0.87059),
(0.97255, 0.63137, 0.87059),
(0.97647, 0.63529, 0.87843),
(0.97647, 0.63922, 0.87843),
(0.98039, 0.64314, 0.88235),
(0.98039, 0.64706, 0.88235),
(0.98039, 0.65098, 0.88235),
(0.98431, 0.65490, 0.88627),
(0.98431, 0.65882, 0.88627),
(0.98431, 0.66275, 0.89020),
(0.98824, 0.66667, 0.89020),
(0.98824, 0.67059, 0.89020),
(0.98824, 0.67451, 0.89412),
(0.99216, 0.67843, 0.89412),
(0.99216, 0.68235, 0.89412),
(0.99216, 0.68627, 0.89804),
(0.99216, 0.69020, 0.89804),
(0.99216, 0.69412, 0.89804),
(0.99608, 0.69804, 0.90196),
(0.99608, 0.70196, 0.90196),
(0.99608, 0.70588, 0.90588),
(0.99608, 0.70980, 0.90588),
(0.99608, 0.71373, 0.90588),
(0.99608, 0.71765, 0.90980),
(0.99608, 0.72157, 0.90980),
(0.99608, 0.72549, 0.90980),
(0.99608, 0.72941, 0.91373),
(0.99608, 0.73333, 0.91373),
(0.99608, 0.73725, 0.91373),
(0.99608, 0.74118, 0.91765),
(0.99608, 0.74510, 0.91765),
(0.99608, 0.74902, 0.91765),
(0.99608, 0.75294, 0.92157),
(0.99608, 0.75686, 0.92157),
(0.99608, 0.76078, 0.92157),
(0.99608, 0.76471, 0.92941),
(0.99608, 0.76863, 0.92941),
(0.99608, 0.77255, 0.92941),
(0.99608, 0.77647, 0.93333),
(0.99608, 0.78039, 0.93333),
(0.99608, 0.78431, 0.93333),
(0.99608, 0.78824, 0.93725),
(1.00000, 0.79216, 0.93725),
(1.00000, 0.79608, 0.93725),
(1.00000, 0.80000, 0.94118),
(1.00000, 0.80392, 0.94118),
(1.00000, 0.80784, 0.94118),
(1.00000, 0.81176, 0.94118),
(1.00000, 0.81569, 0.94510),
(1.00000, 0.81961, 0.94510),
(1.00000, 0.82353, 0.94510),
(1.00000, 0.82745, 0.94902),
(1.00000, 0.83137, 0.94902),
(1.00000, 0.83529, 0.94902),
(1.00000, 0.83922, 0.95294),
(1.00000, 0.84314, 0.95294),
(1.00000, 0.84706, 0.95294),
(1.00000, 0.85098, 0.95686),
(1.00000, 0.85490, 0.95686),
(1.00000, 0.85882, 0.95686),
(1.00000, 0.86275, 0.95686),
(1.00000, 0.86667, 0.96078),
(1.00000, 0.87059, 0.96078),
(1.00000, 0.87451, 0.96078),
(1.00000, 0.87843, 0.96471),
(1.00000, 0.88235, 0.96471),
(1.00000, 0.88627, 0.96471),
(1.00000, 0.89020, 0.96471),
(1.00000, 0.89412, 0.96863),
(1.00000, 0.89804, 0.96863),
(1.00000, 0.90196, 0.96863),
(1.00000, 0.90588, 0.97255),
(1.00000, 0.90980, 0.97255),
(1.00000, 0.91373, 0.97255),
(1.00000, 0.91765, 0.97255),
(1.00000, 0.92157, 0.98039),
(1.00000, 0.92549, 0.98039),
(1.00000, 0.92941, 0.98039),
(1.00000, 0.93333, 0.98039),
(1.00000, 0.93725, 0.98431),
(1.00000, 0.94118, 0.98431),
(1.00000, 0.94510, 0.98431),
(1.00000, 0.94902, 0.98824),
(1.00000, 0.95294, 0.98824),
(1.00000, 0.95686, 0.98824),
(1.00000, 0.96078, 0.98824),
(1.00000, 0.96471, 0.99216),
(1.00000, 0.96863, 0.99216),
(1.00000, 0.97255, 0.99216),
(1.00000, 0.97647, 0.99216),
(1.00000, 0.98039, 0.99608),
(1.00000, 0.98431, 0.99608),
(1.00000, 0.98824, 0.99608),
(1.00000, 0.99216, 0.99608),
(1.00000, 0.99608, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random1 = (
(0.00000, 0.00000, 0.16471), # noqa
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
)
cmap_random2 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random3 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random4 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
# ``cmap_random5``: 256 RGB triples (floats in [0, 1]).  The red channel
# ramps monotonically from 0.0 to 1.0 while green oscillates with growing
# amplitude and blue ramps down from 1.0 to 0.0, producing a rainbow-like
# gradient.  Values appear to be multiples of 1/255 rounded to five
# decimals -- presumably machine-generated; do not hand-edit.
cmap_random5 = (
    (0.00000, 0.00000, 1.00000), # noqa
    (0.00000, 0.00000, 0.99216),
    (0.00000, 0.00000, 0.98824),
    (0.00392, 0.00000, 0.98431),
    (0.00392, 0.00000, 0.98039),
    (0.00784, 0.00000, 0.97647),
    (0.00784, 0.00000, 0.96863),
    (0.01176, 0.00000, 0.96471),
    (0.01569, 0.00000, 0.96078),
    (0.01569, 0.00000, 0.95686),
    (0.01961, 0.00000, 0.95294),
    (0.01961, 0.00000, 0.94510),
    (0.02353, 0.00784, 0.94118),
    (0.02745, 0.01569, 0.93725),
    (0.02745, 0.02745, 0.93333),
    (0.03137, 0.03922, 0.92941),
    (0.03529, 0.05098, 0.92157),
    (0.03529, 0.06667, 0.91765),
    (0.03922, 0.08235, 0.91373),
    (0.04314, 0.09804, 0.90980),
    (0.04706, 0.11765, 0.90588),
    (0.04706, 0.09804, 0.90196),
    (0.05098, 0.08235, 0.89412),
    (0.05490, 0.06667, 0.89020),
    (0.05490, 0.05098, 0.88627),
    (0.05882, 0.03922, 0.88235),
    (0.06275, 0.02745, 0.87843),
    (0.06667, 0.01569, 0.87059),
    (0.07059, 0.00784, 0.86667),
    (0.07059, 0.00000, 0.86275),
    (0.07451, 0.00000, 0.85882),
    (0.07843, 0.00392, 0.85490),
    (0.08235, 0.01176, 0.85098),
    (0.08235, 0.02353, 0.84314),
    (0.08627, 0.03922, 0.83922),
    (0.09020, 0.05490, 0.83529),
    (0.09412, 0.07059, 0.83137),
    (0.09804, 0.09020, 0.82745),
    (0.09804, 0.10980, 0.82353),
    (0.10196, 0.13333, 0.81569),
    (0.10588, 0.15686, 0.81176),
    (0.10980, 0.13333, 0.80784),
    (0.11373, 0.10980, 0.80392),
    (0.11765, 0.09020, 0.80000),
    (0.11765, 0.07059, 0.79608),
    (0.12157, 0.05490, 0.79216),
    (0.12549, 0.03922, 0.78431),
    (0.12941, 0.02353, 0.78039),
    (0.13333, 0.01176, 0.77647),
    (0.13725, 0.00392, 0.77255),
    (0.14118, 0.00000, 0.76863),
    (0.14118, 0.00392, 0.76471),
    (0.14510, 0.01569, 0.75686),
    (0.14902, 0.03137, 0.75294),
    (0.15294, 0.04706, 0.74902),
    (0.15686, 0.06667, 0.74510),
    (0.16078, 0.09020, 0.74118),
    (0.16471, 0.11373, 0.73725),
    (0.16863, 0.13725, 0.73333),
    (0.17255, 0.16471, 0.72549),
    (0.17255, 0.19608, 0.72157),
    (0.17647, 0.16471, 0.71765),
    (0.18039, 0.13725, 0.71373),
    (0.18431, 0.11373, 0.70980),
    (0.18824, 0.09020, 0.70588),
    (0.19216, 0.06667, 0.70196),
    (0.19608, 0.04706, 0.69804),
    (0.20000, 0.03137, 0.69020),
    (0.20392, 0.01569, 0.68627),
    (0.20784, 0.00392, 0.68235),
    (0.21176, 0.00000, 0.67843),
    (0.21176, 0.00784, 0.67451),
    (0.21569, 0.02353, 0.67059),
    (0.21961, 0.04314, 0.66667),
    (0.22353, 0.06667, 0.66275),
    (0.22745, 0.09412, 0.65490),
    (0.23137, 0.12549, 0.65098),
    (0.23529, 0.15686, 0.64706),
    (0.23922, 0.19608, 0.64314),
    (0.24314, 0.23137, 0.63922),
    (0.24706, 0.27451, 0.63529),
    (0.25098, 0.23137, 0.63137),
    (0.25490, 0.19608, 0.62745),
    (0.25882, 0.15686, 0.61961),
    (0.26275, 0.12549, 0.61569),
    (0.26667, 0.09412, 0.61176),
    (0.27059, 0.06667, 0.60784),
    (0.27451, 0.04314, 0.60392),
    (0.27843, 0.02353, 0.60000),
    (0.28235, 0.00784, 0.59608),
    (0.28627, 0.00000, 0.59216),
    (0.29020, 0.00784, 0.58824),
    (0.29412, 0.03137, 0.58431),
    (0.29804, 0.05490, 0.57647),
    (0.29804, 0.08627, 0.57255),
    (0.30196, 0.12157, 0.56863),
    (0.30588, 0.16078, 0.56471),
    (0.30980, 0.20392, 0.56078),
    (0.31373, 0.25098, 0.55686),
    (0.31765, 0.29804, 0.55294),
    (0.32157, 0.35294, 0.54902),
    (0.32549, 0.29804, 0.54510),
    (0.32941, 0.25098, 0.54118),
    (0.33333, 0.20392, 0.53725),
    (0.33725, 0.16078, 0.52941),
    (0.34118, 0.12157, 0.52549),
    (0.34510, 0.08627, 0.52157),
    (0.34902, 0.05490, 0.51765),
    (0.35294, 0.03137, 0.51373),
    (0.35686, 0.00784, 0.50980),
    (0.36078, 0.00000, 0.50588),
    (0.36471, 0.01176, 0.50196),
    (0.37255, 0.03529, 0.49804),
    (0.37647, 0.07059, 0.49412),
    (0.38039, 0.10588, 0.49020),
    (0.38431, 0.14902, 0.48627),
    (0.38824, 0.20000, 0.48235),
    (0.39216, 0.25098, 0.47843),
    (0.39608, 0.30588, 0.47059),
    (0.40000, 0.36471, 0.46667),
    (0.40392, 0.43137, 0.46275),
    (0.40784, 0.36471, 0.45882),
    (0.41176, 0.30588, 0.45490),
    (0.41569, 0.25098, 0.45098),
    (0.41961, 0.20000, 0.44706),
    (0.42353, 0.14902, 0.44314),
    (0.42745, 0.10588, 0.43922),
    (0.43137, 0.07059, 0.43529),
    (0.43529, 0.03529, 0.43137),
    (0.43922, 0.01176, 0.42745),
    (0.44314, 0.00000, 0.42353),
    (0.44706, 0.01569, 0.41961),
    (0.45098, 0.04314, 0.41569),
    (0.45490, 0.08235, 0.41176),
    (0.45882, 0.12549, 0.40784),
    (0.46275, 0.17647, 0.40392),
    (0.46667, 0.23529, 0.40000),
    (0.47059, 0.29804, 0.39608),
    (0.47843, 0.36471, 0.39216),
    (0.48235, 0.43137, 0.38824),
    (0.48627, 0.50980, 0.38431),
    (0.49020, 0.43137, 0.38039),
    (0.49412, 0.36471, 0.37647),
    (0.49804, 0.29804, 0.37255),
    (0.50196, 0.23529, 0.36471),
    (0.50588, 0.17647, 0.36078),
    (0.50980, 0.12549, 0.35686),
    (0.51373, 0.08235, 0.35294),
    (0.51765, 0.04314, 0.34902),
    (0.52157, 0.01569, 0.34510),
    (0.52549, 0.00000, 0.34118),
    (0.52941, 0.01569, 0.33725),
    (0.53725, 0.05098, 0.33333),
    (0.54118, 0.09412, 0.32941),
    (0.54510, 0.14510, 0.32549),
    (0.54902, 0.20784, 0.32157),
    (0.55294, 0.27059, 0.31765),
    (0.55686, 0.34118, 0.31373),
    (0.56078, 0.41961, 0.30980),
    (0.56471, 0.50196, 0.30588),
    (0.56863, 0.58824, 0.30196),
    (0.57255, 0.50196, 0.29804),
    (0.57647, 0.41961, 0.29804),
    (0.58431, 0.34118, 0.29412),
    (0.58824, 0.27059, 0.29020),
    (0.59216, 0.20784, 0.28627),
    (0.59608, 0.14510, 0.28235),
    (0.60000, 0.09412, 0.27843),
    (0.60392, 0.05098, 0.27451),
    (0.60784, 0.01569, 0.27059),
    (0.61176, 0.00000, 0.26667),
    (0.61569, 0.01961, 0.26275),
    (0.61961, 0.05882, 0.25882),
    (0.62745, 0.10588, 0.25490),
    (0.63137, 0.16863, 0.25098),
    (0.63529, 0.23529, 0.24706),
    (0.63922, 0.30980, 0.24314),
    (0.64314, 0.38824, 0.23922),
    (0.64706, 0.47451, 0.23529),
    (0.65098, 0.56863, 0.23137),
    (0.65490, 0.66667, 0.22745),
    (0.66275, 0.56863, 0.22353),
    (0.66667, 0.47451, 0.21961),
    (0.67059, 0.38824, 0.21569),
    (0.67451, 0.30980, 0.21176),
    (0.67843, 0.23529, 0.21176),
    (0.68235, 0.16863, 0.20784),
    (0.68627, 0.10588, 0.20392),
    (0.69020, 0.05882, 0.20000),
    (0.69804, 0.01961, 0.19608),
    (0.70196, 0.00000, 0.19216),
    (0.70588, 0.02353, 0.18824),
    (0.70980, 0.06275, 0.18431),
    (0.71373, 0.12157, 0.18039),
    (0.71765, 0.18824, 0.17647),
    (0.72157, 0.26275, 0.17255),
    (0.72549, 0.34510, 0.17255),
    (0.73333, 0.43529, 0.16863),
    (0.73725, 0.52941, 0.16471),
    (0.74118, 0.63529, 0.16078),
    (0.74510, 0.74510, 0.15686),
    (0.74902, 0.63529, 0.15294),
    (0.75294, 0.52941, 0.14902),
    (0.75686, 0.43529, 0.14510),
    (0.76471, 0.34510, 0.14118),
    (0.76863, 0.26275, 0.14118),
    (0.77255, 0.18824, 0.13725),
    (0.77647, 0.12157, 0.13333),
    (0.78039, 0.06275, 0.12941),
    (0.78431, 0.02353, 0.12549),
    (0.79216, 0.00000, 0.12157),
    (0.79608, 0.02353, 0.11765),
    (0.80000, 0.07059, 0.11765),
    (0.80392, 0.13333, 0.11373),
    (0.80784, 0.20784, 0.10980),
    (0.81176, 0.29020, 0.10588),
    (0.81569, 0.38039, 0.10196),
    (0.82353, 0.47843, 0.09804),
    (0.82745, 0.58824, 0.09804),
    (0.83137, 0.70196, 0.09412),
    (0.83529, 0.82353, 0.09020),
    (0.83922, 0.70196, 0.08627),
    (0.84314, 0.58824, 0.08235),
    (0.85098, 0.47843, 0.08235),
    (0.85490, 0.38039, 0.07843),
    (0.85882, 0.29020, 0.07451),
    (0.86275, 0.20784, 0.07059),
    (0.86667, 0.13333, 0.07059),
    (0.87059, 0.07059, 0.06667),
    (0.87843, 0.02353, 0.06275),
    (0.88235, 0.00000, 0.05882),
    (0.88627, 0.02745, 0.05490),
    (0.89020, 0.07843, 0.05490),
    (0.89412, 0.14510, 0.05098),
    (0.90196, 0.22745, 0.04706),
    (0.90588, 0.31765, 0.04706),
    (0.90980, 0.41569, 0.04314),
    (0.91373, 0.52549, 0.03922),
    (0.91765, 0.64314, 0.03529),
    (0.92157, 0.76863, 0.03529),
    (0.92941, 0.90196, 0.03137),
    (0.93333, 0.76863, 0.02745),
    (0.93725, 0.64314, 0.02745),
    (0.94118, 0.52549, 0.02353),
    (0.94510, 0.41569, 0.01961),
    (0.95294, 0.31765, 0.01961),
    (0.95686, 0.25882, 0.01569),
    (0.96078, 0.23137, 0.01569),
    (0.96471, 0.23922, 0.01176),
    (0.96863, 0.27843, 0.00784),
    (0.97647, 0.35294, 0.00784),
    (0.98039, 0.46275, 0.00392),
    (0.98431, 0.58431, 0.00392),
    (0.98824, 0.71373, 0.00000),
    (0.99216, 0.85098, 0.00000),
    (1.00000, 1.00000, 0.00000),
)
# ``cmap_random6``: 256 RGB triples forming a regular color cube walk --
# eight green levels cycle fastest, eight blue levels next, and four red
# levels slowest.  The red channel steps one entry *late* at each
# 64-entry boundary (entries 64, 128 and 192 keep the previous red
# level), which faithfully reproduces the original table.
_R6_GB_LEVELS = (0.00000, 0.14118, 0.28235, 0.42353,
                 0.56471, 0.70588, 0.84706, 0.98824)
_R6_R_LEVELS = (0.00000, 0.32941, 0.65882, 0.98824)
cmap_random6 = tuple(
    (
        _R6_R_LEVELS[max(i - 1, 0) // 64],   # red: advances one entry late
        _R6_GB_LEVELS[i % 8],                # green: fastest-cycling axis
        _R6_GB_LEVELS[(i // 8) % 8],         # blue: middle axis
    )
    for i in range(256)
)
# ``cmap_color``: a 16-entry palette (six gray levels followed by ten
# hues) stretched to 256 slots by duplicating every color 16 times, so
# indexing with a full 8-bit value still lands on one of 16 colors.
cmap_color = tuple(
    rgb
    for rgb in (
        (0.00000, 0.00000, 0.00000),
        (0.18431, 0.18431, 0.18431),
        (0.37255, 0.37255, 0.37255),
        (0.56078, 0.56078, 0.56078),
        (0.74902, 0.74902, 0.74902),
        (0.93725, 0.93725, 0.93725),
        (0.00000, 0.18431, 0.93725),
        (0.00000, 0.37255, 0.74902),
        (0.00000, 0.49804, 0.49804),
        (0.00000, 0.74902, 0.30980),
        (0.00000, 0.93725, 0.00000),
        (0.30980, 0.62353, 0.00000),
        (0.49804, 0.49804, 0.00000),
        (0.62353, 0.30980, 0.00000),
        (0.93725, 0.00000, 0.00000),
        (0.74902, 0.00000, 0.30980),
    )
    for _ in range(16)
)
cmap_standard = (
(0.00392, 0.00392, 0.33333),
(0.00784, 0.00784, 0.34118),
(0.01176, 0.01176, 0.34902),
(0.01569, 0.01569, 0.35686),
(0.01961, 0.01961, 0.36471),
(0.02353, 0.02353, 0.37255),
(0.02745, 0.02745, 0.38039),
(0.03137, 0.03137, 0.38824),
(0.03529, 0.03529, 0.39608),
(0.03922, 0.03922, 0.40392),
(0.04314, 0.04314, 0.41176),
(0.04706, 0.04706, 0.41961),
(0.05098, 0.05098, 0.42745),
(0.05490, 0.05490, 0.43529),
(0.05882, 0.05882, 0.44314),
(0.06275, 0.06275, 0.45098),
(0.06667, 0.06667, 0.45882),
(0.07059, 0.07059, 0.46667),
(0.07451, 0.07451, 0.47451),
(0.07843, 0.07843, 0.48235),
(0.08235, 0.08235, 0.49020),
(0.08627, 0.08627, 0.49804),
(0.09020, 0.09020, 0.50588),
(0.09412, 0.09412, 0.51373),
(0.09804, 0.09804, 0.52157),
(0.10196, 0.10196, 0.52941),
(0.10588, 0.10588, 0.53725),
(0.10980, 0.10980, 0.54510),
(0.11373, 0.11373, 0.55294),
(0.11765, 0.11765, 0.56078),
(0.12157, 0.12157, 0.56863),
(0.12549, 0.12549, 0.57647),
(0.12941, 0.12941, 0.58431),
(0.13333, 0.13333, 0.59216),
(0.13725, 0.13725, 0.60000),
(0.14118, 0.14118, 0.60784),
(0.14510, 0.14510, 0.61569),
(0.14902, 0.14902, 0.62353),
(0.15294, 0.15294, 0.63137),
(0.15686, 0.15686, 0.63922),
(0.16078, 0.16078, 0.64706),
(0.16471, 0.16471, 0.65490),
(0.16863, 0.16863, 0.66275),
(0.17255, 0.17255, 0.67059),
(0.17647, 0.17647, 0.67843),
(0.18039, 0.18039, 0.68627),
(0.18431, 0.18431, 0.69412),
(0.18824, 0.18824, 0.70196),
(0.19216, 0.19216, 0.70980),
(0.19608, 0.19608, 0.71765),
(0.20000, 0.20000, 0.72549),
(0.20392, 0.20392, 0.73333),
(0.20784, 0.20784, 0.74118),
(0.21176, 0.21176, 0.74902),
(0.21569, 0.21569, 0.75686),
(0.21961, 0.21961, 0.76471),
(0.22353, 0.22353, 0.77255),
(0.22745, 0.22745, 0.78039),
(0.23137, 0.23137, 0.78824),
(0.23529, 0.23529, 0.79608),
(0.23922, 0.23922, 0.80392),
(0.24314, 0.24314, 0.81176),
(0.24706, 0.24706, 0.81961),
(0.25098, 0.25098, 0.82745),
(0.25490, 0.25490, 0.83529),
(0.25882, 0.25882, 0.84314),
(0.26275, 0.26275, 0.85098),
(0.26667, 0.26667, 0.85882),
(0.27059, 0.27059, 0.86667),
(0.27451, 0.27451, 0.87451),
(0.27843, 0.27843, 0.88235),
(0.28235, 0.28235, 0.89020),
(0.28627, 0.28627, 0.89804),
(0.29020, 0.29020, 0.90588),
(0.29412, 0.29412, 0.91373),
(0.29804, 0.29804, 0.92157),
(0.30196, 0.30196, 0.92941),
(0.30588, 0.30588, 0.93725),
(0.30980, 0.30980, 0.94510),
(0.31373, 0.31373, 0.95294),
(0.31765, 0.31765, 0.96078),
(0.32157, 0.32157, 0.96863),
(0.32549, 0.32549, 0.97647),
(0.32941, 0.32941, 0.98431),
(0.33333, 0.33333, 0.99216),
(0.00392, 0.33333, 0.00392),
(0.00784, 0.34118, 0.00784),
(0.01176, 0.34902, 0.01176),
(0.01569, 0.35686, 0.01569),
(0.01961, 0.36471, 0.01961),
(0.02353, 0.37255, 0.02353),
(0.02745, 0.38039, 0.02745),
(0.03137, 0.38824, 0.03137),
(0.03529, 0.39608, 0.03529),
(0.03922, 0.40392, 0.03922),
(0.04314, 0.41176, 0.04314),
(0.04706, 0.41961, 0.04706),
(0.05098, 0.42745, 0.05098),
(0.05490, 0.43529, 0.05490),
(0.05882, 0.44314, 0.05882),
(0.06275, 0.45098, 0.06275),
(0.06667, 0.45882, 0.06667),
(0.07059, 0.46667, 0.07059),
(0.07451, 0.47451, 0.07451),
(0.07843, 0.48235, 0.07843),
(0.08235, 0.49020, 0.08235),
(0.08627, 0.49804, 0.08627),
(0.09020, 0.50588, 0.09020),
(0.09412, 0.51373, 0.09412),
(0.09804, 0.52157, 0.09804),
(0.10196, 0.52941, 0.10196),
(0.10588, 0.53725, 0.10588),
(0.10980, 0.54510, 0.10980),
(0.11373, 0.55294, 0.11373),
(0.11765, 0.56078, 0.11765),
(0.12157, 0.56863, 0.12157),
(0.12549, 0.57647, 0.12549),
(0.12941, 0.58431, 0.12941),
(0.13333, 0.59216, 0.13333),
(0.13725, 0.60000, 0.13725),
(0.14118, 0.60784, 0.14118),
(0.14510, 0.61569, 0.14510),
(0.14902, 0.62353, 0.14902),
(0.15294, 0.63137, 0.15294),
(0.15686, 0.63922, 0.15686),
(0.16078, 0.64706, 0.16078),
(0.16471, 0.65490, 0.16471),
(0.16863, 0.66275, 0.16863),
(0.17255, 0.67059, 0.17255),
(0.17647, 0.67843, 0.17647),
(0.18039, 0.68627, 0.18039),
(0.18431, 0.69412, 0.18431),
(0.18824, 0.70196, 0.18824),
(0.19216, 0.70980, 0.19216),
(0.19608, 0.71765, 0.19608),
(0.20000, 0.72549, 0.20000),
(0.20392, 0.73333, 0.20392),
(0.20784, 0.74118, 0.20784),
(0.21176, 0.74902, 0.21176),
(0.21569, 0.75686, 0.21569),
(0.21961, 0.76471, 0.21961),
(0.22353, 0.77255, 0.22353),
(0.22745, 0.78039, 0.22745),
(0.23137, 0.78824, 0.23137),
(0.23529, 0.79608, 0.23529),
(0.23922, 0.80392, 0.23922),
(0.24314, 0.81176, 0.24314),
(0.24706, 0.81961, 0.24706),
(0.25098, 0.82745, 0.25098),
(0.25490, 0.83529, 0.25490),
(0.25882, 0.84314, 0.25882),
(0.26275, 0.85098, 0.26275),
(0.26667, 0.85882, 0.26667),
(0.27059, 0.86667, 0.27059),
(0.27451, 0.87451, 0.27451),
(0.27843, 0.88235, 0.27843),
(0.28235, 0.89020, 0.28235),
(0.28627, 0.89804, 0.28627),
(0.29020, 0.90588, 0.29020),
(0.29412, 0.91373, 0.29412),
(0.29804, 0.92157, 0.29804),
(0.30196, 0.92941, 0.30196),
(0.30588, 0.93725, 0.30588),
(0.30980, 0.94510, 0.30980),
(0.31373, 0.95294, 0.31373),
(0.31765, 0.96078, 0.31765),
(0.32157, 0.96863, 0.32157),
(0.32549, 0.97647, 0.32549),
(0.32941, 0.98431, 0.32941),
(0.33333, 0.99216, 0.33333),
(0.33333, 0.00392, 0.00392),
(0.34118, 0.00784, 0.00784),
(0.34902, 0.01176, 0.01176),
(0.35686, 0.01569, 0.01569),
(0.36471, 0.01961, 0.01961),
(0.37255, 0.02353, 0.02353),
(0.38039, 0.02745, 0.02745),
(0.38824, 0.03137, 0.03137),
(0.39608, 0.03529, 0.03529),
(0.40392, 0.03922, 0.03922),
(0.41176, 0.04314, 0.04314),
(0.41961, 0.04706, 0.04706),
(0.42745, 0.05098, 0.05098),
(0.43529, 0.05490, 0.05490),
(0.44314, 0.05882, 0.05882),
(0.45098, 0.06275, 0.06275),
(0.45882, 0.06667, 0.06667),
(0.46667, 0.07059, 0.07059),
(0.47451, 0.07451, 0.07451),
(0.48235, 0.07843, 0.07843),
(0.49020, 0.08235, 0.08235),
(0.49804, 0.08627, 0.08627),
(0.50588, 0.09020, 0.09020),
(0.51373, 0.09412, 0.09412),
(0.52157, 0.09804, 0.09804),
(0.52941, 0.10196, 0.10196),
(0.53725, 0.10588, 0.10588),
(0.54510, 0.10980, 0.10980),
(0.55294, 0.11373, 0.11373),
(0.56078, 0.11765, 0.11765),
(0.56863, 0.12157, 0.12157),
(0.57647, 0.12549, 0.12549),
(0.58431, 0.12941, 0.12941),
(0.59216, 0.13333, 0.13333),
(0.60000, 0.13725, 0.13725),
(0.60784, 0.14118, 0.14118),
(0.61569, 0.14510, 0.14510),
(0.62353, 0.14902, 0.14902),
(0.63137, 0.15294, 0.15294),
(0.63922, 0.15686, 0.15686),
(0.64706, 0.16078, 0.16078),
(0.65490, 0.16471, 0.16471),
(0.66275, 0.16863, 0.16863),
(0.67059, 0.17255, 0.17255),
(0.67843, 0.17647, 0.17647),
(0.68627, 0.18039, 0.18039),
(0.69412, 0.18431, 0.18431),
(0.70196, 0.18824, 0.18824),
(0.70980, 0.19216, 0.19216),
(0.71765, 0.19608, 0.19608),
(0.72549, 0.20000, 0.20000),
(0.73333, 0.20392, 0.20392),
(0.74118, 0.20784, 0.20784),
(0.74902, 0.21176, 0.21176),
(0.75686, 0.21569, 0.21569),
(0.76471, 0.21961, 0.21961),
(0.77255, 0.22353, 0.22353),
(0.78039, 0.22745, 0.22745),
(0.78824, 0.23137, 0.23137),
(0.79608, 0.23529, 0.23529),
(0.80392, 0.23922, 0.23922),
(0.81176, 0.24314, 0.24314),
(0.81961, 0.24706, 0.24706),
(0.82745, 0.25098, 0.25098),
(0.83529, 0.25490, 0.25490),
(0.84314, 0.25882, 0.25882),
(0.85098, 0.26275, 0.26275),
(0.85882, 0.26667, 0.26667),
(0.86667, 0.27059, 0.27059),
(0.87451, 0.27451, 0.27451),
(0.88235, 0.27843, 0.27843),
(0.89020, 0.28235, 0.28235),
(0.89804, 0.28627, 0.28627),
(0.90588, 0.29020, 0.29020),
(0.91373, 0.29412, 0.29412),
(0.92157, 0.29804, 0.29804),
(0.92941, 0.30196, 0.30196),
(0.93725, 0.30588, 0.30588),
(0.94510, 0.30980, 0.30980),
(0.95294, 0.31373, 0.31373),
(0.96078, 0.31765, 0.31765),
(0.96863, 0.32157, 0.32157),
(0.97647, 0.32549, 0.32549),
(0.98431, 0.32941, 0.32941),
(0.99216, 0.33333, 0.33333),
(1.00000, 0.33725, 0.33725),
)
# 256-entry blue lookup table.  R and G follow a quartic ramp of the index —
# staying near zero for most of the range — while B rises linearly, so the map
# runs from black through deep blue and fades to white at the top.  Every
# channel is the 5-decimal rounding of its exact value.
cmap_blulut = tuple(
    (round((i / 255) ** 4, 5), round((i / 255) ** 4, 5), round(i / 255, 5))
    for i in range(256)
)
# 256-entry pure-green ramp: G rises linearly with the index (5-decimal
# rounding of i/255) while R stays 0.  The final two entries carry a small
# nonzero blue component — reproduced verbatim from the original table
# (possibly a quirk of whatever tool generated it).
cmap_green = tuple(
    (0.0, round(i / 255, 5), 0.0) for i in range(254)
) + (
    (0.0, 0.99608, 0.00392),
    (0.0, 1.0, 0.00784),
)
# 256-entry "staircase" table in three segments (85 blue, 85 green, 86 red
# rows).  Within a segment two channels ramp linearly with the index while the
# remaining channel is quantised to one of five plateau levels, stepping every
# 16 rows; the last five rows of the blue and green segments blend toward
# white.  The red segment's six tail rows are irregular and are reproduced
# verbatim from the original table.
_levels = (0.31373, 0.47059, 0.62745, 0.78431, 0.94118)
_ramp = tuple(round(j / 255, 5) for j in range(1, 81))
_white_blend = (0.95294, 0.96471, 0.97647, 0.98824, 1.0)
_blue_tail = tuple(
    (round(j / 255, 5), round(j / 255, 5), w)
    for j, w in zip(range(81, 86), _white_blend)
)
_green_tail = tuple(
    (round(j / 255, 5), w, round(j / 255, 5))
    for j, w in zip(range(81, 86), _white_blend)
)
cmap_staircase = (
    tuple((r, r, _levels[k // 16]) for k, r in enumerate(_ramp)) + _blue_tail
    + tuple((r, _levels[k // 16], r) for k, r in enumerate(_ramp)) + _green_tail
    + tuple((_levels[k // 16], r, r) for k, r in enumerate(_ramp))
    + (
        (0.94902, 0.39216, 0.39216),
        (0.96078, 0.52941, 0.52941),
        (0.97255, 0.66667, 0.66667),
        (0.98431, 0.80392, 0.80392),
        (0.99216, 0.8, 0.8),
        (1.0, 1.0, 1.0),
    )
)
del _levels, _ramp, _white_blend, _blue_tail, _green_tail
# 256-entry "random" table: short runs of saturated colours stepping around
# the hue circle, then longer 8-10 row runs walking the same colour sequence
# in reverse, with single transitional rows between runs.  Stored run-length
# encoded as (count, row) pairs; all values are verbatim from the original
# table and the counts sum to 256.
_runs = (
    (3, (0.0, 0.0, 0.0)),
    (2, (0.0, 0.0, 0.70588)),
    (2, (0.0, 0.0, 0.86275)),
    (3, (0.47059, 0.0, 0.86275)),
    (2, (0.70588, 0.0, 0.90196)),
    (2, (1.0, 0.0, 1.0)),
    (2, (1.0, 0.0, 0.70588)),
    (1, (1.0, 0.0, 0.51765)),
    (2, (1.0, 0.0, 0.0)),
    (2, (1.0, 0.47059, 0.0)),
    (2, (0.98824, 0.72549, 0.0)),
    (1, (0.98431, 0.81176, 0.0)),
    (2, (0.98431, 0.85098, 0.0)),
    (2, (1.0, 1.0, 0.0)),
    (2, (0.70588, 1.0, 0.0)),
    (3, (0.0, 1.0, 0.0)),
    (2, (0.0, 1.0, 0.70588)),
    (2, (0.0, 1.0, 1.0)),
    (3, (0.47059, 0.78431, 1.0)),
    (2, (0.62745, 0.62745, 1.0)),
    (2, (0.78431, 0.47059, 1.0)),
    (2, (1.0, 0.70588, 1.0)),
    (1, (1.0, 0.76863, 1.0)),
    (2, (1.0, 0.86275, 1.0)),
    (4, (1.0, 1.0, 1.0)),
    (1, (1.0, 0.89804, 1.0)),
    (9, (1.0, 0.86275, 1.0)),
    (9, (1.0, 0.70588, 1.0)),
    (1, (0.92157, 0.61961, 1.0)),
    (9, (0.78431, 0.47059, 1.0)),
    (1, (0.65882, 0.59608, 1.0)),
    (9, (0.62745, 0.62745, 1.0)),
    (9, (0.47059, 0.78431, 1.0)),
    (1, (0.26275, 0.87843, 1.0)),
    (9, (0.0, 1.0, 1.0)),
    (1, (0.0, 1.0, 0.74118)),
    (8, (0.0, 1.0, 0.70588)),
    (1, (0.0, 1.0, 0.6549)),
    (9, (0.0, 1.0, 0.0)),
    (1, (0.36078, 1.0, 0.0)),
    (9, (0.70588, 1.0, 0.0)),
    (1, (0.98431, 1.0, 0.0)),
    (8, (1.0, 1.0, 0.0)),
    (1, (0.99608, 0.97647, 0.0)),
    (9, (0.98431, 0.85098, 0.0)),
    (1, (0.98824, 0.77647, 0.0)),
    (9, (0.98824, 0.72549, 0.0)),
    (9, (1.0, 0.47059, 0.0)),
    (1, (1.0, 0.36863, 0.0)),
    (9, (1.0, 0.0, 0.0)),
    (1, (1.0, 0.0, 0.46667)),
    (9, (1.0, 0.0, 0.70588)),
    (9, (1.0, 0.0, 1.0)),
    (1, (0.91373, 0.0, 0.97255)),
    (9, (0.70588, 0.0, 0.90196)),
    (1, (0.53333, 0.0, 0.87451)),
    (9, (0.47059, 0.0, 0.86275)),
    (9, (0.0, 0.0, 0.86275)),
    (1, (0.0, 0.0, 0.80392)),
    (9, (0.0, 0.0, 0.70588)),
    (1, (0.0, 0.0, 0.13725)),
    (9, (0.0, 0.0, 0.0)),
)
cmap_random = tuple(row for count, row in _runs for _ in range(count))
del _runs
cmap_blue = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00000, 0.00392),
(0.00000, 0.00000, 0.00784),
(0.00000, 0.00000, 0.01176),
(0.00000, 0.00000, 0.01569),
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.02353),
(0.00000, 0.00000, 0.02745),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.03529),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.04314),
(0.00000, 0.00000, 0.04706),
(0.00000, 0.00000, 0.05098),
(0.00000, 0.00000, 0.05490),
(0.00000, 0.00000, 0.05882),
(0.00000, 0.00000, 0.06275),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.07059),
(0.00000, 0.00000, 0.07451),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.08235),
(0.00000, 0.00000, 0.08627),
(0.00000, 0.00000, 0.09020),
(0.00000, 0.00000, 0.09412),
(0.00000, 0.00000, 0.09804),
(0.00000, 0.00000, 0.10196),
(0.00000, 0.00000, 0.10588),
(0.00000, 0.00000, 0.10980),
(0.00000, 0.00000, 0.11373),
(0.00000, 0.00000, 0.11765),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.12549),
(0.00000, 0.00000, 0.12941),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.14118),
(0.00000, 0.00000, 0.14510),
(0.00000, 0.00000, 0.14902),
(0.00000, 0.00000, 0.15294),
(0.00000, 0.00000, 0.15686),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16863),
(0.00000, 0.00000, 0.17255),
(0.00000, 0.00000, 0.17647),
(0.00000, 0.00000, 0.18039),
(0.00000, 0.00000, 0.18431),
(0.00000, 0.00000, 0.18824),
(0.00000, 0.00000, 0.19216),
(0.00000, 0.00000, 0.19608),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20392),
(0.00000, 0.00000, 0.20784),
(0.00000, 0.00000, 0.21176),
(0.00000, 0.00000, 0.21569),
(0.00000, 0.00000, 0.21961),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.22745),
(0.00000, 0.00000, 0.23137),
(0.00000, 0.00000, 0.23529),
(0.00000, 0.00000, 0.23922),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.24706),
(0.00000, 0.00000, 0.25098),
(0.00000, 0.00000, 0.25490),
(0.00000, 0.00000, 0.25882),
(0.00000, 0.00000, 0.26275),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.27059),
(0.00000, 0.00000, 0.27451),
(0.00000, 0.00000, 0.27843),
(0.00000, 0.00000, 0.28235),
(0.00000, 0.00000, 0.28627),
(0.00000, 0.00000, 0.29020),
(0.00000, 0.00000, 0.29412),
(0.00000, 0.00000, 0.29804),
(0.00000, 0.00000, 0.30196),
(0.00000, 0.00000, 0.30588),
(0.00000, 0.00000, 0.30980),
(0.00000, 0.00000, 0.31373),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.32157),
(0.00000, 0.00000, 0.32549),
(0.00000, 0.00000, 0.32941),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33725),
(0.00000, 0.00000, 0.34118),
(0.00000, 0.00000, 0.34510),
(0.00000, 0.00000, 0.34902),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.35686),
(0.00000, 0.00000, 0.36078),
(0.00000, 0.00000, 0.36471),
(0.00000, 0.00000, 0.36863),
(0.00000, 0.00000, 0.37255),
(0.00000, 0.00000, 0.37647),
(0.00000, 0.00000, 0.38039),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.38824),
(0.00000, 0.00000, 0.39216),
(0.00000, 0.00000, 0.39608),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40392),
(0.00000, 0.00000, 0.40784),
(0.00000, 0.00000, 0.41176),
(0.00000, 0.00000, 0.41569),
(0.00000, 0.00000, 0.41961),
(0.00000, 0.00000, 0.42353),
(0.00000, 0.00000, 0.42745),
(0.00000, 0.00000, 0.43137),
(0.00000, 0.00000, 0.43529),
(0.00000, 0.00000, 0.43922),
(0.00000, 0.00000, 0.44314),
(0.00000, 0.00000, 0.44706),
(0.00000, 0.00000, 0.45098),
(0.00000, 0.00000, 0.45490),
(0.00000, 0.00000, 0.45882),
(0.00000, 0.00000, 0.46275),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47451),
(0.00000, 0.00000, 0.47843),
(0.00000, 0.00000, 0.48235),
(0.00000, 0.00000, 0.48627),
(0.00000, 0.00000, 0.49020),
(0.00000, 0.00000, 0.49412),
(0.00000, 0.00000, 0.49804),
(0.00000, 0.00000, 0.50196),
(0.00000, 0.00000, 0.50588),
(0.00000, 0.00000, 0.50980),
(0.00000, 0.00000, 0.51373),
(0.00000, 0.00000, 0.51765),
(0.00000, 0.00000, 0.52157),
(0.00000, 0.00000, 0.52549),
(0.00000, 0.00000, 0.52941),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53725),
(0.00000, 0.00000, 0.54118),
(0.00000, 0.00000, 0.54510),
(0.00000, 0.00000, 0.54902),
(0.00000, 0.00000, 0.55294),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.56078),
(0.00000, 0.00000, 0.56471),
(0.00000, 0.00000, 0.56863),
(0.00000, 0.00000, 0.57255),
(0.00000, 0.00000, 0.57647),
(0.00000, 0.00000, 0.58039),
(0.00000, 0.00000, 0.58431),
(0.00000, 0.00000, 0.58824),
(0.00000, 0.00000, 0.59216),
(0.00000, 0.00000, 0.59608),
(0.00000, 0.00000, 0.60000),
(0.00000, 0.00000, 0.60392),
(0.00000, 0.00000, 0.60784),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.61569),
(0.00000, 0.00000, 0.61961),
(0.00000, 0.00000, 0.62353),
(0.00000, 0.00000, 0.62745),
(0.00000, 0.00000, 0.63137),
(0.00000, 0.00000, 0.63529),
(0.00000, 0.00000, 0.63922),
(0.00000, 0.00000, 0.64314),
(0.00000, 0.00000, 0.64706),
(0.00000, 0.00000, 0.65098),
(0.00000, 0.00000, 0.65490),
(0.00000, 0.00000, 0.65882),
(0.00000, 0.00000, 0.66275),
(0.00000, 0.00000, 0.66667),
(0.00000, 0.00000, 0.67059),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.67843),
(0.00000, 0.00000, 0.68235),
(0.00000, 0.00000, 0.68627),
(0.00000, 0.00000, 0.69020),
(0.00000, 0.00000, 0.69412),
(0.00000, 0.00000, 0.69804),
(0.00000, 0.00000, 0.70196),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70980),
(0.00000, 0.00000, 0.71373),
(0.00000, 0.00000, 0.71765),
(0.00000, 0.00000, 0.72157),
(0.00000, 0.00000, 0.72549),
(0.00000, 0.00000, 0.72941),
(0.00000, 0.00000, 0.73333),
(0.00000, 0.00000, 0.73725),
(0.00000, 0.00000, 0.74118),
(0.00000, 0.00000, 0.74510),
(0.00000, 0.00000, 0.74902),
(0.00000, 0.00000, 0.75294),
(0.00000, 0.00000, 0.75686),
(0.00000, 0.00000, 0.76078),
(0.00000, 0.00000, 0.76471),
(0.00000, 0.00000, 0.76863),
(0.00000, 0.00000, 0.77255),
(0.00000, 0.00000, 0.77647),
(0.00000, 0.00000, 0.78039),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78824),
(0.00000, 0.00000, 0.79216),
(0.00000, 0.00000, 0.79608),
(0.00000, 0.00000, 0.80000),
(0.00000, 0.00000, 0.80392),
(0.00000, 0.00000, 0.80784),
(0.00000, 0.00000, 0.81176),
(0.00000, 0.00000, 0.81569),
(0.00000, 0.00000, 0.81961),
(0.00000, 0.00000, 0.82353),
(0.00000, 0.00000, 0.82745),
(0.00000, 0.00000, 0.83137),
(0.00000, 0.00000, 0.83529),
(0.00000, 0.00000, 0.83922),
(0.00000, 0.00000, 0.84314),
(0.00000, 0.00000, 0.84706),
(0.00000, 0.00000, 0.85098),
(0.00000, 0.00000, 0.85490),
(0.00000, 0.00000, 0.85882),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86667),
(0.00000, 0.00000, 0.87059),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.87843),
(0.00000, 0.00000, 0.88235),
(0.00000, 0.00000, 0.88627),
(0.00000, 0.00000, 0.89020),
(0.00000, 0.00000, 0.89412),
(0.00000, 0.00000, 0.89804),
(0.00000, 0.00000, 0.90196),
(0.00000, 0.00000, 0.90588),
(0.00000, 0.00000, 0.90980),
(0.00000, 0.00000, 0.91373),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.92157),
(0.00000, 0.00000, 0.92549),
(0.00000, 0.00000, 0.92941),
(0.00000, 0.00000, 0.93333),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.94118),
(0.00000, 0.00000, 0.94510),
(0.00000, 0.00000, 0.94902),
(0.00000, 0.00000, 0.95294),
(0.00000, 0.00000, 0.95686),
(0.00000, 0.00000, 0.96078),
(0.00000, 0.00000, 0.96471),
(0.00000, 0.00000, 0.96863),
(0.00000, 0.00000, 0.97255),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.98039),
(0.00000, 0.00392, 0.98431),
(0.00000, 0.00784, 0.98824),
(0.00000, 0.01176, 0.99216),
(0.00000, 0.01569, 0.99608),
(0.00000, 0.00392, 1.00000),
)
cmap_red = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00392, 0.00000, 0.00000),
(0.00784, 0.00000, 0.00000),
(0.01176, 0.00000, 0.00000),
(0.01569, 0.00000, 0.00000),
(0.01961, 0.00000, 0.00000),
(0.02353, 0.00000, 0.00000),
(0.02745, 0.00000, 0.00000),
(0.03137, 0.00000, 0.00000),
(0.03529, 0.00000, 0.00000),
(0.03922, 0.00000, 0.00000),
(0.04314, 0.00000, 0.00000),
(0.04706, 0.00000, 0.00000),
(0.05098, 0.00000, 0.00000),
(0.05490, 0.00000, 0.00000),
(0.05882, 0.00000, 0.00000),
(0.06275, 0.00000, 0.00000),
(0.06667, 0.00000, 0.00000),
(0.07059, 0.00000, 0.00000),
(0.07451, 0.00000, 0.00000),
(0.07843, 0.00000, 0.00000),
(0.08235, 0.00000, 0.00000),
(0.08627, 0.00000, 0.00000),
(0.09020, 0.00000, 0.00000),
(0.09412, 0.00000, 0.00000),
(0.09804, 0.00000, 0.00000),
(0.10196, 0.00000, 0.00000),
(0.10588, 0.00000, 0.00000),
(0.10980, 0.00000, 0.00000),
(0.11373, 0.00000, 0.00000),
(0.11765, 0.00000, 0.00000),
(0.12157, 0.00000, 0.00000),
(0.12549, 0.00000, 0.00000),
(0.12941, 0.00000, 0.00000),
(0.13333, 0.00000, 0.00000),
(0.13725, 0.00000, 0.00000),
(0.14118, 0.00000, 0.00000),
(0.14510, 0.00000, 0.00000),
(0.14902, 0.00000, 0.00000),
(0.15294, 0.00000, 0.00000),
(0.15686, 0.00000, 0.00000),
(0.16078, 0.00000, 0.00000),
(0.16471, 0.00000, 0.00000),
(0.16863, 0.00000, 0.00000),
(0.17255, 0.00000, 0.00000),
(0.17647, 0.00000, 0.00000),
(0.18039, 0.00000, 0.00000),
(0.18431, 0.00000, 0.00000),
(0.18824, 0.00000, 0.00000),
(0.19216, 0.00000, 0.00000),
(0.19608, 0.00000, 0.00000),
(0.20000, 0.00000, 0.00000),
(0.20392, 0.00000, 0.00000),
(0.20784, 0.00000, 0.00000),
(0.21176, 0.00000, 0.00000),
(0.21569, 0.00000, 0.00000),
(0.21961, 0.00000, 0.00000),
(0.22353, 0.00000, 0.00000),
(0.22745, 0.00000, 0.00000),
(0.23137, 0.00000, 0.00000),
(0.23529, 0.00000, 0.00000),
(0.23922, 0.00000, 0.00000),
(0.24314, 0.00000, 0.00000),
(0.24706, 0.00000, 0.00000),
(0.25098, 0.00000, 0.00000),
(0.25490, 0.00000, 0.00000),
(0.25882, 0.00000, 0.00000),
(0.26275, 0.00000, 0.00000),
(0.26667, 0.00000, 0.00000),
(0.27059, 0.00000, 0.00000),
(0.27451, 0.00000, 0.00000),
(0.27843, 0.00000, 0.00000),
(0.28235, 0.00000, 0.00000),
(0.28627, 0.00000, 0.00000),
(0.29020, 0.00000, 0.00000),
(0.29412, 0.00000, 0.00000),
(0.29804, 0.00000, 0.00000),
(0.30196, 0.00000, 0.00000),
(0.30588, 0.00000, 0.00000),
(0.30980, 0.00000, 0.00000),
(0.31373, 0.00000, 0.00000),
(0.31765, 0.00000, 0.00000),
(0.32157, 0.00000, 0.00000),
(0.32549, 0.00000, 0.00000),
(0.32941, 0.00000, 0.00000),
(0.33333, 0.00000, 0.00000),
(0.33725, 0.00000, 0.00000),
(0.34118, 0.00000, 0.00000),
(0.34510, 0.00000, 0.00000),
(0.34902, 0.00000, 0.00000),
(0.35294, 0.00000, 0.00000),
(0.35686, 0.00000, 0.00000),
(0.36078, 0.00000, 0.00000),
(0.36471, 0.00000, 0.00000),
(0.36863, 0.00000, 0.00000),
(0.37255, 0.00000, 0.00000),
(0.37647, 0.00000, 0.00000),
(0.38039, 0.00000, 0.00000),
(0.38431, 0.00000, 0.00000),
(0.38824, 0.00000, 0.00000),
(0.39216, 0.00000, 0.00000),
(0.39608, 0.00000, 0.00000),
(0.40000, 0.00000, 0.00000),
(0.40392, 0.00000, 0.00000),
(0.40784, 0.00000, 0.00000),
(0.41176, 0.00000, 0.00000),
(0.41569, 0.00000, 0.00000),
(0.41961, 0.00000, 0.00000),
(0.42353, 0.00000, 0.00000),
(0.42745, 0.00000, 0.00000),
(0.43137, 0.00000, 0.00000),
(0.43529, 0.00000, 0.00000),
(0.43922, 0.00000, 0.00000),
(0.44314, 0.00000, 0.00000),
(0.44706, 0.00000, 0.00000),
(0.45098, 0.00000, 0.00000),
(0.45490, 0.00000, 0.00000),
(0.45882, 0.00000, 0.00000),
(0.46275, 0.00000, 0.00000),
(0.46667, 0.00000, 0.00000),
(0.47059, 0.00000, 0.00000),
(0.47451, 0.00000, 0.00000),
(0.47843, 0.00000, 0.00000),
(0.48235, 0.00000, 0.00000),
(0.48627, 0.00000, 0.00000),
(0.49020, 0.00000, 0.00000),
(0.49412, 0.00000, 0.00000),
(0.49804, 0.00000, 0.00000),
(0.50196, 0.00000, 0.00000),
(0.50588, 0.00000, 0.00000),
(0.50980, 0.00000, 0.00000),
(0.51373, 0.00000, 0.00000),
(0.51765, 0.00000, 0.00000),
(0.52157, 0.00000, 0.00000),
(0.52549, 0.00000, 0.00000),
(0.52941, 0.00000, 0.00000),
(0.53333, 0.00000, 0.00000),
(0.53725, 0.00000, 0.00000),
(0.54118, 0.00000, 0.00000),
(0.54510, 0.00000, 0.00000),
(0.54902, 0.00000, 0.00000),
(0.55294, 0.00000, 0.00000),
(0.55686, 0.00000, 0.00000),
(0.56078, 0.00000, 0.00000),
(0.56471, 0.00000, 0.00000),
(0.56863, 0.00000, 0.00000),
(0.57255, 0.00000, 0.00000),
(0.57647, 0.00000, 0.00000),
(0.58039, 0.00000, 0.00000),
(0.58431, 0.00000, 0.00000),
(0.58824, 0.00000, 0.00000),
(0.59216, 0.00000, 0.00000),
(0.59608, 0.00000, 0.00000),
(0.60000, 0.00000, 0.00000),
(0.60392, 0.00000, 0.00000),
(0.60784, 0.00000, 0.00000),
(0.61176, 0.00000, 0.00000),
(0.61569, 0.00000, 0.00000),
(0.61961, 0.00000, 0.00000),
(0.62353, 0.00000, 0.00000),
(0.62745, 0.00000, 0.00000),
(0.63137, 0.00000, 0.00000),
(0.63529, 0.00000, 0.00000),
(0.63922, 0.00000, 0.00000),
(0.64314, 0.00000, 0.00000),
(0.64706, 0.00000, 0.00000),
(0.65098, 0.00000, 0.00000),
(0.65490, 0.00000, 0.00000),
(0.65882, 0.00000, 0.00000),
(0.66275, 0.00000, 0.00000),
(0.66667, 0.00000, 0.00000),
(0.67059, 0.00000, 0.00000),
(0.67451, 0.00000, 0.00000),
(0.67843, 0.00000, 0.00000),
(0.68235, 0.00000, 0.00000),
(0.68627, 0.00000, 0.00000),
(0.69020, 0.00000, 0.00000),
(0.69412, 0.00000, 0.00000),
(0.69804, 0.00000, 0.00000),
(0.70196, 0.00000, 0.00000),
(0.70588, 0.00000, 0.00000),
(0.70980, 0.00000, 0.00000),
(0.71373, 0.00000, 0.00000),
(0.71765, 0.00000, 0.00000),
(0.72157, 0.00000, 0.00000),
(0.72549, 0.00000, 0.00000),
(0.72941, 0.00000, 0.00000),
(0.73333, 0.00000, 0.00000),
(0.73725, 0.00000, 0.00000),
(0.74118, 0.00000, 0.00000),
(0.74510, 0.00000, 0.00000),
(0.74902, 0.00000, 0.00000),
(0.75294, 0.00000, 0.00000),
(0.75686, 0.00000, 0.00000),
(0.76078, 0.00000, 0.00000),
(0.76471, 0.00000, 0.00000),
(0.76863, 0.00000, 0.00000),
(0.77255, 0.00000, 0.00000),
(0.77647, 0.00000, 0.00000),
(0.78039, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.79216, 0.00000, 0.00000),
(0.79608, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80392, 0.00000, 0.00000),
(0.80784, 0.00000, 0.00000),
(0.81176, 0.00000, 0.00000),
(0.81569, 0.00000, 0.00000),
(0.81961, 0.00000, 0.00000),
(0.82353, 0.00000, 0.00000),
(0.82745, 0.00000, 0.00000),
(0.83137, 0.00000, 0.00000),
(0.83529, 0.00000, 0.00000),
(0.83922, 0.00000, 0.00000),
(0.84314, 0.00000, 0.00000),
(0.84706, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85490, 0.00000, 0.00000),
(0.85882, 0.00000, 0.00000),
(0.86275, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.87059, 0.00000, 0.00000),
(0.87451, 0.00000, 0.00000),
(0.87843, 0.00000, 0.00000),
(0.88235, 0.00000, 0.00000),
(0.88627, 0.00000, 0.00000),
(0.89020, 0.00000, 0.00000),
(0.89412, 0.00000, 0.00000),
(0.89804, 0.00000, 0.00000),
(0.90196, 0.00000, 0.00000),
(0.90588, 0.00000, 0.00000),
(0.90980, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91765, 0.00000, 0.00000),
(0.92157, 0.00000, 0.00000),
(0.92549, 0.00000, 0.00000),
(0.92941, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.94118, 0.00000, 0.00000),
(0.94510, 0.00000, 0.00000),
(0.94902, 0.00000, 0.00000),
(0.95294, 0.00000, 0.00000),
(0.95686, 0.00000, 0.00000),
(0.96078, 0.00000, 0.00000),
(0.96471, 0.00000, 0.00000),
(0.96863, 0.00000, 0.00000),
(0.97255, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.98039, 0.00000, 0.00000),
(0.98431, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.99216, 0.00000, 0.00000),
(0.99608, 0.00000, 0.00392),
(1.00000, 0.00000, 0.00784),
)
cmap_aips0 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.47451, 0.00000, 0.60784),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.78431),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.37255, 0.65490, 0.92549),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.69412, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_stairs8 = (
(0.76471, 0.00000, 1.00000), # noqa
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(0.76471, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_idl11 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.00392, 0.00392),
(0.00000, 0.00784, 0.00784),
(0.00000, 0.01176, 0.01176),
(0.00000, 0.01569, 0.01569),
(0.00000, 0.03137, 0.03137),
(0.00000, 0.04706, 0.04706),
(0.00000, 0.06275, 0.06275),
(0.00000, 0.08235, 0.08235),
(0.00000, 0.09804, 0.09804),
(0.00000, 0.11373, 0.11373),
(0.00000, 0.12941, 0.12941),
(0.00000, 0.14902, 0.14902),
(0.00000, 0.16471, 0.16471),
(0.00000, 0.18039, 0.18039),
(0.00000, 0.19608, 0.19608),
(0.00000, 0.21569, 0.21569),
(0.00000, 0.23137, 0.23137),
(0.00000, 0.24706, 0.24706),
(0.00000, 0.26275, 0.26275),
(0.00000, 0.28235, 0.28235),
(0.00000, 0.29804, 0.29804),
(0.00000, 0.31373, 0.31373),
(0.00000, 0.32941, 0.32941),
(0.00000, 0.34902, 0.34902),
(0.00000, 0.36471, 0.36471),
(0.00000, 0.38039, 0.38039),
(0.00000, 0.39608, 0.39608),
(0.00000, 0.41569, 0.41569),
(0.00000, 0.43137, 0.43137),
(0.00000, 0.44706, 0.44706),
(0.00000, 0.46275, 0.46275),
(0.00000, 0.48235, 0.48235),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.51373, 0.51373),
(0.00000, 0.52941, 0.52941),
(0.00000, 0.54902, 0.54902),
(0.00000, 0.56471, 0.56471),
(0.00000, 0.58039, 0.58039),
(0.00000, 0.59608, 0.59608),
(0.00000, 0.61569, 0.61569),
(0.00000, 0.63137, 0.63137),
(0.00000, 0.64706, 0.64706),
(0.00000, 0.66275, 0.66275),
(0.00000, 0.68235, 0.68235),
(0.00000, 0.69804, 0.69804),
(0.00000, 0.71373, 0.71373),
(0.00000, 0.72941, 0.72941),
(0.00000, 0.74902, 0.74902),
(0.00000, 0.76471, 0.76471),
(0.00000, 0.78039, 0.78039),
(0.00000, 0.79608, 0.79608),
(0.00000, 0.81569, 0.81569),
(0.00000, 0.83137, 0.83137),
(0.00000, 0.84706, 0.84706),
(0.00000, 0.86275, 0.86275),
(0.00000, 0.88235, 0.88235),
(0.00000, 0.89804, 0.89804),
(0.00000, 0.91373, 0.91373),
(0.00000, 0.92941, 0.92941),
(0.00000, 0.94902, 0.94902),
(0.00000, 0.96471, 0.96471),
(0.00000, 0.98039, 0.98039),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.98431, 1.00000),
(0.00000, 0.96863, 1.00000),
(0.00000, 0.95294, 1.00000),
(0.00000, 0.93725, 1.00000),
(0.00000, 0.92157, 1.00000),
(0.00000, 0.90588, 1.00000),
(0.00000, 0.89020, 1.00000),
(0.00000, 0.87451, 1.00000),
(0.00000, 0.85882, 1.00000),
(0.00000, 0.84314, 1.00000),
(0.00000, 0.82745, 1.00000),
(0.00000, 0.81176, 1.00000),
(0.00000, 0.79608, 1.00000),
(0.00000, 0.78039, 1.00000),
(0.00000, 0.76471, 1.00000),
(0.00000, 0.74902, 1.00000),
(0.00000, 0.73333, 1.00000),
(0.00000, 0.71765, 1.00000),
(0.00000, 0.70196, 1.00000),
(0.00000, 0.68627, 1.00000),
(0.00000, 0.66667, 1.00000),
(0.00000, 0.65098, 1.00000),
(0.00000, 0.63529, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.60392, 1.00000),
(0.00000, 0.58824, 1.00000),
(0.00000, 0.57255, 1.00000),
(0.00000, 0.55686, 1.00000),
(0.00000, 0.54118, 1.00000),
(0.00000, 0.52549, 1.00000),
(0.00000, 0.50980, 1.00000),
(0.00000, 0.49412, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.46275, 1.00000),
(0.00000, 0.44706, 1.00000),
(0.00000, 0.43137, 1.00000),
(0.00000, 0.41569, 1.00000),
(0.00000, 0.40000, 1.00000),
(0.00000, 0.38431, 1.00000),
(0.00000, 0.36863, 1.00000),
(0.00000, 0.35294, 1.00000),
(0.00000, 0.33333, 1.00000),
(0.00000, 0.31765, 1.00000),
(0.00000, 0.30196, 1.00000),
(0.00000, 0.28627, 1.00000),
(0.00000, 0.27059, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.23922, 1.00000),
(0.00000, 0.22353, 1.00000),
(0.00000, 0.20784, 1.00000),
(0.00000, 0.19216, 1.00000),
(0.00000, 0.17647, 1.00000),
(0.00000, 0.16078, 1.00000),
(0.00000, 0.14510, 1.00000),
(0.00000, 0.12941, 1.00000),
(0.00000, 0.11373, 1.00000),
(0.00000, 0.09804, 1.00000),
(0.00000, 0.08235, 1.00000),
(0.00000, 0.06667, 1.00000),
(0.00000, 0.05098, 1.00000),
(0.00000, 0.03529, 1.00000),
(0.00000, 0.01961, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.01569, 0.00000, 1.00000),
(0.03137, 0.00000, 1.00000),
(0.04706, 0.00000, 1.00000),
(0.06275, 0.00000, 1.00000),
(0.07843, 0.00000, 1.00000),
(0.09412, 0.00000, 1.00000),
(0.10980, 0.00000, 1.00000),
(0.12549, 0.00000, 1.00000),
(0.14118, 0.00000, 1.00000),
(0.15686, 0.00000, 1.00000),
(0.17255, 0.00000, 1.00000),
(0.18824, 0.00000, 1.00000),
(0.20392, 0.00000, 1.00000),
(0.21961, 0.00000, 1.00000),
(0.23529, 0.00000, 1.00000),
(0.25098, 0.00000, 1.00000),
(0.26667, 0.00000, 1.00000),
(0.28235, 0.00000, 1.00000),
(0.29804, 0.00000, 1.00000),
(0.31373, 0.00000, 1.00000),
(0.33333, 0.00000, 1.00000),
(0.34902, 0.00000, 1.00000),
(0.36471, 0.00000, 1.00000),
(0.38039, 0.00000, 1.00000),
(0.39608, 0.00000, 1.00000),
(0.41176, 0.00000, 1.00000),
(0.42745, 0.00000, 1.00000),
(0.44314, 0.00000, 1.00000),
(0.45882, 0.00000, 1.00000),
(0.47451, 0.00000, 1.00000),
(0.49020, 0.00000, 1.00000),
(0.50588, 0.00000, 1.00000),
(0.52157, 0.00000, 1.00000),
(0.53725, 0.00000, 1.00000),
(0.55294, 0.00000, 1.00000),
(0.56863, 0.00000, 1.00000),
(0.58431, 0.00000, 1.00000),
(0.60000, 0.00000, 1.00000),
(0.61569, 0.00000, 1.00000),
(0.63137, 0.00000, 1.00000),
(0.64706, 0.00000, 1.00000),
(0.66667, 0.00000, 1.00000),
(0.68235, 0.00000, 1.00000),
(0.69804, 0.00000, 1.00000),
(0.71373, 0.00000, 1.00000),
(0.72941, 0.00000, 1.00000),
(0.74510, 0.00000, 1.00000),
(0.76078, 0.00000, 1.00000),
(0.77647, 0.00000, 1.00000),
(0.79216, 0.00000, 1.00000),
(0.80784, 0.00000, 1.00000),
(0.82353, 0.00000, 1.00000),
(0.83922, 0.00000, 1.00000),
(0.85490, 0.00000, 1.00000),
(0.87059, 0.00000, 1.00000),
(0.88627, 0.00000, 1.00000),
(0.90196, 0.00000, 1.00000),
(0.91765, 0.00000, 1.00000),
(0.93333, 0.00000, 1.00000),
(0.94902, 0.00000, 1.00000),
(0.96471, 0.00000, 1.00000),
(0.98039, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 0.98431),
(1.00000, 0.00000, 0.96863),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.93725),
(1.00000, 0.00000, 0.92157),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.89020),
(1.00000, 0.00000, 0.87451),
(1.00000, 0.00000, 0.85490),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.82353),
(1.00000, 0.00000, 0.80784),
(1.00000, 0.00000, 0.79216),
(1.00000, 0.00000, 0.77647),
(1.00000, 0.00000, 0.76078),
(1.00000, 0.00000, 0.74510),
(1.00000, 0.00000, 0.72941),
(1.00000, 0.00000, 0.70980),
(1.00000, 0.00000, 0.69412),
(1.00000, 0.00000, 0.67843),
(1.00000, 0.00000, 0.66275),
(1.00000, 0.00000, 0.64706),
(1.00000, 0.00000, 0.63137),
(1.00000, 0.00000, 0.61569),
(1.00000, 0.00000, 0.60000),
(1.00000, 0.00000, 0.58431),
(1.00000, 0.00000, 0.56471),
(1.00000, 0.00000, 0.54902),
(1.00000, 0.00000, 0.53333),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.48627),
(1.00000, 0.00000, 0.47059),
(1.00000, 0.00000, 0.45490),
(1.00000, 0.00000, 0.43922),
(1.00000, 0.00000, 0.41961),
(1.00000, 0.00000, 0.40392),
(1.00000, 0.00000, 0.38824),
(1.00000, 0.00000, 0.37255),
(1.00000, 0.00000, 0.35686),
(1.00000, 0.00000, 0.34118),
(1.00000, 0.00000, 0.32549),
(1.00000, 0.00000, 0.30980),
(1.00000, 0.00000, 0.29412),
(1.00000, 0.00000, 0.27451),
(1.00000, 0.00000, 0.25882),
(1.00000, 0.00000, 0.24314),
(1.00000, 0.00000, 0.22745),
(1.00000, 0.00000, 0.21176),
(1.00000, 0.00000, 0.19608),
(1.00000, 0.00000, 0.18039),
(1.00000, 0.00000, 0.16471),
(1.00000, 0.00000, 0.14902),
(1.00000, 0.00000, 0.12941),
(1.00000, 0.00000, 0.11373),
(1.00000, 0.00000, 0.09804),
(1.00000, 0.00000, 0.08235),
(1.00000, 0.00000, 0.06667),
(1.00000, 0.00000, 0.05098),
(1.00000, 0.00000, 0.03529),
(1.00000, 0.00000, 0.01961),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_stairs9 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
)
cmap_backgr = (
(0.00000, 0.00000, 0.00000), # noqa
(0.01587, 0.01587, 0.01587),
(0.03174, 0.03174, 0.03174),
(0.04761, 0.04761, 0.04761),
(0.06348, 0.06348, 0.06348),
(0.07935, 0.07935, 0.07935),
(0.09522, 0.09522, 0.09522),
(0.11109, 0.11109, 0.11109),
(0.12696, 0.12696, 0.12696),
(0.14283, 0.14283, 0.14283),
(0.15870, 0.15870, 0.15870),
(0.17457, 0.17457, 0.17457),
(0.19044, 0.19044, 0.19044),
(0.20631, 0.20631, 0.20631),
(0.22218, 0.22218, 0.22218),
(0.23805, 0.23805, 0.23805),
(0.25392, 0.25392, 0.25392),
(0.26979, 0.26979, 0.26979),
(0.28566, 0.28566, 0.28566),
(0.30153, 0.30153, 0.30153),
(0.31740, 0.31740, 0.31740),
(0.33327, 0.33327, 0.33327),
(0.34914, 0.34914, 0.34914),
(0.36501, 0.36501, 0.36501),
(0.38088, 0.38088, 0.38088),
(0.39675, 0.39675, 0.39675),
(0.41262, 0.41262, 0.41262),
(0.42849, 0.42849, 0.42849),
(0.44436, 0.44436, 0.44436),
(0.46023, 0.46023, 0.46023),
(0.47610, 0.47610, 0.47610),
(0.49197, 0.49197, 0.49197),
(0.50784, 0.50784, 0.50784),
(0.52371, 0.52371, 0.52371),
(0.53958, 0.53958, 0.53958),
(0.55545, 0.55545, 0.55545),
(0.57132, 0.57132, 0.57132),
(0.58719, 0.58719, 0.58719),
(0.60306, 0.60306, 0.60306),
(0.61893, 0.61893, 0.61893),
(0.63480, 0.63480, 0.63480),
(0.65067, 0.65067, 0.65067),
(0.66654, 0.66654, 0.66654),
(0.68241, 0.68241, 0.68241),
(0.69828, 0.69828, 0.69828),
(0.71415, 0.71415, 0.71415),
(0.73002, 0.73002, 0.73002),
(0.74589, 0.74589, 0.74589),
(0.76176, 0.76176, 0.76176),
(0.77763, 0.77763, 0.77763),
(0.79350, 0.79350, 0.79350),
(0.80937, 0.80937, 0.80937),
(0.82524, 0.82524, 0.82524),
(0.84111, 0.84111, 0.84111),
(0.85698, 0.85698, 0.85698),
(0.87285, 0.87285, 0.87285),
(0.88872, 0.88872, 0.88872),
(0.90459, 0.90459, 0.90459),
(0.92046, 0.92046, 0.92046),
(0.93633, 0.93633, 0.93633),
(0.95220, 0.95220, 0.95220),
(0.96807, 0.96807, 0.96807),
(0.98394, 0.98394, 0.98394),
(0.99981, 0.99981, 0.99981),
(0.00000, 0.00000, 0.99981),
(0.00000, 0.01587, 0.98394),
(0.00000, 0.03174, 0.96807),
(0.00000, 0.04761, 0.95220),
(0.00000, 0.06348, 0.93633),
(0.00000, 0.07935, 0.92046),
(0.00000, 0.09522, 0.90459),
(0.00000, 0.11109, 0.88872),
(0.00000, 0.12696, 0.87285),
(0.00000, 0.14283, 0.85698),
(0.00000, 0.15870, 0.84111),
(0.00000, 0.17457, 0.82524),
(0.00000, 0.19044, 0.80937),
(0.00000, 0.20631, 0.79350),
(0.00000, 0.22218, 0.77763),
(0.00000, 0.23805, 0.76176),
(0.00000, 0.25392, 0.74589),
(0.00000, 0.26979, 0.73002),
(0.00000, 0.28566, 0.71415),
(0.00000, 0.30153, 0.69828),
(0.00000, 0.31740, 0.68241),
(0.00000, 0.33327, 0.66654),
(0.00000, 0.34914, 0.65067),
(0.00000, 0.36501, 0.63480),
(0.00000, 0.38088, 0.61893),
(0.00000, 0.39675, 0.60306),
(0.00000, 0.41262, 0.58719),
(0.00000, 0.42849, 0.57132),
(0.00000, 0.44436, 0.55545),
(0.00000, 0.46023, 0.53958),
(0.00000, 0.47610, 0.52371),
(0.00000, 0.49197, 0.50784),
(0.00000, 0.50784, 0.49197),
(0.00000, 0.52371, 0.47610),
(0.00000, 0.53958, 0.46023),
(0.00000, 0.55545, 0.44436),
(0.00000, 0.57132, 0.42849),
(0.00000, 0.58719, 0.41262),
(0.00000, 0.60306, 0.39675),
(0.00000, 0.61893, 0.38088),
(0.00000, 0.63480, 0.36501),
(0.00000, 0.65067, 0.34914),
(0.00000, 0.66654, 0.33327),
(0.00000, 0.68241, 0.31740),
(0.00000, 0.69828, 0.30153),
(0.00000, 0.71415, 0.28566),
(0.00000, 0.73002, 0.26979),
(0.00000, 0.74589, 0.25392),
(0.00000, 0.76176, 0.23805),
(0.00000, 0.77763, 0.22218),
(0.00000, 0.79350, 0.20631),
(0.00000, 0.80937, 0.19044),
(0.00000, 0.82524, 0.17457),
(0.00000, 0.84111, 0.15870),
(0.00000, 0.85698, 0.14283),
(0.00000, 0.87285, 0.12696),
(0.00000, 0.88872, 0.11109),
(0.00000, 0.90459, 0.09522),
(0.00000, 0.92046, 0.07935),
(0.00000, 0.93633, 0.06348),
(0.00000, 0.95220, 0.04761),
(0.00000, 0.96807, 0.03174),
(0.00000, 0.98394, 0.01587),
(0.00000, 0.99981, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.01587, 1.00000, 0.00000),
(0.03174, 1.00000, 0.00000),
(0.04761, 1.00000, 0.00000),
(0.06348, 1.00000, 0.00000),
(0.07935, 1.00000, 0.00000),
(0.09522, 1.00000, 0.00000),
(0.11109, 1.00000, 0.00000),
(0.12696, 1.00000, 0.00000),
(0.14283, 1.00000, 0.00000),
(0.15870, 1.00000, 0.00000),
(0.17457, 1.00000, 0.00000),
(0.19044, 1.00000, 0.00000),
(0.20631, 1.00000, 0.00000),
(0.22218, 1.00000, 0.00000),
(0.23805, 1.00000, 0.00000),
(0.25392, 1.00000, 0.00000),
(0.26979, 1.00000, 0.00000),
(0.28566, 1.00000, 0.00000),
(0.30153, 1.00000, 0.00000),
(0.31740, 1.00000, 0.00000),
(0.33327, 1.00000, 0.00000),
(0.34914, 1.00000, 0.00000),
(0.36501, 1.00000, 0.00000),
(0.38088, 1.00000, 0.00000),
(0.39675, 1.00000, 0.00000),
(0.41262, 1.00000, 0.00000),
(0.42849, 1.00000, 0.00000),
(0.44436, 1.00000, 0.00000),
(0.46023, 1.00000, 0.00000),
(0.47610, 1.00000, 0.00000),
(0.49197, 1.00000, 0.00000),
(0.50784, 1.00000, 0.00000),
(0.52371, 1.00000, 0.00000),
(0.53958, 1.00000, 0.00000),
(0.55545, 1.00000, 0.00000),
(0.57132, 1.00000, 0.00000),
(0.58719, 1.00000, 0.00000),
(0.60306, 1.00000, 0.00000),
(0.61893, 1.00000, 0.00000),
(0.63480, 1.00000, 0.00000),
(0.65067, 1.00000, 0.00000),
(0.66654, 1.00000, 0.00000),
(0.68241, 1.00000, 0.00000),
(0.69828, 1.00000, 0.00000),
(0.71415, 1.00000, 0.00000),
(0.73002, 1.00000, 0.00000),
(0.74589, 1.00000, 0.00000),
(0.76176, 1.00000, 0.00000),
(0.77763, 1.00000, 0.00000),
(0.79350, 1.00000, 0.00000),
(0.80937, 1.00000, 0.00000),
(0.82524, 1.00000, 0.00000),
(0.84111, 1.00000, 0.00000),
(0.85698, 1.00000, 0.00000),
(0.87285, 1.00000, 0.00000),
(0.88872, 1.00000, 0.00000),
(0.90459, 1.00000, 0.00000),
(0.92046, 1.00000, 0.00000),
(0.93633, 1.00000, 0.00000),
(0.95220, 1.00000, 0.00000),
(0.96807, 1.00000, 0.00000),
(0.98394, 1.00000, 0.00000),
(0.99981, 1.00000, 0.00000),
(1.00000, 0.99981, 0.00000),
(1.00000, 0.98394, 0.00000),
(1.00000, 0.96807, 0.00000),
(1.00000, 0.95220, 0.00000),
(1.00000, 0.93633, 0.00000),
(1.00000, 0.92046, 0.00000),
(1.00000, 0.90459, 0.00000),
(1.00000, 0.88872, 0.00000),
(1.00000, 0.87285, 0.00000),
(1.00000, 0.85698, 0.00000),
(1.00000, 0.84111, 0.00000),
(1.00000, 0.82524, 0.00000),
(1.00000, 0.80937, 0.00000),
(1.00000, 0.79350, 0.00000),
(1.00000, 0.77763, 0.00000),
(1.00000, 0.76176, 0.00000),
(1.00000, 0.74589, 0.00000),
(1.00000, 0.73002, 0.00000),
(1.00000, 0.71415, 0.00000),
(1.00000, 0.69828, 0.00000),
(1.00000, 0.68241, 0.00000),
(1.00000, 0.66654, 0.00000),
(1.00000, 0.65067, 0.00000),
(1.00000, 0.63480, 0.00000),
(1.00000, 0.61893, 0.00000),
(1.00000, 0.60306, 0.00000),
(1.00000, 0.58719, 0.00000),
(1.00000, 0.57132, 0.00000),
(1.00000, 0.55545, 0.00000),
(1.00000, 0.53958, 0.00000),
(1.00000, 0.52371, 0.00000),
(1.00000, 0.50784, 0.00000),
(1.00000, 0.49197, 0.00000),
(1.00000, 0.47610, 0.00000),
(1.00000, 0.46023, 0.00000),
(1.00000, 0.44436, 0.00000),
(1.00000, 0.42849, 0.00000),
(1.00000, 0.41262, 0.00000),
(1.00000, 0.39675, 0.00000),
(1.00000, 0.38088, 0.00000),
(1.00000, 0.36501, 0.00000),
(1.00000, 0.34914, 0.00000),
(1.00000, 0.33327, 0.00000),
(1.00000, 0.31740, 0.00000),
(1.00000, 0.30153, 0.00000),
(1.00000, 0.28566, 0.00000),
(1.00000, 0.26979, 0.00000),
(1.00000, 0.25392, 0.00000),
(1.00000, 0.23805, 0.00000),
(1.00000, 0.22218, 0.00000),
(1.00000, 0.20631, 0.00000),
(1.00000, 0.19044, 0.00000),
(1.00000, 0.17457, 0.00000),
(1.00000, 0.15870, 0.00000),
(1.00000, 0.14283, 0.00000),
(1.00000, 0.12696, 0.00000),
(1.00000, 0.11109, 0.00000),
(1.00000, 0.09522, 0.00000),
(1.00000, 0.07935, 0.00000),
(1.00000, 0.06348, 0.00000),
(1.00000, 0.04761, 0.00000),
(1.00000, 0.03174, 0.00000),
(1.00000, 0.01587, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_idl12 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87451, 0.74510, 0.74510),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow1 = (
(0.00000, 0.00000, 0.16471),
(0.02745, 0.00000, 0.18431),
(0.05882, 0.00000, 0.20000),
(0.08627, 0.00000, 0.21961),
(0.11373, 0.00000, 0.23922),
(0.14510, 0.00000, 0.25882),
(0.17647, 0.00000, 0.27843),
(0.20392, 0.00000, 0.29804),
(0.23137, 0.00000, 0.31765),
(0.26275, 0.00000, 0.33725),
(0.29412, 0.00000, 0.35686),
(0.32157, 0.00000, 0.37647),
(0.35294, 0.00000, 0.39608),
(0.38039, 0.00000, 0.41569),
(0.41176, 0.00000, 0.43529),
(0.43922, 0.00000, 0.45490),
(0.47059, 0.00000, 0.47451),
(0.49804, 0.00000, 0.49412),
(0.52941, 0.00000, 0.51373),
(0.55686, 0.00000, 0.53725),
(0.58824, 0.00000, 0.55686),
(0.55686, 0.00000, 0.57647),
(0.52941, 0.00000, 0.59608),
(0.49804, 0.00000, 0.61569),
(0.47059, 0.00000, 0.63922),
(0.43922, 0.00000, 0.65882),
(0.41176, 0.00000, 0.67843),
(0.38039, 0.00000, 0.70196),
(0.35294, 0.00000, 0.72157),
(0.32157, 0.00000, 0.74118),
(0.29412, 0.00000, 0.76471),
(0.26275, 0.00000, 0.78431),
(0.23137, 0.00000, 0.80392),
(0.20392, 0.00000, 0.82745),
(0.17647, 0.00000, 0.84706),
(0.14510, 0.00000, 0.87059),
(0.11373, 0.00000, 0.89020),
(0.08627, 0.00000, 0.91373),
(0.05882, 0.00000, 0.93333),
(0.02745, 0.00000, 0.95686),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00784, 0.95686),
(0.00000, 0.01569, 0.93333),
(0.00000, 0.02353, 0.91373),
(0.00000, 0.03137, 0.89020),
(0.00000, 0.03922, 0.87059),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.06275, 0.83137),
(0.00000, 0.07843, 0.81176),
(0.00000, 0.09804, 0.79216),
(0.00000, 0.11765, 0.77255),
(0.00000, 0.13725, 0.75294),
(0.00000, 0.15686, 0.73333),
(0.00000, 0.17647, 0.71373),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.21569, 0.67451),
(0.00000, 0.23529, 0.65882),
(0.00000, 0.25490, 0.64314),
(0.00000, 0.27059, 0.62745),
(0.00000, 0.28627, 0.61176),
(0.00000, 0.30196, 0.59608),
(0.00000, 0.32157, 0.58039),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.34510, 0.54902),
(0.00000, 0.35686, 0.53333),
(0.00000, 0.36863, 0.51765),
(0.00000, 0.38039, 0.50196),
(0.00000, 0.39216, 0.48627),
(0.00000, 0.40392, 0.47059),
(0.00000, 0.41176, 0.45882),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.43529, 0.43529),
(0.00000, 0.44706, 0.42353),
(0.00000, 0.45882, 0.41176),
(0.00000, 0.46667, 0.40000),
(0.00000, 0.47843, 0.38824),
(0.00000, 0.49020, 0.37647),
(0.00000, 0.49804, 0.36471),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.52157, 0.34118),
(0.00000, 0.52941, 0.32941),
(0.00000, 0.54118, 0.31765),
(0.00000, 0.55294, 0.30588),
(0.00000, 0.56078, 0.29412),
(0.00000, 0.57255, 0.28235),
(0.00000, 0.58431, 0.27059),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.60392, 0.24706),
(0.00000, 0.61176, 0.23529),
(0.00000, 0.62353, 0.22353),
(0.00000, 0.63137, 0.21176),
(0.00000, 0.64314, 0.20000),
(0.00000, 0.65098, 0.18824),
(0.00000, 0.66275, 0.17647),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.68235, 0.15294),
(0.00000, 0.69020, 0.14118),
(0.00000, 0.70196, 0.12941),
(0.00000, 0.70980, 0.11765),
(0.00000, 0.72157, 0.10196),
(0.00000, 0.72941, 0.08627),
(0.00000, 0.74118, 0.07059),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.76078, 0.03922),
(0.00000, 0.76863, 0.02353),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.78824, 0.00000),
(0.00000, 0.79608, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.81569, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.83529, 0.00000),
(0.00000, 0.84314, 0.00000),
(0.00000, 0.85490, 0.00000),
(0.00000, 0.86275, 0.00000),
(0.00000, 0.87059, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.89020, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.90980, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.94510, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.96078, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.98039, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00784, 1.00000, 0.00000),
(0.01569, 0.98824, 0.00000),
(0.02353, 0.98039, 0.00000),
(0.03137, 0.97255, 0.00000),
(0.04314, 0.96078, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.06667, 0.94510, 0.00000),
(0.07843, 0.93725, 0.00000),
(0.09020, 0.92549, 0.00000),
(0.10196, 0.91765, 0.00000),
(0.11373, 0.90980, 0.00000),
(0.12549, 0.89804, 0.00000),
(0.13725, 0.89020, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.16471, 0.87059, 0.00000),
(0.20000, 0.86275, 0.00000),
(0.23529, 0.85490, 0.00000),
(0.26667, 0.84314, 0.00000),
(0.30588, 0.83529, 0.00000),
(0.34118, 0.82353, 0.00000),
(0.37647, 0.81569, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.44706, 0.79608, 0.00000),
(0.48627, 0.78824, 0.00000),
(0.52157, 0.77647, 0.00000),
(0.56078, 0.76863, 0.00000),
(0.59608, 0.77647, 0.00000),
(0.63529, 0.78824, 0.00000),
(0.67059, 0.80000, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.74902, 0.82745, 0.00000),
(0.78431, 0.84314, 0.00000),
(0.82353, 0.85882, 0.00000),
(0.85882, 0.87059, 0.00000),
(0.89804, 0.89020, 0.00000),
(0.93333, 0.90196, 0.00000),
(0.97647, 0.92157, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.95294, 0.00000),
(1.00000, 0.96863, 0.00000),
(1.00000, 0.98824, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.97647, 0.00000),
(1.00000, 0.93725, 0.00000),
(1.00000, 0.89804, 0.00000),
(1.00000, 0.85882, 0.00000),
(1.00000, 0.81961, 0.00000),
(1.00000, 0.78039, 0.00000),
(1.00000, 0.74118, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.66275, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.58431, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.50980, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.43137, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.35294, 0.00000),
(1.00000, 0.31765, 0.00000),
(1.00000, 0.27451, 0.00000),
(1.00000, 0.23922, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.12941, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.06275, 0.00000),
(1.00000, 0.04706, 0.00000),
(1.00000, 0.02353, 0.00000),
(1.00000, 0.00000, 0.00000),
(0.99216, 0.00000, 0.00000),
(0.98431, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.96863, 0.00000, 0.00000),
(0.96078, 0.00000, 0.00000),
(0.95294, 0.00000, 0.00000),
(0.94510, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.92941, 0.00000, 0.00000),
(0.92157, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.90588, 0.00000, 0.00000),
(0.89804, 0.00000, 0.00000),
(0.89020, 0.00000, 0.00000),
(0.88235, 0.00000, 0.00000),
(0.87451, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.85882, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.84314, 0.00000, 0.00000),
(0.83529, 0.00000, 0.00000),
(0.82745, 0.00000, 0.00000),
(0.81961, 0.00000, 0.00000),
(0.81176, 0.00000, 0.00000),
(0.80392, 0.00000, 0.00000),
(0.79608, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78039, 0.00000, 0.00000),
(0.77255, 0.00000, 0.00000),
(0.76471, 0.00000, 0.00000),
(0.75686, 0.00000, 0.00000),
(0.74902, 0.00000, 0.00000),
(0.74118, 0.00000, 0.00000),
(0.73333, 0.00000, 0.00000),
)
cmap_idl14 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.16471, 0.00000),
(0.00000, 0.33333, 0.00000),
(0.00000, 0.49804, 0.00000),
(0.00000, 0.66667, 0.00000),
(0.00000, 0.83137, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.88627, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.77255, 0.00000),
(0.00000, 0.73333, 0.00000),
(0.00000, 0.69412, 0.00000),
(0.00000, 0.65490, 0.00000),
(0.00000, 0.61569, 0.00000),
(0.00000, 0.58039, 0.00000),
(0.00000, 0.54118, 0.00000),
(0.00000, 0.50196, 0.00000),
(0.00000, 0.46275, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.38824, 0.00000),
(0.00000, 0.34902, 0.00000),
(0.00000, 0.30980, 0.00000),
(0.00000, 0.27059, 0.00000),
(0.00000, 0.23137, 0.00000),
(0.00000, 0.19608, 0.00000),
(0.00000, 0.15686, 0.00000),
(0.00000, 0.11765, 0.00000),
(0.00000, 0.07843, 0.00000),
(0.00000, 0.03922, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.06275),
(0.00000, 0.00000, 0.09412),
(0.00000, 0.00000, 0.12549),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.19216),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.25490),
(0.00000, 0.00000, 0.29020),
(0.00000, 0.00000, 0.32157),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.41569),
(0.00000, 0.00000, 0.45098),
(0.00000, 0.00000, 0.48235),
(0.00000, 0.00000, 0.51373),
(0.00000, 0.00000, 0.54510),
(0.00000, 0.00000, 0.58039),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.64314),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.74118),
(0.00000, 0.00000, 0.77255),
(0.00000, 0.00000, 0.80392),
(0.00000, 0.00000, 0.83529),
(0.00000, 0.00000, 0.87059),
(0.00000, 0.00000, 0.90196),
(0.00000, 0.00000, 0.93333),
(0.00000, 0.00000, 0.96471),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.00000),
(0.02745, 0.00000, 0.01961),
(0.05882, 0.00000, 0.03922),
(0.09020, 0.00000, 0.05882),
(0.12157, 0.00000, 0.08235),
(0.15294, 0.00000, 0.10196),
(0.18431, 0.00000, 0.12157),
(0.21569, 0.00000, 0.14510),
(0.24706, 0.00000, 0.16471),
(0.27451, 0.00000, 0.18431),
(0.30588, 0.00000, 0.20784),
(0.33725, 0.00000, 0.22745),
(0.36863, 0.00000, 0.24706),
(0.40000, 0.00000, 0.27059),
(0.43137, 0.00000, 0.29020),
(0.46275, 0.00000, 0.30980),
(0.49412, 0.00000, 0.33333),
(0.52549, 0.00000, 0.34902),
(0.55686, 0.00000, 0.36863),
(0.59216, 0.00000, 0.38431),
(0.62353, 0.00000, 0.40392),
(0.65882, 0.00000, 0.42353),
(0.69020, 0.00000, 0.43922),
(0.72157, 0.00000, 0.45882),
(0.75686, 0.00000, 0.47451),
(0.78824, 0.00000, 0.49412),
(0.82353, 0.00000, 0.51373),
(0.85490, 0.00000, 0.52941),
(0.88627, 0.00000, 0.54902),
(0.92157, 0.00000, 0.56471),
(0.95294, 0.00000, 0.58431),
(0.98824, 0.00000, 0.60392),
(0.00000, 0.00000, 0.00000),
(0.00392, 0.00000, 0.00000),
(0.00784, 0.00000, 0.00000),
(0.01176, 0.00000, 0.00000),
(0.01569, 0.00000, 0.00000),
(0.01961, 0.00000, 0.00000),
(0.02353, 0.00000, 0.00000),
(0.02745, 0.00000, 0.00000),
(0.03137, 0.00000, 0.00000),
(0.03529, 0.00000, 0.00000),
(0.03922, 0.00000, 0.00000),
(0.04314, 0.00000, 0.00000),
(0.04706, 0.00000, 0.00000),
(0.05490, 0.00000, 0.00000),
(0.06275, 0.00000, 0.00000),
(0.07059, 0.00000, 0.00000),
(0.07843, 0.00000, 0.00000),
(0.09020, 0.00000, 0.00000),
(0.09804, 0.00000, 0.00000),
(0.10588, 0.00000, 0.00000),
(0.11373, 0.00000, 0.00000),
(0.12549, 0.00000, 0.00000),
(0.13333, 0.00000, 0.00000),
(0.14118, 0.00000, 0.00000),
(0.14902, 0.00000, 0.00000),
(0.16078, 0.00000, 0.00000),
(0.17255, 0.00000, 0.00000),
(0.18431, 0.00000, 0.00000),
(0.19608, 0.00000, 0.00000),
(0.20784, 0.00000, 0.00000),
(0.21961, 0.00000, 0.00000),
(0.23137, 0.00000, 0.00000),
(0.24706, 0.00000, 0.00000),
(0.25882, 0.00000, 0.00000),
(0.27059, 0.00000, 0.00000),
(0.28235, 0.00000, 0.00000),
(0.29412, 0.00000, 0.00000),
(0.30588, 0.00000, 0.00000),
(0.32157, 0.00000, 0.00392),
(0.33333, 0.00000, 0.00392),
(0.34902, 0.00000, 0.00392),
(0.36471, 0.00000, 0.00392),
(0.38039, 0.00000, 0.00392),
(0.39608, 0.00000, 0.00392),
(0.41176, 0.00000, 0.00392),
(0.42353, 0.00000, 0.00392),
(0.43922, 0.00000, 0.00392),
(0.45490, 0.00000, 0.00392),
(0.47059, 0.00000, 0.00392),
(0.48627, 0.00000, 0.00392),
(0.50196, 0.00392, 0.00392),
(0.51373, 0.00392, 0.00392),
(0.52941, 0.00392, 0.00392),
(0.54510, 0.00392, 0.00392),
(0.56078, 0.00392, 0.00392),
(0.57647, 0.00392, 0.00392),
(0.59216, 0.00392, 0.00392),
(0.60784, 0.00392, 0.00392),
(0.62353, 0.00392, 0.00392),
(0.63922, 0.00392, 0.00392),
(0.65490, 0.00392, 0.00392),
(0.67059, 0.00392, 0.00392),
(0.68627, 0.00392, 0.00392),
(0.69804, 0.00392, 0.00392),
(0.70980, 0.00392, 0.00392),
(0.72549, 0.00392, 0.00392),
(0.73725, 0.00392, 0.00392),
(0.75294, 0.00392, 0.00392),
(0.76471, 0.00392, 0.00392),
(0.77647, 0.00392, 0.00392),
(0.79216, 0.00392, 0.00392),
(0.80392, 0.00392, 0.00392),
(0.81961, 0.00392, 0.00392),
(0.83137, 0.00392, 0.00392),
(0.84706, 0.00392, 0.00784),
(0.85490, 0.00392, 0.00784),
(0.86275, 0.00392, 0.00784),
(0.87451, 0.00392, 0.00784),
(0.88235, 0.00392, 0.00784),
(0.89020, 0.00392, 0.00784),
(0.90196, 0.00392, 0.00784),
(0.90980, 0.00392, 0.00784),
(0.91765, 0.00392, 0.00784),
(0.92941, 0.00392, 0.00784),
(0.93725, 0.00392, 0.00784),
(0.94510, 0.00392, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.96078, 0.00784, 0.00784),
(0.96471, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.97255, 0.00784, 0.00784),
(0.97647, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98431, 0.00784, 0.00784),
(0.98824, 0.00784, 0.00784),
(0.99216, 0.00784, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.01176, 0.00784),
(0.99608, 0.01961, 0.00784),
(0.99608, 0.03137, 0.00784),
(0.99608, 0.03922, 0.00784),
(0.99608, 0.04706, 0.00784),
(0.99608, 0.05882, 0.00784),
(0.99608, 0.06667, 0.00784),
(0.99608, 0.07451, 0.00784),
(0.99608, 0.08627, 0.00784),
(0.99608, 0.09412, 0.00784),
(0.99608, 0.10196, 0.00784),
(0.99608, 0.11373, 0.00784),
(0.99608, 0.12157, 0.00784),
(0.99608, 0.12941, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14902, 0.01176),
(0.99608, 0.15686, 0.01569),
(0.99608, 0.16471, 0.01961),
(1.00000, 0.17647, 0.02745),
(1.00000, 0.18824, 0.03529),
(1.00000, 0.20000, 0.04706),
(1.00000, 0.21176, 0.05490),
(1.00000, 0.22745, 0.06667),
(1.00000, 0.23922, 0.07843),
(1.00000, 0.25098, 0.09020),
(1.00000, 0.26275, 0.10588),
(1.00000, 0.27451, 0.11765),
(1.00000, 0.28627, 0.13333),
(1.00000, 0.30196, 0.15294),
(1.00000, 0.32157, 0.17255),
(1.00000, 0.34118, 0.19216),
(1.00000, 0.36078, 0.21569),
(1.00000, 0.37647, 0.23529),
(1.00000, 0.39216, 0.25490),
(1.00000, 0.40784, 0.27843),
(1.00000, 0.42353, 0.29804),
(1.00000, 0.44314, 0.32157),
(1.00000, 0.46667, 0.34902),
(1.00000, 0.49020, 0.38039),
(1.00000, 0.51373, 0.40784),
(1.00000, 0.54118, 0.43922),
(1.00000, 0.56471, 0.47059),
(1.00000, 0.59216, 0.50196),
(1.00000, 0.61569, 0.53333),
(1.00000, 0.64314, 0.56863),
(1.00000, 0.67059, 0.60000),
(1.00000, 0.69804, 0.63529),
(1.00000, 0.72549, 0.67059),
(1.00000, 0.75686, 0.70588),
(1.00000, 0.78431, 0.74118),
(1.00000, 0.81569, 0.77647),
(1.00000, 0.84314, 0.81176),
(1.00000, 0.87451, 0.85098),
(1.00000, 0.89804, 0.87843),
(1.00000, 0.92157, 0.90980),
(1.00000, 0.94902, 0.93725),
(1.00000, 0.97255, 0.96863),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow2 = (
(0.00000, 0.00000, 0.00000),
(0.03137, 0.00000, 0.03137),
(0.06275, 0.00000, 0.06275),
(0.09412, 0.00000, 0.09412),
(0.12549, 0.00000, 0.12549),
(0.15686, 0.00000, 0.15686),
(0.18824, 0.00000, 0.18824),
(0.21961, 0.00000, 0.21961),
(0.25098, 0.00000, 0.25098),
(0.28235, 0.00000, 0.28235),
(0.31373, 0.00000, 0.31373),
(0.34510, 0.00000, 0.34510),
(0.37647, 0.00000, 0.37647),
(0.40784, 0.00000, 0.40784),
(0.43922, 0.00000, 0.43922),
(0.47059, 0.00000, 0.47059),
(0.50196, 0.00000, 0.50196),
(0.53333, 0.00000, 0.53333),
(0.56471, 0.00000, 0.56471),
(0.59608, 0.00000, 0.59608),
(0.62745, 0.00000, 0.62745),
(0.65882, 0.00000, 0.65882),
(0.69020, 0.00000, 0.69020),
(0.72157, 0.00000, 0.72157),
(0.75294, 0.00000, 0.75294),
(0.78431, 0.00000, 0.78431),
(0.81569, 0.00000, 0.81569),
(0.84706, 0.00000, 0.84706),
(0.87843, 0.00000, 0.87843),
(0.90980, 0.00000, 0.90980),
(0.94118, 0.00000, 0.94118),
(0.97255, 0.00000, 0.97255),
(1.00000, 0.00000, 1.00000),
(0.96863, 0.00000, 1.00000),
(0.93725, 0.00000, 1.00000),
(0.90588, 0.00000, 1.00000),
(0.87451, 0.00000, 1.00000),
(0.84314, 0.00000, 1.00000),
(0.81176, 0.00000, 1.00000),
(0.78039, 0.00000, 1.00000),
(0.74902, 0.00000, 1.00000),
(0.71765, 0.00000, 1.00000),
(0.68627, 0.00000, 1.00000),
(0.65490, 0.00000, 1.00000),
(0.62353, 0.00000, 1.00000),
(0.59216, 0.00000, 1.00000),
(0.56078, 0.00000, 1.00000),
(0.52941, 0.00000, 1.00000),
(0.49804, 0.00000, 1.00000),
(0.46667, 0.00000, 1.00000),
(0.43529, 0.00000, 1.00000),
(0.40392, 0.00000, 1.00000),
(0.37255, 0.00000, 1.00000),
(0.34118, 0.00000, 1.00000),
(0.30980, 0.00000, 1.00000),
(0.27843, 0.00000, 1.00000),
(0.24706, 0.00000, 1.00000),
(0.21569, 0.00000, 1.00000),
(0.18431, 0.00000, 1.00000),
(0.15294, 0.00000, 1.00000),
(0.12157, 0.00000, 1.00000),
(0.09020, 0.00000, 1.00000),
(0.05882, 0.00000, 1.00000),
(0.02745, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25098, 1.00000),
(0.00000, 0.28235, 1.00000),
(0.00000, 0.31373, 1.00000),
(0.00000, 0.34510, 1.00000),
(0.00000, 0.37647, 1.00000),
(0.00000, 0.40784, 1.00000),
(0.00000, 0.43922, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.50196, 1.00000),
(0.00000, 0.53333, 1.00000),
(0.00000, 0.56471, 1.00000),
(0.00000, 0.59608, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.69020, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.75294, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.81569, 1.00000),
(0.00000, 0.84706, 1.00000),
(0.00000, 0.87843, 1.00000),
(0.00000, 0.90980, 1.00000),
(0.00000, 0.94118, 1.00000),
(0.00000, 0.97255, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.96863),
(0.00000, 1.00000, 0.93725),
(0.00000, 1.00000, 0.90588),
(0.00000, 1.00000, 0.87451),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.81176),
(0.00000, 1.00000, 0.78039),
(0.00000, 1.00000, 0.74902),
(0.00000, 1.00000, 0.71765),
(0.00000, 1.00000, 0.68627),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.62353),
(0.00000, 1.00000, 0.59216),
(0.00000, 1.00000, 0.56078),
(0.00000, 1.00000, 0.52941),
(0.00000, 1.00000, 0.49804),
(0.00000, 1.00000, 0.46667),
(0.00000, 1.00000, 0.43529),
(0.00000, 1.00000, 0.40392),
(0.00000, 1.00000, 0.37255),
(0.00000, 1.00000, 0.34118),
(0.00000, 1.00000, 0.30980),
(0.00000, 1.00000, 0.27843),
(0.00000, 1.00000, 0.24706),
(0.00000, 1.00000, 0.21569),
(0.00000, 1.00000, 0.18431),
(0.00000, 1.00000, 0.15294),
(0.00000, 1.00000, 0.12157),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.05882),
(0.00000, 1.00000, 0.02745),
(0.00000, 1.00000, 0.00000),
(0.03137, 1.00000, 0.00000),
(0.06275, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.12549, 1.00000, 0.00000),
(0.15686, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.21961, 1.00000, 0.00000),
(0.25098, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.31373, 1.00000, 0.00000),
(0.34510, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.40784, 1.00000, 0.00000),
(0.43922, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.50196, 1.00000, 0.00000),
(0.53333, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.62745, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.69020, 1.00000, 0.00000),
(0.72157, 1.00000, 0.00000),
(0.75294, 1.00000, 0.00000),
(0.78431, 1.00000, 0.00000),
(0.81569, 1.00000, 0.00000),
(0.84706, 1.00000, 0.00000),
(0.87843, 1.00000, 0.00000),
(0.90980, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.97255, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.98431, 0.00000),
(1.00000, 0.96863, 0.00000),
(1.00000, 0.95294, 0.00000),
(1.00000, 0.93725, 0.00000),
(1.00000, 0.92157, 0.00000),
(1.00000, 0.90588, 0.00000),
(1.00000, 0.89020, 0.00000),
(1.00000, 0.87451, 0.00000),
(1.00000, 0.85882, 0.00000),
(1.00000, 0.84314, 0.00000),
(1.00000, 0.82745, 0.00000),
(1.00000, 0.81176, 0.00000),
(1.00000, 0.79608, 0.00000),
(1.00000, 0.78039, 0.00000),
(1.00000, 0.76471, 0.00000),
(1.00000, 0.74902, 0.00000),
(1.00000, 0.73333, 0.00000),
(1.00000, 0.71765, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.68627, 0.00000),
(1.00000, 0.67059, 0.00000),
(1.00000, 0.65490, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.60784, 0.00000),
(1.00000, 0.59216, 0.00000),
(1.00000, 0.57647, 0.00000),
(1.00000, 0.56078, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.52941, 0.00000),
(1.00000, 0.51373, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.48235, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.45098, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.41961, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.38824, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.35686, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.32549, 0.00000),
(1.00000, 0.30980, 0.00000),
(1.00000, 0.29412, 0.00000),
(1.00000, 0.27843, 0.00000),
(1.00000, 0.26275, 0.00000),
(1.00000, 0.24706, 0.00000),
(1.00000, 0.23137, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.18431, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.15294, 0.00000),
(1.00000, 0.13725, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.07451, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.04314, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.01176, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.03137, 0.03137),
(1.00000, 0.06275, 0.06275),
(1.00000, 0.09412, 0.09412),
(1.00000, 0.12549, 0.12549),
(1.00000, 0.15686, 0.15686),
(1.00000, 0.18824, 0.18824),
(1.00000, 0.21961, 0.21961),
(1.00000, 0.25098, 0.25098),
(1.00000, 0.28235, 0.28235),
(1.00000, 0.31373, 0.31373),
(1.00000, 0.34510, 0.34510),
(1.00000, 0.37647, 0.37647),
(1.00000, 0.40784, 0.40784),
(1.00000, 0.43922, 0.43922),
(1.00000, 0.47059, 0.47059),
(1.00000, 0.50196, 0.50196),
(1.00000, 0.53333, 0.53333),
(1.00000, 0.56471, 0.56471),
(1.00000, 0.59608, 0.59608),
(1.00000, 0.62745, 0.62745),
(1.00000, 0.65882, 0.65882),
(1.00000, 0.69020, 0.69020),
(1.00000, 0.72157, 0.72157),
(1.00000, 0.75294, 0.75294),
(1.00000, 0.78431, 0.78431),
(1.00000, 0.81569, 0.81569),
(1.00000, 0.84706, 0.84706),
(1.00000, 0.87843, 0.87843),
(1.00000, 0.90980, 0.90980),
(1.00000, 0.94118, 0.94118),
(1.00000, 0.97255, 0.97255),
)
cmap_real = (
(0.00784, 0.00392, 0.00000), # noqa
(0.01569, 0.00784, 0.00000),
(0.02353, 0.01176, 0.00000),
(0.03137, 0.01569, 0.00000),
(0.03922, 0.01961, 0.00000),
(0.04706, 0.02353, 0.00000),
(0.05490, 0.02745, 0.00000),
(0.06275, 0.03137, 0.00000),
(0.07059, 0.03529, 0.00000),
(0.07843, 0.03922, 0.00000),
(0.08627, 0.04314, 0.00000),
(0.09412, 0.04706, 0.00000),
(0.10196, 0.05098, 0.00000),
(0.10980, 0.05490, 0.00000),
(0.11765, 0.05882, 0.00000),
(0.12549, 0.06275, 0.00000),
(0.13333, 0.06667, 0.00000),
(0.14118, 0.07059, 0.00000),
(0.14902, 0.07451, 0.00000),
(0.15686, 0.07843, 0.00000),
(0.16471, 0.08235, 0.00000),
(0.17255, 0.08627, 0.00000),
(0.18039, 0.09020, 0.00000),
(0.18824, 0.09412, 0.00000),
(0.19608, 0.09804, 0.00000),
(0.20392, 0.10196, 0.00000),
(0.21176, 0.10588, 0.00000),
(0.21961, 0.10980, 0.00000),
(0.22745, 0.11373, 0.00000),
(0.23529, 0.11765, 0.00000),
(0.24314, 0.12157, 0.00000),
(0.25098, 0.12549, 0.00000),
(0.25882, 0.12941, 0.00000),
(0.26667, 0.13333, 0.00000),
(0.27451, 0.13725, 0.00000),
(0.28235, 0.14118, 0.00000),
(0.29020, 0.14510, 0.00000),
(0.29804, 0.14902, 0.00000),
(0.30588, 0.15294, 0.00000),
(0.31373, 0.15686, 0.00000),
(0.32157, 0.16078, 0.00000),
(0.32941, 0.16471, 0.00000),
(0.33725, 0.16863, 0.00000),
(0.34510, 0.17255, 0.00000),
(0.35294, 0.17647, 0.00000),
(0.36078, 0.18039, 0.00000),
(0.36863, 0.18431, 0.00000),
(0.37647, 0.18824, 0.00000),
(0.38431, 0.19216, 0.00000),
(0.39216, 0.19608, 0.00000),
(0.40000, 0.20000, 0.00000),
(0.40784, 0.20392, 0.00000),
(0.41569, 0.20784, 0.00000),
(0.42353, 0.21176, 0.00000),
(0.43137, 0.21569, 0.00000),
(0.43922, 0.21961, 0.00000),
(0.44706, 0.22353, 0.00000),
(0.45490, 0.22745, 0.00000),
(0.46275, 0.23137, 0.00000),
(0.47059, 0.23529, 0.00000),
(0.47843, 0.23922, 0.00000),
(0.48627, 0.24314, 0.00000),
(0.49412, 0.24706, 0.00000),
(0.50196, 0.25098, 0.00000),
(0.50980, 0.25490, 0.00000),
(0.51765, 0.25882, 0.00000),
(0.52549, 0.26275, 0.00000),
(0.53333, 0.26667, 0.00000),
(0.54118, 0.27059, 0.00000),
(0.54902, 0.27451, 0.00000),
(0.55686, 0.27843, 0.00000),
(0.56471, 0.28235, 0.00000),
(0.57255, 0.28627, 0.00000),
(0.58039, 0.29020, 0.00000),
(0.58824, 0.29412, 0.00000),
(0.59608, 0.29804, 0.00000),
(0.60392, 0.30196, 0.00000),
(0.61176, 0.30588, 0.00000),
(0.61961, 0.30980, 0.00000),
(0.62745, 0.31373, 0.00000),
(0.63529, 0.31765, 0.00000),
(0.64314, 0.32157, 0.00000),
(0.65098, 0.32549, 0.00000),
(0.65882, 0.32941, 0.00000),
(0.66667, 0.33333, 0.00000),
(0.67451, 0.33725, 0.00000),
(0.68235, 0.34118, 0.00000),
(0.69020, 0.34510, 0.00000),
(0.69804, 0.34902, 0.00000),
(0.70588, 0.35294, 0.00000),
(0.71373, 0.35686, 0.00000),
(0.72157, 0.36078, 0.00000),
(0.72941, 0.36471, 0.00000),
(0.73725, 0.36863, 0.00000),
(0.74510, 0.37255, 0.00000),
(0.75294, 0.37647, 0.00000),
(0.76078, 0.38039, 0.00000),
(0.76863, 0.38431, 0.00000),
(0.77647, 0.38824, 0.00000),
(0.78431, 0.39216, 0.00000),
(0.79216, 0.39608, 0.00000),
(0.80000, 0.40000, 0.00000),
(0.80784, 0.40392, 0.00000),
(0.81569, 0.40784, 0.00000),
(0.82353, 0.41176, 0.00000),
(0.83137, 0.41569, 0.00000),
(0.83922, 0.41961, 0.00000),
(0.84706, 0.42353, 0.00000),
(0.85490, 0.42745, 0.00000),
(0.86275, 0.43137, 0.00000),
(0.87059, 0.43529, 0.00000),
(0.87843, 0.43922, 0.00000),
(0.88627, 0.44314, 0.00000),
(0.89412, 0.44706, 0.00000),
(0.90196, 0.45098, 0.00000),
(0.90980, 0.45490, 0.00000),
(0.91765, 0.45882, 0.00000),
(0.92549, 0.46275, 0.00000),
(0.93333, 0.46667, 0.00000),
(0.94118, 0.47059, 0.00000),
(0.94902, 0.47451, 0.00000),
(0.95686, 0.47843, 0.00000),
(0.96471, 0.48235, 0.00000),
(0.97255, 0.48627, 0.00000),
(0.98039, 0.49020, 0.00000),
(0.98824, 0.49412, 0.00000),
(0.99608, 0.49804, 0.00000),
(1.00000, 0.50196, 0.00000),
(1.00000, 0.50588, 0.00784),
(1.00000, 0.50980, 0.01569),
(1.00000, 0.51373, 0.02353),
(1.00000, 0.51765, 0.03137),
(1.00000, 0.52157, 0.03922),
(1.00000, 0.52549, 0.04706),
(1.00000, 0.52941, 0.05490),
(1.00000, 0.53333, 0.06275),
(1.00000, 0.53725, 0.07059),
(1.00000, 0.54118, 0.07843),
(1.00000, 0.54510, 0.08627),
(1.00000, 0.54902, 0.09412),
(1.00000, 0.55294, 0.10196),
(1.00000, 0.55686, 0.10980),
(1.00000, 0.56078, 0.11765),
(1.00000, 0.56471, 0.12549),
(1.00000, 0.56863, 0.13333),
(1.00000, 0.57255, 0.14118),
(1.00000, 0.57647, 0.14902),
(1.00000, 0.58039, 0.15686),
(1.00000, 0.58431, 0.16471),
(1.00000, 0.58824, 0.17255),
(1.00000, 0.59216, 0.18039),
(1.00000, 0.59608, 0.18824),
(1.00000, 0.60000, 0.19608),
(1.00000, 0.60392, 0.20392),
(1.00000, 0.60784, 0.21176),
(1.00000, 0.61176, 0.21961),
(1.00000, 0.61569, 0.22745),
(1.00000, 0.61961, 0.23529),
(1.00000, 0.62353, 0.24314),
(1.00000, 0.62745, 0.25098),
(1.00000, 0.63137, 0.25882),
(1.00000, 0.63529, 0.26667),
(1.00000, 0.63922, 0.27451),
(1.00000, 0.64314, 0.28235),
(1.00000, 0.64706, 0.29020),
(1.00000, 0.65098, 0.29804),
(1.00000, 0.65490, 0.30588),
(1.00000, 0.65882, 0.31373),
(1.00000, 0.66275, 0.32157),
(1.00000, 0.66667, 0.32941),
(1.00000, 0.67059, 0.33725),
(1.00000, 0.67451, 0.34510),
(1.00000, 0.67843, 0.35294),
(1.00000, 0.68235, 0.36078),
(1.00000, 0.68627, 0.36863),
(1.00000, 0.69020, 0.37647),
(1.00000, 0.69412, 0.38431),
(1.00000, 0.69804, 0.39216),
(1.00000, 0.70196, 0.40000),
(1.00000, 0.70588, 0.40784),
(1.00000, 0.70980, 0.41569),
(1.00000, 0.71373, 0.42353),
(1.00000, 0.71765, 0.43137),
(1.00000, 0.72157, 0.43922),
(1.00000, 0.72549, 0.44706),
(1.00000, 0.72941, 0.45490),
(1.00000, 0.73333, 0.46275),
(1.00000, 0.73725, 0.47059),
(1.00000, 0.74118, 0.47843),
(1.00000, 0.74510, 0.48627),
(1.00000, 0.74902, 0.49412),
(1.00000, 0.75294, 0.50196),
(1.00000, 0.75686, 0.50980),
(1.00000, 0.76078, 0.51765),
(1.00000, 0.76471, 0.52549),
(1.00000, 0.76863, 0.53333),
(1.00000, 0.77255, 0.54118),
(1.00000, 0.77647, 0.54902),
(1.00000, 0.78039, 0.55686),
(1.00000, 0.78431, 0.56471),
(1.00000, 0.78824, 0.57255),
(1.00000, 0.79216, 0.58039),
(1.00000, 0.79608, 0.58824),
(1.00000, 0.80000, 0.59608),
(1.00000, 0.80392, 0.60392),
(1.00000, 0.80784, 0.61176),
(1.00000, 0.81176, 0.61961),
(1.00000, 0.81569, 0.62745),
(1.00000, 0.81961, 0.63529),
(1.00000, 0.82353, 0.64314),
(1.00000, 0.82745, 0.65098),
(1.00000, 0.83137, 0.65882),
(1.00000, 0.83529, 0.66667),
(1.00000, 0.83922, 0.67451),
(1.00000, 0.84314, 0.68235),
(1.00000, 0.84706, 0.69020),
(1.00000, 0.85098, 0.69804),
(1.00000, 0.85490, 0.70588),
(1.00000, 0.85882, 0.71373),
(1.00000, 0.86275, 0.72157),
(1.00000, 0.86667, 0.72941),
(1.00000, 0.87059, 0.73725),
(1.00000, 0.87451, 0.74510),
(1.00000, 0.87843, 0.75294),
(1.00000, 0.88235, 0.76078),
(1.00000, 0.88627, 0.76863),
(1.00000, 0.89020, 0.77647),
(1.00000, 0.89412, 0.78431),
(1.00000, 0.89804, 0.79216),
(1.00000, 0.90196, 0.80000),
(1.00000, 0.90588, 0.80784),
(1.00000, 0.90980, 0.81569),
(1.00000, 0.91373, 0.82353),
(1.00000, 0.91765, 0.83137),
(1.00000, 0.92157, 0.83922),
(1.00000, 0.92549, 0.84706),
(1.00000, 0.92941, 0.85490),
(1.00000, 0.93333, 0.86275),
(1.00000, 0.93725, 0.87059),
(1.00000, 0.94118, 0.87843),
(1.00000, 0.94510, 0.88627),
(1.00000, 0.94902, 0.89412),
(1.00000, 0.95294, 0.90196),
(1.00000, 0.95686, 0.90980),
(1.00000, 0.96078, 0.91765),
(1.00000, 0.96471, 0.92549),
(1.00000, 0.96863, 0.93333),
(1.00000, 0.97255, 0.94118),
(1.00000, 0.97647, 0.94902),
(1.00000, 0.98039, 0.95686),
(1.00000, 0.98431, 0.96471),
(1.00000, 0.98824, 0.97255),
(1.00000, 0.99216, 0.98039),
(1.00000, 0.99608, 0.98824),
(1.00000, 1.00000, 0.99608),
(1.00000, 1.00000, 1.00000),
)
cmap_idl15 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.07059, 0.00392, 0.00392),
(0.14118, 0.00784, 0.01176),
(0.21176, 0.01176, 0.01961),
(0.28235, 0.01569, 0.02745),
(0.35294, 0.01961, 0.03529),
(0.42353, 0.02353, 0.04314),
(0.49804, 0.02745, 0.05098),
(0.56863, 0.03137, 0.05882),
(0.63922, 0.03529, 0.06667),
(0.70980, 0.03922, 0.07451),
(0.78039, 0.04314, 0.08235),
(0.85098, 0.04706, 0.09020),
(0.92157, 0.05098, 0.09804),
(0.99608, 0.05490, 0.10588),
(0.97647, 0.05882, 0.11373),
(0.95686, 0.06275, 0.12157),
(0.93725, 0.06667, 0.12941),
(0.91765, 0.07059, 0.13725),
(0.89804, 0.07451, 0.14510),
(0.87451, 0.07843, 0.15294),
(0.85490, 0.08235, 0.16078),
(0.83529, 0.08627, 0.16863),
(0.81569, 0.09020, 0.17647),
(0.79608, 0.09412, 0.18431),
(0.77255, 0.09804, 0.19216),
(0.75294, 0.10196, 0.20000),
(0.73333, 0.10588, 0.20784),
(0.71373, 0.10980, 0.21569),
(0.69412, 0.11373, 0.22353),
(0.67451, 0.11765, 0.23137),
(0.65098, 0.12157, 0.23922),
(0.63137, 0.12549, 0.24706),
(0.61176, 0.12941, 0.25490),
(0.59216, 0.13333, 0.26275),
(0.57255, 0.13725, 0.27059),
(0.54902, 0.14118, 0.27843),
(0.52941, 0.14510, 0.28627),
(0.50980, 0.14902, 0.29412),
(0.49020, 0.15294, 0.30196),
(0.47059, 0.15686, 0.30980),
(0.45098, 0.16078, 0.31765),
(0.42745, 0.16471, 0.32549),
(0.40784, 0.16863, 0.33333),
(0.38824, 0.17255, 0.34118),
(0.36863, 0.17647, 0.34902),
(0.34902, 0.18039, 0.35686),
(0.32549, 0.18431, 0.36471),
(0.30588, 0.18824, 0.37255),
(0.28627, 0.19216, 0.38039),
(0.26667, 0.19608, 0.38824),
(0.24706, 0.20000, 0.39608),
(0.22745, 0.20392, 0.40392),
(0.20392, 0.20784, 0.41176),
(0.18431, 0.21176, 0.41961),
(0.16471, 0.21569, 0.42745),
(0.14510, 0.21961, 0.43529),
(0.12549, 0.22353, 0.44314),
(0.10196, 0.22745, 0.45098),
(0.08235, 0.23137, 0.45882),
(0.06275, 0.23529, 0.46667),
(0.04314, 0.23922, 0.47451),
(0.02353, 0.24314, 0.48235),
(0.00000, 0.24706, 0.49020),
(0.25098, 0.25098, 0.49804),
(0.25490, 0.25490, 0.50588),
(0.25882, 0.25882, 0.51373),
(0.26275, 0.26275, 0.52157),
(0.26667, 0.26667, 0.52941),
(0.27059, 0.27059, 0.53725),
(0.27451, 0.27451, 0.54510),
(0.27843, 0.27843, 0.55294),
(0.28235, 0.28235, 0.56078),
(0.28627, 0.28627, 0.56863),
(0.29020, 0.29020, 0.57647),
(0.29412, 0.29412, 0.58431),
(0.29804, 0.29804, 0.59216),
(0.30196, 0.30196, 0.60000),
(0.30588, 0.30588, 0.60784),
(0.30980, 0.30980, 0.61569),
(0.31373, 0.31373, 0.62353),
(0.31765, 0.31765, 0.63137),
(0.32157, 0.32157, 0.63922),
(0.32549, 0.32549, 0.64706),
(0.32941, 0.32941, 0.65490),
(0.33333, 0.33333, 0.66275),
(0.33725, 0.33725, 0.67059),
(0.34118, 0.34118, 0.67843),
(0.34510, 0.34510, 0.68627),
(0.34902, 0.34902, 0.69412),
(0.35294, 0.35294, 0.70196),
(0.35686, 0.35686, 0.70980),
(0.36078, 0.36078, 0.71765),
(0.36471, 0.36471, 0.72549),
(0.36863, 0.36863, 0.73333),
(0.37255, 0.37255, 0.74118),
(0.37647, 0.37647, 0.74902),
(0.38039, 0.38039, 0.75686),
(0.38431, 0.38431, 0.76471),
(0.38824, 0.38824, 0.77255),
(0.39216, 0.39216, 0.78039),
(0.39608, 0.39608, 0.78824),
(0.40000, 0.40000, 0.79608),
(0.40392, 0.40392, 0.80392),
(0.40784, 0.40784, 0.81176),
(0.41176, 0.41176, 0.81961),
(0.41569, 0.41569, 0.82745),
(0.41961, 0.41961, 0.83529),
(0.42353, 0.42353, 0.84314),
(0.42745, 0.42745, 0.85098),
(0.43137, 0.43137, 0.85882),
(0.43529, 0.43529, 0.86667),
(0.43922, 0.43922, 0.87451),
(0.44314, 0.44314, 0.88235),
(0.44706, 0.44706, 0.89020),
(0.45098, 0.45098, 0.89804),
(0.45490, 0.45490, 0.90588),
(0.45882, 0.45882, 0.91373),
(0.46275, 0.46275, 0.92157),
(0.46667, 0.46667, 0.92941),
(0.47059, 0.47059, 0.93725),
(0.47451, 0.47451, 0.94510),
(0.47843, 0.47843, 0.95294),
(0.48235, 0.48235, 0.96078),
(0.48627, 0.48627, 0.96863),
(0.49020, 0.49020, 0.97647),
(0.49412, 0.49412, 0.98431),
(0.49804, 0.49804, 0.99216),
(0.50196, 0.50196, 1.00000),
(0.50588, 0.50588, 0.98431),
(0.50980, 0.50980, 0.96863),
(0.51373, 0.51373, 0.95294),
(0.51765, 0.51765, 0.93333),
(0.52157, 0.52157, 0.91765),
(0.52549, 0.52549, 0.90196),
(0.52941, 0.52941, 0.88627),
(0.53333, 0.53333, 0.86667),
(0.53725, 0.53725, 0.85098),
(0.54118, 0.54118, 0.83529),
(0.54510, 0.54510, 0.81961),
(0.54902, 0.54902, 0.80000),
(0.55294, 0.55294, 0.78431),
(0.55686, 0.55686, 0.76863),
(0.56078, 0.56078, 0.75294),
(0.56471, 0.56471, 0.73333),
(0.56863, 0.56863, 0.71765),
(0.57255, 0.57255, 0.70196),
(0.57647, 0.57647, 0.68627),
(0.58039, 0.58039, 0.66667),
(0.58431, 0.58431, 0.65098),
(0.58824, 0.58824, 0.63529),
(0.59216, 0.59216, 0.61961),
(0.59608, 0.59608, 0.60000),
(0.60000, 0.60000, 0.58431),
(0.60392, 0.60392, 0.56863),
(0.60784, 0.60784, 0.55294),
(0.61176, 0.61176, 0.53333),
(0.61569, 0.61569, 0.51765),
(0.61961, 0.61961, 0.50196),
(0.62353, 0.62353, 0.48627),
(0.62745, 0.62745, 0.46667),
(0.63137, 0.63137, 0.45098),
(0.63529, 0.63529, 0.43529),
(0.63922, 0.63922, 0.41961),
(0.64314, 0.64314, 0.40000),
(0.64706, 0.64706, 0.38431),
(0.65098, 0.65098, 0.36863),
(0.65490, 0.65490, 0.35294),
(0.65882, 0.65882, 0.33333),
(0.66275, 0.66275, 0.31765),
(0.66667, 0.66667, 0.30196),
(0.67059, 0.67059, 0.28627),
(0.67451, 0.67451, 0.26667),
(0.67843, 0.67843, 0.25098),
(0.68235, 0.68235, 0.23529),
(0.68627, 0.68627, 0.21961),
(0.69020, 0.69020, 0.20000),
(0.69412, 0.69412, 0.18431),
(0.69804, 0.69804, 0.16863),
(0.70196, 0.70196, 0.15294),
(0.70588, 0.70588, 0.13333),
(0.70980, 0.70980, 0.11765),
(0.71373, 0.71373, 0.10196),
(0.71765, 0.71765, 0.08627),
(0.72157, 0.72157, 0.06667),
(0.72549, 0.72549, 0.05098),
(0.72941, 0.72941, 0.03529),
(0.73333, 0.73333, 0.01961),
(0.73725, 0.73725, 0.00000),
(0.74118, 0.74118, 0.01176),
(0.74510, 0.74510, 0.02745),
(0.74902, 0.74902, 0.04314),
(0.75294, 0.75294, 0.05882),
(0.75686, 0.75686, 0.07451),
(0.76078, 0.76078, 0.08627),
(0.76471, 0.76471, 0.10196),
(0.76863, 0.76863, 0.11765),
(0.77255, 0.77255, 0.13333),
(0.77647, 0.77647, 0.14902),
(0.78039, 0.78039, 0.16078),
(0.78431, 0.78431, 0.17647),
(0.78824, 0.78824, 0.19216),
(0.79216, 0.79216, 0.20784),
(0.79608, 0.79608, 0.22353),
(0.80000, 0.80000, 0.23529),
(0.80392, 0.80392, 0.25098),
(0.80784, 0.80784, 0.26667),
(0.81176, 0.81176, 0.28235),
(0.81569, 0.81569, 0.29804),
(0.81961, 0.81961, 0.30980),
(0.82353, 0.82353, 0.32549),
(0.82745, 0.82745, 0.34118),
(0.83137, 0.83137, 0.35686),
(0.83529, 0.83529, 0.37255),
(0.83922, 0.83922, 0.38431),
(0.84314, 0.84314, 0.40000),
(0.84706, 0.84706, 0.41569),
(0.85098, 0.85098, 0.43137),
(0.85490, 0.85490, 0.44706),
(0.85882, 0.85882, 0.45882),
(0.86275, 0.86275, 0.47451),
(0.86667, 0.86667, 0.49020),
(0.87059, 0.87059, 0.50588),
(0.87451, 0.87451, 0.52157),
(0.87843, 0.87843, 0.53725),
(0.88235, 0.88235, 0.54902),
(0.88627, 0.88627, 0.56471),
(0.89020, 0.89020, 0.58039),
(0.89412, 0.89412, 0.59608),
(0.89804, 0.89804, 0.61176),
(0.90196, 0.90196, 0.62353),
(0.90588, 0.90588, 0.63922),
(0.90980, 0.90980, 0.65490),
(0.91373, 0.91373, 0.67059),
(0.91765, 0.91765, 0.68627),
(0.92157, 0.92157, 0.69804),
(0.92549, 0.92549, 0.71373),
(0.92941, 0.92941, 0.72941),
(0.93333, 0.93333, 0.74510),
(0.93725, 0.93725, 0.76078),
(0.94118, 0.94118, 0.77255),
(0.94510, 0.94510, 0.78824),
(0.94902, 0.94902, 0.80392),
(0.95294, 0.95294, 0.81961),
(0.95686, 0.95686, 0.83529),
(0.96078, 0.96078, 0.84706),
(0.96471, 0.96471, 0.86275),
(0.96863, 0.96863, 0.87843),
(0.97255, 0.97255, 0.89412),
(0.97647, 0.97647, 0.90980),
(0.98039, 0.98039, 0.92157),
(0.98431, 0.98431, 0.93725),
(0.98824, 0.98824, 0.95294),
(0.99216, 0.99216, 0.96863),
(0.99608, 0.99608, 0.98431),
(1.00000, 1.00000, 1.00000),
)
# 256-entry colormap: (r, g, b) triples with each component in [0.0, 1.0].
# The data ramps black -> blue -> cyan -> green -> yellow -> red -> magenta -> white.
# NOTE(review): table appears machine-generated; do not hand-edit individual entries.
cmap_rainbow3 = (
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.00000),
    (0.00000, 0.00000, 0.03922),
    (0.00000, 0.00000, 0.07843),
    (0.00000, 0.00000, 0.11765),
    (0.00000, 0.00000, 0.15686),
    (0.00000, 0.00000, 0.20000),
    (0.00000, 0.00000, 0.23922),
    (0.00000, 0.00000, 0.27843),
    (0.00000, 0.00000, 0.31765),
    (0.00000, 0.00000, 0.35686),
    (0.00000, 0.00000, 0.40000),
    (0.00000, 0.00000, 0.43922),
    (0.00000, 0.00000, 0.47843),
    (0.00000, 0.00000, 0.51765),
    (0.00000, 0.00000, 0.55686),
    (0.00000, 0.00000, 0.60000),
    (0.00000, 0.00000, 0.63922),
    (0.00000, 0.00000, 0.67843),
    (0.00000, 0.00000, 0.71765),
    (0.00000, 0.00000, 0.75686),
    (0.00000, 0.00000, 0.80000),
    (0.00000, 0.00000, 0.83922),
    (0.00000, 0.00000, 0.87843),
    (0.00000, 0.00000, 0.91765),
    (0.00000, 0.00000, 0.95686),
    (0.00000, 0.00000, 1.00000),
    (0.00000, 0.03137, 1.00000),
    (0.00000, 0.06275, 1.00000),
    (0.00000, 0.09412, 1.00000),
    (0.00000, 0.12549, 1.00000),
    (0.00000, 0.15686, 1.00000),
    (0.00000, 0.18824, 1.00000),
    (0.00000, 0.21961, 1.00000),
    (0.00000, 0.25490, 1.00000),
    (0.00000, 0.28627, 1.00000),
    (0.00000, 0.31765, 1.00000),
    (0.00000, 0.34902, 1.00000),
    (0.00000, 0.38039, 1.00000),
    (0.00000, 0.41176, 1.00000),
    (0.00000, 0.44314, 1.00000),
    (0.00000, 0.47843, 1.00000),
    (0.00000, 0.49804, 1.00000),
    (0.00000, 0.51765, 1.00000),
    (0.00000, 0.53725, 1.00000),
    (0.00000, 0.55686, 1.00000),
    (0.00000, 0.58039, 1.00000),
    (0.00000, 0.60000, 1.00000),
    (0.00000, 0.61961, 1.00000),
    (0.00000, 0.63922, 1.00000),
    (0.00000, 0.65882, 1.00000),
    (0.00000, 0.68235, 1.00000),
    (0.00000, 0.70196, 1.00000),
    (0.00000, 0.72157, 1.00000),
    (0.00000, 0.74118, 1.00000),
    (0.00000, 0.76078, 1.00000),
    (0.00000, 0.78431, 1.00000),
    (0.00000, 0.79608, 1.00000),
    (0.00000, 0.81176, 1.00000),
    (0.00000, 0.82745, 1.00000),
    (0.00000, 0.83922, 1.00000),
    (0.00000, 0.85490, 1.00000),
    (0.00000, 0.87059, 1.00000),
    (0.00000, 0.88235, 1.00000),
    (0.00000, 0.89804, 1.00000),
    (0.00000, 0.91373, 1.00000),
    (0.00000, 0.92549, 1.00000),
    (0.00000, 0.94118, 1.00000),
    (0.00000, 0.95686, 1.00000),
    (0.00000, 0.96863, 1.00000),
    (0.00000, 0.98431, 1.00000),
    (0.00000, 1.00000, 1.00000),
    (0.00000, 1.00000, 0.98039),
    (0.00000, 1.00000, 0.96078),
    (0.00000, 1.00000, 0.94118),
    (0.00000, 1.00000, 0.92157),
    (0.00000, 1.00000, 0.90196),
    (0.00000, 1.00000, 0.88235),
    (0.00000, 1.00000, 0.86275),
    (0.00000, 1.00000, 0.84314),
    (0.00000, 1.00000, 0.82353),
    (0.00000, 1.00000, 0.80392),
    (0.00000, 1.00000, 0.78431),
    (0.00000, 1.00000, 0.76471),
    (0.00000, 1.00000, 0.74510),
    (0.00000, 1.00000, 0.72549),
    (0.00000, 1.00000, 0.70588),
    (0.00000, 1.00000, 0.65882),
    (0.00000, 1.00000, 0.61176),
    (0.00000, 1.00000, 0.56471),
    (0.00000, 1.00000, 0.51765),
    (0.00000, 1.00000, 0.47059),
    (0.00000, 1.00000, 0.42353),
    (0.00000, 1.00000, 0.37647),
    (0.00000, 1.00000, 0.32549),
    (0.00000, 1.00000, 0.27843),
    (0.00000, 1.00000, 0.23137),
    (0.00000, 1.00000, 0.18431),
    (0.00000, 1.00000, 0.13725),
    (0.00000, 1.00000, 0.09020),
    (0.00000, 1.00000, 0.04314),
    (0.00000, 1.00000, 0.00000),
    (0.04706, 1.00000, 0.00000),
    (0.09412, 1.00000, 0.00000),
    (0.14118, 1.00000, 0.00000),
    (0.18824, 1.00000, 0.00000),
    (0.23529, 1.00000, 0.00000),
    (0.28235, 1.00000, 0.00000),
    (0.32941, 1.00000, 0.00000),
    (0.37647, 1.00000, 0.00000),
    (0.42353, 1.00000, 0.00000),
    (0.47059, 1.00000, 0.00000),
    (0.51765, 1.00000, 0.00000),
    (0.56471, 1.00000, 0.00000),
    (0.61176, 1.00000, 0.00000),
    (0.65882, 1.00000, 0.00000),
    (0.70588, 1.00000, 0.00000),
    (0.72549, 1.00000, 0.00000),
    (0.74510, 1.00000, 0.00000),
    (0.76471, 1.00000, 0.00000),
    (0.78431, 1.00000, 0.00000),
    (0.80392, 1.00000, 0.00000),
    (0.82353, 1.00000, 0.00000),
    (0.84314, 1.00000, 0.00000),
    (0.86275, 1.00000, 0.00000),
    (0.88235, 1.00000, 0.00000),
    (0.90196, 1.00000, 0.00000),
    (0.92157, 1.00000, 0.00000),
    (0.94118, 1.00000, 0.00000),
    (0.96078, 1.00000, 0.00000),
    (0.98039, 1.00000, 0.00000),
    (1.00000, 1.00000, 0.00000),
    (0.99608, 0.98039, 0.00000),
    (0.99608, 0.96078, 0.00000),
    (0.99608, 0.94510, 0.00000),
    (0.99608, 0.92549, 0.00000),
    (0.99608, 0.90588, 0.00000),
    (0.99216, 0.89020, 0.00000),
    (0.99216, 0.87059, 0.00000),
    (0.99216, 0.85098, 0.00000),
    (0.99216, 0.83529, 0.00000),
    (0.99216, 0.81569, 0.00000),
    (0.98824, 0.79608, 0.00000),
    (0.98824, 0.78039, 0.00000),
    (0.98824, 0.76078, 0.00000),
    (0.98824, 0.74118, 0.00000),
    (0.98824, 0.72549, 0.00000),
    (0.98824, 0.70588, 0.00000),
    (0.98824, 0.69020, 0.00000),
    (0.98824, 0.67451, 0.00000),
    (0.98824, 0.65490, 0.00000),
    (0.99216, 0.63922, 0.00000),
    (0.99216, 0.62353, 0.00000),
    (0.99216, 0.60392, 0.00000),
    (0.99216, 0.58824, 0.00000),
    (0.99216, 0.57255, 0.00000),
    (0.99608, 0.55294, 0.00000),
    (0.99608, 0.53725, 0.00000),
    (0.99608, 0.52157, 0.00000),
    (0.99608, 0.50196, 0.00000),
    (0.99608, 0.48627, 0.00000),
    (1.00000, 0.47059, 0.00000),
    (1.00000, 0.43922, 0.00000),
    (1.00000, 0.40784, 0.00000),
    (1.00000, 0.37647, 0.00000),
    (1.00000, 0.34510, 0.00000),
    (1.00000, 0.31373, 0.00000),
    (1.00000, 0.28235, 0.00000),
    (1.00000, 0.25098, 0.00000),
    (1.00000, 0.21569, 0.00000),
    (1.00000, 0.18431, 0.00000),
    (1.00000, 0.15294, 0.00000),
    (1.00000, 0.12157, 0.00000),
    (1.00000, 0.09020, 0.00000),
    (1.00000, 0.05882, 0.00000),
    (1.00000, 0.02745, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.00000),
    (1.00000, 0.00000, 0.04706),
    (1.00000, 0.00000, 0.09412),
    (1.00000, 0.00000, 0.14118),
    (1.00000, 0.00000, 0.18824),
    (1.00000, 0.00000, 0.23529),
    (1.00000, 0.00000, 0.28235),
    (1.00000, 0.00000, 0.32941),
    (1.00000, 0.00000, 0.37647),
    (1.00000, 0.00000, 0.42353),
    (1.00000, 0.00000, 0.47059),
    (1.00000, 0.00000, 0.51765),
    (1.00000, 0.00000, 0.56471),
    (1.00000, 0.00000, 0.61176),
    (1.00000, 0.00000, 0.65882),
    (1.00000, 0.00000, 0.70588),
    (1.00000, 0.00000, 0.72549),
    (1.00000, 0.00000, 0.74902),
    (1.00000, 0.00000, 0.77255),
    (1.00000, 0.00000, 0.79608),
    (1.00000, 0.00000, 0.81569),
    (1.00000, 0.00000, 0.83922),
    (1.00000, 0.00000, 0.86275),
    (1.00000, 0.00000, 0.88627),
    (1.00000, 0.00000, 0.90588),
    (1.00000, 0.00000, 0.92941),
    (1.00000, 0.00000, 0.95294),
    (1.00000, 0.00000, 0.97647),
    (1.00000, 0.00000, 1.00000),
    (1.00000, 0.03529, 1.00000),
    (1.00000, 0.07059, 1.00000),
    (1.00000, 0.10588, 1.00000),
    (1.00000, 0.14118, 1.00000),
    (1.00000, 0.18039, 1.00000),
    (1.00000, 0.21569, 1.00000),
    (1.00000, 0.25098, 1.00000),
    (1.00000, 0.28627, 1.00000),
    (1.00000, 0.32549, 1.00000),
    (1.00000, 0.36078, 1.00000),
    (1.00000, 0.39608, 1.00000),
    (1.00000, 0.43137, 1.00000),
    (1.00000, 0.47059, 1.00000),
    (1.00000, 0.48627, 1.00000),
    (1.00000, 0.50588, 1.00000),
    (1.00000, 0.52157, 1.00000),
    (1.00000, 0.54118, 1.00000),
    (1.00000, 0.56078, 1.00000),
    (1.00000, 0.57647, 1.00000),
    (1.00000, 0.59608, 1.00000),
    (1.00000, 0.61176, 1.00000),
    (1.00000, 0.63137, 1.00000),
    (1.00000, 0.65098, 1.00000),
    (1.00000, 0.66667, 1.00000),
    (1.00000, 0.68627, 1.00000),
    (1.00000, 0.70588, 1.00000),
    (1.00000, 0.74510, 1.00000),
    (1.00000, 0.78824, 1.00000),
    (1.00000, 0.83137, 1.00000),
    (1.00000, 0.87059, 1.00000),
    (1.00000, 0.91373, 1.00000),
    (1.00000, 0.95686, 1.00000),
    (1.00000, 1.00000, 1.00000),
)
# 256-entry colormap: (r, g, b) triples with each component in [0.0, 1.0].
# Components are 255ths (i/255) written out at full float precision.
# Ramp: black -> green -> cyan-ish teal -> magenta -> red -> orange/yellow.
# NOTE(review): name suggests the SAOImage DS9 "a" colormap — confirm against DS9.
# Machine-generated table; do not hand-edit individual entries.
cmap_ds9_a = (
    (0.0, 0.0, 0.0), # noqa
    (0.0, 0.011764705882352941, 0.0),
    (0.0, 0.027450980392156862, 0.0),
    (0.0, 0.043137254901960784, 0.0),
    (0.0, 0.058823529411764705, 0.0),
    (0.0, 0.078431372549019607, 0.0),
    (0.0, 0.090196078431372548, 0.0),
    (0.0, 0.10980392156862745, 0.0),
    (0.0, 0.12156862745098039, 0.0),
    (0.0, 0.14117647058823529, 0.0),
    (0.0, 0.15294117647058825, 0.0),
    (0.0, 0.17254901960784313, 0.0),
    (0.0, 0.18431372549019609, 0.0),
    (0.0, 0.20392156862745098, 0.0),
    (0.0, 0.21568627450980393, 0.0),
    (0.0, 0.23529411764705882, 0.0),
    (0.0, 0.24705882352941178, 0.0),
    (0.0, 0.26666666666666666, 0.0),
    (0.0, 0.27843137254901962, 0.0),
    (0.0, 0.29411764705882354, 0.0),
    (0.0, 0.30980392156862746, 0.0),
    (0.0, 0.32549019607843138, 0.0),
    (0.0, 0.3411764705882353, 0.0),
    (0.0, 0.35686274509803922, 0.0),
    (0.0, 0.37254901960784315, 0.0),
    (0.0, 0.38823529411764707, 0.0),
    (0.0, 0.40392156862745099, 0.0),
    (0.0, 0.41960784313725491, 0.0),
    (0.0, 0.43529411764705883, 0.0),
    (0.0, 0.45098039215686275, 0.0),
    (0.0, 0.47058823529411764, 0.0),
    (0.0, 0.4823529411764706, 0.0),
    (0.0, 0.49803921568627452, 0.0),
    (0.0, 0.51372549019607838, 0.0078431372549019607),
    (0.0, 0.52941176470588236, 0.019607843137254902),
    (0.0, 0.54509803921568623, 0.027450980392156862),
    (0.0, 0.5607843137254902, 0.039215686274509803),
    (0.0, 0.57647058823529407, 0.050980392156862744),
    (0.0, 0.58823529411764708, 0.058823529411764705),
    (0.0, 0.60784313725490191, 0.070588235294117646),
    (0.0, 0.61960784313725492, 0.078431372549019607),
    (0.0, 0.63921568627450975, 0.090196078431372548),
    (0.0, 0.65098039215686276, 0.10196078431372549),
    (0.0, 0.6705882352941176, 0.11372549019607843),
    (0.0, 0.68235294117647061, 0.12156862745098039),
    (0.0, 0.70196078431372544, 0.13333333333333333),
    (0.0, 0.71372549019607845, 0.14117647058823529),
    (0.0, 0.73333333333333328, 0.15294117647058825),
    (0.0, 0.74509803921568629, 0.16470588235294117),
    (0.0, 0.76470588235294112, 0.17647058823529413),
    (0.0, 0.77647058823529413, 0.18431372549019609),
    (0.0, 0.79607843137254897, 0.19607843137254902),
    (0.0, 0.81176470588235294, 0.20784313725490197),
    (0.0, 0.82745098039215681, 0.21568627450980393),
    (0.0, 0.84313725490196079, 0.22745098039215686),
    (0.0, 0.85490196078431369, 0.23529411764705882),
    (0.0, 0.87450980392156863, 0.24705882352941178),
    (0.0, 0.88627450980392153, 0.25882352941176473),
    (0.0, 0.90588235294117647, 0.27058823529411763),
    (0.0, 0.91764705882352937, 0.27843137254901962),
    (0.0, 0.93725490196078431, 0.29019607843137257),
    (0.0, 0.94901960784313721, 0.29803921568627451),
    (0.0, 0.96862745098039216, 0.30980392156862746),
    (0.0, 0.98039215686274506, 0.32156862745098042),
    (0.0, 1.0, 0.33333333333333331),
    (0.011764705882352941, 0.98431372549019602, 0.3411764705882353),
    (0.027450980392156862, 0.96862745098039216, 0.35294117647058826),
    (0.043137254901960784, 0.95294117647058818, 0.36078431372549019),
    (0.058823529411764705, 0.93725490196078431, 0.37254901960784315),
    (0.074509803921568626, 0.92156862745098034, 0.38039215686274508),
    (0.090196078431372548, 0.90588235294117647, 0.39215686274509803),
    (0.10588235294117647, 0.8901960784313725, 0.40392156862745099),
    (0.12156862745098039, 0.87450980392156863, 0.41568627450980394),
    (0.13725490196078433, 0.85882352941176465, 0.42352941176470588),
    (0.15294117647058825, 0.84313725490196079, 0.43529411764705883),
    (0.16862745098039217, 0.82745098039215681, 0.44313725490196076),
    (0.18431372549019609, 0.81176470588235294, 0.45490196078431372),
    (0.20000000000000001, 0.79607843137254897, 0.46666666666666667),
    (0.21568627450980393, 0.7803921568627451, 0.47450980392156861),
    (0.23137254901960785, 0.76470588235294112, 0.48627450980392156),
    (0.24705882352941178, 0.74901960784313726, 0.49803921568627452),
    (0.2627450980392157, 0.73333333333333328, 0.50980392156862742),
    (0.27843137254901962, 0.71764705882352942, 0.51764705882352946),
    (0.29411764705882354, 0.70196078431372544, 0.52941176470588236),
    (0.30980392156862746, 0.68627450980392157, 0.53725490196078429),
    (0.32549019607843138, 0.6705882352941176, 0.5490196078431373),
    (0.3411764705882353, 0.65490196078431373, 0.5607843137254902),
    (0.35686274509803922, 0.63921568627450975, 0.5725490196078431),
    (0.37254901960784315, 0.62352941176470589, 0.58039215686274515),
    (0.38823529411764707, 0.60784313725490191, 0.59215686274509804),
    (0.40392156862745099, 0.59215686274509804, 0.59999999999999998),
    (0.41960784313725491, 0.57647058823529407, 0.61176470588235299),
    (0.43529411764705883, 0.5607843137254902, 0.62352941176470589),
    (0.44705882352941179, 0.5490196078431373, 0.63137254901960782),
    (0.46666666666666667, 0.52941176470588236, 0.64313725490196083),
    (0.47843137254901963, 0.51764705882352946, 0.65098039215686276),
    (0.49803921568627452, 0.49803921568627452, 0.66274509803921566),
    (0.50980392156862742, 0.48627450980392156, 0.6705882352941176),
    (0.52549019607843139, 0.47058823529411764, 0.68235294117647061),
    (0.54117647058823526, 0.45490196078431372, 0.69411764705882351),
    (0.55686274509803924, 0.4392156862745098, 0.70588235294117652),
    (0.5725490196078431, 0.42352941176470588, 0.71372549019607845),
    (0.58823529411764708, 0.40784313725490196, 0.72549019607843135),
    (0.60392156862745094, 0.39215686274509803, 0.73725490196078436),
    (0.62352941176470589, 0.37254901960784315, 0.74901960784313726),
    (0.63529411764705879, 0.36078431372549019, 0.75686274509803919),
    (0.65490196078431373, 0.3411764705882353, 0.7686274509803922),
    (0.66666666666666663, 0.32941176470588235, 0.77647058823529413),
    (0.68627450980392157, 0.30980392156862746, 0.78823529411764703),
    (0.69803921568627447, 0.29803921568627451, 0.80000000000000004),
    (0.71372549019607845, 0.28235294117647058, 0.80784313725490198),
    (0.72941176470588232, 0.26666666666666666, 0.81960784313725488),
    (0.74509803921568629, 0.25098039215686274, 0.82745098039215681),
    (0.76078431372549016, 0.23529411764705882, 0.83921568627450982),
    (0.77647058823529413, 0.2196078431372549, 0.85098039215686272),
    (0.792156862745098, 0.20392156862745098, 0.86274509803921573),
    (0.80784313725490198, 0.18823529411764706, 0.87058823529411766),
    (0.82352941176470584, 0.17254901960784313, 0.88235294117647056),
    (0.83921568627450982, 0.15686274509803921, 0.8901960784313725),
    (0.85490196078431369, 0.14117647058823529, 0.90196078431372551),
    (0.87058823529411766, 0.12549019607843137, 0.9137254901960784),
    (0.88627450980392153, 0.10980392156862745, 0.92549019607843142),
    (0.90196078431372551, 0.094117647058823528, 0.93333333333333335),
    (0.91764705882352937, 0.078431372549019607, 0.94509803921568625),
    (0.93333333333333335, 0.062745098039215685, 0.95294117647058818),
    (0.94901960784313721, 0.047058823529411764, 0.96470588235294119),
    (0.96470588235294119, 0.031372549019607843, 0.97647058823529409),
    (0.98039215686274506, 0.015686274509803921, 0.9882352941176471),
    (0.99607843137254903, 0.0, 0.99607843137254903),
    (1.0, 0.0, 0.9882352941176471),
    (1.0, 0.0, 0.97254901960784312),
    (1.0, 0.0, 0.96078431372549022),
    (1.0, 0.0, 0.94509803921568625),
    (1.0, 0.0, 0.93333333333333335),
    (1.0, 0.0, 0.91764705882352937),
    (1.0, 0.0, 0.90196078431372551),
    (1.0, 0.0, 0.88627450980392153),
    (1.0, 0.0, 0.87450980392156863),
    (1.0, 0.0, 0.85882352941176465),
    (1.0, 0.0, 0.84705882352941175),
    (1.0, 0.0, 0.83137254901960789),
    (1.0, 0.0, 0.81960784313725488),
    (1.0, 0.0, 0.80392156862745101),
    (1.0, 0.0, 0.792156862745098),
    (1.0, 0.0, 0.77647058823529413),
    (1.0, 0.0, 0.76470588235294112),
    (1.0, 0.0, 0.74901960784313726),
    (1.0, 0.0, 0.73725490196078436),
    (1.0, 0.0, 0.72156862745098038),
    (1.0, 0.0, 0.70980392156862748),
    (1.0, 0.0, 0.69411764705882351),
    (1.0, 0.0, 0.67843137254901964),
    (1.0, 0.0, 0.66666666666666663),
    (1.0, 0.0, 0.65098039215686276),
    (1.0, 0.0, 0.63921568627450975),
    (1.0, 0.0, 0.62352941176470589),
    (1.0, 0.0, 0.61176470588235299),
    (1.0, 0.0, 0.59607843137254901),
    (1.0, 0.0, 0.58431372549019611),
    (1.0, 0.0, 0.56862745098039214),
    (1.0, 0.0, 0.55686274509803924),
    (1.0, 0.0, 0.54117647058823526),
    (1.0, 0.0, 0.52549019607843139),
    (1.0, 0.0, 0.50980392156862742),
    (1.0, 0.0, 0.49803921568627452),
    (1.0, 0.0, 0.48627450980392156),
    (1.0, 0.0, 0.47058823529411764),
    (1.0, 0.0, 0.45490196078431372),
    (1.0, 0.0, 0.4392156862745098),
    (1.0, 0.0, 0.42745098039215684),
    (1.0, 0.0, 0.40784313725490196),
    (1.0, 0.0, 0.396078431372549),
    (1.0, 0.0, 0.38039215686274508),
    (1.0, 0.0, 0.36470588235294116),
    (1.0, 0.0, 0.34901960784313724),
    (1.0, 0.0, 0.33333333333333331),
    (1.0, 0.0, 0.31764705882352939),
    (1.0, 0.0, 0.30588235294117649),
    (1.0, 0.0, 0.28627450980392155),
    (1.0, 0.0, 0.27450980392156865),
    (1.0, 0.0, 0.25882352941176473),
    (1.0, 0.0, 0.24313725490196078),
    (1.0, 0.0, 0.22745098039215686),
    (1.0, 0.0, 0.21568627450980393),
    (1.0, 0.0, 0.20000000000000001),
    (1.0, 0.0, 0.18431372549019609),
    (1.0, 0.0, 0.17254901960784313),
    (1.0, 0.0, 0.15294117647058825),
    (1.0, 0.0, 0.14117647058823529),
    (1.0, 0.0, 0.12549019607843137),
    (1.0, 0.0, 0.10980392156862745),
    (1.0, 0.0, 0.094117647058823528),
    (1.0, 0.0, 0.082352941176470587),
    (1.0, 0.0, 0.062745098039215685),
    (1.0, 0.0, 0.050980392156862744),
    (1.0, 0.0, 0.035294117647058823),
    (1.0, 0.0, 0.019607843137254902),
    (1.0, 0.0, 0.0039215686274509803),
    (1.0, 0.0078431372549019607, 0.0),
    (1.0, 0.027450980392156862, 0.0),
    (1.0, 0.043137254901960784, 0.0),
    (1.0, 0.058823529411764705, 0.0),
    (1.0, 0.078431372549019607, 0.0),
    (1.0, 0.090196078431372548, 0.0),
    (1.0, 0.10980392156862745, 0.0),
    (1.0, 0.12549019607843137, 0.0),
    (1.0, 0.14509803921568629, 0.0),
    (1.0, 0.16078431372549021, 0.0),
    (1.0, 0.1803921568627451, 0.0),
    (1.0, 0.19215686274509805, 0.0),
    (1.0, 0.21176470588235294, 0.0),
    (1.0, 0.22745098039215686, 0.0),
    (1.0, 0.24705882352941178, 0.0),
    (1.0, 0.2627450980392157, 0.0),
    (1.0, 0.28235294117647058, 0.0),
    (1.0, 0.29411764705882354, 0.0),
    (1.0, 0.31372549019607843, 0.0),
    (1.0, 0.32941176470588235, 0.0),
    (1.0, 0.34901960784313724, 0.0),
    (1.0, 0.36470588235294116, 0.0),
    (1.0, 0.37647058823529411, 0.0),
    (1.0, 0.396078431372549, 0.0),
    (1.0, 0.41568627450980394, 0.0),
    (1.0, 0.43137254901960786, 0.0),
    (1.0, 0.44705882352941179, 0.0),
    (1.0, 0.46666666666666667, 0.0),
    (1.0, 0.4823529411764706, 0.0),
    (1.0, 0.50196078431372548, 0.0),
    (1.0, 0.51764705882352946, 0.0),
    (1.0, 0.53333333333333333, 0.0),
    (1.0, 0.5490196078431373, 0.0),
    (1.0, 0.56862745098039214, 0.0),
    (1.0, 0.58431372549019611, 0.0),
    (1.0, 0.60392156862745094, 0.0),
    (1.0, 0.61960784313725492, 0.0),
    (1.0, 0.63529411764705879, 0.0),
    (1.0, 0.65098039215686276, 0.0),
    (1.0, 0.6705882352941176, 0.0),
    (1.0, 0.68627450980392157, 0.0),
    (1.0, 0.70196078431372544, 0.0),
    (1.0, 0.72156862745098038, 0.0),
    (1.0, 0.73333333333333328, 0.0),
    (1.0, 0.75294117647058822, 0.0),
    (1.0, 0.7686274509803922, 0.0),
    (1.0, 0.78823529411764703, 0.0),
    (1.0, 0.80392156862745101, 0.0),
    (1.0, 0.82352941176470584, 0.0),
    (1.0, 0.83921568627450982, 0.0),
    (1.0, 0.85490196078431369, 0.0),
    (1.0, 0.87450980392156863, 0.0),
    (1.0, 0.8901960784313725, 0.0),
    (1.0, 0.90588235294117647, 0.0),
    (1.0, 0.92549019607843142, 0.0),
    (1.0, 0.94117647058823528, 0.0),
    (1.0, 0.96078431372549022, 0.0),
    (1.0, 0.97647058823529409, 0.0),
)
# 256-entry colormap: (r, g, b) triples with each component in [0.0, 1.0].
# Components are 255ths (i/255) written out at full float precision.
# Ramp: black -> blue -> magenta -> red -> orange -> yellow -> near-white.
# NOTE(review): name suggests the SAOImage DS9 "b" colormap — confirm against DS9.
# Machine-generated table; do not hand-edit individual entries.
cmap_ds9_b = (
    (0.0, 0.0, 0.0), # noqa
    (0.0, 0.0, 0.011764705882352941),
    (0.0, 0.0, 0.027450980392156862),
    (0.0, 0.0, 0.043137254901960784),
    (0.0, 0.0, 0.058823529411764705),
    (0.0, 0.0, 0.078431372549019607),
    (0.0, 0.0, 0.090196078431372548),
    (0.0, 0.0, 0.10980392156862745),
    (0.0, 0.0, 0.12156862745098039),
    (0.0, 0.0, 0.14117647058823529),
    (0.0, 0.0, 0.15294117647058825),
    (0.0, 0.0, 0.17254901960784313),
    (0.0, 0.0, 0.18431372549019609),
    (0.0, 0.0, 0.20392156862745098),
    (0.0, 0.0, 0.21568627450980393),
    (0.0, 0.0, 0.23529411764705882),
    (0.0, 0.0, 0.24705882352941178),
    (0.0, 0.0, 0.26666666666666666),
    (0.0, 0.0, 0.27843137254901962),
    (0.0, 0.0, 0.29411764705882354),
    (0.0, 0.0, 0.30980392156862746),
    (0.0, 0.0, 0.32549019607843138),
    (0.0, 0.0, 0.3411764705882353),
    (0.0, 0.0, 0.35686274509803922),
    (0.0, 0.0, 0.37254901960784315),
    (0.0, 0.0, 0.38823529411764707),
    (0.0, 0.0, 0.40392156862745099),
    (0.0, 0.0, 0.41960784313725491),
    (0.0, 0.0, 0.43529411764705883),
    (0.0, 0.0, 0.45098039215686275),
    (0.0, 0.0, 0.47058823529411764),
    (0.0, 0.0, 0.4823529411764706),
    (0.0, 0.0, 0.49803921568627452),
    (0.0, 0.0, 0.51372549019607838),
    (0.0, 0.0, 0.52941176470588236),
    (0.0, 0.0, 0.54509803921568623),
    (0.0, 0.0, 0.5607843137254902),
    (0.0, 0.0, 0.57647058823529407),
    (0.0, 0.0, 0.58823529411764708),
    (0.0, 0.0, 0.60784313725490191),
    (0.0, 0.0, 0.61960784313725492),
    (0.0, 0.0, 0.63921568627450975),
    (0.0, 0.0, 0.65098039215686276),
    (0.0, 0.0, 0.6705882352941176),
    (0.0, 0.0, 0.68235294117647061),
    (0.0, 0.0, 0.70196078431372544),
    (0.0, 0.0, 0.71372549019607845),
    (0.0, 0.0, 0.73333333333333328),
    (0.0, 0.0, 0.74509803921568629),
    (0.0, 0.0, 0.76470588235294112),
    (0.0, 0.0, 0.77647058823529413),
    (0.0, 0.0, 0.79607843137254897),
    (0.0, 0.0, 0.81176470588235294),
    (0.0, 0.0, 0.82745098039215681),
    (0.0, 0.0, 0.84313725490196079),
    (0.0, 0.0, 0.85490196078431369),
    (0.0, 0.0, 0.87450980392156863),
    (0.0, 0.0, 0.88627450980392153),
    (0.0, 0.0, 0.90588235294117647),
    (0.0, 0.0, 0.91764705882352937),
    (0.0, 0.0, 0.93725490196078431),
    (0.0, 0.0, 0.94901960784313721),
    (0.0, 0.0, 0.96862745098039216),
    (0.0, 0.0, 0.98039215686274506),
    (0.0, 0.0, 1.0),
    (0.011764705882352941, 0.0, 0.98431372549019602),
    (0.027450980392156862, 0.0, 0.96862745098039216),
    (0.043137254901960784, 0.0, 0.95294117647058818),
    (0.058823529411764705, 0.0, 0.93725490196078431),
    (0.074509803921568626, 0.0, 0.92156862745098034),
    (0.090196078431372548, 0.0, 0.90588235294117647),
    (0.10588235294117647, 0.0, 0.8901960784313725),
    (0.12156862745098039, 0.0, 0.87450980392156863),
    (0.13725490196078433, 0.0, 0.85882352941176465),
    (0.15294117647058825, 0.0, 0.84313725490196079),
    (0.16862745098039217, 0.0, 0.82745098039215681),
    (0.18431372549019609, 0.0, 0.81176470588235294),
    (0.20000000000000001, 0.0, 0.79607843137254897),
    (0.21568627450980393, 0.0, 0.7803921568627451),
    (0.23137254901960785, 0.0, 0.76470588235294112),
    (0.24705882352941178, 0.0, 0.74901960784313726),
    (0.2627450980392157, 0.0, 0.73333333333333328),
    (0.27843137254901962, 0.0, 0.71764705882352942),
    (0.29411764705882354, 0.0, 0.70196078431372544),
    (0.30980392156862746, 0.0, 0.68627450980392157),
    (0.32549019607843138, 0.0, 0.6705882352941176),
    (0.3411764705882353, 0.0, 0.65490196078431373),
    (0.35686274509803922, 0.0, 0.63921568627450975),
    (0.37254901960784315, 0.0, 0.62352941176470589),
    (0.38823529411764707, 0.0, 0.60784313725490191),
    (0.40392156862745099, 0.0, 0.59215686274509804),
    (0.41960784313725491, 0.0, 0.57647058823529407),
    (0.43529411764705883, 0.0, 0.5607843137254902),
    (0.44705882352941179, 0.0, 0.5490196078431373),
    (0.46666666666666667, 0.0, 0.52941176470588236),
    (0.47843137254901963, 0.0, 0.51764705882352946),
    (0.49803921568627452, 0.0, 0.49803921568627452),
    (0.50980392156862742, 0.0, 0.48627450980392156),
    (0.52549019607843139, 0.0, 0.47058823529411764),
    (0.54117647058823526, 0.0, 0.45490196078431372),
    (0.55686274509803924, 0.0, 0.4392156862745098),
    (0.5725490196078431, 0.0, 0.42352941176470588),
    (0.58823529411764708, 0.0, 0.40784313725490196),
    (0.60392156862745094, 0.0, 0.39215686274509803),
    (0.62352941176470589, 0.0, 0.37254901960784315),
    (0.63529411764705879, 0.0, 0.36078431372549019),
    (0.65490196078431373, 0.0, 0.3411764705882353),
    (0.66666666666666663, 0.0, 0.32941176470588235),
    (0.68627450980392157, 0.0, 0.30980392156862746),
    (0.69803921568627447, 0.0, 0.29803921568627451),
    (0.71372549019607845, 0.0, 0.28235294117647058),
    (0.72941176470588232, 0.0, 0.26666666666666666),
    (0.74509803921568629, 0.0, 0.25098039215686274),
    (0.76078431372549016, 0.0, 0.23529411764705882),
    (0.77647058823529413, 0.0, 0.2196078431372549),
    (0.792156862745098, 0.0, 0.20392156862745098),
    (0.80784313725490198, 0.0, 0.18823529411764706),
    (0.82352941176470584, 0.0, 0.17254901960784313),
    (0.83921568627450982, 0.0, 0.15686274509803921),
    (0.85490196078431369, 0.0, 0.14117647058823529),
    (0.87058823529411766, 0.0, 0.12549019607843137),
    (0.88627450980392153, 0.0, 0.10980392156862745),
    (0.90196078431372551, 0.0, 0.094117647058823528),
    (0.91764705882352937, 0.0, 0.078431372549019607),
    (0.93333333333333335, 0.0, 0.062745098039215685),
    (0.94901960784313721, 0.0, 0.047058823529411764),
    (0.96470588235294119, 0.0, 0.031372549019607843),
    (0.98039215686274506, 0.0, 0.015686274509803921),
    (0.99607843137254903, 0.0, 0.0),
    (1.0, 0.0078431372549019607, 0.0),
    (1.0, 0.027450980392156862, 0.0),
    (1.0, 0.039215686274509803, 0.0),
    (1.0, 0.058823529411764705, 0.0),
    (1.0, 0.070588235294117646, 0.0),
    (1.0, 0.090196078431372548, 0.0),
    (1.0, 0.10196078431372549, 0.0),
    (1.0, 0.12156862745098039, 0.0),
    (1.0, 0.13333333333333333, 0.0),
    (1.0, 0.15294117647058825, 0.0),
    (1.0, 0.16470588235294117, 0.0),
    (1.0, 0.18431372549019609, 0.0),
    (1.0, 0.19607843137254902, 0.0),
    (1.0, 0.21568627450980393, 0.0),
    (1.0, 0.22745098039215686, 0.0),
    (1.0, 0.24705882352941178, 0.0),
    (1.0, 0.25882352941176473, 0.0),
    (1.0, 0.27843137254901962, 0.0),
    (1.0, 0.29019607843137257, 0.0),
    (1.0, 0.30588235294117649, 0.0),
    (1.0, 0.32156862745098042, 0.0),
    (1.0, 0.33725490196078434, 0.0),
    (1.0, 0.35686274509803922, 0.0),
    (1.0, 0.36862745098039218, 0.0),
    (1.0, 0.38823529411764707, 0.0),
    (1.0, 0.40000000000000002, 0.0),
    (1.0, 0.41960784313725491, 0.0),
    (1.0, 0.43137254901960786, 0.0),
    (1.0, 0.45098039215686275, 0.0),
    (1.0, 0.46274509803921571, 0.0),
    (1.0, 0.4823529411764706, 0.0),
    (1.0, 0.49411764705882355, 0.0),
    (1.0, 0.50980392156862742, 0.0),
    (1.0, 0.52549019607843139, 0.0),
    (1.0, 0.54117647058823526, 0.0),
    (1.0, 0.55686274509803924, 0.0),
    (1.0, 0.56862745098039214, 0.0),
    (1.0, 0.58823529411764708, 0.0),
    (1.0, 0.59999999999999998, 0.0),
    (1.0, 0.61960784313725492, 0.0),
    (1.0, 0.63137254901960782, 0.0),
    (1.0, 0.65098039215686276, 0.0),
    (1.0, 0.66274509803921566, 0.0),
    (1.0, 0.68235294117647061, 0.0),
    (1.0, 0.69411764705882351, 0.0),
    (1.0, 0.71372549019607845, 0.0),
    (1.0, 0.72941176470588232, 0.0),
    (1.0, 0.74509803921568629, 0.0),
    (1.0, 0.76078431372549016, 0.0),
    (1.0, 0.77647058823529413, 0.0),
    (1.0, 0.792156862745098, 0.0),
    (1.0, 0.80784313725490198, 0.0),
    (1.0, 0.82352941176470584, 0.0),
    (1.0, 0.83921568627450982, 0.0),
    (1.0, 0.85490196078431369, 0.0),
    (1.0, 0.8666666666666667, 0.0),
    (1.0, 0.88627450980392153, 0.0),
    (1.0, 0.89803921568627454, 0.0),
    (1.0, 0.91764705882352937, 0.0),
    (1.0, 0.92941176470588238, 0.0),
    (1.0, 0.94901960784313721, 0.0),
    (1.0, 0.96078431372549022, 0.0),
    (1.0, 0.98039215686274506, 0.0),
    (1.0, 0.99215686274509807, 0.0),
    (1.0, 1.0, 0.0078431372549019607),
    (1.0, 1.0, 0.023529411764705882),
    (1.0, 1.0, 0.039215686274509803),
    (1.0, 1.0, 0.054901960784313725),
    (1.0, 1.0, 0.070588235294117646),
    (1.0, 1.0, 0.086274509803921567),
    (1.0, 1.0, 0.10196078431372549),
    (1.0, 1.0, 0.11764705882352941),
    (1.0, 1.0, 0.13333333333333333),
    (1.0, 1.0, 0.14901960784313725),
    (1.0, 1.0, 0.16470588235294117),
    (1.0, 1.0, 0.1803921568627451),
    (1.0, 1.0, 0.19607843137254902),
    (1.0, 1.0, 0.21176470588235294),
    (1.0, 1.0, 0.22745098039215686),
    (1.0, 1.0, 0.24313725490196078),
    (1.0, 1.0, 0.25882352941176473),
    (1.0, 1.0, 0.27450980392156865),
    (1.0, 1.0, 0.29019607843137257),
    (1.0, 1.0, 0.30588235294117649),
    (1.0, 1.0, 0.32156862745098042),
    (1.0, 1.0, 0.33725490196078434),
    (1.0, 1.0, 0.35294117647058826),
    (1.0, 1.0, 0.36862745098039218),
    (1.0, 1.0, 0.3843137254901961),
    (1.0, 1.0, 0.40000000000000002),
    (1.0, 1.0, 0.41568627450980394),
    (1.0, 1.0, 0.42745098039215684),
    (1.0, 1.0, 0.44705882352941179),
    (1.0, 1.0, 0.46274509803921571),
    (1.0, 1.0, 0.47843137254901963),
    (1.0, 1.0, 0.49411764705882355),
    (1.0, 1.0, 0.50980392156862742),
    (1.0, 1.0, 0.52156862745098043),
    (1.0, 1.0, 0.54117647058823526),
    (1.0, 1.0, 0.55294117647058827),
    (1.0, 1.0, 0.5725490196078431),
    (1.0, 1.0, 0.58431372549019611),
    (1.0, 1.0, 0.60392156862745094),
    (1.0, 1.0, 0.61568627450980395),
    (1.0, 1.0, 0.63529411764705879),
    (1.0, 1.0, 0.6470588235294118),
    (1.0, 1.0, 0.66666666666666663),
    (1.0, 1.0, 0.67843137254901964),
    (1.0, 1.0, 0.69803921568627447),
    (1.0, 1.0, 0.70980392156862748),
    (1.0, 1.0, 0.72549019607843135),
    (1.0, 1.0, 0.74117647058823533),
    (1.0, 1.0, 0.75686274509803919),
    (1.0, 1.0, 0.77254901960784317),
    (1.0, 1.0, 0.78823529411764703),
    (1.0, 1.0, 0.80392156862745101),
    (1.0, 1.0, 0.81960784313725488),
    (1.0, 1.0, 0.83529411764705885),
    (1.0, 1.0, 0.85098039215686272),
    (1.0, 1.0, 0.8666666666666667),
    (1.0, 1.0, 0.88235294117647056),
    (1.0, 1.0, 0.90196078431372551),
    (1.0, 1.0, 0.9137254901960784),
    (1.0, 1.0, 0.93333333333333335),
    (1.0, 1.0, 0.94509803921568625),
    (1.0, 1.0, 0.96470588235294119),
    (1.0, 1.0, 0.97647058823529409),
)
cmap_ds9_bb = (
(0.0, 0.0, 0.0), # noqa
(0.0039215686274509803, 0.0, 0.0),
(0.011764705882352941, 0.0, 0.0),
(0.019607843137254902, 0.0, 0.0),
(0.027450980392156862, 0.0, 0.0),
(0.039215686274509803, 0.0, 0.0),
(0.043137254901960784, 0.0, 0.0),
(0.054901960784313725, 0.0, 0.0),
(0.058823529411764705, 0.0, 0.0),
(0.070588235294117646, 0.0, 0.0),
(0.074509803921568626, 0.0, 0.0),
(0.086274509803921567, 0.0, 0.0),
(0.090196078431372548, 0.0, 0.0),
(0.10196078431372549, 0.0, 0.0),
(0.10588235294117647, 0.0, 0.0),
(0.11764705882352941, 0.0, 0.0),
(0.12156862745098039, 0.0, 0.0),
(0.13333333333333333, 0.0, 0.0),
(0.13725490196078433, 0.0, 0.0),
(0.14509803921568629, 0.0, 0.0),
(0.15294117647058825, 0.0, 0.0),
(0.16078431372549021, 0.0, 0.0),
(0.16862745098039217, 0.0, 0.0),
(0.17647058823529413, 0.0, 0.0),
(0.18431372549019609, 0.0, 0.0),
(0.19215686274509805, 0.0, 0.0),
(0.20000000000000001, 0.0, 0.0),
(0.20784313725490197, 0.0, 0.0),
(0.21568627450980393, 0.0, 0.0),
(0.22352941176470589, 0.0, 0.0),
(0.23529411764705882, 0.0, 0.0),
(0.23921568627450981, 0.0, 0.0),
(0.24705882352941178, 0.0, 0.0),
(0.25490196078431371, 0.0, 0.0),
(0.2627450980392157, 0.0, 0.0),
(0.27058823529411763, 0.0, 0.0),
(0.27843137254901962, 0.0, 0.0),
(0.28627450980392155, 0.0, 0.0),
(0.29411764705882354, 0.0, 0.0),
(0.30196078431372547, 0.0, 0.0),
(0.30980392156862746, 0.0, 0.0),
(0.31764705882352939, 0.0, 0.0),
(0.32549019607843138, 0.0, 0.0),
(0.33333333333333331, 0.0, 0.0),
(0.3411764705882353, 0.0, 0.0),
(0.34901960784313724, 0.0, 0.0),
(0.35686274509803922, 0.0, 0.0),
(0.36470588235294116, 0.0, 0.0),
(0.37254901960784315, 0.0, 0.0),
(0.38039215686274508, 0.0, 0.0),
(0.38823529411764707, 0.0, 0.0),
(0.396078431372549, 0.0, 0.0),
(0.40392156862745099, 0.0, 0.0),
(0.41176470588235292, 0.0, 0.0),
(0.41960784313725491, 0.0, 0.0),
(0.42745098039215684, 0.0, 0.0),
(0.43529411764705883, 0.0, 0.0),
(0.44313725490196076, 0.0, 0.0),
(0.45098039215686275, 0.0, 0.0),
(0.45882352941176469, 0.0, 0.0),
(0.46666666666666667, 0.0, 0.0),
(0.47450980392156861, 0.0, 0.0),
(0.4823529411764706, 0.0, 0.0),
(0.49019607843137253, 0.0, 0.0),
(0.49803921568627452, 0.0, 0.0),
(0.50588235294117645, 0.0039215686274509803, 0.0),
(0.51372549019607838, 0.011764705882352941, 0.0),
(0.52156862745098043, 0.019607843137254902, 0.0),
(0.52941176470588236, 0.027450980392156862, 0.0),
(0.53725490196078429, 0.035294117647058823, 0.0),
(0.54509803921568623, 0.043137254901960784, 0.0),
(0.55294117647058827, 0.050980392156862744, 0.0),
(0.5607843137254902, 0.058823529411764705, 0.0),
(0.56862745098039214, 0.066666666666666666, 0.0),
(0.57647058823529407, 0.074509803921568626, 0.0),
(0.58431372549019611, 0.082352941176470587, 0.0),
(0.59215686274509804, 0.090196078431372548, 0.0),
(0.59999999999999998, 0.098039215686274508, 0.0),
(0.60784313725490191, 0.10588235294117647, 0.0),
(0.61568627450980395, 0.11372549019607843, 0.0),
(0.62352941176470589, 0.12156862745098039, 0.0),
(0.63137254901960782, 0.12941176470588237, 0.0),
(0.63921568627450975, 0.13725490196078433, 0.0),
(0.6470588235294118, 0.14509803921568629, 0.0),
(0.65490196078431373, 0.15294117647058825, 0.0),
(0.66274509803921566, 0.16078431372549021, 0.0),
(0.6705882352941176, 0.16862745098039217, 0.0),
(0.67843137254901964, 0.17647058823529413, 0.0),
(0.68627450980392157, 0.18431372549019609, 0.0),
(0.69411764705882351, 0.19215686274509805, 0.0),
(0.70196078431372544, 0.20000000000000001, 0.0),
(0.70980392156862748, 0.20784313725490197, 0.0),
(0.71764705882352942, 0.21568627450980393, 0.0),
(0.72156862745098038, 0.22352941176470589, 0.0),
(0.73333333333333328, 0.23137254901960785, 0.0),
(0.73725490196078436, 0.23921568627450981, 0.0),
(0.74901960784313726, 0.24705882352941178, 0.0),
(0.75294117647058822, 0.25490196078431371, 0.0),
(0.76078431372549016, 0.2627450980392157, 0.0),
(0.7686274509803922, 0.27058823529411763, 0.0),
(0.77647058823529413, 0.27843137254901962, 0.0),
(0.78431372549019607, 0.28627450980392155, 0.0),
(0.792156862745098, 0.29411764705882354, 0.0),
(0.80000000000000004, 0.30196078431372547, 0.0),
(0.81176470588235294, 0.30980392156862746, 0.0),
(0.81568627450980391, 0.31764705882352939, 0.0),
(0.82745098039215681, 0.32549019607843138, 0.0),
(0.83137254901960789, 0.33333333333333331, 0.0),
(0.84313725490196079, 0.3411764705882353, 0.0),
(0.84705882352941175, 0.34901960784313724, 0.0),
(0.85490196078431369, 0.35686274509803922, 0.0),
(0.86274509803921573, 0.36470588235294116, 0.0),
(0.87058823529411766, 0.37254901960784315, 0.0),
(0.8784313725490196, 0.38039215686274508, 0.0),
(0.88627450980392153, 0.38823529411764707, 0.0),
(0.89411764705882357, 0.396078431372549, 0.0),
(0.90196078431372551, 0.40392156862745099, 0.0),
(0.90980392156862744, 0.41176470588235292, 0.0),
(0.91764705882352937, 0.41960784313725491, 0.0),
(0.92549019607843142, 0.42745098039215684, 0.0),
(0.93333333333333335, 0.43529411764705883, 0.0),
(0.94117647058823528, 0.44313725490196076, 0.0),
(0.94901960784313721, 0.45098039215686275, 0.0),
(0.95686274509803926, 0.45882352941176469, 0.0),
(0.96470588235294119, 0.46666666666666667, 0.0),
(0.97254901960784312, 0.47450980392156861, 0.0),
(0.98039215686274506, 0.4823529411764706, 0.0),
(0.9882352941176471, 0.49019607843137253, 0.0),
(0.99607843137254903, 0.49803921568627452, 0.0),
(1.0, 0.50196078431372548, 0.0039215686274509803),
(1.0, 0.51372549019607838, 0.011764705882352941),
(1.0, 0.51764705882352946, 0.019607843137254902),
(1.0, 0.52941176470588236, 0.027450980392156862),
(1.0, 0.53333333333333333, 0.035294117647058823),
(1.0, 0.54509803921568623, 0.043137254901960784),
(1.0, 0.5490196078431373, 0.050980392156862744),
(1.0, 0.5607843137254902, 0.058823529411764705),
(1.0, 0.56470588235294117, 0.066666666666666666),
(1.0, 0.57647058823529407, 0.074509803921568626),
(1.0, 0.58039215686274515, 0.082352941176470587),
(1.0, 0.59215686274509804, 0.090196078431372548),
(1.0, 0.59607843137254901, 0.098039215686274508),
(1.0, 0.60784313725490191, 0.10588235294117647),
(1.0, 0.61176470588235299, 0.11372549019607843),
(1.0, 0.62352941176470589, 0.12156862745098039),
(1.0, 0.62745098039215685, 0.12941176470588237),
(1.0, 0.63921568627450975, 0.13725490196078433),
(1.0, 0.64313725490196083, 0.14509803921568629),
(1.0, 0.65098039215686276, 0.15294117647058825),
(1.0, 0.6588235294117647, 0.16078431372549021),
(1.0, 0.66666666666666663, 0.16862745098039217),
(1.0, 0.67843137254901964, 0.17647058823529413),
(1.0, 0.68235294117647061, 0.18431372549019609),
(1.0, 0.69411764705882351, 0.19215686274509805),
(1.0, 0.69803921568627447, 0.20000000000000001),
(1.0, 0.70980392156862748, 0.20784313725490197),
(1.0, 0.71372549019607845, 0.21568627450980393),
(1.0, 0.72549019607843135, 0.22352941176470589),
(1.0, 0.72941176470588232, 0.23137254901960785),
(1.0, 0.74117647058823533, 0.23921568627450981),
(1.0, 0.74509803921568629, 0.24705882352941178),
(1.0, 0.75294117647058822, 0.25490196078431371),
(1.0, 0.76078431372549016, 0.2627450980392157),
(1.0, 0.7686274509803922, 0.27058823529411763),
(1.0, 0.77647058823529413, 0.27843137254901962),
(1.0, 0.78431372549019607, 0.28235294117647058),
(1.0, 0.792156862745098, 0.29411764705882354),
(1.0, 0.80000000000000004, 0.29803921568627451),
(1.0, 0.80784313725490198, 0.30980392156862746),
(1.0, 0.81568627450980391, 0.31372549019607843),
(1.0, 0.82352941176470584, 0.32549019607843138),
(1.0, 0.83137254901960789, 0.32941176470588235),
(1.0, 0.83921568627450982, 0.3411764705882353),
(1.0, 0.84705882352941175, 0.34509803921568627),
(1.0, 0.85490196078431369, 0.35686274509803922),
(1.0, 0.86274509803921573, 0.36470588235294116),
(1.0, 0.87058823529411766, 0.37254901960784315),
(1.0, 0.8784313725490196, 0.38039215686274508),
(1.0, 0.88627450980392153, 0.38823529411764707),
(1.0, 0.89411764705882357, 0.396078431372549),
(1.0, 0.90196078431372551, 0.40392156862745099),
(1.0, 0.90980392156862744, 0.41176470588235292),
(1.0, 0.91764705882352937, 0.41960784313725491),
(1.0, 0.92549019607843142, 0.42745098039215684),
(1.0, 0.93333333333333335, 0.43137254901960786),
(1.0, 0.94117647058823528, 0.44313725490196076),
(1.0, 0.94901960784313721, 0.44705882352941179),
(1.0, 0.95686274509803926, 0.45882352941176469),
(1.0, 0.96470588235294119, 0.46274509803921571),
(1.0, 0.97254901960784312, 0.47450980392156861),
(1.0, 0.98039215686274506, 0.47843137254901963),
(1.0, 0.9882352941176471, 0.49019607843137253),
(1.0, 0.99607843137254903, 0.49411764705882355),
(1.0, 1.0, 0.50196078431372548),
(1.0, 1.0, 0.50980392156862742),
(1.0, 1.0, 0.51764705882352946),
(1.0, 1.0, 0.52549019607843139),
(1.0, 1.0, 0.53333333333333333),
(1.0, 1.0, 0.54117647058823526),
(1.0, 1.0, 0.5490196078431373),
(1.0, 1.0, 0.55686274509803924),
(1.0, 1.0, 0.56470588235294117),
(1.0, 1.0, 0.5725490196078431),
(1.0, 1.0, 0.58039215686274515),
(1.0, 1.0, 0.58823529411764708),
(1.0, 1.0, 0.59607843137254901),
(1.0, 1.0, 0.60392156862745094),
(1.0, 1.0, 0.61176470588235299),
(1.0, 1.0, 0.61960784313725492),
(1.0, 1.0, 0.62745098039215685),
(1.0, 1.0, 0.63529411764705879),
(1.0, 1.0, 0.64313725490196083),
(1.0, 1.0, 0.65098039215686276),
(1.0, 1.0, 0.6588235294117647),
(1.0, 1.0, 0.66666666666666663),
(1.0, 1.0, 0.67450980392156867),
(1.0, 1.0, 0.68235294117647061),
(1.0, 1.0, 0.69019607843137254),
(1.0, 1.0, 0.69803921568627447),
(1.0, 1.0, 0.70588235294117652),
(1.0, 1.0, 0.71372549019607845),
(1.0, 1.0, 0.72156862745098038),
(1.0, 1.0, 0.72941176470588232),
(1.0, 1.0, 0.73725490196078436),
(1.0, 1.0, 0.74509803921568629),
(1.0, 1.0, 0.75294117647058822),
(1.0, 1.0, 0.76078431372549016),
(1.0, 1.0, 0.7686274509803922),
(1.0, 1.0, 0.77647058823529413),
(1.0, 1.0, 0.78431372549019607),
(1.0, 1.0, 0.792156862745098),
(1.0, 1.0, 0.80000000000000004),
(1.0, 1.0, 0.80784313725490198),
(1.0, 1.0, 0.81568627450980391),
(1.0, 1.0, 0.82352941176470584),
(1.0, 1.0, 0.83137254901960789),
(1.0, 1.0, 0.83921568627450982),
(1.0, 1.0, 0.84705882352941175),
(1.0, 1.0, 0.85490196078431369),
(1.0, 1.0, 0.86274509803921573),
(1.0, 1.0, 0.87058823529411766),
(1.0, 1.0, 0.8784313725490196),
(1.0, 1.0, 0.88627450980392153),
(1.0, 1.0, 0.89411764705882357),
(1.0, 1.0, 0.90196078431372551),
(1.0, 1.0, 0.90980392156862744),
(1.0, 1.0, 0.91764705882352937),
(1.0, 1.0, 0.92549019607843142),
(1.0, 1.0, 0.93333333333333335),
(1.0, 1.0, 0.94117647058823528),
(1.0, 1.0, 0.94901960784313721),
(1.0, 1.0, 0.95686274509803926),
(1.0, 1.0, 0.96470588235294119),
(1.0, 1.0, 0.97254901960784312),
(1.0, 1.0, 0.98039215686274506),
(1.0, 1.0, 0.9882352941176471),
)
# DS9 "cool" colormap: 256 (r, g, b) triples, each channel in [0.0, 1.0].
# The table ramps blue-first (r = g = 0.0 while b rises), then green joins
# in, blue saturates at 1.0, and finally red ramps up so the top of the
# table approaches white. Values are fixed data — do not edit by hand.
cmap_ds9_cool = (
    # --- blue-only ramp: r = g = 0.0, b rises from 0.0 ---
    (0.0, 0.0, 0.0),
    (0.0, 0.0, 0.0039215686274509803),
    (0.0, 0.0, 0.011764705882352941),
    (0.0, 0.0, 0.019607843137254902),
    (0.0, 0.0, 0.027450980392156862),
    (0.0, 0.0, 0.035294117647058823),
    (0.0, 0.0, 0.043137254901960784),
    (0.0, 0.0, 0.050980392156862744),
    (0.0, 0.0, 0.058823529411764705),
    (0.0, 0.0, 0.066666666666666666),
    (0.0, 0.0, 0.070588235294117646),
    (0.0, 0.0, 0.078431372549019607),
    (0.0, 0.0, 0.086274509803921567),
    (0.0, 0.0, 0.094117647058823528),
    (0.0, 0.0, 0.10196078431372549),
    (0.0, 0.0, 0.10980392156862745),
    (0.0, 0.0, 0.11764705882352941),
    (0.0, 0.0, 0.12549019607843137),
    (0.0, 0.0, 0.12941176470588237),
    (0.0, 0.0, 0.13725490196078433),
    (0.0, 0.0, 0.14509803921568629),
    (0.0, 0.0, 0.15294117647058825),
    (0.0, 0.0, 0.16078431372549021),
    (0.0, 0.0, 0.16470588235294117),
    (0.0, 0.0, 0.17647058823529413),
    (0.0, 0.0, 0.1803921568627451),
    (0.0, 0.0, 0.18823529411764706),
    (0.0, 0.0, 0.19607843137254902),
    (0.0, 0.0, 0.20392156862745098),
    (0.0, 0.0, 0.21176470588235294),
    (0.0, 0.0, 0.2196078431372549),
    (0.0, 0.0, 0.22745098039215686),
    (0.0, 0.0, 0.23529411764705882),
    (0.0, 0.0, 0.24313725490196078),
    (0.0, 0.0, 0.25098039215686274),
    (0.0, 0.0, 0.25490196078431371),
    (0.0, 0.0, 0.2627450980392157),
    (0.0, 0.0, 0.27058823529411763),
    (0.0, 0.0, 0.27843137254901962),
    (0.0, 0.0, 0.28627450980392155),
    (0.0, 0.0, 0.29019607843137257),
    (0.0, 0.0, 0.30196078431372547),
    (0.0, 0.0, 0.30588235294117649),
    (0.0, 0.0, 0.31372549019607843),
    (0.0, 0.0, 0.32156862745098042),
    (0.0, 0.0, 0.32941176470588235),
    (0.0, 0.0, 0.33725490196078434),
    (0.0, 0.0, 0.34509803921568627),
    (0.0, 0.0, 0.34901960784313724),
    (0.0, 0.0, 0.36078431372549019),
    (0.0, 0.0, 0.36470588235294116),
    (0.0, 0.0, 0.37254901960784315),
    (0.0, 0.0, 0.38039215686274508),
    (0.0, 0.0, 0.38823529411764707),
    (0.0, 0.0, 0.396078431372549),
    (0.0, 0.0, 0.40392156862745099),
    (0.0, 0.0, 0.41176470588235292),
    (0.0, 0.0, 0.41568627450980394),
    # --- green channel starts rising; blue continues toward 1.0 ---
    (0.0, 0.0078431372549019607, 0.42745098039215684),
    (0.0, 0.011764705882352941, 0.43137254901960786),
    (0.0, 0.015686274509803921, 0.4392156862745098),
    (0.0, 0.023529411764705882, 0.44705882352941179),
    (0.0, 0.027450980392156862, 0.45490196078431372),
    (0.0, 0.031372549019607843, 0.46274509803921571),
    (0.0, 0.039215686274509803, 0.47058823529411764),
    (0.0, 0.043137254901960784, 0.47450980392156861),
    (0.0, 0.050980392156862744, 0.48627450980392156),
    (0.0, 0.054901960784313725, 0.49019607843137253),
    (0.0, 0.058823529411764705, 0.49803921568627452),
    (0.0, 0.062745098039215685, 0.50588235294117645),
    (0.0, 0.070588235294117646, 0.51372549019607838),
    (0.0, 0.074509803921568626, 0.52156862745098043),
    (0.0, 0.082352941176470587, 0.52941176470588236),
    (0.0, 0.086274509803921567, 0.53333333333333333),
    (0.0, 0.090196078431372548, 0.54117647058823526),
    (0.0, 0.094117647058823528, 0.5490196078431373),
    (0.0, 0.10196078431372549, 0.55686274509803924),
    (0.0, 0.10588235294117647, 0.56470588235294117),
    (0.0, 0.11372549019607843, 0.5725490196078431),
    (0.0039215686274509803, 0.11764705882352941, 0.58039215686274515),
    (0.0039215686274509803, 0.12156862745098039, 0.58823529411764708),
    (0.0039215686274509803, 0.12941176470588237, 0.59607843137254901),
    (0.0039215686274509803, 0.13333333333333333, 0.59999999999999998),
    (0.0039215686274509803, 0.13725490196078433, 0.60784313725490191),
    (0.0078431372549019607, 0.14509803921568629, 0.61568627450980395),
    (0.0078431372549019607, 0.14901960784313725, 0.62352941176470589),
    (0.0078431372549019607, 0.15294117647058825, 0.63137254901960782),
    (0.0078431372549019607, 0.16078431372549021, 0.63921568627450975),
    (0.0078431372549019607, 0.16470588235294117, 0.6470588235294118),
    (0.011764705882352941, 0.16862745098039217, 0.65490196078431373),
    (0.011764705882352941, 0.17647058823529413, 0.6588235294117647),
    (0.011764705882352941, 0.1803921568627451, 0.6705882352941176),
    (0.011764705882352941, 0.18431372549019609, 0.67450980392156867),
    (0.011764705882352941, 0.19215686274509805, 0.68235294117647061),
    (0.015686274509803921, 0.19607843137254902, 0.69019607843137254),
    (0.015686274509803921, 0.20000000000000001, 0.69803921568627447),
    (0.015686274509803921, 0.20784313725490197, 0.70588235294117652),
    (0.015686274509803921, 0.21176470588235294, 0.70980392156862748),
    (0.019607843137254902, 0.21568627450980393, 0.71764705882352942),
    (0.019607843137254902, 0.22352941176470589, 0.72549019607843135),
    (0.019607843137254902, 0.22745098039215686, 0.73333333333333328),
    (0.019607843137254902, 0.23137254901960785, 0.74117647058823533),
    (0.019607843137254902, 0.23921568627450981, 0.74901960784313726),
    (0.023529411764705882, 0.24313725490196078, 0.75686274509803919),
    (0.023529411764705882, 0.25098039215686274, 0.76470588235294112),
    (0.023529411764705882, 0.25490196078431371, 0.77254901960784317),
    (0.023529411764705882, 0.25882352941176473, 0.7803921568627451),
    (0.023529411764705882, 0.26666666666666666, 0.78431372549019607),
    (0.027450980392156862, 0.27058823529411763, 0.792156862745098),
    (0.027450980392156862, 0.27450980392156865, 0.80000000000000004),
    (0.027450980392156862, 0.27843137254901962, 0.80784313725490198),
    (0.027450980392156862, 0.28627450980392155, 0.81568627450980391),
    (0.027450980392156862, 0.29019607843137257, 0.82352941176470584),
    (0.031372549019607843, 0.29803921568627451, 0.83137254901960789),
    (0.031372549019607843, 0.30196078431372547, 0.83529411764705885),
    (0.031372549019607843, 0.30588235294117649, 0.84313725490196079),
    (0.031372549019607843, 0.30980392156862746, 0.85098039215686272),
    (0.035294117647058823, 0.31764705882352939, 0.85882352941176465),
    (0.035294117647058823, 0.32156862745098042, 0.8666666666666667),
    (0.035294117647058823, 0.32941176470588235, 0.87450980392156863),
    (0.035294117647058823, 0.33333333333333331, 0.88235294117647056),
    (0.035294117647058823, 0.33725490196078434, 0.8901960784313725),
    (0.039215686274509803, 0.34509803921568627, 0.89411764705882357),
    (0.039215686274509803, 0.34901960784313724, 0.90196078431372551),
    (0.039215686274509803, 0.35294117647058826, 0.90980392156862744),
    (0.039215686274509803, 0.36078431372549019, 0.91764705882352937),
    (0.039215686274509803, 0.36470588235294116, 0.92549019607843142),
    (0.043137254901960784, 0.36862745098039218, 0.93333333333333335),
    (0.043137254901960784, 0.37647058823529411, 0.94117647058823528),
    (0.043137254901960784, 0.38039215686274508, 0.94509803921568625),
    (0.043137254901960784, 0.38823529411764707, 0.95686274509803926),
    (0.043137254901960784, 0.39215686274509803, 0.96078431372549022),
    (0.047058823529411764, 0.396078431372549, 0.96862745098039216),
    (0.047058823529411764, 0.40000000000000002, 0.97647058823529409),
    (0.047058823529411764, 0.40784313725490196, 0.98431372549019602),
    (0.047058823529411764, 0.41176470588235292, 0.99215686274509807),
    # --- blue saturated at 1.0; red and green keep rising ---
    (0.050980392156862744, 0.41960784313725491, 1.0),
    (0.050980392156862744, 0.42352941176470588, 1.0),
    (0.050980392156862744, 0.42745098039215684, 1.0),
    (0.050980392156862744, 0.43137254901960786, 1.0),
    (0.050980392156862744, 0.4392156862745098, 1.0),
    (0.054901960784313725, 0.44313725490196076, 1.0),
    (0.054901960784313725, 0.45098039215686275, 1.0),
    (0.054901960784313725, 0.45490196078431372, 1.0),
    (0.054901960784313725, 0.45882352941176469, 1.0),
    (0.054901960784313725, 0.46666666666666667, 1.0),
    (0.058823529411764705, 0.47058823529411764, 1.0),
    (0.058823529411764705, 0.47450980392156861, 1.0),
    (0.058823529411764705, 0.4823529411764706, 1.0),
    (0.058823529411764705, 0.48627450980392156, 1.0),
    (0.062745098039215685, 0.49019607843137253, 1.0),
    (0.062745098039215685, 0.49803921568627452, 1.0),
    (0.062745098039215685, 0.50196078431372548, 1.0),
    (0.062745098039215685, 0.50588235294117645, 1.0),
    (0.062745098039215685, 0.51372549019607838, 1.0),
    (0.066666666666666666, 0.51764705882352946, 1.0),
    (0.066666666666666666, 0.52156862745098043, 1.0),
    (0.066666666666666666, 0.52941176470588236, 1.0),
    (0.066666666666666666, 0.53333333333333333, 1.0),
    (0.066666666666666666, 0.54117647058823526, 1.0),
    (0.070588235294117646, 0.54509803921568623, 1.0),
    (0.070588235294117646, 0.5490196078431373, 1.0),
    (0.070588235294117646, 0.55294117647058827, 1.0),
    (0.070588235294117646, 0.5607843137254902, 1.0),
    (0.070588235294117646, 0.56470588235294117, 1.0),
    (0.074509803921568626, 0.56862745098039214, 1.0),
    (0.074509803921568626, 0.57647058823529407, 1.0),
    (0.074509803921568626, 0.58039215686274515, 1.0),
    (0.074509803921568626, 0.58431372549019611, 1.0),
    (0.078431372549019607, 0.59215686274509804, 1.0),
    (0.078431372549019607, 0.59607843137254901, 1.0),
    (0.078431372549019607, 0.59999999999999998, 1.0),
    (0.078431372549019607, 0.60784313725490191, 1.0),
    (0.078431372549019607, 0.61176470588235299, 1.0),
    (0.082352941176470587, 0.61960784313725492, 1.0),
    (0.082352941176470587, 0.62352941176470589, 1.0),
    (0.082352941176470587, 0.62745098039215685, 1.0),
    (0.082352941176470587, 0.63529411764705879, 1.0),
    (0.082352941176470587, 0.63921568627450975, 1.0),
    (0.086274509803921567, 0.64313725490196083, 1.0),
    (0.086274509803921567, 0.65098039215686276, 1.0),
    (0.086274509803921567, 0.65490196078431373, 1.0),
    (0.086274509803921567, 0.66274509803921566, 1.0),
    (0.086274509803921567, 0.66666666666666663, 1.0),
    (0.090196078431372548, 0.6705882352941176, 1.0),
    (0.090196078431372548, 0.67450980392156867, 1.0),
    (0.090196078431372548, 0.68235294117647061, 1.0),
    (0.090196078431372548, 0.68627450980392157, 1.0),
    (0.094117647058823528, 0.69019607843137254, 1.0),
    (0.094117647058823528, 0.69803921568627447, 1.0),
    (0.094117647058823528, 0.70196078431372544, 1.0),
    (0.094117647058823528, 0.70588235294117652, 1.0),
    (0.094117647058823528, 0.71372549019607845, 1.0),
    (0.098039215686274508, 0.71764705882352942, 1.0),
    (0.098039215686274508, 0.72156862745098038, 1.0),
    # --- red begins ramping steeply toward white ---
    (0.10196078431372549, 0.72941176470588232, 1.0),
    (0.11372549019607843, 0.73333333333333328, 1.0),
    (0.12941176470588237, 0.74117647058823533, 1.0),
    (0.14117647058823529, 0.74509803921568629, 1.0),
    (0.16078431372549021, 0.74901960784313726, 1.0),
    (0.17254901960784313, 0.75686274509803919, 1.0),
    (0.18823529411764706, 0.76078431372549016, 1.0),
    (0.20392156862745098, 0.76470588235294112, 1.0),
    (0.21568627450980393, 0.7686274509803922, 1.0),
    (0.23137254901960785, 0.77647058823529413, 1.0),
    (0.24705882352941178, 0.7803921568627451, 1.0),
    (0.2627450980392157, 0.78823529411764703, 1.0),
    (0.27450980392156865, 0.792156862745098, 1.0),
    (0.29019607843137257, 0.79607843137254897, 1.0),
    (0.30588235294117649, 0.80392156862745101, 1.0),
    (0.32156862745098042, 0.80784313725490198, 1.0),
    (0.33333333333333331, 0.81176470588235294, 1.0),
    (0.34901960784313724, 0.81960784313725488, 1.0),
    (0.36078431372549019, 0.82352941176470584, 1.0),
    (0.38039215686274508, 0.82745098039215681, 1.0),
    (0.39215686274509803, 0.83529411764705885, 1.0),
    (0.40784313725490196, 0.83921568627450982, 1.0),
    (0.41960784313725491, 0.84313725490196079, 1.0),
    (0.4392156862745098, 0.85098039215686272, 1.0),
    (0.45098039215686275, 0.85490196078431369, 1.0),
    (0.46274509803921571, 0.85882352941176465, 1.0),
    (0.47843137254901963, 0.8666666666666667, 1.0),
    (0.49411764705882355, 0.87058823529411766, 1.0),
    (0.50980392156862742, 0.87450980392156863, 1.0),
    (0.52549019607843139, 0.88235294117647056, 1.0),
    (0.54117647058823526, 0.88627450980392153, 1.0),
    (0.55294117647058827, 0.8901960784313725, 1.0),
    (0.56862745098039214, 0.89803921568627454, 1.0),
    (0.58431372549019611, 0.90196078431372551, 1.0),
    (0.59999999999999998, 0.90980392156862744, 1.0),
    (0.61176470588235299, 0.9137254901960784, 1.0),
    (0.62745098039215685, 0.91764705882352937, 1.0),
    (0.63921568627450975, 0.92156862745098034, 1.0),
    (0.6588235294117647, 0.92941176470588238, 1.0),
    (0.6705882352941176, 0.93333333333333335, 1.0),
    (0.68627450980392157, 0.94117647058823528, 1.0),
    (0.69803921568627447, 0.94509803921568625, 1.0),
    (0.71764705882352942, 0.94901960784313721, 1.0),
    (0.72941176470588232, 0.95686274509803926, 1.0),
    (0.74117647058823533, 0.96078431372549022, 1.0),
    (0.75686274509803919, 0.96470588235294119, 1.0),
    (0.77254901960784317, 0.96862745098039216, 1.0),
    (0.78823529411764703, 0.97647058823529409, 1.0),
    (0.80000000000000004, 0.98039215686274506, 1.0),
    (0.81568627450980391, 0.9882352941176471, 1.0),
    (0.83137254901960789, 0.99215686274509807, 1.0),
    (0.84705882352941175, 0.99607843137254903, 1.0),
    # --- green and blue saturated; red finishes the ramp toward white ---
    (0.85882352941176465, 1.0, 1.0),
    (0.87450980392156863, 1.0, 1.0),
    (0.8901960784313725, 1.0, 1.0),
    (0.90588235294117647, 1.0, 1.0),
    (0.91764705882352937, 1.0, 1.0),
    (0.93725490196078431, 1.0, 1.0),
    (0.94901960784313721, 1.0, 1.0),
    (0.96470588235294119, 1.0, 1.0),
    (0.97647058823529409, 1.0, 1.0),
)
cmap_ds9_he = (
(0.015686274509803921, 0.0, 0.0039215686274509803), # noqa
(0.12941176470588237, 0.0, 0.031372549019607843),
(0.25882352941176473, 0.0, 0.062745098039215685),
(0.38823529411764707, 0.0, 0.094117647058823528),
(0.49803921568627452, 0.0, 0.13333333333333333),
(0.49803921568627452, 0.0, 0.20784313725490197),
(0.49803921568627452, 0.0, 0.2627450980392157),
(0.49803921568627452, 0.0, 0.33725490196078434),
(0.49803921568627452, 0.0, 0.38039215686274508),
(0.49803921568627452, 0.0, 0.41176470588235292),
(0.49803921568627452, 0.0, 0.4392156862745098),
(0.49803921568627452, 0.0, 0.47058823529411764),
(0.49803921568627452, 0.0, 0.49411764705882355),
(0.49803921568627452, 0.0, 0.52549019607843139),
(0.49803921568627452, 0.0, 0.5490196078431373),
(0.49803921568627452, 0.0, 0.58039215686274515),
(0.49803921568627452, 0.0, 0.60392156862745094),
(0.49803921568627452, 0.015686274509803921, 0.61960784313725492),
(0.49803921568627452, 0.043137254901960784, 0.61176470588235299),
(0.49803921568627452, 0.070588235294117646, 0.60392156862745094),
(0.49803921568627452, 0.10588235294117647, 0.59607843137254901),
(0.49803921568627452, 0.13725490196078433, 0.58823529411764708),
(0.49803921568627452, 0.17254901960784313, 0.58039215686274515),
(0.49803921568627452, 0.20000000000000001, 0.5725490196078431),
(0.49803921568627452, 0.23921568627450981, 0.56470588235294117),
(0.49803921568627452, 0.26666666666666666, 0.55686274509803924),
(0.49803921568627452, 0.30196078431372547, 0.5490196078431373),
(0.49803921568627452, 0.33333333333333331, 0.54117647058823526),
(0.49803921568627452, 0.36862745098039218, 0.53333333333333333),
(0.49803921568627452, 0.40000000000000002, 0.52549019607843139),
(0.49803921568627452, 0.43529411764705883, 0.51764705882352946),
(0.49803921568627452, 0.46666666666666667, 0.50980392156862742),
(0.49803921568627452, 0.49803921568627452, 0.50196078431372548),
(0.49803921568627452, 0.50588235294117645, 0.49411764705882355),
(0.49803921568627452, 0.51372549019607838, 0.48627450980392156),
(0.49803921568627452, 0.52156862745098043, 0.47843137254901963),
(0.49803921568627452, 0.52941176470588236, 0.47058823529411764),
(0.49803921568627452, 0.53725490196078429, 0.46274509803921571),
(0.49803921568627452, 0.54509803921568623, 0.45490196078431372),
(0.49803921568627452, 0.55294117647058827, 0.44705882352941179),
(0.49803921568627452, 0.5607843137254902, 0.4392156862745098),
(0.49803921568627452, 0.56862745098039214, 0.43137254901960786),
(0.49803921568627452, 0.57647058823529407, 0.42352941176470588),
(0.49803921568627452, 0.58431372549019611, 0.41568627450980394),
(0.49803921568627452, 0.59215686274509804, 0.40784313725490196),
(0.49803921568627452, 0.59999999999999998, 0.40000000000000002),
(0.49803921568627452, 0.60784313725490191, 0.39215686274509803),
(0.49803921568627452, 0.61568627450980395, 0.3843137254901961),
(0.49803921568627452, 0.62352941176470589, 0.37647058823529411),
(0.49803921568627452, 0.63137254901960782, 0.36862745098039218),
(0.49803921568627452, 0.63921568627450975, 0.36078431372549019),
(0.49803921568627452, 0.6470588235294118, 0.34901960784313724),
(0.49803921568627452, 0.65490196078431373, 0.3411764705882353),
(0.49803921568627452, 0.66274509803921566, 0.33333333333333331),
(0.49803921568627452, 0.6705882352941176, 0.32549019607843138),
(0.49803921568627452, 0.67843137254901964, 0.32156862745098042),
(0.49803921568627452, 0.68627450980392157, 0.30980392156862746),
(0.49803921568627452, 0.69411764705882351, 0.30588235294117649),
(0.49803921568627452, 0.70196078431372544, 0.29411764705882354),
(0.49803921568627452, 0.70980392156862748, 0.29019607843137257),
(0.49803921568627452, 0.71764705882352942, 0.27843137254901962),
(0.49803921568627452, 0.72549019607843135, 0.27450980392156865),
(0.49803921568627452, 0.73333333333333328, 0.2627450980392157),
(0.49803921568627452, 0.74117647058823533, 0.25882352941176473),
(0.49803921568627452, 0.74901960784313726, 0.24705882352941178),
(0.50196078431372548, 0.74901960784313726, 0.25098039215686274),
(0.50588235294117645, 0.74901960784313726, 0.25490196078431371),
(0.50980392156862742, 0.74901960784313726, 0.25882352941176473),
(0.51372549019607838, 0.75294117647058822, 0.2627450980392157),
(0.51764705882352946, 0.75294117647058822, 0.26666666666666666),
(0.52156862745098043, 0.75294117647058822, 0.27058823529411763),
(0.52549019607843139, 0.75294117647058822, 0.27450980392156865),
(0.52941176470588236, 0.75686274509803919, 0.27843137254901962),
(0.53333333333333333, 0.75686274509803919, 0.28235294117647058),
(0.53725490196078429, 0.75686274509803919, 0.28627450980392155),
(0.54117647058823526, 0.75686274509803919, 0.29019607843137257),
(0.54509803921568623, 0.76078431372549016, 0.29411764705882354),
(0.5490196078431373, 0.76078431372549016, 0.29803921568627451),
(0.55294117647058827, 0.76078431372549016, 0.30196078431372547),
(0.55686274509803924, 0.76078431372549016, 0.30588235294117649),
(0.5607843137254902, 0.76470588235294112, 0.30980392156862746),
(0.56470588235294117, 0.76470588235294112, 0.31372549019607843),
(0.56862745098039214, 0.76470588235294112, 0.31764705882352939),
(0.5725490196078431, 0.76470588235294112, 0.32156862745098042),
(0.57647058823529407, 0.7686274509803922, 0.32549019607843138),
(0.58039215686274515, 0.7686274509803922, 0.32941176470588235),
(0.58431372549019611, 0.7686274509803922, 0.33333333333333331),
(0.58823529411764708, 0.7686274509803922, 0.33725490196078434),
(0.59215686274509804, 0.7686274509803922, 0.3411764705882353),
(0.59607843137254901, 0.77254901960784317, 0.34509803921568627),
(0.59999999999999998, 0.77254901960784317, 0.34901960784313724),
(0.60392156862745094, 0.77254901960784317, 0.35294117647058826),
(0.60784313725490191, 0.77254901960784317, 0.35686274509803922),
(0.61176470588235299, 0.77647058823529413, 0.36078431372549019),
(0.61568627450980395, 0.77647058823529413, 0.36470588235294116),
(0.61960784313725492, 0.77647058823529413, 0.36862745098039218),
(0.62352941176470589, 0.77647058823529413, 0.37254901960784315),
(0.62745098039215685, 0.7803921568627451, 0.37647058823529411),
(0.63137254901960782, 0.7803921568627451, 0.38039215686274508),
(0.63529411764705879, 0.7803921568627451, 0.3843137254901961),
(0.63921568627450975, 0.7803921568627451, 0.38823529411764707),
(0.64313725490196083, 0.78431372549019607, 0.39215686274509803),
(0.6470588235294118, 0.78431372549019607, 0.396078431372549),
(0.65098039215686276, 0.78431372549019607, 0.40000000000000002),
(0.65490196078431373, 0.78431372549019607, 0.40392156862745099),
(0.6588235294117647, 0.78823529411764703, 0.40784313725490196),
(0.66274509803921566, 0.78823529411764703, 0.41176470588235292),
(0.66666666666666663, 0.78823529411764703, 0.41568627450980394),
(0.6705882352941176, 0.78823529411764703, 0.41960784313725491),
(0.67450980392156867, 0.78823529411764703, 0.42352941176470588),
(0.67843137254901964, 0.792156862745098, 0.42745098039215684),
(0.68235294117647061, 0.792156862745098, 0.43137254901960786),
(0.68627450980392157, 0.792156862745098, 0.43529411764705883),
(0.69019607843137254, 0.792156862745098, 0.4392156862745098),
(0.69411764705882351, 0.79607843137254897, 0.44313725490196076),
(0.69803921568627447, 0.79607843137254897, 0.44705882352941179),
(0.70196078431372544, 0.79607843137254897, 0.45098039215686275),
(0.70588235294117652, 0.79607843137254897, 0.45490196078431372),
(0.70980392156862748, 0.80000000000000004, 0.45882352941176469),
(0.71372549019607845, 0.80000000000000004, 0.46274509803921571),
(0.71764705882352942, 0.80000000000000004, 0.46666666666666667),
(0.72156862745098038, 0.80000000000000004, 0.47058823529411764),
(0.72549019607843135, 0.80392156862745101, 0.47450980392156861),
(0.72941176470588232, 0.80392156862745101, 0.47843137254901963),
(0.73333333333333328, 0.80392156862745101, 0.4823529411764706),
(0.73725490196078436, 0.80392156862745101, 0.48627450980392156),
(0.74117647058823533, 0.80784313725490198, 0.49019607843137253),
(0.74509803921568629, 0.80784313725490198, 0.49411764705882355),
(0.74901960784313726, 0.80784313725490198, 0.49803921568627452),
(0.74901960784313726, 0.80784313725490198, 0.50196078431372548),
(0.75294117647058822, 0.81176470588235294, 0.50588235294117645),
(0.75294117647058822, 0.81176470588235294, 0.50980392156862742),
(0.75686274509803919, 0.81568627450980391, 0.51372549019607838),
(0.75686274509803919, 0.81568627450980391, 0.51764705882352946),
(0.76078431372549016, 0.81568627450980391, 0.52156862745098043),
(0.76078431372549016, 0.81960784313725488, 0.52549019607843139),
(0.76470588235294112, 0.81960784313725488, 0.52941176470588236),
(0.76470588235294112, 0.81960784313725488, 0.53333333333333333),
(0.7686274509803922, 0.82352941176470584, 0.53725490196078429),
(0.7686274509803922, 0.82352941176470584, 0.54117647058823526),
(0.77254901960784317, 0.82745098039215681, 0.54509803921568623),
(0.77254901960784317, 0.82745098039215681, 0.5490196078431373),
(0.77647058823529413, 0.82745098039215681, 0.55294117647058827),
(0.77647058823529413, 0.83137254901960789, 0.55686274509803924),
(0.7803921568627451, 0.83137254901960789, 0.5607843137254902),
(0.7803921568627451, 0.83137254901960789, 0.56470588235294117),
(0.78431372549019607, 0.83529411764705885, 0.56862745098039214),
(0.78431372549019607, 0.83529411764705885, 0.5725490196078431),
(0.78823529411764703, 0.83921568627450982, 0.57647058823529407),
(0.78823529411764703, 0.83921568627450982, 0.58039215686274515),
(0.792156862745098, 0.83921568627450982, 0.58431372549019611),
(0.792156862745098, 0.84313725490196079, 0.58823529411764708),
(0.79607843137254897, 0.84313725490196079, 0.59215686274509804),
(0.79607843137254897, 0.84313725490196079, 0.59607843137254901),
(0.80000000000000004, 0.84705882352941175, 0.59999999999999998),
(0.80000000000000004, 0.84705882352941175, 0.60392156862745094),
(0.80392156862745101, 0.85098039215686272, 0.60784313725490191),
(0.80392156862745101, 0.85098039215686272, 0.61176470588235299),
(0.80784313725490198, 0.85098039215686272, 0.61568627450980395),
(0.80784313725490198, 0.85490196078431369, 0.61960784313725492),
(0.81176470588235294, 0.85490196078431369, 0.62352941176470589),
(0.81176470588235294, 0.85490196078431369, 0.62745098039215685),
(0.81568627450980391, 0.85882352941176465, 0.63137254901960782),
(0.81568627450980391, 0.85882352941176465, 0.63529411764705879),
(0.81960784313725488, 0.86274509803921573, 0.63921568627450975),
(0.81960784313725488, 0.86274509803921573, 0.63921568627450975),
(0.82352941176470584, 0.86274509803921573, 0.6470588235294118),
(0.82352941176470584, 0.8666666666666667, 0.6470588235294118),
(0.82745098039215681, 0.8666666666666667, 0.65490196078431373),
(0.82745098039215681, 0.8666666666666667, 0.65490196078431373),
(0.83137254901960789, 0.87058823529411766, 0.66274509803921566),
(0.83137254901960789, 0.87058823529411766, 0.66274509803921566),
(0.83529411764705885, 0.87450980392156863, 0.6705882352941176),
(0.83529411764705885, 0.87450980392156863, 0.6705882352941176),
(0.83921568627450982, 0.87450980392156863, 0.67843137254901964),
(0.83921568627450982, 0.8784313725490196, 0.68235294117647061),
(0.84313725490196079, 0.8784313725490196, 0.68627450980392157),
(0.84313725490196079, 0.88235294117647056, 0.69019607843137254),
(0.84705882352941175, 0.88235294117647056, 0.69411764705882351),
(0.84705882352941175, 0.88235294117647056, 0.69803921568627447),
(0.85098039215686272, 0.88627450980392153, 0.70196078431372544),
(0.85098039215686272, 0.88627450980392153, 0.70588235294117652),
(0.85490196078431369, 0.88627450980392153, 0.70980392156862748),
(0.85490196078431369, 0.8901960784313725, 0.71372549019607845),
(0.85490196078431369, 0.8901960784313725, 0.71372549019607845),
(0.85882352941176465, 0.89411764705882357, 0.72156862745098038),
(0.85882352941176465, 0.89411764705882357, 0.72156862745098038),
(0.86274509803921573, 0.89411764705882357, 0.72941176470588232),
(0.86274509803921573, 0.89803921568627454, 0.72941176470588232),
(0.8666666666666667, 0.89803921568627454, 0.73725490196078436),
(0.8666666666666667, 0.89803921568627454, 0.73725490196078436),
(0.87058823529411766, 0.90196078431372551, 0.74509803921568629),
(0.87058823529411766, 0.90196078431372551, 0.74509803921568629),
(0.87450980392156863, 0.90588235294117647, 0.74901960784313726),
(0.87450980392156863, 0.90588235294117647, 0.75294117647058822),
(0.8784313725490196, 0.90588235294117647, 0.75686274509803919),
(0.8784313725490196, 0.90980392156862744, 0.76078431372549016),
(0.88235294117647056, 0.90980392156862744, 0.76470588235294112),
(0.88235294117647056, 0.90980392156862744, 0.7686274509803922),
(0.88627450980392153, 0.9137254901960784, 0.77254901960784317),
(0.88627450980392153, 0.9137254901960784, 0.77647058823529413),
(0.8901960784313725, 0.91764705882352937, 0.7803921568627451),
(0.8901960784313725, 0.91764705882352937, 0.78431372549019607),
(0.89411764705882357, 0.91764705882352937, 0.78823529411764703),
(0.89411764705882357, 0.92156862745098034, 0.792156862745098),
(0.89803921568627454, 0.92156862745098034, 0.79607843137254897),
(0.89803921568627454, 0.92156862745098034, 0.80000000000000004),
(0.90196078431372551, 0.92549019607843142, 0.80392156862745101),
(0.90196078431372551, 0.92549019607843142, 0.80784313725490198),
(0.90588235294117647, 0.92941176470588238, 0.81176470588235294),
(0.90588235294117647, 0.92941176470588238, 0.81568627450980391),
(0.90980392156862744, 0.92941176470588238, 0.81960784313725488),
(0.90980392156862744, 0.93333333333333335, 0.82352941176470584),
(0.9137254901960784, 0.93333333333333335, 0.82745098039215681),
(0.9137254901960784, 0.93725490196078431, 0.83137254901960789),
(0.91764705882352937, 0.93725490196078431, 0.83529411764705885),
(0.91764705882352937, 0.93725490196078431, 0.83921568627450982),
(0.92156862745098034, 0.94117647058823528, 0.84313725490196079),
(0.92156862745098034, 0.94117647058823528, 0.84705882352941175),
(0.92549019607843142, 0.94117647058823528, 0.85098039215686272),
(0.92549019607843142, 0.94509803921568625, 0.85490196078431369),
(0.92941176470588238, 0.94509803921568625, 0.85882352941176465),
(0.92941176470588238, 0.94509803921568625, 0.86274509803921573),
(0.93333333333333335, 0.94901960784313721, 0.8666666666666667),
(0.93333333333333335, 0.94901960784313721, 0.87058823529411766),
(0.93725490196078431, 0.95294117647058818, 0.87450980392156863),
(0.93725490196078431, 0.95294117647058818, 0.8784313725490196),
(0.94117647058823528, 0.95294117647058818, 0.88235294117647056),
(0.94117647058823528, 0.95686274509803926, 0.88627450980392153),
(0.94509803921568625, 0.95686274509803926, 0.8901960784313725),
(0.94509803921568625, 0.96078431372549022, 0.89411764705882357),
(0.94901960784313721, 0.96078431372549022, 0.89803921568627454),
(0.94901960784313721, 0.96078431372549022, 0.90196078431372551),
(0.95294117647058818, 0.96470588235294119, 0.90588235294117647),
(0.95294117647058818, 0.96470588235294119, 0.90980392156862744),
(0.95686274509803926, 0.96470588235294119, 0.9137254901960784),
(0.95686274509803926, 0.96862745098039216, 0.91764705882352937),
(0.96078431372549022, 0.96862745098039216, 0.92156862745098034),
(0.96078431372549022, 0.97254901960784312, 0.92549019607843142),
(0.96470588235294119, 0.97254901960784312, 0.92941176470588238),
(0.96470588235294119, 0.97254901960784312, 0.93333333333333335),
(0.96862745098039216, 0.97647058823529409, 0.93725490196078431),
(0.96862745098039216, 0.97647058823529409, 0.94117647058823528),
(0.97254901960784312, 0.97647058823529409, 0.94509803921568625),
(0.97254901960784312, 0.98039215686274506, 0.94901960784313721),
(0.97647058823529409, 0.98039215686274506, 0.95294117647058818),
(0.97647058823529409, 0.98431372549019602, 0.95686274509803926),
(0.98039215686274506, 0.98431372549019602, 0.96078431372549022),
(0.98039215686274506, 0.98431372549019602, 0.96470588235294119),
(0.98431372549019602, 0.9882352941176471, 0.96862745098039216),
(0.98431372549019602, 0.9882352941176471, 0.97254901960784312),
(0.9882352941176471, 0.9882352941176471, 0.97647058823529409),
(0.9882352941176471, 0.99215686274509807, 0.98039215686274506),
(0.99215686274509807, 0.99215686274509807, 0.98431372549019602),
(0.99215686274509807, 0.99607843137254903, 0.9882352941176471),
(0.99607843137254903, 0.99607843137254903, 0.99215686274509807),
)
cmap_ds9_i8 = (
(0.0, 0.0, 0.0), # noqa
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 0.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(0.0, 1.0, 1.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
)
#
# the "<NAME>" colormap used by ZView
#
cmap_jt = (
(0.0, 0.0, 0.0), # noqa
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.65490196078431373, 0.0, 0.72156862745098038),
(0.65490196078431373, 0.0, 0.72156862745098038),
(0.65490196078431373, 0.0, 0.72156862745098038),
(0.65490196078431373, 0.0, 0.72156862745098038),
(0.59607843137254901, 0.0, 0.73725490196078436),
(0.59607843137254901, 0.0, 0.73725490196078436),
(0.59607843137254901, 0.0, 0.73725490196078436),
(0.59607843137254901, 0.0, 0.73725490196078436),
(0.52941176470588236, 0.0, 0.75686274509803919),
(0.52941176470588236, 0.0, 0.75686274509803919),
(0.52941176470588236, 0.0, 0.75686274509803919),
(0.52941176470588236, 0.0, 0.75686274509803919),
(0.45882352941176469, 0.0, 0.7803921568627451),
(0.45882352941176469, 0.0, 0.7803921568627451),
(0.45882352941176469, 0.0, 0.7803921568627451),
(0.45882352941176469, 0.0, 0.7803921568627451),
(0.36470588235294116, 0.0, 0.81568627450980391),
(0.36470588235294116, 0.0, 0.81568627450980391),
(0.36470588235294116, 0.0, 0.81568627450980391),
(0.36470588235294116, 0.0, 0.81568627450980391),
(0.22352941176470589, 0.0, 0.86274509803921573),
(0.22352941176470589, 0.0, 0.86274509803921573),
(0.22352941176470589, 0.0, 0.86274509803921573),
(0.22352941176470589, 0.0, 0.86274509803921573),
(0.039215686274509803, 0.0, 0.84313725490196079),
(0.039215686274509803, 0.0, 0.84313725490196079),
(0.039215686274509803, 0.0, 0.84313725490196079),
(0.039215686274509803, 0.0, 0.84313725490196079),
(0.035294117647058823, 0.066666666666666666, 0.792156862745098),
(0.035294117647058823, 0.066666666666666666, 0.792156862745098),
(0.035294117647058823, 0.066666666666666666, 0.792156862745098),
(0.035294117647058823, 0.066666666666666666, 0.792156862745098),
(0.031372549019607843, 0.2196078431372549, 0.77254901960784317),
(0.031372549019607843, 0.2196078431372549, 0.77254901960784317),
(0.031372549019607843, 0.2196078431372549, 0.77254901960784317),
(0.031372549019607843, 0.2196078431372549, 0.77254901960784317),
(0.031372549019607843, 0.30980392156862746, 0.77254901960784317),
(0.031372549019607843, 0.30980392156862746, 0.77254901960784317),
(0.031372549019607843, 0.30980392156862746, 0.77254901960784317),
(0.031372549019607843, 0.30980392156862746, 0.77254901960784317),
(0.031372549019607843, 0.3843137254901961, 0.77647058823529413),
(0.031372549019607843, 0.3843137254901961, 0.77647058823529413),
(0.031372549019607843, 0.3843137254901961, 0.77647058823529413),
(0.031372549019607843, 0.3843137254901961, 0.77647058823529413),
(0.027450980392156862, 0.45490196078431372, 0.78431372549019607),
(0.027450980392156862, 0.45490196078431372, 0.78431372549019607),
(0.027450980392156862, 0.45490196078431372, 0.78431372549019607),
(0.027450980392156862, 0.45490196078431372, 0.78431372549019607),
(0.027450980392156862, 0.52156862745098043, 0.79607843137254897),
(0.027450980392156862, 0.52156862745098043, 0.79607843137254897),
(0.027450980392156862, 0.52156862745098043, 0.79607843137254897),
(0.027450980392156862, 0.52156862745098043, 0.79607843137254897),
(0.023529411764705882, 0.58823529411764708, 0.80784313725490198),
(0.023529411764705882, 0.58823529411764708, 0.80784313725490198),
(0.023529411764705882, 0.58823529411764708, 0.80784313725490198),
(0.023529411764705882, 0.58823529411764708, 0.80784313725490198),
(0.023529411764705882, 0.65490196078431373, 0.82352941176470584),
(0.023529411764705882, 0.65490196078431373, 0.82352941176470584),
(0.023529411764705882, 0.65490196078431373, 0.82352941176470584),
(0.023529411764705882, 0.65490196078431373, 0.82352941176470584),
(0.019607843137254902, 0.72549019607843135, 0.83529411764705885),
(0.019607843137254902, 0.72549019607843135, 0.83529411764705885),
(0.019607843137254902, 0.72549019607843135, 0.83529411764705885),
(0.019607843137254902, 0.72549019607843135, 0.83529411764705885),
(0.011764705882352941, 0.72549019607843135, 0.76470588235294112),
(0.011764705882352941, 0.72549019607843135, 0.76470588235294112),
(0.011764705882352941, 0.72549019607843135, 0.76470588235294112),
(0.011764705882352941, 0.72549019607843135, 0.76470588235294112),
(0.0, 0.72549019607843135, 0.69019607843137254),
(0.0, 0.72549019607843135, 0.69019607843137254),
(0.0, 0.72549019607843135, 0.69019607843137254),
(0.0, 0.72549019607843135, 0.69019607843137254),
(0.0, 0.72549019607843135, 0.61176470588235299),
(0.0, 0.72549019607843135, 0.61176470588235299),
(0.0, 0.72549019607843135, 0.61176470588235299),
(0.0, 0.72549019607843135, 0.61176470588235299),
(0.0, 0.72156862745098038, 0.53333333333333333),
(0.0, 0.72156862745098038, 0.53333333333333333),
(0.0, 0.72156862745098038, 0.53333333333333333),
(0.0, 0.72156862745098038, 0.53333333333333333),
(0.0, 0.71764705882352942, 0.45098039215686275),
(0.0, 0.71764705882352942, 0.45098039215686275),
(0.0, 0.71764705882352942, 0.45098039215686275),
(0.0, 0.71764705882352942, 0.45098039215686275),
(0.0, 0.71372549019607845, 0.36078431372549019),
(0.0, 0.71372549019607845, 0.36078431372549019),
(0.0, 0.71372549019607845, 0.36078431372549019),
(0.0, 0.71372549019607845, 0.36078431372549019),
(0.0, 0.70980392156862748, 0.25098039215686274),
(0.0, 0.70980392156862748, 0.25098039215686274),
(0.0, 0.70980392156862748, 0.25098039215686274),
(0.0, 0.70980392156862748, 0.25098039215686274),
(0.0, 0.70588235294117652, 0.043137254901960784),
(0.0, 0.70588235294117652, 0.043137254901960784),
(0.0, 0.70588235294117652, 0.043137254901960784),
(0.0, 0.70588235294117652, 0.043137254901960784),
(0.27450980392156865, 0.71372549019607845, 0.0),
(0.27450980392156865, 0.71372549019607845, 0.0),
(0.27450980392156865, 0.71372549019607845, 0.0),
(0.27450980392156865, 0.71372549019607845, 0.0),
(0.40392156862745099, 0.72156862745098038, 0.0),
(0.40392156862745099, 0.72156862745098038, 0.0),
(0.40392156862745099, 0.72156862745098038, 0.0),
(0.40392156862745099, 0.72156862745098038, 0.0),
(0.50980392156862742, 0.72941176470588232, 0.0),
(0.50980392156862742, 0.72941176470588232, 0.0),
(0.50980392156862742, 0.72941176470588232, 0.0),
(0.50980392156862742, 0.72941176470588232, 0.0),
(0.60392156862745094, 0.73725490196078436, 0.0),
(0.60392156862745094, 0.73725490196078436, 0.0),
(0.60392156862745094, 0.73725490196078436, 0.0),
(0.60392156862745094, 0.73725490196078436, 0.0),
(0.69019607843137254, 0.74509803921568629, 0.0),
(0.69019607843137254, 0.74509803921568629, 0.0),
(0.69019607843137254, 0.74509803921568629, 0.0),
(0.69019607843137254, 0.74509803921568629, 0.0),
(0.7686274509803922, 0.75294117647058822, 0.0),
(0.7686274509803922, 0.75294117647058822, 0.0),
(0.7686274509803922, 0.75294117647058822, 0.0),
(0.7686274509803922, 0.75294117647058822, 0.0),
(0.84705882352941175, 0.76078431372549016, 0.0),
(0.84705882352941175, 0.76078431372549016, 0.0),
(0.84705882352941175, 0.76078431372549016, 0.0),
(0.84705882352941175, 0.76078431372549016, 0.0),
(0.92156862745098034, 0.7686274509803922, 0.0),
(0.92156862745098034, 0.7686274509803922, 0.0),
(0.92156862745098034, 0.7686274509803922, 0.0),
(0.92156862745098034, 0.7686274509803922, 0.0),
(0.96078431372549022, 0.74509803921568629, 0.0),
(0.96078431372549022, 0.74509803921568629, 0.0),
(0.96078431372549022, 0.74509803921568629, 0.0),
(0.96078431372549022, 0.74509803921568629, 0.0),
(0.97647058823529409, 0.70196078431372544, 0.0),
(0.97647058823529409, 0.70196078431372544, 0.0),
(0.97647058823529409, 0.70196078431372544, 0.0),
(0.97647058823529409, 0.70196078431372544, 0.0),
(0.99215686274509807, 0.6588235294117647, 0.0),
(0.99215686274509807, 0.6588235294117647, 0.0),
(0.99215686274509807, 0.6588235294117647, 0.0),
(0.99215686274509807, 0.6588235294117647, 0.0),
(1.0, 0.61568627450980395, 0.0),
(1.0, 0.61568627450980395, 0.0),
(1.0, 0.61568627450980395, 0.0),
(1.0, 0.61568627450980395, 0.0),
(1.0, 0.5725490196078431, 0.0),
(1.0, 0.5725490196078431, 0.0),
(1.0, 0.5725490196078431, 0.0),
(1.0, 0.5725490196078431, 0.0),
(1.0, 0.52941176470588236, 0.0),
(1.0, 0.52941176470588236, 0.0),
(1.0, 0.52941176470588236, 0.0),
(1.0, 0.52941176470588236, 0.0),
(1.0, 0.48627450980392156, 0.0),
(1.0, 0.48627450980392156, 0.0),
(1.0, 0.48627450980392156, 0.0),
(1.0, 0.48627450980392156, 0.0),
(1.0, 0.44313725490196076, 0.0),
(1.0, 0.44313725490196076, 0.0),
(1.0, 0.44313725490196076, 0.0),
(1.0, 0.44313725490196076, 0.0),
(1.0, 0.396078431372549, 0.0),
(1.0, 0.396078431372549, 0.0),
(1.0, 0.396078431372549, 0.0),
(1.0, 0.396078431372549, 0.0),
(1.0, 0.35294117647058826, 0.0),
(1.0, 0.35294117647058826, 0.0),
(1.0, 0.35294117647058826, 0.0),
(1.0, 0.35294117647058826, 0.0),
(1.0, 0.30196078431372547, 0.0),
(1.0, 0.30196078431372547, 0.0),
(1.0, 0.30196078431372547, 0.0),
(1.0, 0.30196078431372547, 0.0),
(1.0, 0.24705882352941178, 0.0),
(1.0, 0.24705882352941178, 0.0),
(1.0, 0.24705882352941178, 0.0),
(1.0, 0.24705882352941178, 0.0),
(1.0, 0.18823529411764706, 0.0),
(1.0, 0.18823529411764706, 0.0),
(1.0, 0.18823529411764706, 0.0),
(1.0, 0.18823529411764706, 0.0),
(1.0, 0.10588235294117647, 0.0),
(1.0, 0.10588235294117647, 0.0),
(1.0, 0.10588235294117647, 0.0),
(1.0, 0.10588235294117647, 0.0),
(1.0, 0.078431372549019607, 0.070588235294117646),
(1.0, 0.078431372549019607, 0.070588235294117646),
(1.0, 0.078431372549019607, 0.070588235294117646),
(1.0, 0.078431372549019607, 0.070588235294117646),
(1.0, 0.16078431372549021, 0.16078431372549021),
(1.0, 0.16078431372549021, 0.16078431372549021),
(1.0, 0.16078431372549021, 0.16078431372549021),
(1.0, 0.16078431372549021, 0.16078431372549021),
(1.0, 0.2196078431372549, 0.22745098039215686),
(1.0, 0.2196078431372549, 0.22745098039215686),
(1.0, 0.2196078431372549, 0.22745098039215686),
(1.0, 0.2196078431372549, 0.22745098039215686),
(1.0, 0.27058823529411763, 0.28235294117647058),
(1.0, 0.27058823529411763, 0.28235294117647058),
(1.0, 0.27058823529411763, 0.28235294117647058),
(1.0, 0.27058823529411763, 0.28235294117647058),
(1.0, 0.32156862745098042, 0.33333333333333331),
(1.0, 0.32156862745098042, 0.33333333333333331),
(1.0, 0.32156862745098042, 0.33333333333333331),
(1.0, 0.32156862745098042, 0.33333333333333331),
(1.0, 0.36862745098039218, 0.3843137254901961),
(1.0, 0.36862745098039218, 0.3843137254901961),
(1.0, 0.36862745098039218, 0.3843137254901961),
(1.0, 0.36862745098039218, 0.3843137254901961),
(1.0, 0.41568627450980394, 0.43529411764705883),
(1.0, 0.41568627450980394, 0.43529411764705883),
(1.0, 0.41568627450980394, 0.43529411764705883),
(1.0, 0.41568627450980394, 0.43529411764705883),
(1.0, 0.46274509803921571, 0.48627450980392156),
(1.0, 0.46274509803921571, 0.48627450980392156),
(1.0, 0.46274509803921571, 0.48627450980392156),
(1.0, 0.46274509803921571, 0.48627450980392156),
(1.0, 0.50980392156862742, 0.53333333333333333),
(1.0, 0.50980392156862742, 0.53333333333333333),
(1.0, 0.50980392156862742, 0.53333333333333333),
(1.0, 0.50980392156862742, 0.53333333333333333),
(1.0, 0.55686274509803924, 0.58431372549019611),
(1.0, 0.55686274509803924, 0.58431372549019611),
(1.0, 0.55686274509803924, 0.58431372549019611),
(1.0, 0.55686274509803924, 0.58431372549019611),
(1.0, 0.60392156862745094, 0.63529411764705879),
(1.0, 0.60392156862745094, 0.63529411764705879),
(1.0, 0.60392156862745094, 0.63529411764705879),
(1.0, 0.60392156862745094, 0.63529411764705879),
(1.0, 0.65098039215686276, 0.68627450980392157),
(1.0, 0.65098039215686276, 0.68627450980392157),
(1.0, 0.65098039215686276, 0.68627450980392157),
(1.0, 0.65098039215686276, 0.68627450980392157),
(1.0, 0.69803921568627447, 0.73725490196078436),
(1.0, 0.69803921568627447, 0.73725490196078436),
(1.0, 0.69803921568627447, 0.73725490196078436),
(1.0, 0.69803921568627447, 0.73725490196078436),
(1.0, 0.74901960784313726, 0.78823529411764703),
(1.0, 0.74901960784313726, 0.78823529411764703),
(1.0, 0.74901960784313726, 0.78823529411764703),
(1.0, 0.74901960784313726, 0.78823529411764703),
(1.0, 0.79607843137254897, 0.83921568627450982),
(1.0, 0.79607843137254897, 0.83921568627450982),
(1.0, 0.79607843137254897, 0.83921568627450982),
(1.0, 0.79607843137254897, 0.83921568627450982),
(1.0, 0.84705882352941175, 0.8901960784313725),
(1.0, 0.84705882352941175, 0.8901960784313725),
(1.0, 0.84705882352941175, 0.8901960784313725),
(1.0, 0.84705882352941175, 0.8901960784313725),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
)
# to be eventually deprecated
cmap_ramp = cmap_gray
# needed length of a ginga color map
min_cmap_len = 256
class ColorMapError(Exception):
pass
class ColorMap(object):
"""Class to handle color maps."""
def __init__(self, name, clst):
self.name = name
self.clst = clst
def add_cmap(name, clst):
"""Add a color map."""
global cmaps
assert len(clst) == min_cmap_len, \
ValueError("color map '%s' length mismatch %d != %d (needed)" % (
name, len(clst), min_cmap_len))
cmaps[name] = ColorMap(name, clst)
def get_cmap(name):
"""Get a color map array.
Will raise a KeyError if a map of the given name does not exist.
"""
return cmaps[name]
def has_cmap(name):
"""Does color map exist? Return True/False
"""
return name in cmaps
def get_names():
"""Get colormap names."""
res = list(cmaps.keys())
res = sorted(res, key=lambda s: s.lower())
return res
def matplotlib_to_ginga_cmap(cm, name=None):
"""Convert matplotlib colormap to Ginga's."""
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst)
def ginga_to_matplotlib_cmap(cm, name=None):
"""Convert Ginga colormap to matplotlib's."""
if name is None:
name = cm.name
from matplotlib.colors import ListedColormap
carr = np.asarray(cm.clst)
mpl_cm = ListedColormap(carr, name=name, N=len(carr))
return mpl_cm
def add_matplotlib_cmap(cm, name=None):
"""Add a matplotlib colormap."""
global cmaps
cmap = matplotlib_to_ginga_cmap(cm, name=name)
cmaps[cmap.name] = cmap
def add_matplotlib_cmaps(fail_on_import_error=True):
"""Add all matplotlib colormaps."""
try:
from matplotlib import cm as _cm
from matplotlib.cbook import mplDeprecation
except ImportError:
if fail_on_import_error:
raise
# silently fail
return
for name in _cm.cmap_d:
if not isinstance(name, six.string_types):
continue
try:
# Do not load deprecated colormaps
with warnings.catch_warnings():
warnings.simplefilter('error', mplDeprecation)
cm = _cm.get_cmap(name)
add_matplotlib_cmap(cm, name=name)
except Exception as e:
if fail_on_import_error:
print("Error adding colormap '%s': %s" % (name, str(e)))
# Add colormaps from this file
cmaps = {}
for name, value in list(globals().items()):
if name.startswith('cmap_'):
key = name[5:]
add_cmap(key, value)
# by default add matplotlib colormaps, if available
add_matplotlib_cmaps(fail_on_import_error=False)
# END
| [
"numpy.float",
"warnings.catch_warnings",
"numpy.asarray",
"warnings.simplefilter",
"matplotlib.cm.get_cmap",
"numpy.arange"
] | [((613155, 613174), 'numpy.asarray', 'np.asarray', (['cm.clst'], {}), '(cm.clst)\n', (613165, 613174), True, 'import numpy as np\n'), ((612842, 612868), 'numpy.arange', 'np.arange', (['(0)', 'min_cmap_len'], {}), '(0, min_cmap_len)\n', (612851, 612868), True, 'import numpy as np\n'), ((612871, 612897), 'numpy.float', 'np.float', (['(min_cmap_len - 1)'], {}), '(min_cmap_len - 1)\n', (612879, 612897), True, 'import numpy as np\n'), ((613915, 613940), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (613938, 613940), False, 'import warnings\n'), ((613958, 614004), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'mplDeprecation'], {}), "('error', mplDeprecation)\n", (613979, 614004), False, 'import warnings\n'), ((614026, 614044), 'matplotlib.cm.get_cmap', '_cm.get_cmap', (['name'], {}), '(name)\n', (614038, 614044), True, 'from matplotlib import cm as _cm\n')] |
import numpy as np
import ray
import torch
from gym_ds3.schedulers.models.simple_model import SimpleModel
from gym_ds3.envs.utils.helper_envs import num_pes, get_env
from gym_ds3.envs.utils.helper_training import calculate_returns
@ray.remote
class ACWorker(object):
def __init__(self, args, _id):
self.args = args
self.device = args.device
self.seed = self.args.seed + _id
torch.manual_seed(self.seed)
np.random.seed(self.seed)
self.max_num_jobs = self.args.max_num_jobs
self.num_tasks_in_jobs = self.args.num_tasks_in_job
num_tasks = self.num_tasks_in_jobs * self.max_num_jobs
num_actions = num_pes(args)
self.model = SimpleModel(
num_tasks=num_tasks, num_jobs=self.max_num_jobs,
num_actions=num_actions, device=self.device).to(self.device)
self.gamma = self.args.gamma
self.value_loss_coef = self.args.value_loss_coef
self.entropy_coef = self.args.entropy_coef
def compute_gradients(self, num_gradient_steps, weights, scale):
self.env = get_env()
self.env.reset(self.args)
# synchronize weights
if weights is not None:
self.model.set_weights(weights)
self.model.zero_grad()
total_inj_jobs, total_comp_jobs, total_cum_exec_time, total_energy_consump, edp, avg_latency = \
self.env.run(num_gradient_steps, scale, self.model)
rewards = self.env.simulator.rewards_by_flops
task_completed = self.env.simulator.task_completed
returns = calculate_returns(rewards, self.gamma)
stats = self.update(returns, task_completed)
try:
stats['episode'].append(num_gradient_steps)
stats['latency'].append(avg_latency)
stats['completed_jobs'].append(total_comp_jobs)
stats['injected_jobs'].append(total_inj_jobs)
stats['cumulative_execution_time'].append(total_cum_exec_time)
stats['energy_consumption'].append(total_energy_consump)
stats['edp'].append(edp)
stats['Execution Time'].append(len(rewards))
stats['total_reward'].append(np.sum(rewards))
except:
stats['episode'] = num_gradient_steps
stats['latency'] = avg_latency
stats['completed_jobs'] = total_comp_jobs
stats['injected_jobs'] = total_inj_jobs
stats['cumulative_execution_time'] = total_cum_exec_time
stats['energy_consumption'] = total_energy_consump
stats['edp'] = edp
stats['Execution Time'] = len(rewards)
stats['total_reward'] = np.sum(rewards)
return self.model.get_gradients(), stats
def update(self, returns, task_completed):
summed_losses = {}
for info in task_completed: # Change for online updating maybe.
losses = {}
ti = info[0]
_, log_prob, v = self.model.forward(ti.state_at_scheduling)
G = returns[ti.timestep_of_scheduling]
adv = G - v
losses['actor'] = (-log_prob[ti.probs_idx][ti.action_at_scheduling] * adv)[0]
losses['value'] = self.value_loss_coef * adv.pow(2)[0]
losses['entropy'] = self.entropy_coef * \
(torch.exp(log_prob)[ti.probs_idx] * log_prob[ti.probs_idx]).sum()
combined_loss = torch.tensor(0., dtype=torch.float).to(self.device)
for loss in losses.values():
combined_loss += loss
losses['combined'] = combined_loss
for k, v in losses.items():
if k not in summed_losses:
summed_losses[k] = [v, 1]
for k, v in losses.items():
summed_losses[k][0] += v
summed_losses[k][1] += 1
combined_loss.backward()
if torch.cuda.is_available():
torch.cuda.empty_cache()
averaged_losses = {k: v[0].detach().cpu().numpy() / v[1] for k, v in summed_losses.items()}
return averaged_losses
| [
"torch.manual_seed",
"gym_ds3.envs.utils.helper_envs.get_env",
"gym_ds3.schedulers.models.simple_model.SimpleModel",
"gym_ds3.envs.utils.helper_envs.num_pes",
"torch.exp",
"gym_ds3.envs.utils.helper_training.calculate_returns",
"numpy.sum",
"torch.cuda.is_available",
"torch.tensor",
"numpy.random.... | [((423, 451), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (440, 451), False, 'import torch\n'), ((460, 485), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (474, 485), True, 'import numpy as np\n'), ((691, 704), 'gym_ds3.envs.utils.helper_envs.num_pes', 'num_pes', (['args'], {}), '(args)\n', (698, 704), False, 'from gym_ds3.envs.utils.helper_envs import num_pes, get_env\n'), ((1119, 1128), 'gym_ds3.envs.utils.helper_envs.get_env', 'get_env', ([], {}), '()\n', (1126, 1128), False, 'from gym_ds3.envs.utils.helper_envs import num_pes, get_env\n'), ((1616, 1654), 'gym_ds3.envs.utils.helper_training.calculate_returns', 'calculate_returns', (['rewards', 'self.gamma'], {}), '(rewards, self.gamma)\n', (1633, 1654), False, 'from gym_ds3.envs.utils.helper_training import calculate_returns\n'), ((3941, 3966), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3964, 3966), False, 'import torch\n'), ((735, 845), 'gym_ds3.schedulers.models.simple_model.SimpleModel', 'SimpleModel', ([], {'num_tasks': 'num_tasks', 'num_jobs': 'self.max_num_jobs', 'num_actions': 'num_actions', 'device': 'self.device'}), '(num_tasks=num_tasks, num_jobs=self.max_num_jobs, num_actions=\n num_actions, device=self.device)\n', (746, 845), False, 'from gym_ds3.schedulers.models.simple_model import SimpleModel\n'), ((2225, 2240), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (2231, 2240), True, 'import numpy as np\n'), ((2708, 2723), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (2714, 2723), True, 'import numpy as np\n'), ((3984, 4008), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4006, 4008), False, 'import torch\n'), ((3457, 3493), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'dtype': 'torch.float'}), '(0.0, dtype=torch.float)\n', (3469, 3493), False, 'import torch\n'), ((3362, 3381), 'torch.exp', 'torch.exp', (['log_prob'], {}), '(log_prob)\n', 
(3371, 3381), False, 'import torch\n')] |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import numpy as np
import spiceminer as sm
from spiceminer.extra import angle
def car2sphere(xyz):
'''Convert cartesian to spherical coordinates.'''
r = np.sqrt(np.sum(xyz ** 2, 0))
theta = np.arccos(xyz[2] / r)
phi = np.arctan(xyz[1] / xyz[0])
return r, theta, phi
# Load the necessary data (this example requires parts of the packages 'base'
# and 'msl', which you can download with getdata.py)
sm.load('data')
# Get necessary bodies
mars = sm.Body('mars')
rover = sm.Body('msl_rover')
# Look at one month shortly after landing, sampled hourly
times = sm.arange(sm.Time(2013), sm.Time(2013, 2), sm.Time.HOUR) if False else np.arange(sm.Time(2013), sm.Time(2013, 2), sm.Time.HOUR)
# Get position on mars surface.
# NOTE(review): position[0] is presumably the time row, since only
# position[1:] (the x/y/z rows) is used below -- confirm with spiceminer docs.
position = rover.position(times, observer=mars, frame=mars)
dist, theta, phi = car2sphere(position[1:])
# Unit axis vectors of the reference frame (rows of the 3x3 identity).
X, Y, Z = np.identity(3)
# Get the angle of the mars rover on the surface (obvious method):
# rotate the frame axes with each rotation matrix, then compare with the
# position vector.
rotation = rover.rotation(times, target=mars)
rover_x = np.array([r.dot(X) for r in rotation[1]])
rover_y = np.array([r.dot(-Y) for r in rotation[1]])
rover_z = np.array([r.dot(-Z) for r in rotation[1]])
# Tilt of the rover axes relative to the local vertical (position vector);
# the X/Y components are offset by pi/2 so level ground reads as 0.
radians = np.array([angle(z, p) for z, p in zip(rover_z, position[1:].T)])
radians_x = np.array([angle(x, p) - np.pi/2 for x, p in zip(rover_x, position[1:].T)])
radians_y = np.array([angle(y, p) - np.pi/2 for y, p in zip(rover_y, position[1:].T)])
# Get the angle of the mars rover on the surface (fast, absolute)
points = mars.position(times, observer=rover, frame=rover)
radians2 = np.array([angle(Z, p) for p in points[1:].T])
# NOTE(review): the slicing below is inconsistent -- points[1::2] keeps every
# other row starting at 1 while points[2:] drops the first two rows; verify
# these are the intended projections for the X and Y tilt components.
radians2_x = np.array([angle(Z[1:], p) for p in points[1::2].T])
radians2_y = np.array([angle(Z[1:], p) for p in points[2:].T])
# Plot position, elevation and both tilt estimates in a 2x2 grid.
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 2)
ax = axes[0, 0]
ax.plot(phi, theta, label='position')
ax = axes[1, 0]
ax.plot(dist, label='elevation')
ax = axes[0, 1]
ax.plot(np.degrees(radians), label='Abs')
ax.plot(np.degrees(radians_x), label='X')
ax.plot(np.degrees(radians_y), label='Y')
ax = axes[1, 1]
ax.plot(np.degrees(radians2), label='Abs')
ax.plot(np.degrees(radians2_x), label='X')
ax.plot(np.degrees(radians2_y), label='Y')
for ax in axes.ravel():
    ax.legend()
plt.show(fig)
| [
"numpy.identity",
"numpy.arccos",
"numpy.arctan",
"spiceminer.Time",
"spiceminer.load",
"numpy.sum",
"spiceminer.extra.angle",
"numpy.degrees",
"spiceminer.Body",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((465, 480), 'spiceminer.load', 'sm.load', (['"""data"""'], {}), "('data')\n", (472, 480), True, 'import spiceminer as sm\n'), ((512, 527), 'spiceminer.Body', 'sm.Body', (['"""mars"""'], {}), "('mars')\n", (519, 527), True, 'import spiceminer as sm\n'), ((536, 556), 'spiceminer.Body', 'sm.Body', (['"""msl_rover"""'], {}), "('msl_rover')\n", (543, 556), True, 'import spiceminer as sm\n'), ((816, 830), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (827, 830), True, 'import numpy as np\n'), ((1728, 1746), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (1740, 1746), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2191), 'matplotlib.pyplot.show', 'plt.show', (['fig'], {}), '(fig)\n', (2186, 2191), True, 'import matplotlib.pyplot as plt\n'), ((249, 270), 'numpy.arccos', 'np.arccos', (['(xyz[2] / r)'], {}), '(xyz[2] / r)\n', (258, 270), True, 'import numpy as np\n'), ((281, 307), 'numpy.arctan', 'np.arctan', (['(xyz[1] / xyz[0])'], {}), '(xyz[1] / xyz[0])\n', (290, 307), True, 'import numpy as np\n'), ((622, 635), 'spiceminer.Time', 'sm.Time', (['(2013)'], {}), '(2013)\n', (629, 635), True, 'import spiceminer as sm\n'), ((637, 653), 'spiceminer.Time', 'sm.Time', (['(2013)', '(2)'], {}), '(2013, 2)\n', (644, 653), True, 'import spiceminer as sm\n'), ((1874, 1893), 'numpy.degrees', 'np.degrees', (['radians'], {}), '(radians)\n', (1884, 1893), True, 'import numpy as np\n'), ((1916, 1937), 'numpy.degrees', 'np.degrees', (['radians_x'], {}), '(radians_x)\n', (1926, 1937), True, 'import numpy as np\n'), ((1958, 1979), 'numpy.degrees', 'np.degrees', (['radians_y'], {}), '(radians_y)\n', (1968, 1979), True, 'import numpy as np\n'), ((2016, 2036), 'numpy.degrees', 'np.degrees', (['radians2'], {}), '(radians2)\n', (2026, 2036), True, 'import numpy as np\n'), ((2059, 2081), 'numpy.degrees', 'np.degrees', (['radians2_x'], {}), '(radians2_x)\n', (2069, 2081), True, 'import numpy as np\n'), ((2102, 2124), 'numpy.degrees', 
'np.degrees', (['radians2_y'], {}), '(radians2_y)\n', (2112, 2124), True, 'import numpy as np\n'), ((216, 235), 'numpy.sum', 'np.sum', (['(xyz ** 2)', '(0)'], {}), '(xyz ** 2, 0)\n', (222, 235), True, 'import numpy as np\n'), ((1121, 1132), 'spiceminer.extra.angle', 'angle', (['z', 'p'], {}), '(z, p)\n', (1126, 1132), False, 'from spiceminer.extra import angle\n'), ((1497, 1508), 'spiceminer.extra.angle', 'angle', (['Z', 'p'], {}), '(Z, p)\n', (1502, 1508), False, 'from spiceminer.extra import angle\n'), ((1556, 1571), 'spiceminer.extra.angle', 'angle', (['Z[1:]', 'p'], {}), '(Z[1:], p)\n', (1561, 1571), False, 'from spiceminer.extra import angle\n'), ((1621, 1636), 'spiceminer.extra.angle', 'angle', (['Z[1:]', 'p'], {}), '(Z[1:], p)\n', (1626, 1636), False, 'from spiceminer.extra import angle\n'), ((1198, 1209), 'spiceminer.extra.angle', 'angle', (['x', 'p'], {}), '(x, p)\n', (1203, 1209), False, 'from spiceminer.extra import angle\n'), ((1285, 1296), 'spiceminer.extra.angle', 'angle', (['y', 'p'], {}), '(y, p)\n', (1290, 1296), False, 'from spiceminer.extra import angle\n')] |
"""Runs a random policy for the random object KukaObjectEnv.
"""
import tigercontrol
import numpy as np
import time
from gym import spaces
class ContinuousDownwardBiasPolicy(object):
    """Random continuous policy that is biased to move downward.

    Actions are drawn uniformly from a 5-dimensional Box; with probability
    ``height_hack_prob`` the vertical component is overridden to -1 (down).
    The grasp component of the returned action is always fixed at 0.5.
    """

    def __init__(self, height_hack_prob=0.9):
        """Initializes the DownwardBiasPolicy.

        Args:
            height_hack_prob: The probability of moving down at every move.
        """
        self._height_hack_prob = height_hack_prob
        self._action_space = spaces.Box(low=-1, high=1, shape=(5,))

    def sample_action(self, obs, explore_prob):
        """Sample a random action; the observation is not used."""
        delta_x, delta_y, delta_z, delta_a, _unused_close = self._action_space.sample()
        if np.random.random() < self._height_hack_prob:
            delta_z = -1
        return [delta_x, delta_y, delta_z, delta_a, 0.5]
def test_kuka(verbose=False):
    """Run the random downward-biased policy on the Kuka env for ~5 seconds."""
    environment = tigercontrol.environment("PyBullet-Kuka")
    obs = environment.reset(render=verbose)
    policy = ContinuousDownwardBiasPolicy()

    deadline = time.time() + 5
    while time.time() < deadline:
        done = False
        episode_rew = 0
        while not done:
            if verbose:
                environment.render(mode='human')
            act = policy.sample_action(obs, .1)
            obs, rew, done, _ = environment.step(act)
            episode_rew += rew

    environment.close()
    print("test_kuka passed")


if __name__ == '__main__':
    # test_kuka(verbose=True)
    pass
| [
"numpy.random.random",
"tigercontrol.environment",
"time.time",
"gym.spaces.Box"
] | [((935, 976), 'tigercontrol.environment', 'tigercontrol.environment', (['"""PyBullet-Kuka"""'], {}), "('PyBullet-Kuka')\n", (959, 976), False, 'import tigercontrol\n'), ((1080, 1091), 'time.time', 'time.time', ([], {}), '()\n', (1089, 1091), False, 'import time\n'), ((548, 586), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(5,)'}), '(low=-1, high=1, shape=(5,))\n', (558, 586), False, 'from gym import spaces\n'), ((782, 800), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (798, 800), True, 'import numpy as np\n'), ((1102, 1113), 'time.time', 'time.time', ([], {}), '()\n', (1111, 1113), False, 'import time\n')] |
from utils import initialize
import numpy as np
import random
import copy
import time
import sys
import os
class GA(object):
    """Genetic programming over expression trees.

    Evolves a population of expression trees (built from ``terminal_symb``
    and a fixed set of primitive operators) to fit the data ``(x, y)`` by
    minimising the mean squared error. Uses roulette-wheel selection with
    elitism and an adaptive mutation rate that increases on stagnation.
    """

    def __init__(self, terminal_symb, x, y, size, num_generations=400, crossover_rate=0.7, mutation_rate=0.05, early_stop=0.1, history_len=20):
        # Operator symbols usable as internal tree nodes.
        self.primitive_symbol = ['+','-','*','/','sqrt','^','log','sin','cos','tan']
        self.terminal_symb = terminal_symb
        self.x = x
        self.y = y
        self.size = size
        self.history_len = history_len
        # Baseline mutation rate, restored once fitness variance recovers.
        self.old_mutation_rate = mutation_rate
        self.num_generations = num_generations
        self.early_stop = early_stop
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.population = [initialize(self.terminal_symb, self.primitive_symbol) for i in range(self.size)]
        self.status = np.zeros((self.size,), dtype=int) # Flags whether a chromosome was already copied into the next generation
        self.bestCromossome = None
        self.loss_history = []
        self.duration = None

    def fitness(self):
        """Return the MSE of every individual; broken values are penalised.

        NaN, inf and negative errors are replaced by ``sys.maxsize/size`` so
        numerically invalid trees are (almost) never selected.
        """
        outputs = [self.population[i].run(self.x) for i in range(self.size)]
        error = [((outputs[i]-self.y)**2).mean() for i in range(self.size)]
        error = np.array(error)
        # Replace NaN, inf and overflowed values with a proportional max-int penalty
        where_are_NaNs = np.isnan(error)
        error[where_are_NaNs] = sys.maxsize/self.size
        where_are_infs = (error == np.inf)
        error[where_are_infs] = sys.maxsize/self.size
        where_are_negative = (error < 0)
        error[where_are_negative] = sys.maxsize/self.size
        return error

    def select_node(self, t):
        """Randomly pick a node of tree ``t`` (10% chance at each visited node).

        Descends into the left child when present, otherwise the right one;
        returns None when a leaf is reached without a pick, so callers retry.
        NOTE(review): when both children exist the right subtree is never
        visited -- selection is biased towards the left spine of the tree.
        """
        prob = random.random()
        if prob>=0.9:
            return t
        if (t.left is None) and (t.right is None):
            return None
        if t.left is not None:
            return self.select_node(t.left)
        if t.right is not None:
            return self.select_node(t.right)

    def crossover(self, idx1, idx2):
        """Swap random subtrees between deep copies of individuals idx1, idx2.

        Returns the two offspring; the originals in the population are
        left untouched.
        """
        t1 = copy.deepcopy(self.population[idx1])
        t2 = copy.deepcopy(self.population[idx2])
        gene1 = self.select_node(t1)
        while(gene1 is None):
            gene1 = self.select_node(t1)
        gene2 = self.select_node(t2)
        while(gene2 is None):
            gene2 = self.select_node(t2)
        p1, p2 = gene1.up, gene2.up # Parents of the chosen nodes
        if p1 is None: # The whole chromosome 1 was selected
            t1 = gene2
            #self.population[idx1] = gene2
        elif p1.left==gene1 :# gene1 is a left child
            p1.left = gene2
        else: # gene1 is a right child
            p1.right = gene2
        gene2.up = p1
        if p2 is None: # The whole chromosome 2 was selected
            t2 = gene1
            #self.population[idx2] = gene1
        elif p2.left==gene2:
            p2.left = gene1
        else:
            p2.right = gene1
        gene1.up = p2
        return [t1, t2]

    def mutation(self, idx):
        """Replace a random subtree of a copy of individual ``idx`` with a
        freshly initialised random tree; returns the mutant in a list."""
        t = copy.deepcopy(self.population[idx])
        gene = self.select_node(t)
        while gene is None:
            gene = self.select_node(t)
        parent = gene.up
        mutated_gene = initialize(self.terminal_symb, self.primitive_symbol)
        if parent is None:
            t = mutated_gene
        elif parent.left == gene:
            parent.left = mutated_gene
        else:
            parent.right = mutated_gene
        mutated_gene.up = parent
        return [t]

    def roulette(self, fit, total_fit):
        """Fitness-proportionate draw: lower fitness means higher acceptance."""
        ticket = np.random.random() # Drawn ticket
        prob = 1-fit/total_fit # Individual's share of tickets (lower fitness is better)
        if(ticket<=prob):
            return True
        return False

    def new_cromossome(self, error, error_history):
        """Produce the next chromosome(s) via crossover, mutation or copying.

        Also adapts the mutation rate: once the recent best-fitness window is
        full and its standard deviation drops to <= 0.1 (stagnation), the rate
        is nudged up; otherwise it is reset to the configured baseline.
        """
        method_ticket = np.random.random() # Ticket deciding which genetic operator to apply
        std = None
        if len(error_history) == self.history_len:
            std = np.std(error_history)
        if std is not None:
            if std <= 0.1:
                self.mutation_rate += 5E-5*(1-self.mutation_rate-self.crossover_rate)
            else:
                self.mutation_rate = self.old_mutation_rate
        if method_ticket <= self.crossover_rate: # Perform crossover
            status = False
            idx1, idx2 = False, False
            while status is False:
                idx1 = np.random.randint(0, self.size)
                status = self.roulette(error[idx1], error.sum())
            status = False
            while status is False:
                idx2 = np.random.randint(0, self.size)
                status = self.roulette(error[idx2], error.sum())
            return self.crossover(idx1, idx2)
        elif method_ticket <= self.crossover_rate+self.mutation_rate: # Perform mutation
            idx1 = np.random.randint(0, self.size)
            return self.mutation(idx1)
        else: # Copy a roulette-selected chromosome into the next generation
            while True:
                idx1 = np.random.randint(0, self.size)
                if self.roulette(error[idx1],error.sum()):# (disabled) "and self.status[idx1] == 0" would forbid re-selecting a chromosome
                    self.status[idx1] = 1
                    return [copy.deepcopy(self.population[idx1])]

    def run(self):
        """Run the evolutionary loop; the best individual always survives.

        Stops after ``num_generations`` or once the best MSE drops to
        ``early_stop``. Progress is printed every 5% of the generations.
        """
        started_time = time.time()
        print('Genetic History Started!')
        error_history = []
        curr_generation = 0
        for i in range(self.num_generations):
            error = self.fitness()
            self.status = np.zeros((self.size), dtype=int)
            new_population = []
            best = np.argmin(error)
            error_min = error.min()
            self.loss_history.append(error_min)
            # Sliding window of recent best errors (drives mutation-rate adaptation).
            if len(error_history) < self.history_len:
                error_history.append(error_min)
            else:
                _ = error_history.pop(0)
                error_history.append(error_min)
            # Elitism: the current best is copied into the next generation.
            self.bestCromossome = self.population[best]
            new_population.append(self.bestCromossome)
            # NOTE(review): new_cromossome may return two offspring, so the
            # new population can end up with size + 1 individuals.
            while len(new_population) < self.size:
                for cromossome in self.new_cromossome(error, error_history):
                    new_population.append(cromossome)
            curr_generation = i+1
            if i % int(self.num_generations*0.05)==0:
                print('Generation {} of {} -- Best Fitness: {}'.format(i, self.num_generations, error_min))
            if error.min() <= self.early_stop:
                break
            self.population = new_population
        duration = time.time() - started_time
        self.duration = duration
        print('\nDuration: {} seconds'.format(duration))
        print('\n{} generations'.format(curr_generation))
| [
"numpy.random.random",
"numpy.std",
"utils.initialize",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.isnan",
"copy.deepcopy",
"numpy.argmin",
"random.random",
"time.time"
] | [((792, 825), 'numpy.zeros', 'np.zeros', (['(self.size,)'], {'dtype': 'int'}), '((self.size,), dtype=int)\n', (800, 825), True, 'import numpy as np\n'), ((1154, 1169), 'numpy.array', 'np.array', (['error'], {}), '(error)\n', (1162, 1169), True, 'import numpy as np\n'), ((1248, 1263), 'numpy.isnan', 'np.isnan', (['error'], {}), '(error)\n', (1256, 1263), True, 'import numpy as np\n'), ((1547, 1562), 'random.random', 'random.random', ([], {}), '()\n', (1560, 1562), False, 'import random\n'), ((1828, 1864), 'copy.deepcopy', 'copy.deepcopy', (['self.population[idx1]'], {}), '(self.population[idx1])\n', (1841, 1864), False, 'import copy\n'), ((1873, 1909), 'copy.deepcopy', 'copy.deepcopy', (['self.population[idx2]'], {}), '(self.population[idx2])\n', (1886, 1909), False, 'import copy\n'), ((2652, 2687), 'copy.deepcopy', 'copy.deepcopy', (['self.population[idx]'], {}), '(self.population[idx])\n', (2665, 2687), False, 'import copy\n'), ((2812, 2865), 'utils.initialize', 'initialize', (['self.terminal_symb', 'self.primitive_symbol'], {}), '(self.terminal_symb, self.primitive_symbol)\n', (2822, 2865), False, 'from utils import initialize\n'), ((3104, 3122), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3120, 3122), True, 'import numpy as np\n'), ((3344, 3362), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3360, 3362), True, 'import numpy as np\n'), ((4615, 4626), 'time.time', 'time.time', ([], {}), '()\n', (4624, 4626), False, 'import time\n'), ((694, 747), 'utils.initialize', 'initialize', (['self.terminal_symb', 'self.primitive_symbol'], {}), '(self.terminal_symb, self.primitive_symbol)\n', (704, 747), False, 'from utils import initialize\n'), ((3460, 3481), 'numpy.std', 'np.std', (['error_history'], {}), '(error_history)\n', (3466, 3481), True, 'import numpy as np\n'), ((4795, 4825), 'numpy.zeros', 'np.zeros', (['self.size'], {'dtype': 'int'}), '(self.size, dtype=int)\n', (4803, 4825), True, 'import numpy as np\n'), ((4863, 4879), 
'numpy.argmin', 'np.argmin', (['error'], {}), '(error)\n', (4872, 4879), True, 'import numpy as np\n'), ((5628, 5639), 'time.time', 'time.time', ([], {}), '()\n', (5637, 5639), False, 'import time\n'), ((3811, 3842), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (3828, 3842), True, 'import numpy as np\n'), ((3957, 3988), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (3974, 3988), True, 'import numpy as np\n'), ((4176, 4207), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (4193, 4207), True, 'import numpy as np\n'), ((4331, 4362), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (4348, 4362), True, 'import numpy as np\n'), ((4540, 4576), 'copy.deepcopy', 'copy.deepcopy', (['self.population[idx1]'], {}), '(self.population[idx1])\n', (4553, 4576), False, 'import copy\n')] |
# SK model
import math
import pickle
import sys
import numpy as np
import torch
class SKModel():
def __init__(self, n, beta, device, field=0, seed=0):
self.n = n
self.beta = beta
self.field = field
self.seed = seed
if seed > 0:
torch.manual_seed(seed)
self.J = torch.randn([self.n, self.n]) / math.sqrt(n)
# Symmetric matrix, zero diagonal
self.J = torch.triu(self.J, diagonal=1)
self.J += self.J.t()
self.J = self.J.to(device)
self.J.requires_grad = True
self.C_model = []
print('SK model with n = {}, beta = {}, field = {}, seed = {}'.format(
n, beta, field, seed))
def exact(self):
assert self.n <= 20
Z = 0
n = self.n
J = self.J.cpu().to(torch.float64)
beta = self.beta
E_min = 0
n_total = int(math.pow(2, n))
print('Enumerating...')
for d in range(n_total):
s = np.binary_repr(d, width=n)
b = np.array(list(s)).astype(np.float32)
b[b < 0.5] = -1
b = torch.from_numpy(b).view(n, 1).to(torch.float64)
E = -0.5 * b.t() @ J @ b
if E < E_min:
E_min = E
Z += torch.exp(-beta * E)
sys.stdout.write('\r{} / {}'.format(d, n_total))
sys.stdout.flush()
print()
print('Computing...')
self.C_model = torch.zeros([n, n]).to(torch.float64)
for d in range(n_total):
s = np.binary_repr(d, width=n)
b = np.array(list(s)).astype(np.float32)
b[b < 0.5] = -1
b = torch.from_numpy(b).view(n, 1).to(torch.float64)
E = -0.5 * b.t() @ J @ b
prob = torch.exp(-beta * E) / Z
self.C_model += b @ b.t() * prob
sys.stdout.write('\r{} / {}'.format(d, n_total))
sys.stdout.flush()
print()
# print(self.C_model)
print(
'Exact free energy = {:.8f}, paramagnetic free energy = {:.8f}, E_min = {:.8f}'
.format(-torch.log(Z).item() / beta / n, -math.log(2) / beta,
E_min.item() / n))
def energy(self, samples):
"""
Compute energy of samples, samples should be of size [m, n] where n is the number of spins, m is the number of samples.
"""
samples = samples.view(samples.shape[0], -1)
assert samples.shape[1] == self.n
m = samples.shape[0]
return (-0.5 * ((samples @ self.J).view(m, 1, self.n) @ samples.view(
m, self.n, 1)).squeeze() - self.field * torch.sum(samples, 1))
def J_diff(self, J):
"""
Compute difference between true couplings and inferred couplings.
"""
diff = self.J - J
diff = diff * diff
return math.sqrt(torch.mean(diff))
def save(self):
self.J = self.J.cpu()
fsave_name = 'n{}b{:.2f}D{}.pickle'.format(self.n, self.beta,
self.seed)
with open(fsave_name, 'wb') as fsave:
pickle.dump(self, fsave)
print('SK model is saved to', fsave_name)
if __name__ == '__main__':
    # Usage: python <script> <n> <beta> <seed>
    assert len(sys.argv) >= 4
    device = torch.device('cpu')
    n = int(sys.argv[1])
    beta = float(sys.argv[2])
    seed = int(sys.argv[3])
    # Build the model, brute-force the exact quantities, then pickle it.
    sk = SKModel(n, beta, device, seed=seed)
    sk.exact()
    sk.save()
| [
"torch.manual_seed",
"torch.triu",
"pickle.dump",
"torch.log",
"math.pow",
"torch.mean",
"math.sqrt",
"numpy.binary_repr",
"torch.exp",
"math.log",
"torch.from_numpy",
"torch.sum",
"torch.zeros",
"sys.stdout.flush",
"torch.randn",
"torch.device"
] | [((3274, 3293), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3286, 3293), False, 'import torch\n'), ((434, 464), 'torch.triu', 'torch.triu', (['self.J'], {'diagonal': '(1)'}), '(self.J, diagonal=1)\n', (444, 464), False, 'import torch\n'), ((288, 311), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (305, 311), False, 'import torch\n'), ((330, 359), 'torch.randn', 'torch.randn', (['[self.n, self.n]'], {}), '([self.n, self.n])\n', (341, 359), False, 'import torch\n'), ((362, 374), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (371, 374), False, 'import math\n'), ((899, 913), 'math.pow', 'math.pow', (['(2)', 'n'], {}), '(2, n)\n', (907, 913), False, 'import math\n'), ((997, 1023), 'numpy.binary_repr', 'np.binary_repr', (['d'], {'width': 'n'}), '(d, width=n)\n', (1011, 1023), True, 'import numpy as np\n'), ((1276, 1296), 'torch.exp', 'torch.exp', (['(-beta * E)'], {}), '(-beta * E)\n', (1285, 1296), False, 'import torch\n'), ((1370, 1388), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1386, 1388), False, 'import sys\n'), ((1546, 1572), 'numpy.binary_repr', 'np.binary_repr', (['d'], {'width': 'n'}), '(d, width=n)\n', (1560, 1572), True, 'import numpy as np\n'), ((1918, 1936), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1934, 1936), False, 'import sys\n'), ((2867, 2883), 'torch.mean', 'torch.mean', (['diff'], {}), '(diff)\n', (2877, 2883), False, 'import torch\n'), ((3126, 3150), 'pickle.dump', 'pickle.dump', (['self', 'fsave'], {}), '(self, fsave)\n', (3137, 3150), False, 'import pickle\n'), ((1459, 1478), 'torch.zeros', 'torch.zeros', (['[n, n]'], {}), '([n, n])\n', (1470, 1478), False, 'import torch\n'), ((1775, 1795), 'torch.exp', 'torch.exp', (['(-beta * E)'], {}), '(-beta * E)\n', (1784, 1795), False, 'import torch\n'), ((2642, 2663), 'torch.sum', 'torch.sum', (['samples', '(1)'], {}), '(samples, 1)\n', (2651, 2663), False, 'import torch\n'), ((2145, 2156), 'math.log', 'math.log', 
(['(2)'], {}), '(2)\n', (2153, 2156), False, 'import math\n'), ((1121, 1140), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (1137, 1140), False, 'import torch\n'), ((1670, 1689), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (1686, 1689), False, 'import torch\n'), ((2112, 2124), 'torch.log', 'torch.log', (['Z'], {}), '(Z)\n', (2121, 2124), False, 'import torch\n')] |
# standard libraries
import os
import shutil
import typing
import unittest
# third party libraries
import h5py
import numpy
# local libraries
from nion.data import Image
_ImageDataType = Image._ImageDataType
class TestImageClass(unittest.TestCase):
    """Unit tests for nion.data.Image array conversion and scaling."""

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_create_rgba_image_from_array(self) -> None:
        """1-d, 1-d column and 1-d RGB arrays all convert to RGBA images."""
        image_1d_16 = numpy.zeros((16, ), dtype=numpy.double)
        image_1d_16x1 = numpy.zeros((16, 1), dtype=numpy.double)
        self.assertIsNotNone(Image.create_rgba_image_from_array(image_1d_16))
        self.assertIsNotNone(Image.create_rgba_image_from_array(image_1d_16x1))
        image_1d_rgb = numpy.zeros((16, 3), dtype=numpy.uint8)
        self.assertIsNotNone(Image.create_rgba_image_from_array(image_1d_rgb))

    def test_rebin_expand_has_even_expansion(self) -> None:
        """rebin_1d preserves mean and variance for integral expansions."""
        # NOTE: statistical tests are only valid if expanded length is a
        # multiple of src length. The 200 -> 600 case uses larger values to
        # make sure the linear mapping works (failed once).
        for src_len, dst_len in ((10, 50), (10, 500), (200, 600)):
            src = numpy.arange(0, src_len)
            expanded = Image.rebin_1d(src, dst_len)
            self.assertAlmostEqual(numpy.mean(src), numpy.mean(expanded))
            self.assertAlmostEqual(numpy.var(src), numpy.var(expanded))

    def _scaled_quantized(self, src: _ImageDataType, size: int, method: str) -> _ImageDataType:
        """Scale *src* to (size, size) and quantize to int32 for exact compares."""
        return typing.cast(_ImageDataType, Image.scaled(src, (size, size), method) * 1000).astype(numpy.int32)

    def _assert_fourfold_symmetric(self, scaled: _ImageDataType, size: int) -> None:
        """Assert the top-left 6x6 corner mirrors the other three corners.

        ``size:size - 7:-1`` reverses the last six rows/columns (the start
        index is clamped to size - 1 by slice semantics).
        """
        corner = scaled[0:6, 0:6]
        self.assertTrue(numpy.array_equal(corner, scaled[0:6, size:size - 7:-1]))
        self.assertTrue(numpy.array_equal(corner, scaled[size:size - 7:-1, size:size - 7:-1]))
        self.assertTrue(numpy.array_equal(corner, scaled[size:size - 7:-1, 0:6]))

    def _assert_scaling_is_symmetric(self, method: str) -> None:
        """Scaling symmetric even- and odd-sized images must stay symmetric.

        Bug fix: the original tests scaled ``src1`` four times and never used
        ``src2``, so the 9x9 odd-sized image was assigned but untested; it is
        now actually exercised.
        """
        src1 = numpy.zeros((8, 8))
        src2 = numpy.zeros((9, 9))
        src1[3:5, 3:5] = 1
        src2[3:6, 3:6] = 1
        for src in (src1, src2):
            for size in (12, 13):
                self._assert_fourfold_symmetric(self._scaled_quantized(src, size, method), size)

    def test_scale_cubic_is_symmetry(self) -> None:
        self._assert_scaling_is_symmetric('cubic')

    def test_scale_linear_is_symmetry(self) -> None:
        self._assert_scaling_is_symmetric('linear')

    def test_rgba_can_be_created_from_h5py_array(self) -> None:
        """create_rgba_image_from_array must accept an h5py dataset too."""
        current_working_directory = os.getcwd()
        workspace_dir = os.path.join(current_working_directory, "__Test")
        if os.path.exists(workspace_dir):
            shutil.rmtree(workspace_dir)
        os.makedirs(workspace_dir)
        try:
            with h5py.File(os.path.join(workspace_dir, "file.h5"), "w") as f:
                dataset = f.create_dataset("data", data=numpy.ones((4, 4, 4), dtype=numpy.uint8))
                Image.create_rgba_image_from_array(dataset)
        finally:
            # Always remove the scratch directory, even when the test fails.
            shutil.rmtree(workspace_dir)
| [
"os.path.exists",
"numpy.mean",
"numpy.ones",
"os.makedirs",
"os.path.join",
"os.getcwd",
"numpy.zeros",
"numpy.var",
"numpy.array_equal",
"shutil.rmtree",
"nion.data.Image.create_rgba_image_from_array",
"nion.data.Image.rebin_1d",
"nion.data.Image.scaled",
"numpy.arange"
] | [((424, 462), 'numpy.zeros', 'numpy.zeros', (['(16,)'], {'dtype': 'numpy.double'}), '((16,), dtype=numpy.double)\n', (435, 462), False, 'import numpy\n'), ((488, 528), 'numpy.zeros', 'numpy.zeros', (['(16, 1)'], {'dtype': 'numpy.double'}), '((16, 1), dtype=numpy.double)\n', (499, 528), False, 'import numpy\n'), ((710, 749), 'numpy.zeros', 'numpy.zeros', (['(16, 3)'], {'dtype': 'numpy.uint8'}), '((16, 3), dtype=numpy.uint8)\n', (721, 749), False, 'import numpy\n'), ((998, 1017), 'numpy.arange', 'numpy.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1010, 1017), False, 'import numpy\n'), ((1037, 1060), 'nion.data.Image.rebin_1d', 'Image.rebin_1d', (['src', '(50)'], {}), '(src, 50)\n', (1051, 1060), False, 'from nion.data import Image\n'), ((1213, 1232), 'numpy.arange', 'numpy.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1225, 1232), False, 'import numpy\n'), ((1252, 1276), 'nion.data.Image.rebin_1d', 'Image.rebin_1d', (['src', '(500)'], {}), '(src, 500)\n', (1266, 1276), False, 'from nion.data import Image\n'), ((1506, 1526), 'numpy.arange', 'numpy.arange', (['(0)', '(200)'], {}), '(0, 200)\n', (1518, 1526), False, 'import numpy\n'), ((1546, 1570), 'nion.data.Image.rebin_1d', 'Image.rebin_1d', (['src', '(600)'], {}), '(src, 600)\n', (1560, 1570), False, 'from nion.data import Image\n'), ((1777, 1796), 'numpy.zeros', 'numpy.zeros', (['(8, 8)'], {}), '((8, 8))\n', (1788, 1796), False, 'import numpy\n'), ((1812, 1831), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {}), '((9, 9))\n', (1823, 1831), False, 'import numpy\n'), ((3439, 3458), 'numpy.zeros', 'numpy.zeros', (['(8, 8)'], {}), '((8, 8))\n', (3450, 3458), False, 'import numpy\n'), ((3474, 3493), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {}), '((9, 9))\n', (3485, 3493), False, 'import numpy\n'), ((5137, 5148), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5146, 5148), False, 'import os\n'), ((5173, 5222), 'os.path.join', 'os.path.join', (['current_working_directory', '"""__Test"""'], {}), 
"(current_working_directory, '__Test')\n", (5185, 5222), False, 'import os\n'), ((5234, 5263), 'os.path.exists', 'os.path.exists', (['workspace_dir'], {}), '(workspace_dir)\n', (5248, 5263), False, 'import os\n'), ((5314, 5340), 'os.makedirs', 'os.makedirs', (['workspace_dir'], {}), '(workspace_dir)\n', (5325, 5340), False, 'import os\n'), ((558, 605), 'nion.data.Image.create_rgba_image_from_array', 'Image.create_rgba_image_from_array', (['image_1d_16'], {}), '(image_1d_16)\n', (592, 605), False, 'from nion.data import Image\n'), ((636, 685), 'nion.data.Image.create_rgba_image_from_array', 'Image.create_rgba_image_from_array', (['image_1d_16x1'], {}), '(image_1d_16x1)\n', (670, 685), False, 'from nion.data import Image\n'), ((779, 827), 'nion.data.Image.create_rgba_image_from_array', 'Image.create_rgba_image_from_array', (['image_1d_rgb'], {}), '(image_1d_rgb)\n', (813, 827), False, 'from nion.data import Image\n'), ((1092, 1107), 'numpy.mean', 'numpy.mean', (['src'], {}), '(src)\n', (1102, 1107), False, 'import numpy\n'), ((1109, 1129), 'numpy.mean', 'numpy.mean', (['expanded'], {}), '(expanded)\n', (1119, 1129), False, 'import numpy\n'), ((1162, 1176), 'numpy.var', 'numpy.var', (['src'], {}), '(src)\n', (1171, 1176), False, 'import numpy\n'), ((1178, 1197), 'numpy.var', 'numpy.var', (['expanded'], {}), '(expanded)\n', (1187, 1197), False, 'import numpy\n'), ((1308, 1323), 'numpy.mean', 'numpy.mean', (['src'], {}), '(src)\n', (1318, 1323), False, 'import numpy\n'), ((1325, 1345), 'numpy.mean', 'numpy.mean', (['expanded'], {}), '(expanded)\n', (1335, 1345), False, 'import numpy\n'), ((1378, 1392), 'numpy.var', 'numpy.var', (['src'], {}), '(src)\n', (1387, 1392), False, 'import numpy\n'), ((1394, 1413), 'numpy.var', 'numpy.var', (['expanded'], {}), '(expanded)\n', (1403, 1413), False, 'import numpy\n'), ((1602, 1617), 'numpy.mean', 'numpy.mean', (['src'], {}), '(src)\n', (1612, 1617), False, 'import numpy\n'), ((1619, 1639), 'numpy.mean', 'numpy.mean', 
(['expanded'], {}), '(expanded)\n', (1629, 1639), False, 'import numpy\n'), ((1672, 1686), 'numpy.var', 'numpy.var', (['src'], {}), '(src)\n', (1681, 1686), False, 'import numpy\n'), ((1688, 1707), 'numpy.var', 'numpy.var', (['expanded'], {}), '(expanded)\n', (1697, 1707), False, 'import numpy\n'), ((2406, 2461), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[0:6, 12:5:-1]'], {}), '(src1s[0:6, 0:6], src1s[0:6, 12:5:-1])\n', (2423, 2461), False, 'import numpy\n'), ((2487, 2546), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[12:5:-1, 12:5:-1]'], {}), '(src1s[0:6, 0:6], src1s[12:5:-1, 12:5:-1])\n', (2504, 2546), False, 'import numpy\n'), ((2572, 2627), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[12:5:-1, 0:6]'], {}), '(src1s[0:6, 0:6], src1s[12:5:-1, 0:6])\n', (2589, 2627), False, 'import numpy\n'), ((2653, 2708), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[0:6, 12:5:-1]'], {}), '(src2s[0:6, 0:6], src2s[0:6, 12:5:-1])\n', (2670, 2708), False, 'import numpy\n'), ((2734, 2793), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[12:5:-1, 12:5:-1]'], {}), '(src2s[0:6, 0:6], src2s[12:5:-1, 12:5:-1])\n', (2751, 2793), False, 'import numpy\n'), ((2819, 2874), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[12:5:-1, 0:6]'], {}), '(src2s[0:6, 0:6], src2s[12:5:-1, 0:6])\n', (2836, 2874), False, 'import numpy\n'), ((2900, 2955), 'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[0:6, 13:6:-1]'], {}), '(src1t[0:6, 0:6], src1t[0:6, 13:6:-1])\n', (2917, 2955), False, 'import numpy\n'), ((2981, 3040), 'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[13:6:-1, 13:6:-1]'], {}), '(src1t[0:6, 0:6], src1t[13:6:-1, 13:6:-1])\n', (2998, 3040), False, 'import numpy\n'), ((3066, 3121), 'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[13:6:-1, 0:6]'], {}), '(src1t[0:6, 0:6], src1t[13:6:-1, 
0:6])\n', (3083, 3121), False, 'import numpy\n'), ((3147, 3202), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[0:6, 13:6:-1]'], {}), '(src2t[0:6, 0:6], src2t[0:6, 13:6:-1])\n', (3164, 3202), False, 'import numpy\n'), ((3228, 3287), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[13:6:-1, 13:6:-1]'], {}), '(src2t[0:6, 0:6], src2t[13:6:-1, 13:6:-1])\n', (3245, 3287), False, 'import numpy\n'), ((3313, 3368), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[13:6:-1, 0:6]'], {}), '(src2t[0:6, 0:6], src2t[13:6:-1, 0:6])\n', (3330, 3368), False, 'import numpy\n'), ((4072, 4127), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[0:6, 12:5:-1]'], {}), '(src1s[0:6, 0:6], src1s[0:6, 12:5:-1])\n', (4089, 4127), False, 'import numpy\n'), ((4153, 4212), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[12:5:-1, 12:5:-1]'], {}), '(src1s[0:6, 0:6], src1s[12:5:-1, 12:5:-1])\n', (4170, 4212), False, 'import numpy\n'), ((4238, 4293), 'numpy.array_equal', 'numpy.array_equal', (['src1s[0:6, 0:6]', 'src1s[12:5:-1, 0:6]'], {}), '(src1s[0:6, 0:6], src1s[12:5:-1, 0:6])\n', (4255, 4293), False, 'import numpy\n'), ((4319, 4374), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[0:6, 12:5:-1]'], {}), '(src2s[0:6, 0:6], src2s[0:6, 12:5:-1])\n', (4336, 4374), False, 'import numpy\n'), ((4400, 4459), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[12:5:-1, 12:5:-1]'], {}), '(src2s[0:6, 0:6], src2s[12:5:-1, 12:5:-1])\n', (4417, 4459), False, 'import numpy\n'), ((4485, 4540), 'numpy.array_equal', 'numpy.array_equal', (['src2s[0:6, 0:6]', 'src2s[12:5:-1, 0:6]'], {}), '(src2s[0:6, 0:6], src2s[12:5:-1, 0:6])\n', (4502, 4540), False, 'import numpy\n'), ((4566, 4621), 'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[0:6, 13:6:-1]'], {}), '(src1t[0:6, 0:6], src1t[0:6, 13:6:-1])\n', (4583, 4621), False, 'import numpy\n'), ((4647, 4706), 
'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[13:6:-1, 13:6:-1]'], {}), '(src1t[0:6, 0:6], src1t[13:6:-1, 13:6:-1])\n', (4664, 4706), False, 'import numpy\n'), ((4732, 4787), 'numpy.array_equal', 'numpy.array_equal', (['src1t[0:6, 0:6]', 'src1t[13:6:-1, 0:6]'], {}), '(src1t[0:6, 0:6], src1t[13:6:-1, 0:6])\n', (4749, 4787), False, 'import numpy\n'), ((4813, 4868), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[0:6, 13:6:-1]'], {}), '(src2t[0:6, 0:6], src2t[0:6, 13:6:-1])\n', (4830, 4868), False, 'import numpy\n'), ((4894, 4953), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[13:6:-1, 13:6:-1]'], {}), '(src2t[0:6, 0:6], src2t[13:6:-1, 13:6:-1])\n', (4911, 4953), False, 'import numpy\n'), ((4979, 5034), 'numpy.array_equal', 'numpy.array_equal', (['src2t[0:6, 0:6]', 'src2t[13:6:-1, 0:6]'], {}), '(src2t[0:6, 0:6], src2t[13:6:-1, 0:6])\n', (4996, 5034), False, 'import numpy\n'), ((5277, 5305), 'shutil.rmtree', 'shutil.rmtree', (['workspace_dir'], {}), '(workspace_dir)\n', (5290, 5305), False, 'import shutil\n'), ((5666, 5694), 'shutil.rmtree', 'shutil.rmtree', (['workspace_dir'], {}), '(workspace_dir)\n', (5679, 5694), False, 'import shutil\n'), ((5546, 5589), 'nion.data.Image.create_rgba_image_from_array', 'Image.create_rgba_image_from_array', (['dataset'], {}), '(dataset)\n', (5580, 5589), False, 'from nion.data import Image\n'), ((5381, 5419), 'os.path.join', 'os.path.join', (['workspace_dir', '"""file.h5"""'], {}), "(workspace_dir, 'file.h5')\n", (5393, 5419), False, 'import os\n'), ((1946, 1983), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(12, 12)', '"""cubic"""'], {}), "(src1, (12, 12), 'cubic')\n", (1958, 1983), False, 'from nion.data import Image\n'), ((2070, 2107), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(12, 12)', '"""cubic"""'], {}), "(src1, (12, 12), 'cubic')\n", (2082, 2107), False, 'from nion.data import Image\n'), ((2194, 2231), 'nion.data.Image.scaled', 
'Image.scaled', (['src1', '(13, 13)', '"""cubic"""'], {}), "(src1, (13, 13), 'cubic')\n", (2206, 2231), False, 'from nion.data import Image\n'), ((2318, 2355), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(13, 13)', '"""cubic"""'], {}), "(src1, (13, 13), 'cubic')\n", (2330, 2355), False, 'from nion.data import Image\n'), ((3608, 3646), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(12, 12)', '"""linear"""'], {}), "(src1, (12, 12), 'linear')\n", (3620, 3646), False, 'from nion.data import Image\n'), ((3733, 3771), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(12, 12)', '"""linear"""'], {}), "(src1, (12, 12), 'linear')\n", (3745, 3771), False, 'from nion.data import Image\n'), ((3858, 3896), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(13, 13)', '"""linear"""'], {}), "(src1, (13, 13), 'linear')\n", (3870, 3896), False, 'from nion.data import Image\n'), ((3983, 4021), 'nion.data.Image.scaled', 'Image.scaled', (['src1', '(13, 13)', '"""linear"""'], {}), "(src1, (13, 13), 'linear')\n", (3995, 4021), False, 'from nion.data import Image\n'), ((5488, 5528), 'numpy.ones', 'numpy.ones', (['(4, 4, 4)'], {'dtype': 'numpy.uint8'}), '((4, 4, 4), dtype=numpy.uint8)\n', (5498, 5528), False, 'import numpy\n')] |
import numpy as np
from bayesnet.array.broadcast import broadcast_to
from bayesnet.math.exp import exp
from bayesnet.math.log import log
from bayesnet.math.sqrt import sqrt
from bayesnet.math.square import square
from bayesnet.random.random import RandomVariable
from bayesnet.tensor.constant import Constant
from bayesnet.tensor.tensor import Tensor
class GaussianMixture(RandomVariable):
    """
    Mixture of the Gaussian distribution
    p(x|w, mu, std)
    = w_1 * N(x|mu_1, std_1) + ... + w_K * N(x|mu_K, std_K)
    Parameters
    ----------
    coef : tensor_like
        mixing coefficient whose sum along specified axis should equal to 1
    mu : tensor_like
        mean parameter along specified axis for each component
    std : tensor_like
        std parameter along specified axis for each component
    axis : int
        axis along which represents each component
    data : tensor_like
        realization
    p : RandomVariable
        original distribution of a model
    """
    def __init__(self, coef, mu, std, axis=-1, data=None, p=None):
        super().__init__(data, p)
        # only the last axis is currently supported as the component axis
        assert axis == -1
        self.axis = axis
        self.coef, self.mu, self.std = self._check_input(coef, mu, std)
    def _check_input(self, coef, mu, std):
        # Coerce raw inputs to tensors and broadcast them to a common shape.
        coef = self._convert2tensor(coef)
        mu = self._convert2tensor(mu)
        std = self._convert2tensor(std)
        if not coef.shape == mu.shape == std.shape:
            # np.broadcast only computes the result shape; no data is copied here
            shape = np.broadcast(coef.value, mu.value, std.value).shape
            if coef.shape != shape:
                coef = broadcast_to(coef, shape)
            if mu.shape != shape:
                mu = broadcast_to(mu, shape)
            if std.shape != shape:
                std = broadcast_to(std, shape)
        # number of mixture components K, read off the component axis
        self.n_component = coef.shape[self.axis]
        return coef, mu, std
    @property
    def axis(self):
        return self.parameter["axis"]
    @axis.setter
    def axis(self, axis):
        if not isinstance(axis, int):
            raise TypeError("axis must be int")
        self.parameter["axis"] = axis
    @property
    def coef(self):
        return self.parameter["coef"]
    @coef.setter
    def coef(self, coef):
        # mixing coefficients must be a valid discrete distribution along axis
        self._atleast_ndim(coef, 1)
        if (coef.value < 0).any():
            raise ValueError("value of mixing coefficient must all be positive")
        if not np.allclose(coef.value.sum(axis=self.axis), 1):
            raise ValueError("sum of mixing coefficients must be 1")
        self.parameter["coef"] = coef
    @property
    def mu(self):
        return self.parameter["mu"]
    @mu.setter
    def mu(self, mu):
        self.parameter["mu"] = mu
    @property
    def std(self):
        return self.parameter["std"]
    @std.setter
    def std(self, std):
        self._atleast_ndim(std, 1)
        if (std.value < 0).any():
            raise ValueError("value of std must all be positive")
        self.parameter["std"] = std
    @property
    def var(self):
        # variance is std squared
        return square(self.parameter["std"])
    def forward(self):
        # Ancestral sampling: pick a component per draw, then sample from it.
        if self.coef.ndim != 1:
            raise NotImplementedError
        # NOTE(review): with a 1-D coef this iterates scalar weights and passes
        # each scalar as `p=` to np.random.choice — looks like it should be a
        # single np.random.choice(self.n_component, p=self.coef.value) call;
        # verify against the intended batch semantics.
        indices = np.array(
            [np.random.choice(self.n_component, p=c) for c in self.coef.value]
        )
        output = np.random.normal(
            loc=self.mu.value[indices],
            scale=self.std.value[indices]
        )
        if (
            isinstance(self.coef, Constant)
            and isinstance(self.mu, Constant)
            and isinstance(self.std, Constant)
        ):
            # all parameters fixed -> sample carries no gradient information
            return Constant(output)
        return Tensor(output, function=self)
    def backward(self):
        raise NotImplementedError
    def _pdf(self, x):
        # Gaussian density per component, then weighted sum over components.
        gauss = (
            exp(-0.5 * square((x - self.mu) / self.std))
            / sqrt(2 * np.pi) / self.std
        )
        return (self.coef * gauss).sum(axis=self.axis)
    def _log_pdf(self, x):
        # log of the mixture density (no log-sum-exp stabilization here)
        return log(self.pdf(x))
| [
"numpy.random.normal",
"bayesnet.tensor.constant.Constant",
"bayesnet.math.sqrt.sqrt",
"bayesnet.tensor.tensor.Tensor",
"bayesnet.math.square.square",
"numpy.random.choice",
"numpy.broadcast",
"bayesnet.array.broadcast.broadcast_to"
] | [((2993, 3022), 'bayesnet.math.square.square', 'square', (["self.parameter['std']"], {}), "(self.parameter['std'])\n", (2999, 3022), False, 'from bayesnet.math.square import square\n'), ((3251, 3326), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'self.mu.value[indices]', 'scale': 'self.std.value[indices]'}), '(loc=self.mu.value[indices], scale=self.std.value[indices])\n', (3267, 3326), True, 'import numpy as np\n'), ((3585, 3614), 'bayesnet.tensor.tensor.Tensor', 'Tensor', (['output'], {'function': 'self'}), '(output, function=self)\n', (3591, 3614), False, 'from bayesnet.tensor.tensor import Tensor\n'), ((3553, 3569), 'bayesnet.tensor.constant.Constant', 'Constant', (['output'], {}), '(output)\n', (3561, 3569), False, 'from bayesnet.tensor.constant import Constant\n'), ((1462, 1507), 'numpy.broadcast', 'np.broadcast', (['coef.value', 'mu.value', 'std.value'], {}), '(coef.value, mu.value, std.value)\n', (1474, 1507), True, 'import numpy as np\n'), ((1573, 1598), 'bayesnet.array.broadcast.broadcast_to', 'broadcast_to', (['coef', 'shape'], {}), '(coef, shape)\n', (1585, 1598), False, 'from bayesnet.array.broadcast import broadcast_to\n'), ((1654, 1677), 'bayesnet.array.broadcast.broadcast_to', 'broadcast_to', (['mu', 'shape'], {}), '(mu, shape)\n', (1666, 1677), False, 'from bayesnet.array.broadcast import broadcast_to\n'), ((1735, 1759), 'bayesnet.array.broadcast.broadcast_to', 'broadcast_to', (['std', 'shape'], {}), '(std, shape)\n', (1747, 1759), False, 'from bayesnet.array.broadcast import broadcast_to\n'), ((3158, 3197), 'numpy.random.choice', 'np.random.choice', (['self.n_component'], {'p': 'c'}), '(self.n_component, p=c)\n', (3174, 3197), True, 'import numpy as np\n'), ((3787, 3802), 'bayesnet.math.sqrt.sqrt', 'sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3791, 3802), False, 'from bayesnet.math.sqrt import sqrt\n'), ((3739, 3771), 'bayesnet.math.square.square', 'square', (['((x - self.mu) / self.std)'], {}), '((x - self.mu) / self.std)\n', 
(3745, 3771), False, 'from bayesnet.math.square import square\n')] |
"""combine multiple images into one using a sliding window of vertical strips"""
import glob
import os
import sys
from typing import List, Tuple
import numpy as np
from PIL import Image
def get_files(directory: str, name_filter: str = "*.jpg") -> List:
    """
    List files in a directory whose names match a wildcard pattern.
    :param directory: directory in which to search
    :param name_filter: glob-style filename pattern
    :return: sorted list of matching file paths
    """
    return sorted(glob.glob(os.path.join(directory, name_filter)))
def read_strip(image_path: str, start: int = None, end: int = None) -> \
        Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Extract a vertical strip of RGB data from an image file.
    :param image_path: path to the image file
    :param start: first column of the strip (defaults to 0)
    :param end: column past the last one to read (defaults to image width)
    :return: three numpy arrays holding the R, G and B channel strips
    """
    with Image.open(str(image_path)) as handle:
        lo = 0 if start is None else start
        hi = handle.width if end is None else end
        pixels = np.array(handle)
        # split into per-channel planes; exactly three channels are expected
        chan_r, chan_g, chan_b = [pixels[:, :, c]
                                  for c in range(pixels.shape[-1])]
        return (chan_r[:, lo:hi],
                chan_g[:, lo:hi],
                chan_b[:, lo:hi])
def create_strips(file_paths: List[str], averaging=True) -> Tuple[np.ndarray,
                                                                  np.ndarray,
                                                                  np.ndarray]:
    """
    Aggregate vertical-strip data from source images
    :param file_paths: list of paths to source images, read in order
    :param averaging: average strip with surrounding strips
    :return: three Numpy arrays corresponding to RGB data for combined image
    """
    # probe the first image for the output dimensions (all inputs are
    # assumed to share the same width/height — TODO confirm with callers)
    first_file, *_ = file_paths
    first_image_data, *_ = read_strip(first_file)
    _, image_width = first_image_data.shape
    final_r = np.zeros_like(first_image_data)
    final_g = np.zeros_like(first_image_data)
    final_b = np.zeros_like(first_image_data)
    number_of_files = len(file_paths)
    # each source image contributes one vertical strip of this width
    strip_width = image_width // number_of_files
    for index, file_path in enumerate(file_paths):
        _, filename = os.path.split(file_path)
        start = index * strip_width
        # the last strip absorbs any remainder so the full width is covered
        end = (image_width if index == (number_of_files - 1)
               else (index + 1) * strip_width)
        if averaging and 1 < index < number_of_files - 2:
            # blend the strip with its two neighbours on each side
            # (indices 0,1 and the last two fall through to the else branch
            # because they lack a full 2-neighbour window)
            left_2_file_path = file_paths[index - 2]
            left_file_path = file_paths[index - 1]
            right_file_path = file_paths[index + 1]
            right_2_file_path = file_paths[index + 2]
            left2_r, left2_g, left2_b = read_strip(left_2_file_path, start, end)
            left_r, left_g, left_b = read_strip(left_file_path, start, end)
            center_r, center_g, center_b = read_strip(file_path, start, end)
            right_r, right_g, right_b = read_strip(right_file_path, start, end)
            right2_r, right2_g, right2_b = read_strip(right_2_file_path, start, end)
            # integer division per term keeps values in uint8 range but
            # loses up to 4 counts of precision per pixel
            new_r = (left2_r // 5 + left_r // 5 + center_r // 5 + right_r // 5 + right2_r //5)
            new_g = (left2_g // 5 + left_g // 5 + center_g // 5 + right_g // 5 + right2_g //5)
            new_b = (left2_b // 5 + left_b // 5 + center_b // 5 + right_b // 5 + right2_b //5)
        else:
            new_r, new_g, new_b = read_strip(file_path, start, end)
        final_r[:, start:end] = new_r
        final_g[:, start:end] = new_g
        final_b[:, start:end] = new_b
        # per-strip mean RGB, reported purely for progress logging
        mean_r = int(new_r.mean())
        mean_g = int(new_g.mean())
        mean_b = int(new_b.mean())
        message = ("Processed image {0} of {1}: {2}, "
                   "{3}px to {4}px, mean RGB: ({5}, {6}, {7})"
                   .format(index + 1, number_of_files, filename,
                           start, end - 1, mean_r, mean_g, mean_b))
        print(message)
    return final_r, final_g, final_b
def write_image(source_data: Tuple[np.ndarray, np.ndarray, np.ndarray],
                file_path: str) -> None:
    """
    Stack the R, G and B planes into one image and save it to disk.
    :param source_data: three numpy arrays with the channel data
    :param file_path: destination path for the encoded image
    """
    rgb = np.dstack(source_data)
    Image.fromarray(rgb).save(file_path, optimize=True)
def main(directory: str, output_path: str) -> None:
    """
    Build a composite image from strips of the images in *directory*.
    :param directory: directory holding the source images
    :param output_path: file path for the composite result
    """
    source_files = get_files(directory)
    strips = create_strips(source_files)
    write_image(strips, output_path)
if __name__ == "__main__":
    # expect exactly two positional arguments: source dir and output path
    if len(sys.argv) == 3:
        directory, filename = sys.argv[1:]
        main(directory, filename)
    else:
        print("Usage {0} INPUT_DIRECTORY OUTPUT_PATH".format(sys.argv[0]))
| [
"numpy.dstack",
"os.path.join",
"os.path.split",
"numpy.array",
"numpy.zeros_like",
"glob.glob"
] | [((506, 542), 'os.path.join', 'os.path.join', (['directory', 'name_filter'], {}), '(directory, name_filter)\n', (518, 542), False, 'import os\n'), ((2073, 2104), 'numpy.zeros_like', 'np.zeros_like', (['first_image_data'], {}), '(first_image_data)\n', (2086, 2104), True, 'import numpy as np\n'), ((2119, 2150), 'numpy.zeros_like', 'np.zeros_like', (['first_image_data'], {}), '(first_image_data)\n', (2132, 2150), True, 'import numpy as np\n'), ((2165, 2196), 'numpy.zeros_like', 'np.zeros_like', (['first_image_data'], {}), '(first_image_data)\n', (2178, 2196), True, 'import numpy as np\n'), ((561, 580), 'glob.glob', 'glob.glob', (['path_str'], {}), '(path_str)\n', (570, 580), False, 'import glob\n'), ((1169, 1189), 'numpy.array', 'np.array', (['image_file'], {}), '(image_file)\n', (1177, 1189), True, 'import numpy as np\n'), ((2359, 2383), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (2372, 2383), False, 'import os\n'), ((4405, 4427), 'numpy.dstack', 'np.dstack', (['source_data'], {}), '(source_data)\n', (4414, 4427), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets
import time, os, argparse
from torch.autograd import Variable
from modules import *
class MNIST:
    """Bundles normalized MNIST train/eval DataLoaders behind one object."""
    def __init__(self, bs=1):
        # per-channel normalization with the standard MNIST mean/std
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        self.num_classes = 10
        train_split = datasets.MNIST('data', train=True, download=True, transform=preprocess)
        eval_split = datasets.MNIST('data', train=False, download=True, transform=preprocess)
        self.train_dataloader = torch.utils.data.DataLoader(train_split, batch_size=bs, shuffle=True)
        self.eval_dataloader = torch.utils.data.DataLoader(eval_split, batch_size=bs, shuffle=True)
def parse_args(argv=None):
    """
    Parse command-line arguments for CapsNet training.
    :param argv: optional list of argument strings; when None (the default,
                 preserving the original call signature) sys.argv is used
    :return: argparse.Namespace with the training configuration
    """
    parser = argparse.ArgumentParser(description='Cupy Capsnet')
    parser.add_argument('--bs', dest='bs',
                        help='batch size',
                        default='100', type=int)
    parser.add_argument('--lr', dest='lr',
                        help='learning rate',
                        default=1e-2, type=float)
    parser.add_argument('--opt', dest='optimizer',
                        help='optimizer',
                        default='adam', type=str)
    parser.add_argument('--disp', dest='disp_interval',
                        help='interval to display training loss',
                        default=1, type=int)
    parser.add_argument('--num_epochs', dest='num_epochs',
                        help='num epochs to train',
                        default=100, type=int)
    parser.add_argument('--val_epoch', dest='val_epoch',
                        help='num epochs to run validation',
                        default=1, type=int)
    parser.add_argument('--save_epoch', dest='save_epoch',
                        help='num epochs to save model',
                        default=1, type=int)
    # NOTE: type=bool is a known argparse pitfall (bool('False') is True);
    # kept as-is for interface compatibility — pass nothing to keep True.
    parser.add_argument('--use_cuda', dest='use_cuda',
                        help='whether or not to use cuda',
                        default=True, type=bool)
    # FIX: was "default=True, type=bool" (copy-paste from --use_cuda), which
    # made args.save_dir a bool and crashed os.path.join/os.makedirs later.
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save trained models',
                        default='models', type=str)
    args = parser.parse_args(argv)
    return args
if __name__ == '__main__':
    args = parse_args()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    mnist = MNIST(bs=args.bs)
    # FIX: project_id was referenced in save_name below but never defined,
    # causing a NameError on the first checkpoint save.
    project_id = 'capsnet'
    # Variables (reused buffers; data is copied into them each batch)
    inputs = torch.FloatTensor(1)
    labels = torch.FloatTensor(1)
    eye = Variable(torch.eye(mnist.num_classes))
    inputs = Variable(inputs)
    labels = Variable(labels)
    # Model
    model = CapsNet(use_cuda=args.use_cuda)
    # cuda
    if args.use_cuda:
        inputs = inputs.cuda()
        labels = labels.cuda()
        model = model.cuda()
        eye = eye.cuda()
    params = []
    for key, value in dict(model.named_parameters()).items():
        if value.requires_grad:
            params += [{'params':[value],'lr':args.lr}]
    # optimizer
    if args.optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters())
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params)
    criterion = CapsLoss()
    print('Training started!')
    for epoch in range(args.num_epochs):
        start = time.time()
        # train
        model.train()
        correct = 0
        train_loss = 0
        for batch_idx, (imgs, targets) in enumerate(mnist.train_dataloader):
            # drop the ragged final batch so the fixed-size buffers fit
            if imgs.size(0) != args.bs:
                continue
            # one-hot encode the integer class targets
            targets = eye.cpu().data.index_select(dim=0, index=targets)
            inputs.data.resize_(imgs.size()).copy_(imgs)
            labels.data.resize_(targets.size()).copy_(targets)
            optimizer.zero_grad()
            outputs, reconst = model(inputs)
            # capsule lengths serve as class scores
            scores = torch.sqrt((outputs ** 2).sum(2))
            loss = criterion(scores, labels, reconst, inputs)
            # FIX: was "train_loss = ...", overwriting the accumulator while
            # the log line divides by (batch_idx+1) to show a running mean.
            train_loss += loss.data.cpu().numpy()[0]
            # backward
            loss.backward()
            optimizer.step()
            scores, classes = F.softmax(scores).max(dim=1)
            predicted = eye.index_select(dim=0, index=classes.squeeze(1))
            predicted_idx = np.argmax(predicted.data.cpu().numpy(),1)
            label_idx = np.argmax(targets.numpy(), 1)
            # per-batch correct count (the printed accuracy is per-batch)
            correct = np.sum(predicted_idx == label_idx)
            # info
            if batch_idx % args.disp_interval == 0:
                end = time.time()
                print("[epoch %2d][iter %4d] loss: %.4f, acc: %.4f%% (%d/%d)" \
                        % (epoch, batch_idx, train_loss/(batch_idx+1), 100.*correct/args.bs, correct, args.bs))
            save_name = os.path.join(args.save_dir, '{}_{}.pth'.format(project_id, epoch))
            # NOTE(review): this checks batch_idx against save_epoch, so with
            # the default save_epoch=1 it rewrites the checkpoint every batch;
            # it also saves only the epoch number, no model/optimizer state.
            if args.save_epoch > 0 and batch_idx % args.save_epoch == 0:
                torch.save({
                    'epoch': epoch,
                }, save_name)
        # val
        if epoch % args.val_epoch == 0:
            print('Validating...')
            correct = 0
            total = 0
            model.eval()
            for batch_idx, (imgs, targets) in enumerate(mnist.eval_dataloader):
                if imgs.size(0) != args.bs:
                    continue
                targets = eye.cpu().data.index_select(dim=0, index=targets)
                inputs.data.resize_(imgs.size()).copy_(imgs)
                labels.data.resize_(targets.size()).copy_(targets)
                outputs, reconst = model(inputs)
                scores = torch.sqrt((outputs ** 2).sum(2))
                scores, classes = F.softmax(scores).max(dim=1)
                predicted = eye.index_select(dim=0, index=classes.squeeze(1))
                predicted_idx = np.argmax(predicted.data.cpu().numpy(),1)
                label_idx = np.argmax(targets.numpy(), 1)
                correct += np.sum(predicted_idx == label_idx)
                total += targets.size(0)
            print("[epoch %2d] val acc: %.4f%% (%d/%d)" \
                    % (epoch, 100.*correct/total, correct, total))
| [
"os.path.exists",
"torch.optim.SGD",
"torch.nn.functional.softmax",
"argparse.ArgumentParser",
"os.makedirs",
"torch.eye",
"numpy.sum",
"torchvision.datasets.MNIST",
"torch.utils.data.DataLoader",
"torch.save",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"torch.aut... | [((970, 1021), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Cupy Capsnet"""'}), "(description='Cupy Capsnet')\n", (993, 1021), False, 'import time, os, argparse\n'), ((2632, 2652), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (2649, 2652), False, 'import torch\n'), ((2666, 2686), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (2683, 2686), False, 'import torch\n'), ((2749, 2765), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (2757, 2765), False, 'from torch.autograd import Variable\n'), ((2779, 2795), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (2787, 2795), False, 'from torch.autograd import Variable\n'), ((468, 546), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'dataset_transform'}), "('data', train=True, download=True, transform=dataset_transform)\n", (482, 546), False, 'from torchvision import transforms, datasets\n'), ((570, 649), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""data"""'], {'train': '(False)', 'download': '(True)', 'transform': 'dataset_transform'}), "('data', train=False, download=True, transform=dataset_transform)\n", (584, 649), False, 'from torchvision import transforms, datasets\n'), ((722, 793), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'bs', 'shuffle': '(True)'}), '(train_dataset, batch_size=bs, shuffle=True)\n', (749, 793), False, 'import torch\n'), ((825, 895), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['eval_dataset'], {'batch_size': 'bs', 'shuffle': '(True)'}), '(eval_dataset, batch_size=bs, shuffle=True)\n', (852, 895), False, 'import torch\n'), ((2505, 2534), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (2519, 2534), False, 'import time, os, argparse\n'), ((2544, 2570), 'os.makedirs', 'os.makedirs', 
(['args.save_dir'], {}), '(args.save_dir)\n', (2555, 2570), False, 'import time, os, argparse\n'), ((2706, 2734), 'torch.eye', 'torch.eye', (['mnist.num_classes'], {}), '(mnist.num_classes)\n', (2715, 2734), False, 'import torch\n'), ((3474, 3485), 'time.time', 'time.time', ([], {}), '()\n', (3483, 3485), False, 'import time, os, argparse\n'), ((3332, 3355), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {}), '(params)\n', (3347, 3355), False, 'import torch\n'), ((4531, 4565), 'numpy.sum', 'np.sum', (['(predicted_idx == label_idx)'], {}), '(predicted_idx == label_idx)\n', (4537, 4565), True, 'import numpy as np\n'), ((5041, 5080), 'torch.save', 'torch.save', (["{'epoch': epoch}", 'save_name'], {}), "({'epoch': epoch}, save_name)\n", (5051, 5080), False, 'import torch\n'), ((332, 353), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (351, 353), False, 'from torchvision import transforms, datasets\n'), ((378, 420), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (398, 420), False, 'from torchvision import transforms, datasets\n'), ((4660, 4671), 'time.time', 'time.time', ([], {}), '()\n', (4669, 4671), False, 'import time, os, argparse\n'), ((6038, 6072), 'numpy.sum', 'np.sum', (['(predicted_idx == label_idx)'], {}), '(predicted_idx == label_idx)\n', (6044, 6072), True, 'import numpy as np\n'), ((4281, 4298), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {}), '(scores)\n', (4290, 4298), True, 'import torch.nn.functional as F\n'), ((5771, 5788), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {}), '(scores)\n', (5780, 5788), True, 'import torch.nn.functional as F\n')] |
import numpy as np
import random
BLOCK_SIZE_IN_BYTE = 16 # bytes
BLOCK_SIZE_IN_HEX = BLOCK_SIZE_IN_BYTE*2 # hex
class Chill:
    """
    Toy 16-byte-block Feistel cipher with ECB/CBC/CFB/OFB/CTR modes.
    Plain/cipher text can be given inline or loaded from files; results are
    stored back on self.plain_text / self.cipher_text.
    All block material is handled as uppercase hex strings internally.
    """
    def __init__(self, plain_text_src = 'text', plain_text = '', plain_text_path = '', key = 'key', mode = 'ECB', cipher_text_path = '', cipher_text=''):
        # constructor
        if mode.upper() in ['ECB', 'CBC', 'CFB', 'OFB', 'CTR']: self.mode = mode.upper()
        else:
            print('Error: incorrect mode of operation')
            exit(0)
        # key related
        self.original_key_length = len(key) # original key length
        self.key = self.__to_hex(key)
        self.__key_padding()
        # number of Feistel rounds depends on the original key length (5..10)
        self.round_time = 5 + (self.original_key_length % 6)
        self.arr_round_key = self.__generate_round_key()
        # random IV drawn from the OS entropy pool (POSIX only: /dev/urandom)
        self.IV = self.__to_hex(open("/dev/urandom","rb").read(BLOCK_SIZE_IN_HEX))
        # plain text related
        self.plain_text = plain_text
        self.plain_text_path = plain_text_path
        self.plain_text_src = plain_text_src # ['text', 'file']
        # get plain text from file
        if self.plain_text_src == 'file':
            self.plain_text = self.__load_text('plain')
            if self.plain_text == '':
                print('Error: 0 bytes plain text')
                exit(0)
        # cipher text related
        self.cipher_text = cipher_text
        self.cipher_text_path = cipher_text_path
        # get cipher text from file
        # NOTE(review): the bare except silently ignores any read error here
        try:
            if self.cipher_text_path != '':
                self.cipher_text = self.__load_text('cipher')
                if self.plain_text == '':
                    print('Error: 0 bytes plain text')
                    exit(0)
        except:
            pass
    def __to_hex(self, content):
        # convert content (string) to hex
        # accepts both str (ord(c)) and bytes (c already an int)
        result = ''
        for c in content:
            try:
                result += format(ord(c), '08b')
            except TypeError as e:
                result += format(c, '08b')
        t = '%08X' % int(result, 2)
        if len(t) % 2 == 1: t = '0' + t
        return t
    def __from_hex(self, content):
        # convert content (hex) to string
        result = ''
        for idx in range (0, len(content), 2):
            c = content[idx]+content[idx+1]
            result += chr(int(c, 16))
        return result
    def __key_padding(self):
        # padding the key
        # deterministically grow/shrink the hex key to exactly one block,
        # seeding the PRNG from the key itself so it is reproducible
        key_length = len(self.key)
        if key_length == BLOCK_SIZE_IN_HEX:
            pass
        elif key_length < BLOCK_SIZE_IN_HEX:
            seed = 0
            for idx, k in enumerate(self.key):
                if (idx % 2 == 1): seed += ord(k)
            random.seed(seed)
            while len(self.key) < BLOCK_SIZE_IN_HEX:
                pos = random.randrange(0, key_length)
                self.key += self.key[pos]
        else:
            seed = 0
            for idx, k in enumerate(self.key):
                if (idx % 2 == 0): seed += ord(k)
            random.seed(seed)
            while len(self.key) > BLOCK_SIZE_IN_HEX:
                pos = random.randrange(0, key_length)
                self.key = self.key[:pos] + self.key[(pos+1):]
    def __load_text(self, mode):
        # load file content
        # open file
        if mode == 'plain': path = self.plain_text_path
        else: path = self.cipher_text_path
        with open(path, mode='rb') as file:
            file_content = file.read()
        return file_content
    def __xor(self, hex_string1, hex_string2):
        # return xor from two strings
        # result is re-padded to an even number of hex digits
        t = format(int(hex(int(hex_string1, 16) ^ int(hex_string2, 16)), 0), '02X')
        while len(t) % 2 == 1 or len(t) == 30: t = '0' + t
        return t
    def __xor_matrix(self, hex_matrix1, hex_matrix2):
        # return xor from two matrix
        # matrix size are equal, return matrix
        result = []
        for idx_row, rows in enumerate(hex_matrix1):
            row_result = []
            for idx_col, cols in enumerate(rows):
                row_result.append(self.__xor(hex_matrix1[idx_row][idx_col], hex_matrix2[idx_row][idx_col]))
            result.append(row_result)
        return np.asarray(result)
    def __transform_to_matrix(self, data):
        # transform data (string) to matrix
        # fixed scatter pattern: 32 hex chars -> 4x4 matrix of byte strings
        result = np.zeros((4, 4), 'U2')
        result[0, 0] = data[0] + data[1]
        result[0, 1] = data[4] + data[5]
        result[0, 2] = data[6] + data[7]
        result[0, 3] = data[18] + data[19]
        result[1, 0] = data[2] + data[3]
        result[1, 1] = data[8] + data[9]
        result[1, 2] = data[16] + data[17]
        result[1, 3] = data[20] + data[21]
        result[2, 0] = data[10] + data[11]
        result[2, 1] = data[14] + data[15]
        result[2, 2] = data[22] + data[23]
        result[2, 3] = data[28] + data[29]
        result[3, 0] = data[12] + data[13]
        result[3, 1] = data[24] + data[25]
        result[3, 2] = data[26] + data[27]
        result[3, 3] = data[30] + data[31]
        return result
    def __transform_to_string(self, data):
        # transform data (matrix) to string
        # exact inverse of the scatter pattern in __transform_to_matrix
        result = data[0, 0] + data[1, 0] + data[0, 1] + data[0, 2] + data[1, 1] + data[2, 0] + data[3, 0] + data[2, 1] + data[1, 2] + data[0, 3] + data[1, 3] + data[2, 2] + data[3, 1] + data[3, 2] + data[2, 3] + data[3, 3]
        return result
    def __subX(self, mode, input):
        # SubX method
        # input is matrix, return matrix
        # per-byte nibble substitution; 'plus' and 'minus' are inverses
        result = np.copy(input)
        for idx_row, rows in enumerate(result):
            for idx_col, cols in enumerate(rows):
                int_result = abs(((int(result[idx_row][idx_col][0]+'0', 16) - 16) % 256) + (1 if mode == 'plus' else -1) * ((int(result[idx_row][idx_col][1], 16) - 1) % 16))
                result[idx_row][idx_col] = format(int(hex(int_result), 0), '02X')
        return result
    def __l_transposition(self, input):
        # L Transposition method
        # input is matrix, return matrix
        # fixed pairwise swaps; applying it twice restores the input
        result = np.copy(input)
        result[0, 0], result[3, 1] = result[3, 1], result[0, 0]
        result[0, 1], result[3, 0] = result[3, 0], result[0, 1]
        result[0, 2], result[3, 3] = result[3, 3], result[0, 2]
        result[0, 3], result[3, 2] = result[3, 2], result[0, 3]
        result[1, 0], result[2, 3] = result[2, 3], result[1, 0]
        result[2, 0], result[1, 3] = result[1, 3], result[2, 0]
        result[1, 1], result[2, 2] = result[2, 2], result[1, 1]
        return result
    def __shift_col(self, input):
        # ShiftCol method
        # input is matrix, return matrix
        # rotate each column by (column sum mod 4); odd columns rotate the
        # opposite direction
        sum_col = [0, 0, 0, 0]
        for idx_row, rows in enumerate(input):
            for idx_col, cols in enumerate(rows):
                sum_col[idx_col] += int(input[idx_row][idx_col], 16)
        result_temp = np.copy(input.T)
        # shift
        result = []
        for idx_row, rows in enumerate(result_temp):
            result.append(np.roll(rows, (sum_col[idx_row] % 4) * (-1 if idx_row % 2 == 1 else 1)))
        return np.asarray(result).T
    def __rot_mod(self, key):
        # RotMod method
        # key is matrix, return matrix
        # rotate the key matrix clockwise, amount derived from key length
        return np.rot90(key, -1 * (self.original_key_length % 4))
    def __xor_col(self, input):
        # XorCol method
        # input is matrix, return matrix
        # each cell becomes itself XOR its right neighbour (wrapping)
        result = np.copy(input)
        for idx_row, rows in enumerate(input):
            idx_col2 = 1
            for idx_col1, cols in enumerate(rows):
                result[idx_row][idx_col1] = self.__xor(input[idx_row][idx_col1], input[idx_row][idx_col2])
                if idx_col2 == len(rows)-1: idx_col2 = 0
                else: idx_col2 += 1
        return result
    def __round_function(self, round_key, right_block=None):
        # placeholder to keep signature documentation nearby
        pass
    def __round_function(self, right_block, round_key):
        # Feistel round function
        # right_block and round_key are matrix, return matrix
        # SubX+
        result = self.__subX('plus', right_block)
        # L Transposition
        result = self.__l_transposition(result)
        # ShiftCol
        result = self.__shift_col(result)
        # XOR with key
        result = self.__xor_matrix(result, round_key)
        return result
    def __generate_round_key(self):
        # Generate n matrix of round key, n = round time
        # round_key is matrix, return array of matrix
        round_key = self.__transform_to_matrix(self.key)
        result = []
        result.append(round_key)
        for i in range(1, self.round_time):
            # RotMod
            round_key_temp = self.__rot_mod(result[i-1])
            # SubX-
            round_key_temp = self.__subX('minus', round_key_temp)
            # XorCol
            round_key_temp = self.__xor_col(round_key_temp)
            result.append(round_key_temp)
        return np.asarray(result)
    def __feistel(self, mode, left_block_matrix, right_block_matrix):
        # Feistel Structure implementation
        # decryption walks the round keys in reverse order
        round_idx = 0
        while round_idx < self.round_time:
            if mode == 'encrypt': round_key_matrix = self.arr_round_key[round_idx]
            else: round_key_matrix = self.arr_round_key[self.round_time - 1 - round_idx] # mode == 'decrypt'
            right_block_matrix_new = self.__xor_matrix(left_block_matrix, self.__round_function(right_block_matrix, round_key_matrix))
            left_block_matrix_new = np.copy(right_block_matrix)
            right_block_matrix = np.copy(right_block_matrix_new)
            left_block_matrix = np.copy(left_block_matrix_new)
            round_idx += 1
        return left_block_matrix, right_block_matrix
    def __plain_pad(self, s):
        # PKCS#7-style padding up to a multiple of BLOCK_SIZE_IN_HEX bytes
        _len = BLOCK_SIZE_IN_HEX
        if hasattr(s, 'encode'):
            # s is a string
            s = s.encode()
        return s + ((_len - len(s) % _len) * chr(_len - len(s) % _len)).encode()
    def __plain_unpad(self, s):
        # strip padding: last byte encodes the pad length
        return s[:-ord(s[len(s)-1:])]
    def __counter_iv(self):
        # CTR mode: increment the IV as a big integer
        # NOTE(review): the [2:-1] slice assumes Python 2's trailing 'L' on
        # hex(); on Python 3 it drops the last hex digit — verify runtime.
        self.IV = hex(int(self.IV, 16) + 1)[2:-1].upper()
    def encrypt(self):
        # ENCRYPTION
        # preprocess
        self.plain_text = self.__plain_pad(self.plain_text)
        self.plain_text = self.__to_hex(self.plain_text)
        self.cipher_text = ''
        # feistel
        # init feistel loop: two blocks (left/right halves) per iteration
        done = False
        idx_left_block = BLOCK_SIZE_IN_HEX
        idx_right_block = 0
        processed_block = 2
        if self.mode in ['CBC', 'CFB', 'OFB', 'CTR']:
            # the IV is prepended to the cipher text so decrypt can recover it
            self.cipher_text += self.IV
        while not done:
            # init round
            if self.mode in ['ECB', 'CBC']:
                right_block = self.plain_text[idx_right_block:idx_right_block+BLOCK_SIZE_IN_HEX]
                left_block = self.plain_text[idx_left_block:idx_left_block+BLOCK_SIZE_IN_HEX]
            elif self.mode in ['CFB', 'OFB', 'CTR']:
                # stream-like modes encrypt the IV, not the plain text
                right_block = self.IV[:BLOCK_SIZE_IN_HEX]
                left_block = self.IV[BLOCK_SIZE_IN_HEX:]
            if self.mode == 'CBC':
                # chain: XOR plain block with previous cipher block (or IV)
                right_block_IV = self.IV[:BLOCK_SIZE_IN_HEX]
                left_block_IV = self.IV[BLOCK_SIZE_IN_HEX:]
                right_block = self.__xor(right_block, right_block_IV)
                left_block = self.__xor(left_block, left_block_IV)
            right_block_matrix = self.__transform_to_matrix(right_block)
            left_block_matrix = self.__transform_to_matrix(left_block)
            left_block_matrix, right_block_matrix = self.__feistel('encrypt', left_block_matrix, right_block_matrix)
            right_block = self.__transform_to_string(right_block_matrix)
            left_block = self.__transform_to_string(left_block_matrix)
            if self.mode == 'OFB':
                # OFB feeds the raw keystream block back as the next IV
                self.IV = right_block + left_block
            if self.mode in ['CFB', 'OFB', 'CTR']:
                # XOR the keystream with the plain text
                right_block_IV = self.plain_text[idx_right_block:idx_right_block+BLOCK_SIZE_IN_HEX]
                left_block_IV = self.plain_text[idx_left_block:idx_left_block+BLOCK_SIZE_IN_HEX]
                right_block = self.__xor(right_block, right_block_IV)
                left_block = self.__xor(left_block, left_block_IV)
            result = right_block + left_block
            if self.mode in ['CBC', 'CFB']:
                # these modes chain on the produced cipher block
                self.IV = result
            elif self.mode == 'CTR':
                self.__counter_iv()
            self.cipher_text += result
            if processed_block == (len(self.plain_text) / BLOCK_SIZE_IN_HEX): done = True
            if not done:
                idx_left_block += 2*BLOCK_SIZE_IN_HEX
                idx_right_block += 2*BLOCK_SIZE_IN_HEX
                processed_block += 2
        # convert cipher text from hex to string
        self.cipher_text = self.__from_hex(self.cipher_text)
        # result stored in self.cipher_text
        # dump cipher to file txt
        if self.cipher_text_path != '':
            f = open(self.cipher_text_path, 'wb')
            f.write(self.cipher_text.encode())
            f.close()
    def decrypt(self):
        # DECRYPTION
        # preprocess
        self.cipher_text = self.__to_hex(self.cipher_text)
        self.plain_text = ''
        # feistel
        # init feistel loop
        done = False
        idx_left_block = BLOCK_SIZE_IN_HEX
        idx_right_block = 0
        processed_block = 2
        if self.mode in ['OFB', 'CTR']:
            # recover the IV that encrypt() prepended, then drop it
            self.IV = self.cipher_text[:BLOCK_SIZE_IN_HEX*2]
            self.cipher_text = self.cipher_text[BLOCK_SIZE_IN_HEX*2:]
        # print self.cipher_text
        while not done:
            if self.mode in ['CBC', 'CFB']:
                # ECB/CBC walk the cipher text from the end; pick the block
                # preceding the current one as the chaining IV
                self.IV = self.cipher_text[-1*(idx_left_block*2+BLOCK_SIZE_IN_HEX*2)+idx_right_block : -1*(idx_left_block*2)+idx_right_block]
                if self.IV == '': break
            # init round
            if self.mode in ['ECB', 'CBC']:
                if idx_right_block == 0:
                    right_block = self.cipher_text[-1*(idx_right_block+BLOCK_SIZE_IN_HEX):]
                else:
                    right_block = self.cipher_text[-1*(idx_right_block+BLOCK_SIZE_IN_HEX) : -1*idx_right_block]
                left_block = self.cipher_text[-1*(idx_left_block+BLOCK_SIZE_IN_HEX) : -1*(idx_left_block)]
            elif self.mode in ['CFB', 'OFB', 'CTR']:
                right_block = self.IV[:BLOCK_SIZE_IN_HEX]
                left_block = self.IV[BLOCK_SIZE_IN_HEX:]
            # decrypt function
            right_block_matrix = self.__transform_to_matrix(right_block)
            left_block_matrix = self.__transform_to_matrix(left_block)
            if self.mode in ['ECB', 'CBC']:
                left_block_matrix, right_block_matrix = self.__feistel('decrypt', left_block_matrix, right_block_matrix)
            elif self.mode in ['CFB', 'OFB', 'CTR']:
                # stream modes re-run encryption to regenerate the keystream
                left_block_matrix, right_block_matrix = self.__feistel('encrypt', left_block_matrix, right_block_matrix)
            left_block = self.__transform_to_string(left_block_matrix)
            right_block = self.__transform_to_string(right_block_matrix)
            if self.mode == 'OFB':
                self.IV = right_block + left_block
            elif self.mode == 'CTR':
                self.__counter_iv()
            elif self.mode == 'CBC':
                # undo the CBC chaining XOR with the previous cipher block
                right_block_IV = self.IV[BLOCK_SIZE_IN_HEX:]
                left_block_IV = self.IV[:BLOCK_SIZE_IN_HEX]
                right_block = self.__xor(right_block, right_block_IV)
                left_block = self.__xor(left_block, left_block_IV)
            if self.mode in ['OFB', 'CTR']:
                right_block_IV = self.cipher_text[idx_right_block:(idx_right_block+BLOCK_SIZE_IN_HEX)]
                left_block_IV = self.cipher_text[idx_left_block:(idx_left_block+BLOCK_SIZE_IN_HEX)]
                right_block = self.__xor(right_block, right_block_IV)
                left_block = self.__xor(left_block, left_block_IV)
                right_block, left_block = left_block, right_block
            elif self.mode in ['CFB']:
                if idx_right_block == 0:
                    right_block_IV = self.cipher_text[-1*(idx_right_block+BLOCK_SIZE_IN_HEX):]
                else:
                    right_block_IV = self.cipher_text[-1*(idx_right_block+BLOCK_SIZE_IN_HEX) : -1*idx_right_block]
                left_block_IV = self.cipher_text[-1*(idx_left_block+BLOCK_SIZE_IN_HEX) : -1*(idx_left_block)]
                right_block_IV, left_block_IV = left_block_IV, right_block_IV
                right_block = self.__xor(right_block, right_block_IV)
                left_block = self.__xor(left_block, left_block_IV)
                right_block, left_block = left_block, right_block
            if self.mode in ['OFB', 'CTR']:
                # stream modes decrypt front-to-back, so append
                self.plain_text = self.plain_text + left_block + right_block
            else:
                # block modes decrypt back-to-front, so prepend
                self.plain_text = left_block + right_block + self.plain_text
            if processed_block == (len(self.cipher_text) / BLOCK_SIZE_IN_HEX): done = True
            if not done:
                idx_left_block += 2*BLOCK_SIZE_IN_HEX
                idx_right_block += 2*BLOCK_SIZE_IN_HEX
                processed_block += 2
        # convert plain text from hex to string
        self.plain_text = self.__from_hex(self.plain_text)
        # remove padding from plain text
        self.plain_text = self.__plain_unpad(self.plain_text)
        self.plain_text = self.plain_text.strip('\00')
        # result stored in self.plain_text
| [
"numpy.copy",
"numpy.roll",
"random.randrange",
"numpy.asarray",
"random.seed",
"numpy.zeros",
"numpy.rot90"
] | [((3627, 3645), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (3637, 3645), True, 'import numpy as np\n'), ((3741, 3763), 'numpy.zeros', 'np.zeros', (['(4, 4)', '"""U2"""'], {}), "((4, 4), 'U2')\n", (3749, 3763), True, 'import numpy as np\n'), ((4817, 4831), 'numpy.copy', 'np.copy', (['input'], {}), '(input)\n', (4824, 4831), True, 'import numpy as np\n'), ((5296, 5310), 'numpy.copy', 'np.copy', (['input'], {}), '(input)\n', (5303, 5310), True, 'import numpy as np\n'), ((6034, 6050), 'numpy.copy', 'np.copy', (['input.T'], {}), '(input.T)\n', (6041, 6050), True, 'import numpy as np\n'), ((6348, 6398), 'numpy.rot90', 'np.rot90', (['key', '(-1 * (self.original_key_length % 4))'], {}), '(key, -1 * (self.original_key_length % 4))\n', (6356, 6398), True, 'import numpy as np\n'), ((6500, 6514), 'numpy.copy', 'np.copy', (['input'], {}), '(input)\n', (6507, 6514), True, 'import numpy as np\n'), ((7754, 7772), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (7764, 7772), True, 'import numpy as np\n'), ((6232, 6250), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (6242, 6250), True, 'import numpy as np\n'), ((8277, 8304), 'numpy.copy', 'np.copy', (['right_block_matrix'], {}), '(right_block_matrix)\n', (8284, 8304), True, 'import numpy as np\n'), ((8332, 8363), 'numpy.copy', 'np.copy', (['right_block_matrix_new'], {}), '(right_block_matrix_new)\n', (8339, 8363), True, 'import numpy as np\n'), ((8390, 8420), 'numpy.copy', 'np.copy', (['left_block_matrix_new'], {}), '(left_block_matrix_new)\n', (8397, 8420), True, 'import numpy as np\n'), ((2311, 2328), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2322, 2328), False, 'import random\n'), ((2570, 2587), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2581, 2587), False, 'import random\n'), ((6148, 6217), 'numpy.roll', 'np.roll', (['rows', '(sum_col[idx_row] % 4 * (-1 if idx_row % 2 == 1 else 1))'], {}), '(rows, sum_col[idx_row] % 4 * (-1 if idx_row % 2 == 
1 else 1))\n', (6155, 6217), True, 'import numpy as np\n'), ((2390, 2421), 'random.randrange', 'random.randrange', (['(0)', 'key_length'], {}), '(0, key_length)\n', (2406, 2421), False, 'import random\n'), ((2649, 2680), 'random.randrange', 'random.randrange', (['(0)', 'key_length'], {}), '(0, key_length)\n', (2665, 2680), False, 'import random\n')] |
# Authors: <NAME>
# License: BSD 3 Clause
"""
PyMF Principal Component Analysis.
PCA: Class for Principal Component Analysis
"""
import numpy as np
from .base import PyMFBase
from .svd import SVD
__all__ = ["PCA"]
class PCA(PyMFBase):
    """
    PCA(data, num_bases=4, center_mean=True)
    Principal Component Analysis. Factorize a data matrix into two matrices s.t.
    F = | data - W*H | is minimal. W is set to the eigenvectors of the
    data covariance. PCA used pymf's SVD, thus, it might be more efficient
    to use it directly.
    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)
    center_mean: bool, True
        Make sure that the data is centred around the mean.
    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())
    Example
    -------
    Applying PCA to some rather stupid data set:
    >>> import numpy as np
    >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
    >>> pca_mdl = PCA(data, num_bases=2)
    >>> pca_mdl.factorize()
    The basis vectors are now stored in pca_mdl.W, the coefficients in pca_mdl.H.
    To compute coefficients for an existing set of basis vectors simply copy W
    to pca_mdl.W, and set compute_w to False:
    >>> data = np.array([[1.5], [1.2]])
    >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
    >>> pca_mdl = PCA(data, num_bases=2)
    >>> pca_mdl.W = W
    >>> pca_mdl.factorize(compute_w=False)
    The result is a set of coefficients pca_mdl.H, s.t. data = W * pca_mdl.H.
    """
    def __init__(self, data, num_bases=0, center_mean=True, **kwargs):
        PyMFBase.__init__(self, data, num_bases=num_bases)
        # center the data around the mean first
        self._center_mean = center_mean
        if self._center_mean:
            # copy the data before centering it
            self._data_orig = data
            # per-row (feature) mean, reshaped to a column vector so it
            # broadcasts across the samples axis
            self._meanv = self._data_orig[:,:].mean(axis=1).reshape(-1,1)
            self.data = self._data_orig - self._meanv
        else:
            self.data = data
    def _init_h(self):
        # H is computed in closed form in _update_h; no init required.
        pass
    def _init_w(self):
        # W is computed in closed form in _update_w; no init required.
        pass
    def _update_h(self):
        # project the (possibly mean-centered) data onto the basis W
        self.H = np.dot(self.W.T, self.data[:,:])
    def _update_w(self):
        # compute eigenvectors and eigenvalues using SVD
        svd_mdl = SVD(self.data)
        svd_mdl.factorize()
        # argsort sorts in ascending order -> do reverese indexing
        # for accesing values in descending order
        S = np.diag(svd_mdl.S)
        order = np.argsort(S)[::-1]
        # select only a few eigenvectors ...
        if self._num_bases >0:
            order = order[:self._num_bases]
        self.W = svd_mdl.U[:,order]
        self.eigenvalues = S[order]
    def factorize(self, show_progress=False, compute_w=True, compute_h=True,
                  compute_err=True, niter=1):
        """ Factorize s.t. WH = data
        Parameters
        ----------
        show_progress : bool
            print some extra information to stdout.
        compute_h : bool
            iteratively update values for H.
        compute_w : bool
            iteratively update values for W.
        compute_err : bool
            compute Frobenius norm |data-WH| after each update and store
            it to .ferr[k].
        Updated Values
        --------------
        .W : updated values for W.
        .H : updated values for H.
        .ferr : Frobenius norm |data-WH|.
        """
        # NOTE(review): PCA is a direct (single-pass) method, so niter is
        # hard-coded to 1 below and the ``niter`` argument is ignored.
        PyMFBase.factorize(self, niter=1, show_progress=show_progress,
                           compute_w=compute_w, compute_h=compute_h,
                           compute_err=compute_err)
def _test():
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    _test()
| [
"numpy.argsort",
"numpy.dot",
"doctest.testmod",
"numpy.diag"
] | [((4222, 4239), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4237, 4239), False, 'import doctest\n'), ((2555, 2588), 'numpy.dot', 'np.dot', (['self.W.T', 'self.data[:, :]'], {}), '(self.W.T, self.data[:, :])\n', (2561, 2588), True, 'import numpy as np\n'), ((2898, 2916), 'numpy.diag', 'np.diag', (['svd_mdl.S'], {}), '(svd_mdl.S)\n', (2905, 2916), True, 'import numpy as np\n'), ((2933, 2946), 'numpy.argsort', 'np.argsort', (['S'], {}), '(S)\n', (2943, 2946), True, 'import numpy as np\n')] |
from flask import Flask, render_template, request
import pickle
import numpy as np
# Load the pre-trained regression model once at import time. A context
# manager is used so the pickle file handle is closed promptly; the
# original passed an open() result directly to pickle.load and leaked it.
with open('Regressor_task2_model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page (index.html) with the input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Read the submitted study hours, run the regression model, and
    render the result page with the rounded prediction."""
    if request.method == 'POST':
        hours = float(request.form['Hours'])
        features = np.array(hours).reshape(1, -1)
        prediction = model.predict(features)
        score = round(prediction[0], 2)
        return render_template('result.html', prediction_text=score)
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run(debug = True)
"flask.render_template",
"numpy.array",
"flask.Flask"
] | [((151, 166), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'from flask import Flask, render_template, request\n'), ((206, 235), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (221, 235), False, 'from flask import Flask, render_template, request\n'), ((407, 419), 'numpy.array', 'np.array', (['Hr'], {}), '(Hr)\n', (415, 419), True, 'import numpy as np\n'), ((555, 609), 'flask.render_template', 'render_template', (['"""result.html"""'], {'prediction_text': 'output'}), "('result.html', prediction_text=output)\n", (570, 609), False, 'from flask import Flask, render_template, request\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes for detecting stars in an astronomical
image. The convention is that all star-finding classes are subclasses of
an abstract base class called ``StarFinderBase``. Each star-finding
class should define a method called ``find_stars`` that finds stars in
an image.
"""
import abc
import math
import warnings
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.table import Table
from astropy.utils import lazyproperty
import numpy as np
from .core import find_peaks
from ..utils._convolution import _filter_data
from ..utils._moments import _moments, _moments_central
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['StarFinderBase', 'DAOStarFinder', 'IRAFStarFinder']
class _StarFinderKernel:
    """
    Class to calculate a 2D Gaussian density enhancement kernel.
    The kernel has negative wings and sums to zero. It is used by both
    `DAOStarFinder` and `IRAFStarFinder`.
    Parameters
    ----------
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.
    ratio : float, optional
        The ratio of the minor and major axis standard deviations of the
        Gaussian kernel. ``ratio`` must be strictly positive and less
        than or equal to 1.0. The default is 1.0 (i.e., a circular
        Gaussian kernel).
    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel, measured counter-clockwise from the positive x
        axis.
    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        2.0*sqrt(2.0*log(2.0))``]. The default is 1.5.
    normalize_zerosum : bool, optional
        Whether to normalize the Gaussian kernel to have zero sum, The
        default is `True`, which generates a density-enhancement kernel.
    Notes
    -----
    The class attributes include the dimensions of the elliptical kernel
    and the coefficients of a 2D elliptical Gaussian function expressed
    as:
    ``f(x,y) = A * exp(-g(x,y))``
    where
    ``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2``
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Gaussian_function
    """
    def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
                 normalize_zerosum=True):
        # validate the kernel geometry before computing anything
        if fwhm < 0:
            raise ValueError('fwhm must be positive.')
        if ratio <= 0 or ratio > 1:
            raise ValueError('ratio must be positive and less or equal '
                             'than 1.')
        if sigma_radius <= 0:
            raise ValueError('sigma_radius must be positive.')
        self.fwhm = fwhm
        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        # sigma along the major (x) axis; the minor (y) axis is scaled
        # down by ``ratio``
        self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
        self.ysigma = self.xsigma * self.ratio
        theta_radians = np.deg2rad(self.theta)
        cost = np.cos(theta_radians)
        sint = np.sin(theta_radians)
        xsigma2 = self.xsigma**2
        ysigma2 = self.ysigma**2
        # coefficients of the rotated 2D Gaussian exponent g(x, y)
        # (see the class Notes section)
        self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
        # CCW
        self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
        self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))
        # find the extent of an ellipse with radius = sigma_radius*sigma;
        # solve for the horizontal and vertical tangents of an ellipse
        # defined by g(x,y) = f
        self.f = self.sigma_radius**2 / 2.0
        denom = (self.a * self.c) - self.b**2
        # nx and ny are always odd
        self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
        self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1
        self.xc = self.xradius = self.nx // 2
        self.yc = self.yradius = self.ny // 2
        # define the kernel on a 2D grid
        yy, xx = np.mgrid[0:self.ny, 0:self.nx]
        self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
        self.elliptical_radius = (self.a * (xx - self.xc)**2 +
                                  2.0 * self.b * (xx - self.xc) *
                                  (yy - self.yc) +
                                  self.c * (yy - self.yc)**2)
        # keep pixels inside the truncation ellipse or within a small
        # circular core (radius 2 pixels) around the center
        self.mask = np.where(
            (self.elliptical_radius <= self.f) |
            (self.circular_radius <= 2.0), 1, 0).astype(int)
        self.npixels = self.mask.sum()
        # NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
        self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
        self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask
        # denom = variance * npixels
        denom = ((self.gaussian_kernel**2).sum() -
                 (self.gaussian_kernel.sum()**2 / self.npixels))
        # relative error of the kernel; DAOStarFinder multiplies the
        # user threshold by this to get the effective threshold
        self.relerr = 1.0 / np.sqrt(denom)
        # normalize the kernel to zero sum
        if normalize_zerosum:
            self.data = ((self.gaussian_kernel -
                          (self.gaussian_kernel.sum() / self.npixels)) /
                         denom) * self.mask
        else:
            self.data = self.gaussian_kernel
        self.shape = self.data.shape
class _StarCutout:
    """
    Container for a single star's 2D image cutout, used by the star
    finder classes.
    Parameters
    ----------
    data : 2D array_like
        Cutout taken from the original (unconvolved) image.
    convdata : 2D array_like
        Cutout taken from the convolved image.
    slices : tuple of two slices
        The (y, x) slices locating the cutout within the original
        image.
    xpeak, ypeak : float
        Pixel coordinates of the peak pixel.
    kernel : `_StarFinderKernel`
        The convolution kernel; its shape must match ``data``.
    threshold_eff : float
        Effective detection threshold (the star finder threshold
        multiplied by the kernel relerr).
    """
    def __init__(self, data, convdata, slices, xpeak, ypeak, kernel,
                 threshold_eff):
        self.data = data
        self.convdata = convdata
        self.slices = slices
        self.xpeak = xpeak
        self.ypeak = ypeak
        self.kernel = kernel
        self.threshold_eff = threshold_eff
        # cutout geometry; the kernel (and thus the cutout) dimensions
        # are always odd, so the center pixel is well defined
        ny, nx = data.shape
        self.shape = data.shape
        self.nx = nx
        self.ny = ny
        self.cutout_xcenter = int(nx // 2)
        self.cutout_ycenter = int(ny // 2)
        # start indices of the cutout within the original image
        self.xorigin = self.slices[1].start
        self.yorigin = self.slices[0].start
        # kernel mask and its number of unmasked pixels
        self.mask = kernel.mask
        self.npixels = kernel.npixels
        self.data_masked = self.data * self.mask
class _DAOFindProperties:
    """
    Class to calculate the properties of each detected star, as defined
    by `DAOFIND`_.
    Parameters
    ----------
    star_cutout : `_StarCutout`
        A `_StarCutout` object containing the image cutout for the star.
    kernel : `_StarFinderKernel`
        The convolution kernel. The shape of the kernel must match that
        of the input ``star_cutout``.
    sky : float, optional
        The local sky level around the source. ``sky`` is used only to
        calculate the source peak value, flux, and magnitude. The
        default is 0.
    .. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
    """
    def __init__(self, star_cutout, kernel, sky=0.):
        if not isinstance(star_cutout, _StarCutout):
            raise ValueError('data must be an _StarCutout object')
        if star_cutout.data.shape != kernel.shape:
            raise ValueError('cutout and kernel must have the same shape')
        self.cutout = star_cutout
        self.kernel = kernel
        self.sky = sky # DAOFIND has no sky input -> same as sky=0.
        self.data = star_cutout.data
        self.data_masked = star_cutout.data_masked
        self.npixels = star_cutout.npixels # unmasked pixels
        self.nx = star_cutout.nx
        self.ny = star_cutout.ny
        self.xcenter = star_cutout.cutout_xcenter
        self.ycenter = star_cutout.cutout_ycenter
    @lazyproperty
    def data_peak(self):
        # value of the unconvolved data at the cutout center pixel
        return self.data[self.ycenter, self.xcenter]
    @lazyproperty
    def conv_peak(self):
        # value of the convolved data at the cutout center pixel
        return self.cutout.convdata[self.ycenter, self.xcenter]
    @lazyproperty
    def roundness1(self):
        # set the central (peak) pixel to zero
        cutout_conv = self.cutout.convdata.copy()
        cutout_conv[self.ycenter, self.xcenter] = 0.0 # for sum4
        # calculate the four roundness quadrants.
        # the cutout size always matches the kernel size, which have odd
        # dimensions.
        # quad1 = bottom right
        # quad2 = bottom left
        # quad3 = top left
        # quad4 = top right
        # 3 3 4 4 4
        # 3 3 4 4 4
        # 3 3 x 1 1
        # 2 2 2 1 1
        # 2 2 2 1 1
        quad1 = cutout_conv[0:self.ycenter + 1, self.xcenter + 1:]
        quad2 = cutout_conv[0:self.ycenter, 0:self.xcenter + 1]
        quad3 = cutout_conv[self.ycenter:, 0:self.xcenter]
        quad4 = cutout_conv[self.ycenter + 1:, self.xcenter:]
        sum2 = -quad1.sum() + quad2.sum() - quad3.sum() + quad4.sum()
        if sum2 == 0:
            return 0.
        sum4 = np.abs(cutout_conv).sum()
        # NOTE(review): this returns None (not NaN) when sum4 <= 0; the
        # caller compares roundness1 with <=/>=, which would raise on
        # None -- confirm this branch is unreachable in practice.
        if sum4 <= 0:
            return None
        return 2.0 * sum2 / sum4
    @lazyproperty
    def sharpness(self):
        npixels = self.npixels - 1 # exclude the peak pixel
        data_mean = (np.sum(self.data_masked) - self.data_peak) / npixels
        return (self.data_peak - data_mean) / self.conv_peak
    def daofind_marginal_fit(self, axis=0):
        """
        Fit 1D Gaussians, defined from the marginal x/y kernel
        distributions, to the marginal x/y distributions of the original
        (unconvolved) image.
        These fits are used calculate the star centroid and roundness
        ("GROUND") properties.
        Parameters
        ----------
        axis : {0, 1}, optional
            The axis for which the marginal fit is performed:
            * 0: for the x axis
            * 1: for the y axis
        Returns
        -------
        dx : float
            The fractional shift in x or y (depending on ``axis`` value)
            of the image centroid relative to the maximum pixel.
        hx : float
            The height of the best-fitting Gaussian to the marginal x or
            y (depending on ``axis`` value) distribution of the
            unconvolved source data.
        """
        # define triangular weighting functions along each axis, peaked
        # in the middle and equal to one at the edge
        x = self.xcenter - np.abs(np.arange(self.nx) - self.xcenter) + 1
        y = self.ycenter - np.abs(np.arange(self.ny) - self.ycenter) + 1
        xwt, ywt = np.meshgrid(x, y)
        if axis == 0: # marginal distributions along x axis
            wt = xwt[0] # 1D
            wts = ywt # 2D
            size = self.nx
            center = self.xcenter
            sigma = self.kernel.xsigma
            dxx = center - np.arange(size)
        elif axis == 1: # marginal distributions along y axis
            wt = np.transpose(ywt)[0] # 1D
            wts = xwt # 2D
            size = self.ny
            center = self.ycenter
            sigma = self.kernel.ysigma
            dxx = np.arange(size) - center
        # compute marginal sums for given axis
        wt_sum = np.sum(wt)
        dx = center - np.arange(size)
        # weighted marginal sums
        kern_sum_1d = np.sum(self.kernel.gaussian_kernel_unmasked * wts,
                             axis=axis)
        kern_sum = np.sum(kern_sum_1d * wt)
        kern2_sum = np.sum(kern_sum_1d**2 * wt)
        dkern_dx = kern_sum_1d * dx
        dkern_dx_sum = np.sum(dkern_dx * wt)
        dkern_dx2_sum = np.sum(dkern_dx**2 * wt)
        kern_dkern_dx_sum = np.sum(kern_sum_1d * dkern_dx * wt)
        data_sum_1d = np.sum(self.data * wts, axis=axis)
        data_sum = np.sum(data_sum_1d * wt)
        data_kern_sum = np.sum(data_sum_1d * kern_sum_1d * wt)
        data_dkern_dx_sum = np.sum(data_sum_1d * dkern_dx * wt)
        data_dx_sum = np.sum(data_sum_1d * dxx * wt)
        # perform linear least-squares fit (where data = sky + hx*kernel)
        # to find the amplitude (hx)
        # reject the star if the fit amplitude is not positive
        hx_numer = data_kern_sum - (data_sum * kern_sum) / wt_sum
        if hx_numer <= 0.:
            return np.nan, np.nan
        hx_denom = kern2_sum - (kern_sum**2 / wt_sum)
        if hx_denom <= 0.:
            return np.nan, np.nan
        # compute fit amplitude
        hx = hx_numer / hx_denom
        # sky = (data_sum - (hx * kern_sum)) / wt_sum
        # compute centroid shift
        dx = ((kern_dkern_dx_sum -
               (data_dkern_dx_sum - dkern_dx_sum*data_sum)) /
              (hx * dkern_dx2_sum / sigma**2))
        hsize = size / 2.
        # if the fitted shift falls outside the cutout, fall back to a
        # simple weighted first moment (or zero)
        if abs(dx) > hsize:
            if data_sum == 0.:
                dx = 0.0
            else:
                dx = data_dx_sum / data_sum
                if abs(dx) > hsize:
                    dx = 0.0
        return dx, hx
    @lazyproperty
    def dx_hx(self):
        # (centroid shift, fitted Gaussian height) along x
        return self.daofind_marginal_fit(axis=0)
    @lazyproperty
    def dy_hy(self):
        # (centroid shift, fitted Gaussian height) along y
        return self.daofind_marginal_fit(axis=1)
    @lazyproperty
    def dx(self):
        return self.dx_hx[0]
    @lazyproperty
    def dy(self):
        return self.dy_hy[0]
    @lazyproperty
    def xcentroid(self):
        return self.cutout.xpeak + self.dx
    @lazyproperty
    def ycentroid(self):
        return self.cutout.ypeak + self.dy
    @lazyproperty
    def hx(self):
        return self.dx_hx[1]
    @lazyproperty
    def hy(self):
        return self.dy_hy[1]
    @lazyproperty
    def roundness2(self):
        """
        The star roundness.
        This roundness parameter represents the ratio of the difference
        in the height of the best fitting Gaussian function in x minus
        the best fitting Gaussian function in y, divided by the average
        of the best fitting Gaussian functions in x and y. A circular
        source will have a zero roundness. A source extended in x or y
        will have a negative or positive roundness, respectively.
        """
        if np.isnan(self.hx) or np.isnan(self.hy):
            return np.nan
        else:
            return 2.0 * (self.hx - self.hy) / (self.hx + self.hy)
    @lazyproperty
    def peak(self):
        # sky-subtracted peak pixel value
        return self.data_peak - self.sky
    @lazyproperty
    def npix(self):
        """
        The total number of pixels in the rectangular cutout image.
        """
        return self.data.size
    @lazyproperty
    def flux(self):
        # peak density in the convolved image divided by the effective
        # detection threshold, minus the sky contribution
        return ((self.conv_peak / self.cutout.threshold_eff) -
                (self.sky * self.npix))
    @lazyproperty
    def mag(self):
        # instrumental magnitude; NaN for non-positive flux
        if self.flux <= 0:
            return np.nan
        else:
            return -2.5 * np.log10(self.flux)
class _IRAFStarFindProperties:
    """
    Class to calculate the properties of each detected star, as defined
    by IRAF's ``starfind`` task.
    Parameters
    ----------
    star_cutout : `_StarCutout`
        A `_StarCutout` object containing the image cutout for the star.
    kernel : `_StarFinderKernel`
        The convolution kernel. The shape of the kernel must match that
        of the input ``star_cutout``.
    sky : `None` or float, optional
        The local sky level around the source. If sky is ``None``, then
        a local sky level will be (crudely) estimated using the IRAF
        ``starfind`` calculation.
    """
    def __init__(self, star_cutout, kernel, sky=None):
        if not isinstance(star_cutout, _StarCutout):
            raise ValueError('data must be an _StarCutout object')
        if star_cutout.data.shape != kernel.shape:
            raise ValueError('cutout and kernel must have the same shape')
        self.cutout = star_cutout
        self.kernel = kernel
        if sky is None:
            skymask = ~self.kernel.mask.astype(bool) # 1=sky, 0=obj
            nsky = np.count_nonzero(skymask)
            if nsky == 0:
                # no sky pixels in the cutout; fall back to the
                # difference of the data and convolved-data maxima
                mean_sky = (np.max(self.cutout.data) -
                            np.max(self.cutout.convdata))
            else:
                mean_sky = np.sum(self.cutout.data * skymask) / nsky
            self.sky = mean_sky
        else:
            self.sky = sky
    @lazyproperty
    def data(self):
        # sky-subtracted cutout restricted to the kernel mask
        cutout = np.array((self.cutout.data - self.sky) * self.cutout.mask)
        # IRAF starfind discards negative pixels
        cutout = np.where(cutout > 0, cutout, 0)
        return cutout
    @lazyproperty
    def moments(self):
        # raw image moments up to first order
        return _moments(self.data, order=1)
    @lazyproperty
    def cutout_xcentroid(self):
        return self.moments[0, 1] / self.moments[0, 0]
    @lazyproperty
    def cutout_ycentroid(self):
        return self.moments[1, 0] / self.moments[0, 0]
    @lazyproperty
    def xcentroid(self):
        return self.cutout_xcentroid + self.cutout.xorigin
    @lazyproperty
    def ycentroid(self):
        return self.cutout_ycentroid + self.cutout.yorigin
    @lazyproperty
    def npix(self):
        # number of positive (sky-subtracted, masked) pixels
        return np.count_nonzero(self.data)
    @lazyproperty
    def sky(self):
        # NOTE(review): ``self.sky`` is always assigned in ``__init__``;
        # if this getter were ever invoked before that assignment it
        # would recurse infinitely. Confirm the intent of shadowing the
        # instance attribute with a lazyproperty here.
        return self.sky
    @lazyproperty
    def peak(self):
        return np.max(self.data)
    @lazyproperty
    def flux(self):
        return np.sum(self.data)
    @lazyproperty
    def mag(self):
        return -2.5 * np.log10(self.flux)
    @lazyproperty
    def moments_central(self):
        # central moments up to second order, normalized by the total
        # flux (zeroth moment)
        return _moments_central(
            self.data, (self.cutout_xcentroid, self.cutout_ycentroid),
            order=2) / self.moments[0, 0]
    @lazyproperty
    def mu_sum(self):
        return self.moments_central[0, 2] + self.moments_central[2, 0]
    @lazyproperty
    def mu_diff(self):
        return self.moments_central[0, 2] - self.moments_central[2, 0]
    @lazyproperty
    def fwhm(self):
        # object FWHM derived from the second-order central moments
        return 2.0 * np.sqrt(np.log(2.0) * self.mu_sum)
    @lazyproperty
    def sharpness(self):
        # ratio of the object FWHM to the kernel FWHM
        return self.fwhm / self.kernel.fwhm
    @lazyproperty
    def roundness(self):
        return np.sqrt(self.mu_diff**2 +
                       4.0 * self.moments_central[1, 1]**2) / self.mu_sum
    @lazyproperty
    def pa(self):
        # position angle in degrees, folded into [0, 180)
        pa = np.rad2deg(0.5 * np.arctan2(2.0 * self.moments_central[1, 1],
                                        self.mu_diff))
        if pa < 0.:
            pa += 180.
        return pa
def _find_stars(data, kernel, threshold_eff, min_separation=None,
                mask=None, exclude_border=False):
    """
    Find stars in an image.
    Parameters
    ----------
    data : 2D array_like
        The 2D array of the image.
    kernel : `_StarFinderKernel`
        The convolution kernel.
    threshold_eff : float
        The absolute image value above which to select sources. This
        threshold should be the threshold input to the star finder class
        multiplied by the kernel relerr.
    min_separation : float or `None`, optional
        The minimum separation (in pixels) for detected objects. If
        `None` (DAOFIND mode), the kernel mask is used as the
        peak-finder footprint; otherwise a circular footprint of this
        radius is used.
    mask : 2D bool array, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when searching for stars.
    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders. The default is
        `False`, which is the mode used by IRAF's `DAOFIND`_ and
        `starfind`_ tasks.
    Returns
    -------
    objects : list of `_StarCutout` or `None`
        A list of `_StarCutout` objects containing the image cutout for
        each source, or `None` if no peaks were found.
    .. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
    .. _starfind: https://iraf.net/irafhelp.php?val=starfind
    """
    convolved_data = _filter_data(data, kernel.data, mode='constant',
                                  fill_value=0.0, check_normalization=False)
    # define a local footprint for the peak finder
    if min_separation is None: # daofind
        footprint = kernel.mask.astype(bool)
    else:
        # define a circular footprint
        idx = np.arange(-min_separation, min_separation + 1)
        xx, yy = np.meshgrid(idx, idx)
        footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int)
    # pad the data and convolved image by the kernel x/y radius to allow
    # for detections near the edges
    if not exclude_border:
        ypad = kernel.yradius
        xpad = kernel.xradius
        pad = ((ypad, ypad), (xpad, xpad))
        # mode must be a string for numpy < 0.11
        # (see https://github.com/numpy/numpy/issues/7112)
        mode = str('constant')
        data = np.pad(data, pad, mode=mode, constant_values=[0.])
        if mask is not None:
            mask = np.pad(mask, pad, mode=mode, constant_values=[0.])
        convolved_data = np.pad(convolved_data, pad, mode=mode,
                                constant_values=[0.])
    # find local peaks in the convolved data
    with warnings.catch_warnings():
        # suppress any NoDetectionsWarning from find_peaks
        warnings.filterwarnings('ignore', category=NoDetectionsWarning)
        tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint,
                         mask=mask)
    if tbl is None:
        return None
    coords = np.transpose([tbl['y_peak'], tbl['x_peak']])
    star_cutouts = []
    for (ypeak, xpeak) in coords:
        # now extract the object from the data, centered on the peak
        # pixel in the convolved image, with the same size as the kernel
        x0 = xpeak - kernel.xradius
        x1 = xpeak + kernel.xradius + 1
        y0 = ypeak - kernel.yradius
        y1 = ypeak + kernel.yradius + 1
        # skip sources whose cutout would fall outside the (padded) image
        if x0 < 0 or x1 > data.shape[1]:
            continue # pragma: no cover
        if y0 < 0 or y1 > data.shape[0]:
            continue # pragma: no cover
        slices = (slice(y0, y1), slice(x0, x1))
        data_cutout = data[slices]
        convdata_cutout = convolved_data[slices]
        # correct pixel values for the previous image padding
        if not exclude_border:
            x0 -= kernel.xradius
            x1 -= kernel.xradius
            y0 -= kernel.yradius
            y1 -= kernel.yradius
            xpeak -= kernel.xradius
            ypeak -= kernel.yradius
            slices = (slice(y0, y1), slice(x0, x1))
        star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices,
                                        xpeak, ypeak, kernel, threshold_eff))
    return star_cutouts
class StarFinderBase(metaclass=abc.ABCMeta):
    """
    Abstract base class for star finders.
    """
    def __call__(self, data, mask=None):
        # calling an instance is a shorthand for ``find_stars``
        return self.find_stars(data, mask=mask)
    @abc.abstractmethod
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.
        Parameters
        ----------
        data : 2D array_like
            The 2D image array.
        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``; `True`
            entries mark pixels of ``data`` that are ignored when
            searching for stars.
        Returns
        -------
        table : `~astropy.table.Table`
            A table of found stars; an empty table is returned when no
            stars are found.
        """
        raise NotImplementedError('Needs to be implemented in a subclass.')
class DAOStarFinder(StarFinderBase):
    """
    Detect stars in an image using the DAOFIND (`Stetson 1987
    <https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract>`_)
    algorithm.
    DAOFIND (`Stetson 1987; PASP 99, 191
    <https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract>`_)
    searches images for local density maxima that have a peak amplitude
    greater than ``threshold`` (approximately; ``threshold`` is applied
    to a convolved image) and have a size and shape similar to the
    defined 2D Gaussian kernel. The Gaussian kernel is defined by the
    ``fwhm``, ``ratio``, ``theta``, and ``sigma_radius`` input
    parameters.
    ``DAOStarFinder`` finds the object centroid by fitting the marginal x
    and y 1D distributions of the Gaussian kernel to the marginal x and
    y distributions of the input (unconvolved) ``data`` image.
    ``DAOStarFinder`` calculates the object roundness using two methods. The
    ``roundlo`` and ``roundhi`` bounds are applied to both measures of
    roundness. The first method (``roundness1``; called ``SROUND`` in
    `DAOFIND`_) is based on the source symmetry and is the ratio of a
    measure of the object's bilateral (2-fold) to four-fold symmetry.
    The second roundness statistic (``roundness2``; called ``GROUND`` in
    `DAOFIND`_) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting
    Gaussian function in y, divided by the average of the best fitting
    Gaussian functions in x and y. A circular source will have a zero
    roundness. A source extended in x or y will have a negative or
    positive roundness, respectively.
    The sharpness statistic measures the ratio of the difference between
    the height of the central pixel and the mean of the surrounding
    non-bad pixels in the convolved image, to the height of the best
    fitting Gaussian function at that point.
    Parameters
    ----------
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.
    ratio : float, optional
        The ratio of the minor to major axis standard deviations of the
        Gaussian kernel. ``ratio`` must be strictly positive and less
        than or equal to 1.0. The default is 1.0 (i.e., a circular
        Gaussian kernel).
    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel measured counter-clockwise from the positive x
        axis.
    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        (2.0*sqrt(2.0*log(2.0)))``].
    sharplo : float, optional
        The lower bound on sharpness for object detection.
    sharphi : float, optional
        The upper bound on sharpness for object detection.
    roundlo : float, optional
        The lower bound on roundness for object detection.
    roundhi : float, optional
        The upper bound on roundness for object detection.
    sky : float, optional
        The background sky level of the image. Setting ``sky`` affects
        only the output values of the object ``peak``, ``flux``, and
        ``mag`` values. The default is 0.0, which should be used to
        replicate the results from `DAOFIND`_.
    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders. The default is
        `False`, which is the mode used by `DAOFIND`_.
    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object list.
        If ``brightest`` is set to `None`, all objects will be selected.
    peakmax : float, None, optional
        Maximum peak pixel value in an object. Only objects whose peak pixel
        values are *strictly smaller* than ``peakmax`` will be selected.
        This may be used to exclude saturated sources. By default, when
        ``peakmax`` is set to `None`, all objects will be selected.
        .. warning::
            `DAOStarFinder` automatically excludes objects whose peak
            pixel values are negative. Therefore, setting ``peakmax`` to a
            non-positive value would result in exclusion of all objects.
    See Also
    --------
    IRAFStarFinder
    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0. The equivalent parameters in `DAOFIND`_ are
    ``boundary='constant'`` and ``constant=0.0``.
    The main differences between `~photutils.detection.DAOStarFinder`
    and `~photutils.detection.IRAFStarFinder` are:
    * `~photutils.detection.IRAFStarFinder` always uses a 2D
      circular Gaussian kernel, while
      `~photutils.detection.DAOStarFinder` can use an elliptical
      Gaussian kernel.
    * `~photutils.detection.IRAFStarFinder` calculates the objects'
      centroid, roundness, and sharpness using image moments.
    References
    ----------
    .. [1] <NAME>. 1987; PASP 99, 191
           (https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract)
    .. [2] https://iraf.net/irafhelp.php?val=daofind
    .. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
    """
    def __init__(self, threshold, fwhm, ratio=1.0, theta=0.0,
                 sigma_radius=1.5, sharplo=0.2, sharphi=1.0, roundlo=-1.0,
                 roundhi=1.0, sky=0.0, exclude_border=False,
                 brightest=None, peakmax=None):
        if not np.isscalar(threshold):
            raise TypeError('threshold must be a scalar value.')
        self.threshold = threshold
        if not np.isscalar(fwhm):
            raise TypeError('fwhm must be a scalar value.')
        self.fwhm = fwhm
        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        self.sharplo = sharplo
        self.sharphi = sharphi
        self.roundlo = roundlo
        self.roundhi = roundhi
        self.sky = sky
        self.exclude_border = exclude_border
        # build the Gaussian density-enhancement kernel once
        self.kernel = _StarFinderKernel(self.fwhm, self.ratio, self.theta,
                                        self.sigma_radius)
        # effective threshold applied to the convolved image
        self.threshold_eff = self.threshold * self.kernel.relerr
        self.brightest = brightest
        self.peakmax = peakmax
        self._star_cutouts = None
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.
        Parameters
        ----------
        data : 2D array_like
            The 2D image array.
        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked. Masked pixels are ignored when searching for
            stars.
        Returns
        -------
        table : `~astropy.table.Table` or `None`
            A table of found stars with the following parameters:
            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``sharpness``: object sharpness.
            * ``roundness1``: object roundness based on symmetry.
            * ``roundness2``: object roundness based on marginal Gaussian
              fits.
            * ``npix``: the total number of pixels in the Gaussian kernel
              array.
            * ``sky``: the input ``sky`` parameter.
            * ``peak``: the peak, sky-subtracted, pixel value of the object.
            * ``flux``: the object flux calculated as the peak density in
              the convolved image divided by the detection threshold. This
              derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``. The derivation matches that of
              `DAOFIND`_ if ``sky`` is 0.0.
            `None` is returned if no stars are found.
        """
        star_cutouts = _find_stars(data, self.kernel, self.threshold_eff,
                                   mask=mask,
                                   exclude_border=self.exclude_border)
        if star_cutouts is None:
            warnings.warn('No sources were found.', NoDetectionsWarning)
            return None
        self._star_cutouts = star_cutouts
        star_props = []
        for star_cutout in star_cutouts:
            props = _DAOFindProperties(star_cutout, self.kernel, self.sky)
            # reject sources whose marginal Gaussian fits failed
            if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any():
                continue
            # the sharpness/roundness bounds are exclusive (strict
            # inequalities), so boundary values are rejected
            if (props.sharpness <= self.sharplo or
                    props.sharpness >= self.sharphi):
                continue
            if (props.roundness1 <= self.roundlo or
                    props.roundness1 >= self.roundhi):
                continue
            if (props.roundness2 <= self.roundlo or
                    props.roundness2 >= self.roundhi):
                continue
            if self.peakmax is not None and props.peak >= self.peakmax:
                continue
            star_props.append(props)
        nstars = len(star_props)
        if nstars == 0:
            warnings.warn('Sources were found, but none pass the sharpness '
                          'and roundness criteria.', NoDetectionsWarning)
            return None
        if self.brightest is not None:
            # keep only the ``brightest`` sources; sorting the selected
            # indices preserves the original detection order
            fluxes = [props.flux for props in star_props]
            idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
            star_props = [star_props[k] for k in idx]
            nstars = len(star_props)
        table = Table()
        table['id'] = np.arange(nstars) + 1
        columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1',
                   'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag')
        for column in columns:
            table[column] = [getattr(props, column) for props in star_props]
        return table
class IRAFStarFinder(StarFinderBase):
    """
    Detect stars in an image using IRAF's "starfind" algorithm.
    `IRAFStarFinder` searches images for local density maxima that have
    a peak amplitude greater than ``threshold`` above the local
    background and have a PSF full-width at half-maximum similar to the
    input ``fwhm``. The objects' centroid, roundness (ellipticity), and
    sharpness are calculated using image moments.
    Parameters
    ----------
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the 2D circular Gaussian
        kernel in units of pixels.
    minsep_fwhm : float, optional
        The minimum separation for detected objects in units of
        ``fwhm``.
    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        2.0*sqrt(2.0*log(2.0))``].
    sharplo : float, optional
        The lower bound on sharpness for object detection.
    sharphi : float, optional
        The upper bound on sharpness for object detection.
    roundlo : float, optional
        The lower bound on roundness for object detection.
    roundhi : float, optional
        The upper bound on roundness for object detection.
    sky : float, optional
        The background sky level of the image.  Inputing a ``sky`` value
        will override the background sky estimate.  Setting ``sky``
        affects only the output values of the object ``peak``, ``flux``,
        and ``mag`` values.  The default is ``None``, which means the
        sky value will be estimated using the `starfind`_ method.
    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `starfind`_.
    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object list.
        If ``brightest`` is set to `None`, all objects will be selected.
    peakmax : float, None, optional
        Maximum peak pixel value in an object.  Only objects whose peak pixel
        values are *strictly smaller* than ``peakmax`` will be selected.
        This may be used to exclude saturated sources.  By default, when
        ``peakmax`` is set to `None`, all objects will be selected.
        .. warning::
            `IRAFStarFinder` automatically excludes objects whose peak
            pixel values are negative.  Therefore, setting ``peakmax`` to a
            non-positive value would result in exclusion of all objects.
    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0.  The equivalent parameters in IRAF's `starfind`_ are
    ``boundary='constant'`` and ``constant=0.0``.
    IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as
    input parameters.  The equivalent input values for
    `IRAFStarFinder` are:
    * ``fwhm = hwhmpsf * 2``
    * ``sigma_radius = fradius * sqrt(2.0*log(2.0))``
    * ``minsep_fwhm = 0.5 * sepmin``
    The main differences between `~photutils.detection.DAOStarFinder`
    and `~photutils.detection.IRAFStarFinder` are:
    * `~photutils.detection.IRAFStarFinder` always uses a 2D
      circular Gaussian kernel, while
      `~photutils.detection.DAOStarFinder` can use an elliptical
      Gaussian kernel.
    * `~photutils.detection.IRAFStarFinder` calculates the objects'
      centroid, roundness, and sharpness using image moments.
    See Also
    --------
    DAOStarFinder
    References
    ----------
    .. [1] https://iraf.net/irafhelp.php?val=starfind
    .. _starfind: https://iraf.net/irafhelp.php?val=starfind
    """
    def __init__(self, threshold, fwhm, sigma_radius=1.5, minsep_fwhm=2.5,
                 sharplo=0.5, sharphi=2.0, roundlo=0.0, roundhi=0.2, sky=None,
                 exclude_border=False, brightest=None, peakmax=None):
        # Validate scalar inputs up front so bad values fail immediately.
        if not np.isscalar(threshold):
            raise TypeError('threshold must be a scalar value.')
        self.threshold = threshold
        if not np.isscalar(fwhm):
            raise TypeError('fwhm must be a scalar value.')
        self.fwhm = fwhm
        self.sigma_radius = sigma_radius
        self.minsep_fwhm = minsep_fwhm
        self.sharplo = sharplo
        self.sharphi = sharphi
        self.roundlo = roundlo
        self.roundhi = roundhi
        self.sky = sky
        self.exclude_border = exclude_border
        # Minimum object separation in whole pixels (rounded), but never
        # smaller than 2 pixels.
        self.min_separation = max(2, int((self.fwhm * self.minsep_fwhm) +
                                         0.5))
        # ratio=1.0 yields the circular Gaussian kernel that IRAF's
        # starfind always uses (unlike DAOStarFinder's elliptical option).
        self.kernel = _StarFinderKernel(self.fwhm, ratio=1.0, theta=0.0,
                                         sigma_radius=self.sigma_radius)
        self.brightest = brightest
        self.peakmax = peakmax
        # Cached cutouts from the most recent find_stars() call.
        self._star_cutouts = None
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.
        Parameters
        ----------
        data : 2D array_like
            The 2D image array.
        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked.  Masked pixels are ignored when searching for
            stars.
        Returns
        -------
        table : `~astropy.table.Table` or `None`
            A table of found objects with the following parameters:
            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``fwhm``: object FWHM.
            * ``sharpness``: object sharpness.
            * ``roundness``: object roundness.
            * ``pa``: object position angle (degrees counter clockwise from
              the positive x axis).
            * ``npix``: the total number of (positive) unmasked pixels.
            * ``sky``: the local ``sky`` value.
            * ``peak``: the peak, sky-subtracted, pixel value of the object.
            * ``flux``: the object instrumental flux.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``.
            `None` is returned if no stars are found.
        """
        star_cutouts = _find_stars(data, self.kernel, self.threshold,
                                   min_separation=self.min_separation,
                                   mask=mask,
                                   exclude_border=self.exclude_border)
        if star_cutouts is None:
            warnings.warn('No sources were found.', NoDetectionsWarning)
            return None
        self._star_cutouts = star_cutouts
        star_props = []
        for star_cutout in star_cutouts:
            props = _IRAFStarFindProperties(star_cutout, self.kernel,
                                            self.sky)
            # star cutout needs more than one non-zero value
            if np.count_nonzero(props.data) <= 1:
                continue
            # Reject sources whose sharpness falls outside the bounds.
            if (props.sharpness <= self.sharplo or
                    props.sharpness >= self.sharphi):
                continue
            # Reject sources whose roundness falls outside the bounds.
            if (props.roundness <= self.roundlo or
                    props.roundness >= self.roundhi):
                continue
            # Optionally reject saturated sources (peak >= peakmax).
            if self.peakmax is not None and props.peak >= self.peakmax:
                continue
            star_props.append(props)
        nstars = len(star_props)
        if nstars == 0:
            warnings.warn('Sources were found, but none pass the sharpness '
                          'and roundness criteria.', NoDetectionsWarning)
            return None
        if self.brightest is not None:
            # Keep only the N brightest sources by flux; sorting the
            # argsort indices preserves the original detection order.
            fluxes = [props.flux for props in star_props]
            idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
            star_props = [star_props[k] for k in idx]
            nstars = len(star_props)
        table = Table()
        table['id'] = np.arange(nstars) + 1
        columns = ('xcentroid', 'ycentroid', 'fwhm', 'sharpness', 'roundness',
                   'pa', 'npix', 'sky', 'peak', 'flux', 'mag')
        for column in columns:
            table[column] = [getattr(props, column) for props in star_props]
        return table
| [
"numpy.log10",
"numpy.sqrt",
"astropy.table.Table",
"numpy.log",
"math.sqrt",
"numpy.count_nonzero",
"numpy.array",
"numpy.argsort",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"numpy.isscalar",
"numpy.where",
"numpy.max",
"numpy.exp",
"warnings.warn",
"numpy.meshgrid",
"numpy.ab... | [((21867, 21911), 'numpy.transpose', 'np.transpose', (["[tbl['y_peak'], tbl['x_peak']]"], {}), "([tbl['y_peak'], tbl['x_peak']])\n", (21879, 21911), True, 'import numpy as np\n'), ((3089, 3111), 'numpy.deg2rad', 'np.deg2rad', (['self.theta'], {}), '(self.theta)\n', (3099, 3111), True, 'import numpy as np\n'), ((3127, 3148), 'numpy.cos', 'np.cos', (['theta_radians'], {}), '(theta_radians)\n', (3133, 3148), True, 'import numpy as np\n'), ((3164, 3185), 'numpy.sin', 'np.sin', (['theta_radians'], {}), '(theta_radians)\n', (3170, 3185), True, 'import numpy as np\n'), ((4156, 4206), 'numpy.sqrt', 'np.sqrt', (['((xx - self.xc) ** 2 + (yy - self.yc) ** 2)'], {}), '((xx - self.xc) ** 2 + (yy - self.yc) ** 2)\n', (4163, 4206), True, 'import numpy as np\n'), ((4744, 4775), 'numpy.exp', 'np.exp', (['(-self.elliptical_radius)'], {}), '(-self.elliptical_radius)\n', (4750, 4775), True, 'import numpy as np\n'), ((11243, 11260), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (11254, 11260), True, 'import numpy as np\n'), ((11867, 11877), 'numpy.sum', 'np.sum', (['wt'], {}), '(wt)\n', (11873, 11877), True, 'import numpy as np\n'), ((11972, 12033), 'numpy.sum', 'np.sum', (['(self.kernel.gaussian_kernel_unmasked * wts)'], {'axis': 'axis'}), '(self.kernel.gaussian_kernel_unmasked * wts, axis=axis)\n', (11978, 12033), True, 'import numpy as np\n'), ((12082, 12106), 'numpy.sum', 'np.sum', (['(kern_sum_1d * wt)'], {}), '(kern_sum_1d * wt)\n', (12088, 12106), True, 'import numpy as np\n'), ((12127, 12156), 'numpy.sum', 'np.sum', (['(kern_sum_1d ** 2 * wt)'], {}), '(kern_sum_1d ** 2 * wt)\n', (12133, 12156), True, 'import numpy as np\n'), ((12215, 12236), 'numpy.sum', 'np.sum', (['(dkern_dx * wt)'], {}), '(dkern_dx * wt)\n', (12221, 12236), True, 'import numpy as np\n'), ((12261, 12287), 'numpy.sum', 'np.sum', (['(dkern_dx ** 2 * wt)'], {}), '(dkern_dx ** 2 * wt)\n', (12267, 12287), True, 'import numpy as np\n'), ((12314, 12349), 'numpy.sum', 'np.sum', 
(['(kern_sum_1d * dkern_dx * wt)'], {}), '(kern_sum_1d * dkern_dx * wt)\n', (12320, 12349), True, 'import numpy as np\n'), ((12373, 12407), 'numpy.sum', 'np.sum', (['(self.data * wts)'], {'axis': 'axis'}), '(self.data * wts, axis=axis)\n', (12379, 12407), True, 'import numpy as np\n'), ((12427, 12451), 'numpy.sum', 'np.sum', (['(data_sum_1d * wt)'], {}), '(data_sum_1d * wt)\n', (12433, 12451), True, 'import numpy as np\n'), ((12476, 12514), 'numpy.sum', 'np.sum', (['(data_sum_1d * kern_sum_1d * wt)'], {}), '(data_sum_1d * kern_sum_1d * wt)\n', (12482, 12514), True, 'import numpy as np\n'), ((12543, 12578), 'numpy.sum', 'np.sum', (['(data_sum_1d * dkern_dx * wt)'], {}), '(data_sum_1d * dkern_dx * wt)\n', (12549, 12578), True, 'import numpy as np\n'), ((12601, 12631), 'numpy.sum', 'np.sum', (['(data_sum_1d * dxx * wt)'], {}), '(data_sum_1d * dxx * wt)\n', (12607, 12631), True, 'import numpy as np\n'), ((16950, 17008), 'numpy.array', 'np.array', (['((self.cutout.data - self.sky) * self.cutout.mask)'], {}), '((self.cutout.data - self.sky) * self.cutout.mask)\n', (16958, 17008), True, 'import numpy as np\n'), ((17075, 17106), 'numpy.where', 'np.where', (['(cutout > 0)', 'cutout', '(0)'], {}), '(cutout > 0, cutout, 0)\n', (17083, 17106), True, 'import numpy as np\n'), ((17688, 17715), 'numpy.count_nonzero', 'np.count_nonzero', (['self.data'], {}), '(self.data)\n', (17704, 17715), True, 'import numpy as np\n'), ((17832, 17849), 'numpy.max', 'np.max', (['self.data'], {}), '(self.data)\n', (17838, 17849), True, 'import numpy as np\n'), ((17904, 17921), 'numpy.sum', 'np.sum', (['self.data'], {}), '(self.data)\n', (17910, 17921), True, 'import numpy as np\n'), ((20660, 20706), 'numpy.arange', 'np.arange', (['(-min_separation)', '(min_separation + 1)'], {}), '(-min_separation, min_separation + 1)\n', (20669, 20706), True, 'import numpy as np\n'), ((20724, 20745), 'numpy.meshgrid', 'np.meshgrid', (['idx', 'idx'], {}), '(idx, idx)\n', (20735, 20745), True, 'import numpy as 
np\n'), ((20766, 20827), 'numpy.array', 'np.array', (['(xx ** 2 + yy ** 2 <= min_separation ** 2)'], {'dtype': 'int'}), '(xx ** 2 + yy ** 2 <= min_separation ** 2, dtype=int)\n', (20774, 20827), True, 'import numpy as np\n'), ((21218, 21269), 'numpy.pad', 'np.pad', (['data', 'pad'], {'mode': 'mode', 'constant_values': '[0.0]'}), '(data, pad, mode=mode, constant_values=[0.0])\n', (21224, 21269), True, 'import numpy as np\n'), ((21393, 21454), 'numpy.pad', 'np.pad', (['convolved_data', 'pad'], {'mode': 'mode', 'constant_values': '[0.0]'}), '(convolved_data, pad, mode=mode, constant_values=[0.0])\n', (21399, 21454), True, 'import numpy as np\n'), ((21541, 21566), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21564, 21566), False, 'import warnings\n'), ((21635, 21698), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'NoDetectionsWarning'}), "('ignore', category=NoDetectionsWarning)\n", (21658, 21698), False, 'import warnings\n'), ((33870, 33877), 'astropy.table.Table', 'Table', ([], {}), '()\n', (33875, 33877), False, 'from astropy.table import Table\n'), ((42252, 42259), 'astropy.table.Table', 'Table', ([], {}), '()\n', (42257, 42259), False, 'from astropy.table import Table\n'), ((5031, 5045), 'numpy.sqrt', 'np.sqrt', (['denom'], {}), '(denom)\n', (5038, 5045), True, 'import numpy as np\n'), ((11900, 11915), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (11909, 11915), True, 'import numpy as np\n'), ((14759, 14776), 'numpy.isnan', 'np.isnan', (['self.hx'], {}), '(self.hx)\n', (14767, 14776), True, 'import numpy as np\n'), ((14780, 14797), 'numpy.isnan', 'np.isnan', (['self.hy'], {}), '(self.hy)\n', (14788, 14797), True, 'import numpy as np\n'), ((16569, 16594), 'numpy.count_nonzero', 'np.count_nonzero', (['skymask'], {}), '(skymask)\n', (16585, 16594), True, 'import numpy as np\n'), ((17982, 18001), 'numpy.log10', 'np.log10', (['self.flux'], {}), '(self.flux)\n', (17990, 18001), True, 'import 
numpy as np\n'), ((18665, 18731), 'numpy.sqrt', 'np.sqrt', (['(self.mu_diff ** 2 + 4.0 * self.moments_central[1, 1] ** 2)'], {}), '(self.mu_diff ** 2 + 4.0 * self.moments_central[1, 1] ** 2)\n', (18672, 18731), True, 'import numpy as np\n'), ((21317, 21368), 'numpy.pad', 'np.pad', (['mask', 'pad'], {'mode': 'mode', 'constant_values': '[0.0]'}), '(mask, pad, mode=mode, constant_values=[0.0])\n', (21323, 21368), True, 'import numpy as np\n'), ((29746, 29768), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (29757, 29768), True, 'import numpy as np\n'), ((29886, 29903), 'numpy.isscalar', 'np.isscalar', (['fwhm'], {}), '(fwhm)\n', (29897, 29903), True, 'import numpy as np\n'), ((32456, 32516), 'warnings.warn', 'warnings.warn', (['"""No sources were found."""', 'NoDetectionsWarning'], {}), "('No sources were found.', NoDetectionsWarning)\n", (32469, 32516), False, 'import warnings\n'), ((33429, 33547), 'warnings.warn', 'warnings.warn', (['"""Sources were found, but none pass the sharpness and roundness criteria."""', 'NoDetectionsWarning'], {}), "(\n 'Sources were found, but none pass the sharpness and roundness criteria.',\n NoDetectionsWarning)\n", (33442, 33547), False, 'import warnings\n'), ((33900, 33917), 'numpy.arange', 'np.arange', (['nstars'], {}), '(nstars)\n', (33909, 33917), True, 'import numpy as np\n'), ((38291, 38313), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (38302, 38313), True, 'import numpy as np\n'), ((38431, 38448), 'numpy.isscalar', 'np.isscalar', (['fwhm'], {}), '(fwhm)\n', (38442, 38448), True, 'import numpy as np\n'), ((40888, 40948), 'warnings.warn', 'warnings.warn', (['"""No sources were found."""', 'NoDetectionsWarning'], {}), "('No sources were found.', NoDetectionsWarning)\n", (40901, 40948), False, 'import warnings\n'), ((41811, 41929), 'warnings.warn', 'warnings.warn', (['"""Sources were found, but none pass the sharpness and roundness criteria."""', 'NoDetectionsWarning'], {}), "(\n 
'Sources were found, but none pass the sharpness and roundness criteria.',\n NoDetectionsWarning)\n", (41824, 41929), False, 'import warnings\n'), ((42282, 42299), 'numpy.arange', 'np.arange', (['nstars'], {}), '(nstars)\n', (42291, 42299), True, 'import numpy as np\n'), ((4466, 4552), 'numpy.where', 'np.where', (['((self.elliptical_radius <= self.f) | (self.circular_radius <= 2.0))', '(1)', '(0)'], {}), '((self.elliptical_radius <= self.f) | (self.circular_radius <= 2.0),\n 1, 0)\n', (4474, 4552), True, 'import numpy as np\n'), ((9687, 9706), 'numpy.abs', 'np.abs', (['cutout_conv'], {}), '(cutout_conv)\n', (9693, 9706), True, 'import numpy as np\n'), ((9919, 9943), 'numpy.sum', 'np.sum', (['self.data_masked'], {}), '(self.data_masked)\n', (9925, 9943), True, 'import numpy as np\n'), ((11508, 11523), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (11517, 11523), True, 'import numpy as np\n'), ((15421, 15440), 'numpy.log10', 'np.log10', (['self.flux'], {}), '(self.flux)\n', (15429, 15440), True, 'import numpy as np\n'), ((18832, 18890), 'numpy.arctan2', 'np.arctan2', (['(2.0 * self.moments_central[1, 1])', 'self.mu_diff'], {}), '(2.0 * self.moments_central[1, 1], self.mu_diff)\n', (18842, 18890), True, 'import numpy as np\n'), ((41283, 41311), 'numpy.count_nonzero', 'np.count_nonzero', (['props.data'], {}), '(props.data)\n', (41299, 41311), True, 'import numpy as np\n'), ((11604, 11621), 'numpy.transpose', 'np.transpose', (['ywt'], {}), '(ywt)\n', (11616, 11621), True, 'import numpy as np\n'), ((11777, 11792), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (11786, 11792), True, 'import numpy as np\n'), ((16649, 16673), 'numpy.max', 'np.max', (['self.cutout.data'], {}), '(self.cutout.data)\n', (16655, 16673), True, 'import numpy as np\n'), ((16704, 16732), 'numpy.max', 'np.max', (['self.cutout.convdata'], {}), '(self.cutout.convdata)\n', (16710, 16732), True, 'import numpy as np\n'), ((16779, 16813), 'numpy.sum', 'np.sum', (['(self.cutout.data * 
skymask)'], {}), '(self.cutout.data * skymask)\n', (16785, 16813), True, 'import numpy as np\n'), ((18491, 18502), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (18497, 18502), True, 'import numpy as np\n'), ((3827, 3861), 'math.sqrt', 'math.sqrt', (['(self.c * self.f / denom)'], {}), '(self.c * self.f / denom)\n', (3836, 3861), False, 'import math\n'), ((3901, 3935), 'math.sqrt', 'math.sqrt', (['(self.a * self.f / denom)'], {}), '(self.a * self.f / denom)\n', (3910, 3935), False, 'import math\n'), ((11112, 11130), 'numpy.arange', 'np.arange', (['self.nx'], {}), '(self.nx)\n', (11121, 11130), True, 'import numpy as np\n'), ((11185, 11203), 'numpy.arange', 'np.arange', (['self.ny'], {}), '(self.ny)\n', (11194, 11203), True, 'import numpy as np\n'), ((32741, 32762), 'numpy.isnan', 'np.isnan', (['props.dx_hx'], {}), '(props.dx_hx)\n', (32749, 32762), True, 'import numpy as np\n'), ((32772, 32793), 'numpy.isnan', 'np.isnan', (['props.dy_hy'], {}), '(props.dy_hy)\n', (32780, 32793), True, 'import numpy as np\n'), ((33715, 33733), 'numpy.argsort', 'np.argsort', (['fluxes'], {}), '(fluxes)\n', (33725, 33733), True, 'import numpy as np\n'), ((42097, 42115), 'numpy.argsort', 'np.argsort', (['fluxes'], {}), '(fluxes)\n', (42107, 42115), True, 'import numpy as np\n')] |
from __future__ import division
'''
***********************************************************
File: softmaxModels.py
Allows for the creation and use of Softmax functions
Version 1.3.0: Added Discretization function
Version 1.3.1: Added Likelihood weighted Importance sampling
***********************************************************
'''
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.3.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import numpy as np;
import random;
from random import random;
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
import warnings
import math
import copy
import time
from numpy.linalg import inv,det,svd,solve
from gaussianMixtures import Gaussian
from gaussianMixtures import GM
from mpl_toolkits.mplot3d import Axes3D
from scipy import compress
import scipy.linalg as linalg
from copy import deepcopy
from scipy import sparse
from sklearn.linear_model import LogisticRegression
class Softmax:
def __init__(self,weights= None,bias = None):
'''
Initialize with either:
1. Nothing, for empty softmax model
2. Vector of weights (n x d) and bias (nx1)
'''
self.weights = weights;
self.bias = bias;
if(self.weights is not None):
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def nullspace(self,A,atol=1e-13,rtol=0):
'''
Finds the nullspace of a matrix
'''
A = np.atleast_2d(A)
u, s, vh = svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns;
def distance(self,x1,y1,x2,y2):
'''
The distance formula for 2d systems
'''
dist = (x1-x2)*(x1-x2) + (y1-y2)*(y1-y2);
dist = math.sqrt(dist);
return dist;
def buildRectangleModel(self,recBounds,steepness = 1):
'''
Builds a softmax model in 2 dimensions with a rectangular interior class
Inputs
recBounds: A 2x2 list, with the coordinates of the lower left and upper right corners of the rectangle
steepness: A scalar determining how steep the bounds between softmax classes are
'''
B = np.matrix([-1,0,recBounds[0][0],1,0,-recBounds[1][0],0,1,-recBounds[1][1],0,-1,recBounds[0][1]]).T;
M = np.zeros(shape=(12,15));
#Boundry: Left|Near
rowSB = 0;
classNum1 = 1;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
#Boundry: Right|Near
rowSB = 1;
classNum1 = 2;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
#Boundry: Up|Near
rowSB = 2;
classNum1 = 3;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
#Boundry: Down|Near
rowSB = 3;
classNum1 = 4;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
A = np.hstack((M,B));
# print(np.linalg.matrix_rank(A))
# print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//3):
weight.append([Theta[3*i][0],Theta[3*i+1][0]]);
bias.append(Theta[3*i+2][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildOrientedRecModel(self,centroid,orient,length,width,steepness = 1):
'''
Builds a rectangular model at the specified centroid with the parameters given
'''
theta1 = orient*math.pi/180;
h = math.sqrt((width/2)*(width/2) + (length/2)*(length/2));
theta2 = math.asin((width/2)/h);
s1 = h*math.sin(theta1+theta2);
s2 = h*math.cos(theta1+theta2);
s3 = h*math.sin(theta1-theta2);
s4 = h*math.cos(theta1-theta2);
points = [];
points = [[centroid[0]+s2,centroid[1]+s1],[centroid[0]+s4,centroid[1]+s3],[centroid[0]-s2,centroid[1]-s1],[centroid[0]-s4,centroid[1]-s3]];
self.buildPointsModel(points,steepness=steepness);
def buildGeneralModel(self,dims,numClasses,boundries,B,steepness=1):
'''
Builds a softmax model according to the full specification of boudries and a normal vector
Inputs
dims: the dimensionality of the model
numClasses: the number of classes in the model
boundries: a list of [2x1] lists which spec out the boundries required in the model
B: a list of normals and constants for each boundry
steepness: A scalar determining how steep the bounds between softmax classes are
'''
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
wtmp=[];
for j in range(0,dims):
wtmp.append(Theta[(dims+1)*i+j][0])
weight.append(wtmp);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildPointsModel(self,points,steepness=1):
'''
Builds a 2D softmax model by constructing an interior class from the given points
Inputs
points: list of 2D points that construct a convex polygon
steepness: A scalar determining how steep the bounds between softmax classes are
'''
dims = 2;
pointsx = [p[0] for p in points];
pointsy = [p[1] for p in points];
centroid = [sum(pointsx)/len(points),sum(pointsy)/len(points)];
#for each point to the next, find the normal between them.
B = [];
for i in range(0,len(points)):
p1 = points[i];
if(i == len(points)-1):
p2 = points[0];
else:
p2 = points[i+1];
mid = [];
for i in range(0,len(p1)):
mid.append((p1[i]+p2[i])/2)
H = np.matrix([[p1[0],p1[1],1],[p2[0],p2[1],1],[mid[0],mid[1],1]]);
Hnull = (self.nullspace(H)).tolist();
distMed1 = self.distance(mid[0]+Hnull[0][0],mid[1]+Hnull[1][0],centroid[0],centroid[1]);
distMed2 = self.distance(mid[0]-Hnull[0][0],mid[1]-Hnull[1][0],centroid[0],centroid[1]);
if(distMed1 < distMed2):
Hnull[0][0] = -Hnull[0][0];
Hnull[1][0] = -Hnull[1][0];
Hnull[2][0] = -Hnull[2][0];
for j in Hnull:
B.append(j[0]);
B = np.matrix(B).T;
numClasses = len(points)+1;
boundries = [];
for i in range(1,numClasses):
boundries.append([i,0]);
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
#print(np.linalg.matrix_rank(A))
#print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
weight.append([Theta[(dims+1)*i][0],Theta[(dims+1)*i+1][0]]);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildTriView(self,pose,length = 3,steepness = 2):
l = length;
#Without Cutting
triPoints = [[pose[0],pose[1]],[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
#With Cutting
lshort = 0.5
triPoints = [[pose[0]+lshort*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*0.261799+math.radians(pose[2]))],[pose[0]+lshort*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
self.buildPointsModel(triPoints,steepness=steepness);
def Estep(self,weight,bias,prior_mean,prior_var,alpha = 0.5,zeta_c = 1,softClassNum=0):
'''
Runs the Expectation step of the Variational Bayes algorithm
'''
#start the VB EM step
lamb = [0]*len(weight);
for i in range(0,len(weight)):
lamb[i] = self._lambda(zeta_c[i]);
hj = 0;
suma = 0;
for c in range(0,len(weight)):
if(softClassNum != c):
suma += weight[c];
tmp2 = 0;
for c in range(0,len(weight)):
tmp2+=lamb[c]*(alpha-bias[c])*weight[c];
hj = 0.5*(weight[softClassNum]-suma)+2*tmp2;
Kj = 0;
for c in range(0,len(weight)):
Kj += lamb[c]*weight[c]*weight[c];
Kj = Kj*2;
Kp = prior_var**-1;
hp = Kp*prior_mean;
Kl = Kp+Kj;
hl = hp+hj;
mean = (Kl**-1)*hl;
var = Kl**-1;
yc = [0]*len(weight);
yc2= [0]*len(weight);
for c in range(0,len(weight)):
yc[c] = weight[c]*mean + bias[c];
yc2[c] = weight[c]*(var + mean*mean)*weight[c] + 2*weight[c]*mean*bias[c] + bias[c]**2;
return [mean,var,yc,yc2];
def Mstep(self,m,yc,yc2,zeta_c,alpha,steps):
'''
Runs the Maximization Step of the Variational Bayes algorithm
'''
z = zeta_c;
a = alpha;
for i in range(0,steps):
for c in range(0,len(yc)):
z[c] = math.sqrt(yc2[c] + a**2 - 2*a*yc[c]);
num_sum = 0;
den_sum = 0;
for c in range(0,len(yc)):
num_sum += self._lambda(z[c])*yc[c];
den_sum += self._lambda(z[c]);
a = ((m-2)/4 + num_sum)/den_sum;
return [z,a]
def _lambda(self, zeta_c):
return 1 / (2 * zeta_c) * ( (1 / (1 + np.exp(-zeta_c))) - 0.5)
	def calcCHat(self,prior_mean,prior_var,mean,var,alpha,zeta_c,yc,yc2,mod):
		'''
		Compute the log normalization constant (log c-hat) of the variational
		lower bound after an E/M pass. runVB uses this both as the convergence
		metric and (via exp) as the posterior mixand weight.

		prior_mean, prior_var: Gaussian prior parameters
		mean, var: current variational posterior parameters
		alpha, zeta_c: variational parameters of the softmax bound
		yc, yc2: per-class first and second moments of w_c*x + b_c
		mod: index of the measured softmax class
		Returns a 1x1 np.matrix holding log c-hat.
		'''
		# Wrap in matrix form so the same expressions handle scalar and ND input
		prior_var = np.matrix(prior_var);
		prior_mean = np.matrix(prior_mean);
		var_hat = np.matrix(var);
		mu_hat = np.matrix(mean);

		#KLD = 0.5*(np.log(prior_var/var) + prior_var**-1*var + (prior_mean-mean)*(prior_var**-1)*(prior_mean-mean));
		# Gaussian-to-Gaussian KL divergence between posterior and prior.
		# NOTE(review): the usual "- k" dimensionality term is omitted here; the
		# trailing "+ 1" in the return appears to compensate in 1D (see the
		# matching "+ 1" question in vb_update) -- confirm before changing.
		KLD = 0.5 * (np.log(det(prior_var) / det(var_hat)) +
					 np.trace(inv(prior_var) .dot (var_hat)) +
					 (prior_mean - mu_hat).T .dot (inv(prior_var)) .dot
					 (prior_mean - mu_hat));

		# Per-class contributions of the variational softmax bound
		suma = 0;
		for c in range(0,len(zeta_c)):
			suma += 0.5 * (alpha + zeta_c[c] - yc[c]) \
				- self._lambda(zeta_c[c]) * (yc2[c] - 2 * alpha
				* yc[c] + alpha ** 2 - zeta_c[c] ** 2) \
				- np.log(1 + np.exp(zeta_c[c]))

		return yc[mod] - alpha + suma - KLD + 1;
def numericalProduct(self,prior,meas,low=0,high=5,res =100,vis = True):
'''
Multiplies a 1D softmax model by a 1D gaussian mixture over a range
For comparison to VB
'''
[x,softmax] = self.plot1D(low,high,res,vis = False);
prod = [0 for i in range(0,len(x))];
for i in range(0,len(x)):
prod[i] = prior.pointEval(x[i])*softmax[meas][i];
if(vis == False):
return [x,prod];
else:
plt.plot(x,prod);
plt.show();
	def vb_update(self, measurement, prior_mean,prior_var):
		'''
		Runs the variational Bayes update

		measurement: index of the observed softmax class
		prior_mean, prior_var: Gaussian prior parameters (array-like; works
		for the N-dimensional case)
		Returns (mu_post, var_post, log_c_hat), the posterior Gaussian
		parameters and the log normalization constant of the bound.
		'''
		w = np.array(self.weights)
		b = np.array(self.bias)
		m = len(w);
		j = measurement;
		xis = self.zeta_c;
		alpha = self.alpha;
		prior_var = np.array(prior_var);
		prior_mean = np.array(prior_mean);
		converged = False
		EM_step = 0
		while not converged and EM_step < 10000:
			################################################################
			# STEP 1 - EXPECTATION
			################################################################
			# PART A #######################################################
			# find g_j
			sum1 = 0
			for c in range(m):
				if c != j:
					sum1 += b[c]
			sum2 = 0
			for c in range(m):
				# NOTE(review): "sum2 =" overwrites rather than accumulates
				# (compare "suma +=" in calcCHat); harmless in practice since
				# g_j only feeds g_l, which is never used below -- confirm.
				sum2 = xis[c] / 2 \
					+ self._lambda(xis[c]) * (xis[c] ** 2 - (b[c] - alpha) ** 2) \
					- np.log(1 + np.exp(xis[c]))
			g_j = 0.5 * (b[j] - sum1) + alpha * (m / 2 - 1) + sum2
			# find h_j
			sum1 = 0
			for c in range(m):
				if c != j:
					sum1 += w[c]
			sum2 = 0
			for c in range(m):
				sum2 += self._lambda(xis[c]) * (alpha - b[c]) * w[c]
			h_j = 0.5 * (w[j] - sum1) + 2 * sum2
			# find K_j
			sum1 = 0
			for c in range(m):
				sum1 += self._lambda(xis[c]) * np.outer(w[c], (w[c]))
			K_j = 2 * sum1
			# Prior in information form
			K_p = inv(prior_var)
			g_p = -0.5 * (np.log(np.linalg.det(2 * np.pi * prior_var))) \
				+ prior_mean.T .dot (K_p) .dot (prior_var)
			h_p = K_p .dot (prior_mean)
			# Combine prior and likelihood contributions; g_l is unused below
			g_l = g_p + g_j
			h_l = h_p + h_j
			K_l = K_p + K_j
			# Recover the moment-form posterior parameters
			mu_hat = inv(K_l) .dot (h_l)
			var_hat = inv(K_l)
			# PART B #######################################################
			# First and second moments of each class's linear activation
			y_cs = np.zeros(m)
			y_cs_squared = np.zeros(m)
			for c in range(m):
				y_cs[c] = w[c].T .dot (mu_hat) + b[c]
				y_cs_squared[c] = w[c].T .dot \
					(var_hat + np.outer(mu_hat, mu_hat.T)) .dot (w[c]) \
					+ 2 * w[c].T .dot (mu_hat) * b[c] + b[c] ** 2
			################################################################
			# STEP 2 - MAXIMIZATION
			################################################################
			for i in range(100):  # n_{lc}
				# PART A ######################################################
				# Find xis
				for c in range(m):
					xis[c] = np.sqrt(y_cs_squared[c] + alpha ** 2 - 2 * alpha
							 * y_cs[c])
				# PART B ######################################################
				# Find alpha
				num_sum = 0
				den_sum = 0
				for c in range(m):
					num_sum += self._lambda(xis[c]) * y_cs[c]
					den_sum += self._lambda(xis[c])
				alpha = ((m - 2) / 4 + num_sum) / den_sum
			################################################################
			# STEP 3 - CONVERGENCE CHECK
			################################################################
			if EM_step == 0:
				prev_log_c_hat = -1000  # Arbitrary value
			# KL divergence of posterior from prior (same form as calcCHat)
			KLD = 0.5 * (np.log(det(prior_var) / det(var_hat)) +
						 np.trace(inv(prior_var) .dot (var_hat)) +
						 (prior_mean - mu_hat).T .dot (inv(prior_var)) .dot
						 (prior_mean - mu_hat))
			sum1 = 0
			for c in range(m):
				sum1 += 0.5 * (alpha + xis[c] - y_cs[c]) \
					- self._lambda(xis[c]) * (y_cs_squared[c] - 2 * alpha
					* y_cs[c] + alpha ** 2 - xis[c] ** 2) \
					- np.log(1 + np.exp(xis[c]))
			# <>TODO: don't forget Mun - unobserved parents!
			# <>CHECK - WHY DO WE ADD +1 HERE??
			log_c_hat = y_cs[j] - alpha + sum1 - KLD + 1
			# Stop once the bound's normalizer changes by less than 1e-3
			if np.abs(log_c_hat - prev_log_c_hat) < 0.001:
				break
			prev_log_c_hat = log_c_hat
			EM_step += 1
		# Resize parameters
		if mu_hat.size == 1:
			mu_post = mu_hat[0]
		else:
			mu_post = mu_hat
		if var_hat.size == 1:
			var_post = var_hat[0][0]
		else:
			var_post = var_hat
		return mu_post, var_post, log_c_hat
def runVB(self,prior,softClassNum):
#For the one dimensional case only
post = GM();
weight = self.weights;
bias = self.bias;
alpha = self.alpha;
zeta_c = self.zeta_c;
for g in prior.Gs:
prevLogCHat = -1000;
count = 0;
while(count < 100000):
count = count+1;
[mean,var,yc,yc2] = self.Estep(weight,bias,g.mean,g.var,alpha,zeta_c,softClassNum = softClassNum);
[zeta_c,alpha] = self.Mstep(len(weight),yc,yc2,zeta_c,alpha,steps = 100);
logCHat = self.calcCHat(g.mean,g.var,mean,var,alpha,zeta_c,yc,yc2,mod=softClassNum);
if(abs(prevLogCHat - logCHat) < 0.00001):
break;
else:
prevLogCHat = logCHat;
post.addG(Gaussian(mean,var,g.weight*np.exp(logCHat).tolist()[0][0]))
return post;
def runVBND(self,prior,softClassNum):
#For the N dimensional Case
#Note: Cannot run 1D
post = GM();
for g in prior.Gs:
[mu,var,logCHat] = self.vb_update(softClassNum,g.mean,g.var);
mu = mu.tolist();
var = var.tolist();
post.addG(Gaussian(mu,var,g.weight*np.exp(logCHat)));
return post;
def pointEvalND(self,softClass,point):
#Evaluates the function at a point in any dimensionality.
topIn = 0;
for i in range(0,len(self.weights[0])):
topIn+=self.weights[softClass][i]*point[i];
top = np.exp(topIn+self.bias[softClass]);
bottom = 0;
for i in range(0,self.size):
bottomIn = 0;
for j in range(0,len(self.weights[0])):
bottomIn += self.weights[i][j]*point[j];
bottom+=np.exp(bottomIn + self.bias[i]);
return top/bottom;
def plot1D(self,low=0,high = 5,res = 100,labels = None,vis = True):
x = [(i*(high-low)/res + low) for i in range(0,res)];
suma = [0]*len(x);
softmax = [[0 for i in range(0,len(x))] for j in range(0,len(self.weights))];
for i in range(0,len(x)):
tmp = 0;
for j in range(0,len(self.weights)):
tmp += math.exp(self.weights[j]*x[i] + self.bias[j]);
for j in range(0,len(self.weights)):
softmax[j][i] = math.exp(self.weights[j]*x[i] + self.bias[j]) /tmp;
if(vis ==True):
for i in range(0,len(self.weights)):
plt.plot(x,softmax[i]);
plt.ylim([0,1.1])
plt.xlim([low,high]);
if(labels is not None):
plt.legend(labels);
plt.show();
else:
return [x,softmax];
def plot2D(self,low = [0,0],high = [5,5],labels = None,vis = True,delta=0.1):
x, y = np.mgrid[low[0]:high[0]:delta, low[1]:high[1]:delta]
pos = np.dstack((x, y))
resx = int((high[0]-low[0])//delta)+1;
resy = int((high[1]-low[1])//delta)+1;
model = [[[0 for i in range(0,resy)] for j in range(0,resx)] for k in range(0,len(self.weights))];
for m in range(0,len(self.weights)):
for i in range(0,resx):
xx = (i*(high[0]-low[0])/resx + low[0]);
for j in range(0,resy):
yy = (j*(high[1]-low[1])/resy + low[1])
dem = 0;
for k in range(0,len(self.weights)):
dem+=np.exp(self.weights[k][0]*xx + self.weights[k][1]*yy + self.bias[k]);
model[m][i][j] = np.exp(self.weights[m][0]*xx + self.weights[m][1]*yy + self.bias[m])/dem;
dom = [[0 for i in range(0,resy)] for j in range(0,resx)];
for m in range(0,len(self.weights)):
for i in range(0,resx):
for j in range(0,resy):
dom[i][j] = np.argmax([model[h][i][j] for h in range(0,len(self.weights))]);
if(vis):
plt.contourf(x,y,dom,cmap = 'inferno');
fig = plt.figure()
ax = fig.gca(projection='3d');
colors = ['b','g','r','c','m','y','k','w','b','g'];
for i in range(0,len(model)):
ax.plot_surface(x,y,np.array(model[i]),color = colors[i]);
ax.set_xlabel('X/East Location (m)');
ax.set_ylabel('Y/West Location (m)');
ax.set_zlabel('Likelihood');
plt.show();
else:
return x,y,dom;
	def plot3D(self,low=[-5,-5,-5],high=[5,5,5]):
		"""Scatter-plot the dominant region of every softmax class (except 0) in 3D.

		Each class is evaluated on a fixed 21x21x21 grid over [-5, 5]^3 with
		0.5 spacing; grid points where the class probability exceeds 0.1 are
		scattered in that class's color.

		NOTE(review): `low`/`high` only set the axis limits -- the evaluation
		grid itself is hard-coded to [-5, 5]^3; confirm this is intended.

		Parameters
		----------
		low, high : list of three floats
			Axis limits of the 3D scatter plot.
		"""
		fig = plt.figure();
		ax = fig.add_subplot(111,projection='3d');
		ax.set_xlabel('X Axis');
		ax.set_ylabel('Y Axis');
		ax.set_zlabel('Z Axis');
		ax.set_xlim([low[0],high[0]]);
		ax.set_ylim([low[1],high[1]]);
		ax.set_zlim([low[2],high[2]]);
		ax.set_title("3D Scatter of Dominant Softmax Classes")
		# class 0 (typically the interior/reference class) is skipped
		for clas in range(1,self.size):
			shapeEdgesX = [];
			shapeEdgesY = [];
			shapeEdgesZ = [];
			#-5 to 5 on all dims
			data = np.zeros(shape=(21,21,21));
			for i in range(0,21):
				for j in range(0,21):
					for k in range(0,21):
						# (i-10)/2 maps index 0..20 onto -5..5 in 0.5 steps
						data[i][j][k] = self.pointEvalND(clas,[(i-10)/2,(j-10)/2,(k-10)/2]);
						if(data[i][j][k] > 0.1):
							shapeEdgesX.append((i-10)/2);
							shapeEdgesY.append((j-10)/2);
							shapeEdgesZ.append((k-10)/2);
			ax.scatter(shapeEdgesX,shapeEdgesY,shapeEdgesZ);
		plt.show();
def logRegress(self,X,t,steepness = 1):
dim = len(X[0]);
fitter = LogisticRegression(solver = 'newton-cg',multi_class = 'multinomial');
fitter.fit(X,t);
newCoef = fitter.coef_.tolist();
weights = [];
for i in range(0,len(newCoef)):
weights.append(newCoef[i]);
bias = [];
newBias = fitter.intercept_.tolist();
for i in range(0,len(newBias)):
bias.append(newBias[i]);
ze = [0]*dim;
weights.append(ze);
bias.append(0);
self.weights = (np.array(weights)*steepness).tolist();
self.bias = (np.array(bias)*steepness).tolist();
if(self.weights is not None):
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def discretize2D(self,softClass,low = [0,0],high = [5,5],delta=0.1):
x, y = np.mgrid[low[0]:high[0]:delta, low[1]:high[1]:delta]
pos = np.dstack((x, y))
resx = int((high[0]-low[0])//delta)+1;
resy = int((high[1]-low[1])//delta)+1;
likelihood = [[0 for i in range(0,resy)] for j in range(0,resx)];
for m in softClass:
for i in range(0,resx):
xx = (i*(high[0]-low[0])/resx + low[0]);
for j in range(0,resy):
yy = (j*(high[1]-low[1])/resy + low[1])
dem = 0;
for k in range(0,len(self.weights)):
dem+=np.exp(self.weights[k][0]*xx + self.weights[k][1]*yy + self.bias[k]);
likelihood[i][j] += np.exp(self.weights[m][0]*xx + self.weights[m][1]*yy + self.bias[m])/dem;
return likelihood;
	def lwisUpdate(self,prior,softClass,numSamples,inverse = False):
		"""Likelihood-weighted importance-sampling update of a single Gaussian.

		Samples from a proposal equal to the prior, reweights each sample by
		the softmax class likelihood (or 1 - likelihood when `inverse`), and
		moment-matches the weighted samples back into one Gaussian. The new
		component weight is the prior weight times the average likelihood,
		estimated from a fixed 500-sample draw.

		Parameters
		----------
		prior : Gaussian
			Prior component to update (mean, var, weight are read).
		softClass : int
			Softmax class index of the observation.
		numSamples : int
			Number of importance samples for the moment-matching step.
		inverse : bool
			If True, condition on the class NOT being observed.

		Returns
		-------
		Gaussian
			Moment-matched posterior component with an updated weight.
		"""
		#Runs a likelihood weighted importance sampling update on a given gaussian
		q = GM();
		q.addG(Gaussian(prior.mean,prior.var,1));
		p = GM();
		p.addG(prior);
		x = q.sample(numSamples);
		w = np.zeros(numSamples);
		# importance weights: prior * likelihood / proposal (proposal == prior here)
		for i in range(0,numSamples):
			if(not inverse):
				w[i] = p.pointEval(x[i])*self.pointEvalND(softClass,x[i])/q.pointEval(x[i]);
			else:
				w[i] = p.pointEval(x[i])*(1-self.pointEvalND(softClass,x[i]))/q.pointEval(x[i]);
		# normalize the weights (NOTE(review): divides by zero if all weights vanish)
		suma = sum(w);
		for i in range(0,len(w)):
			w[i] = w[i]/suma;
		# weighted sample mean
		muHat = np.zeros(len(prior.mean));
		for i in range(0,numSamples):
			muHat = muHat + np.dot(x[i],w[i]);
		# weighted sample covariance: E[x x^T] - mu mu^T
		varHat = np.zeros(shape = (len(prior.mean),len(prior.mean)));
		for i in range(0,numSamples):
			xi = np.asarray(x[i]);
			varHat = varHat + w[i]*np.outer(xi,xi);
		varHat = varHat - np.outer(muHat,muHat);
		muHat = muHat.tolist();
		varHat = varHat.tolist();
		# collapse 1x1 results to scalars for the 1D case
		if(len(prior.mean) == 1):
			muHat = muHat[0];
		if(len(prior.var)==1):
			varHat = varHat[0][0];
		#Calculate Weights
		#sample a bunch from the prior
		tmp = GM();
		tmp.addG(Gaussian(prior.mean,prior.var,1));
		tmpSamps = tmp.sample(500);
		#Find the likelihood at each sampled point
		probs = np.zeros(500).tolist()
		for i in range(0,500):
			if(not inverse):
				probs[i] = self.pointEvalND(softClass,tmpSamps[i]);
			else:
				probs[i] = 1-self.pointEvalND(softClass,tmpSamps[i]);
		#Find the average likelihood, which is the weight factor
		sumSamp = sum(probs)/500;
		#Multiply the sampled weight factor by the previous weight
		#or add in log space
		logSamps = np.log(sumSamp);
		logWeight = np.log(prior.weight)+logSamps;
		#Extract final weight
		weight = np.exp(logWeight);
		post = Gaussian(muHat,varHat,weight);
		return post;
def test1DSoftmax():
	"""Demo: fuse a 1D GM prior with one softmax class and plot all curves."""
	weight = [-5, 0]
	bias = [5, 0]
	softClass = 0
	low, high, res = 0, 5, 100
	# likelihood model
	like = Softmax(weight, bias)
	# prior mixture
	prior = GM([2, 4], [1, 0.5], [1, 0.5])
	# posterior via variational Bayes
	post = like.runVB(prior, softClassNum=softClass)
	like.plot1D(res=res, low=0, high=5)
	# gather curves for a combined figure
	x0, classes = like.plot1D(res=res, vis=False)
	x1, numApprox = like.numericalProduct(prior, softClass, low=low, high=high, res=res, vis=False)
	softClassLabels = ['Far left', 'Left', 'Far Right', 'Right']
	labels = ['likelihood', 'prior', 'VB Posterior', 'Numerical Posterior']
	x2, pri = prior.plot(low=low, high=high, num=res, vis=False)
	x3, pos = post.plot(low=low, high=high, num=res, vis=False)
	for xs, ys in [(x0, classes[softClass]), (x2, pri), (x3, pos), (x1, numApprox)]:
		plt.plot(xs, ys)
	plt.ylim([0, 1.1])
	plt.xlim([low, high])
	plt.title("Fusion of prior with: " + softClassLabels[softClass])
	plt.legend(labels)
	plt.show()
def test2DSoftmax():
	"""Demo: fuse a 2D GM prior with a softmax likelihood model.

	Builds the colinear softmax problem, fuses it with a three-component
	GM prior via variational Bayes, and plots prior, likelihood, and
	posterior.

	Bug fixed: Softmax.plot2D() has no `res` parameter (its grid resolution
	is controlled by `delta`), so the previous `res=res` keyword arguments
	raised a TypeError at runtime.
	"""
	# Colinear problem
	weight = [[-1.3926, 1.3926], [-0.6963, 0.6963], [0, 0]]
	bias = [0, .1741, 0]
	low = [0, 0]
	high = [5, 5]
	MMS = True        # multimodal-softmax fusion of a union of classes
	softClass = 2     # class used when MMS is False
	detect = 0
	res = 100         # resolution for the GM plots
	steep = 2
	# sharpen the class boundaries
	for i in range(0, len(weight)):
		for j in range(0, len(weight[i])):
			weight[i][j] = weight[i][j] * steep
		bias[i] = bias[i] * steep
	# likelihood model
	a = Softmax(weight, bias)
	[x1, y1, dom] = a.plot2D(low=low, high=high, vis=False)
	a.plot2D(low=low, high=high, vis=True)
	# define a prior
	prior = GM()
	prior.addG(Gaussian([2, 4], [[1, 0], [0, 1]], 1))
	prior.addG(Gaussian([4, 2], [[1, 0], [0, 1]], 1))
	prior.addG(Gaussian([1, 3], [[1, 0], [0, 1]], 1))
	[x2, y2, c2] = prior.plot2D(low=low, high=high, res=res, vis=False)
	if (MMS):
		# run variational Bayes on each class of the observation's union
		if (detect == 0):
			post1 = a.runVBND(prior, 0)
			post2 = a.runVBND(prior, 2)
			post1.addGM(post2)
		else:
			post1 = a.runVBND(prior, 1)
	else:
		post1 = a.runVBND(prior, softClass)
	post1.normalizeWeights()
	[x3, y3, c3] = post1.plot2D(low=low, high=high, res=res, vis=False)
	post1.display()
	softClassLabels = ['Near', 'Left', 'Right', 'Up', 'Down']
	detectLabels = ['No Detection', 'Detection']
	# plot everything together
	fig, axarr = plt.subplots(3, sharex=True, sharey=True)
	axarr[0].contourf(x2, y2, c2, cmap='viridis')
	axarr[0].set_title('Prior GM')
	axarr[1].contourf(x1, y1, dom, cmap='viridis')
	axarr[1].set_title('Likelihood Softmax')
	axarr[2].contourf(x3, y3, c3, cmap='viridis')
	if (MMS):
		axarr[2].set_title('Posterior GM with observation:' + detectLabels[detect])
	else:
		axarr[2].set_title('Posterior GM with observation:' + softClassLabels[softClass])
	fig.suptitle('2D Fusion of a Gaussian Prior with a Softmax Likelihood')
	plt.show()
def testRectangleModel():
	"""Demo: axis-aligned rectangle softmax model fused with a flat GM prior."""
	pz = Softmax()
	pz.buildRectangleModel([[2, 2], [3, 4]], 1)
	# roughly uniform prior over the region
	prior = GM()
	for i in range(10):
		for j in range(5):
			prior.addG(Gaussian([i, j], [[1, 0], [0, 1]], 1))
	prior.normalizeWeights()
	dela = 0.1
	x, y = np.mgrid[0:10:dela, 0:5:dela]
	fig, axarr = plt.subplots(6)
	axarr[0].contourf(x, y, prior.discretize2D(low=[0, 0], high=[10, 5], delta=dela))
	axarr[0].set_title('Prior')
	titles = ['Inside', 'Left', 'Right', 'Up', 'Down']
	for idx, title in enumerate(titles):
		post = pz.runVBND(prior, idx)
		c = post.discretize2D(low=[0, 0], high=[10, 5], delta=dela)
		axarr[idx + 1].contourf(x, y, c, cmap='viridis')
		axarr[idx + 1].set_title('Post: ' + title)
	plt.show()
def testGeneralModel():
	"""Demo: general 2D softmax model (from boundary spec) fused with a flat GM prior."""
	pz = Softmax()
	pz.buildGeneralModel(2, 4, [[1, 0], [2, 0], [3, 0]],
	                     np.matrix([-1, 1, -1, 1, 1, -1, 0, -1, -1]).T)
	# roughly uniform prior over the region
	prior = GM()
	for i in range(10):
		for j in range(5):
			prior.addG(Gaussian([i, j], [[1, 0], [0, 1]], 1))
	prior.normalizeWeights()
	dela = 0.1
	x, y = np.mgrid[0:10:dela, 0:5:dela]
	fig, axarr = plt.subplots(5)
	axarr[0].contourf(x, y, prior.discretize2D(low=[0, 0], high=[10, 5], delta=dela))
	axarr[0].set_title('Prior')
	for idx, title in enumerate(['Inside', 'Left', 'Right', 'Down']):
		post = pz.runVBND(prior, idx)
		c = post.discretize2D(low=[0, 0], high=[10, 5], delta=dela)
		axarr[idx + 1].contourf(x, y, c, cmap='viridis')
		axarr[idx + 1].set_title('Post: ' + title)
	plt.show()
def testPointsModel():
	"""Demo: softmax model built from a polygon's vertex list."""
	dims = 2
	points = [[1, 1], [1, 2], [3, 2], [6, 1], [4, -1]]
	pz = Softmax()
	pz.buildPointsModel(points, steepness=5)
	pz.plot2D(low=[-10, -10], high=[10, 10], delta=0.1, vis=True)
def testPlot3D():
	"""Demo: 3D octohedron softmax model, plotted as three 2D axis slices plus a 3D view."""
	dims = 3
	steep = 10
	# Octohedron specs
	numClasses = 9
	boundries = [[i, 0] for i in range(1, numClasses)]
	B = np.matrix([-1, -1, 0.5, -1, -1, 1, 0.5, -1, 1, 1, 0.5, -1, 1, -1, 0.5, -1,
	               -1, -1, -0.5, -1, -1, 1, -0.5, -1, 1, 1, -0.5, -1, 1, -1, -0.5, -1]).T
	pz = Softmax()
	pz.buildGeneralModel(dims=dims, numClasses=numClasses, boundries=boundries, B=B, steepness=steep)
	# one 2D slice per pair of retained axes: (x,z), (y,z), (x,y)
	sliceSpecs = [((0, 2), 'X Axis', 'Z Axis', 'Slice Across Y Axis'),
	              ((1, 2), 'Y Axis', 'Z Axis', 'Slice Across X axis'),
	              ((0, 1), 'X Axis', 'Y Axis', 'Slice Across Z Axis')]
	for keep, xlab, ylab, title in sliceSpecs:
		sliced = Softmax(deepcopy(pz.weights), deepcopy(pz.bias))
		sliced.weights = [[w[keep[0]], w[keep[1]]] for w in sliced.weights]
		fig = plt.figure()
		x, y, c = sliced.plot2D(low=[-5, -5], high=[5, 5], vis=False)
		plt.contourf(x, y, c)
		plt.xlabel(xlab)
		plt.ylabel(ylab)
		plt.title(title)
	pz.plot3D()
def testOrientRecModel():
	"""Demo: oriented-rectangle softmax model."""
	center = [4, 4]
	length, width = 3, 2
	orient = 0
	pz = Softmax()
	pz.buildOrientedRecModel(center, orient, length, width)
	pz.plot2D(low=[0, 0], high=[10, 10])
def testTriView():
	"""Demo: triangular camera-view softmax model from a pose."""
	pz = Softmax()
	pose = [2, 1.4, 15.3]
	pz.buildTriView(pose, length=2, steepness=5)
	pz.plot2D(low=[0, 0], high=[10, 10])
def testMakeNear():
	"""Demo: 'near' region built as the band between two concentric oriented rectangles."""
	pzIn = Softmax()
	pzOut = Softmax()
	cent = [4, 4]
	orient = 0
	nearness = 2
	lengthIn, widthIn = 3, 2
	lengthOut = lengthIn + nearness
	widthOut = widthIn + nearness
	pzIn.buildOrientedRecModel(cent, orient, lengthIn, widthIn, steepness=10)
	pzOut.buildOrientedRecModel(cent, orient, lengthOut, widthOut, steepness=10)
	# roughly uniform belief over the region
	b = GM()
	for i in range(10):
		for j in range(10):
			b.addG(Gaussian([i, j], [[1, 0], [0, 1]], 1))
	b.normalizeWeights()
	# outside the inner rectangle (union of classes 1..4)...
	b1 = GM()
	for cls in range(1, 5):
		b1.addGM(pzIn.runVBND(b, cls))
	b1.normalizeWeights()
	# ...but inside the outer rectangle (class 0)
	b2 = GM()
	b2.addGM(pzOut.runVBND(b1, 0))
	b2.normalizeWeights()
	fig, axarr = plt.subplots(3)
	for ax, belief in zip(axarr, [b, b1, b2]):
		x, y, c = belief.plot2D(low=[0, 0], high=[10, 10], vis=False)
		ax.contourf(x, y, c)
	plt.show()
def testLogisticRegression():
	"""Demo: fit a 2-class softmax from labelled points and evaluate a test point."""
	X = [[1, 3], [2, 4], [2, 2], [4, 3]]
	t = [0, 0, 1, 1]
	cols = ['r', 'b', 'g', 'y', 'w', 'k', 'm']
	a = Softmax()
	a.logRegress(X, t, 1)
	x, y, c = a.plot2D(vis=False)
	plt.contourf(x, y, c)
	# overlay the training points, colored by label
	for pt, label in zip(X, t):
		plt.scatter(pt[0], pt[1], c=cols[label])
	testPoint = [1, 2]
	winPercent = a.pointEvalND(1, testPoint)
	lossPercent = a.pointEvalND(0, testPoint)
	print('Win:' + str(winPercent), 'Loss:' + str(lossPercent))
	plt.show()
def testDiscritization():
	"""Demo: contour plot of an oriented-rectangle model vs its discretized likelihood."""
	centroid = [0, 0]
	orientation = 35
	steep = 10
	length, width = 3, 2
	softClass = [1]
	pz = Softmax()
	pz.buildOrientedRecModel(centroid, orientation, length, width, steepness=steep)
	x, y, c = pz.plot2D(low=[-5, -5], high=[5, 5], vis=False)
	fig, axarr = plt.subplots(2)
	axarr[0].contourf(x, y, c)
	c = pz.discretize2D(softClass, low=[-5, -5], high=[5, 5])
	axarr[1].contourf(x, y, c)
	plt.show()
def testLWIS():
	"""Demo: likelihood-weighted importance sampling update, compared to the prior."""
	pz = Softmax()
	pose = [0, 0, 0]
	pz.buildTriView(pose, length=2, steepness=10)
	# prior: 100 random small components in [-2, 2]^2
	prior = GM()
	for _ in range(100):
		mean = [np.random.random() * 4 - 2, np.random.random() * 4 - 2]
		prior.addG(Gaussian(mean, [[0.1, 0], [0, 0.1]], 1))
	prior.normalizeWeights()
	post = GM()
	for g in prior:
		post.addG(pz.lwisUpdate(g, 0, 500, inverse=True))
	x1, y1, c1 = prior.plot2D(low=[-5, -5], high=[5, 5], vis=False)
	x3, y3, c3 = pz.plot2D(low=[-5, -5], high=[5, 5], vis=False)
	x2, y2, c2 = post.plot2D(low=[-5, -5], high=[5, 5], vis=False)
	diffs = c2 - c1
	print(np.amax(c2))
	print(np.amax(diffs))
	print(np.amin(diffs))
	fig, axarr = plt.subplots(4)
	panels = [(x1, y1, c1, 'Prior'), (x3, y3, c3, 'Likelihood'),
	          (x2, y2, c2, 'Posterior'), (x2, y2, diffs, 'Diffs')]
	for ax, (px, py, pc, title) in zip(axarr, panels):
		ax.contourf(px, py, pc)
		ax.set_title(title)
	plt.show()
# Entry point: runs the 1D softmax fusion demo by default.
# Uncomment exactly one of the other demos below to run it instead.
# NOTE(review): test4DSoftmax is referenced here but not defined in this file.
if __name__ == "__main__":
	test1DSoftmax();
	#test2DSoftmax();
	#test4DSoftmax();
	#testRectangleModel();
	#testGeneralModel();
	#testPointsModel();
	#testPlot3D();
	#testOrientRecModel();
	#testTriView();
	#testMakeNear();
	#testLogisticRegression();
	#testDiscritization();
	#testLWIS();
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"numpy.log",
"math.sqrt",
"math.cos",
"numpy.array",
"copy.deepcopy",
"math.exp",
"matplotlib.pyplot.contourf",
"numpy.atleast_2d",
"scipy.linalg.lstsq",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",... | [((23704, 23734), 'gaussianMixtures.GM', 'GM', (['[2, 4]', '[1, 0.5]', '[1, 0.5]'], {}), '([2, 4], [1, 0.5], [1, 0.5])\n', (23706, 23734), False, 'from gaussianMixtures import GM\n'), ((24283, 24315), 'matplotlib.pyplot.plot', 'plt.plot', (['x0', 'classes[softClass]'], {}), '(x0, classes[softClass])\n', (24291, 24315), True, 'import matplotlib.pyplot as plt\n'), ((24318, 24335), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'pri'], {}), '(x2, pri)\n', (24326, 24335), True, 'import matplotlib.pyplot as plt\n'), ((24337, 24354), 'matplotlib.pyplot.plot', 'plt.plot', (['x3', 'pos'], {}), '(x3, pos)\n', (24345, 24354), True, 'import matplotlib.pyplot as plt\n'), ((24357, 24380), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'numApprox'], {}), '(x1, numApprox)\n', (24365, 24380), True, 'import matplotlib.pyplot as plt\n'), ((24383, 24401), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (24391, 24401), True, 'import matplotlib.pyplot as plt\n'), ((24402, 24423), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[low, high]'], {}), '([low, high])\n', (24410, 24423), True, 'import matplotlib.pyplot as plt\n'), ((24424, 24488), 'matplotlib.pyplot.title', 'plt.title', (["('Fusion of prior with: ' + softClassLabels[softClass])"], {}), "('Fusion of prior with: ' + softClassLabels[softClass])\n", (24433, 24488), True, 'import matplotlib.pyplot as plt\n'), ((24492, 24510), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {}), '(labels)\n', (24502, 24510), True, 'import matplotlib.pyplot as plt\n'), ((24514, 24524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24522, 24524), True, 'import matplotlib.pyplot as plt\n'), ((25337, 25341), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (25339, 25341), False, 'from gaussianMixtures import GM\n'), ((26051, 26092), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, sharex=True, sharey=True)\n', (26063, 26092), True, 
'import matplotlib.pyplot as plt\n'), ((26575, 26585), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26583, 26585), True, 'import matplotlib.pyplot as plt\n'), ((26774, 26778), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (26776, 26778), False, 'from gaussianMixtures import GM\n'), ((27067, 27082), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)'], {}), '(6)\n', (27079, 27082), True, 'import matplotlib.pyplot as plt\n'), ((27453, 27463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27461, 27463), True, 'import matplotlib.pyplot as plt\n'), ((27694, 27698), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (27696, 27698), False, 'from gaussianMixtures import GM\n'), ((27987, 28002), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)'], {}), '(5)\n', (27999, 28002), True, 'import matplotlib.pyplot as plt\n'), ((28368, 28378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28376, 28378), True, 'import matplotlib.pyplot as plt\n'), ((29780, 29792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29790, 29792), True, 'import matplotlib.pyplot as plt\n'), ((29856, 29877), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'c'], {}), '(x, y, c)\n', (29868, 29877), True, 'import matplotlib.pyplot as plt\n'), ((29879, 29899), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Axis"""'], {}), "('X Axis')\n", (29889, 29899), True, 'import matplotlib.pyplot as plt\n'), ((29903, 29923), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z Axis"""'], {}), "('Z Axis')\n", (29913, 29923), True, 'import matplotlib.pyplot as plt\n'), ((29927, 29959), 'matplotlib.pyplot.title', 'plt.title', (['"""Slice Across Y Axis"""'], {}), "('Slice Across Y Axis')\n", (29936, 29959), True, 'import matplotlib.pyplot as plt\n'), ((29968, 29980), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29978, 29980), True, 'import matplotlib.pyplot as plt\n'), ((30044, 30065), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'c'], 
{}), '(x, y, c)\n', (30056, 30065), True, 'import matplotlib.pyplot as plt\n'), ((30067, 30087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y Axis"""'], {}), "('Y Axis')\n", (30077, 30087), True, 'import matplotlib.pyplot as plt\n'), ((30091, 30111), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z Axis"""'], {}), "('Z Axis')\n", (30101, 30111), True, 'import matplotlib.pyplot as plt\n'), ((30114, 30146), 'matplotlib.pyplot.title', 'plt.title', (['"""Slice Across X axis"""'], {}), "('Slice Across X axis')\n", (30123, 30146), True, 'import matplotlib.pyplot as plt\n'), ((30155, 30167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30165, 30167), True, 'import matplotlib.pyplot as plt\n'), ((30231, 30252), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'c'], {}), '(x, y, c)\n', (30243, 30252), True, 'import matplotlib.pyplot as plt\n'), ((30254, 30274), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Axis"""'], {}), "('X Axis')\n", (30264, 30274), True, 'import matplotlib.pyplot as plt\n'), ((30278, 30298), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Axis"""'], {}), "('Y Axis')\n", (30288, 30298), True, 'import matplotlib.pyplot as plt\n'), ((30301, 30333), 'matplotlib.pyplot.title', 'plt.title', (['"""Slice Across Z Axis"""'], {}), "('Slice Across Z Axis')\n", (30310, 30333), True, 'import matplotlib.pyplot as plt\n'), ((31133, 31137), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (31135, 31137), False, 'from gaussianMixtures import GM\n'), ((31263, 31267), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (31265, 31267), False, 'from gaussianMixtures import GM\n'), ((31356, 31360), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (31358, 31360), False, 'from gaussianMixtures import GM\n'), ((31435, 31450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (31447, 31450), True, 'import matplotlib.pyplot as plt\n'), ((31708, 31718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31716, 31718), True, 
'import matplotlib.pyplot as plt\n'), ((31945, 31966), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'c'], {}), '(x, y, c)\n', (31957, 31966), True, 'import matplotlib.pyplot as plt\n'), ((32213, 32223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32221, 32223), True, 'import matplotlib.pyplot as plt\n'), ((32526, 32541), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (32538, 32541), True, 'import matplotlib.pyplot as plt\n'), ((32659, 32669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32667, 32669), True, 'import matplotlib.pyplot as plt\n'), ((32782, 32786), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (32784, 32786), False, 'from gaussianMixtures import GM\n'), ((32991, 32995), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (32993, 32995), False, 'from gaussianMixtures import GM\n'), ((33376, 33391), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)'], {}), '(4)\n', (33388, 33391), True, 'import matplotlib.pyplot as plt\n'), ((33655, 33665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33663, 33665), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1634), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (1631, 1634), True, 'import numpy as np\n'), ((1648, 1654), 'numpy.linalg.svd', 'svd', (['A'], {}), '(A)\n', (1651, 1654), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((1887, 1902), 'math.sqrt', 'math.sqrt', (['dist'], {}), '(dist)\n', (1896, 1902), False, 'import math\n'), ((2379, 2403), 'numpy.zeros', 'np.zeros', ([], {'shape': '(12, 15)'}), '(shape=(12, 15))\n', (2387, 2403), True, 'import numpy as np\n'), ((3089, 3106), 'numpy.hstack', 'np.hstack', (['(M, B)'], {}), '((M, B))\n', (3098, 3106), True, 'import numpy as np\n'), ((3877, 3939), 'math.sqrt', 'math.sqrt', (['(width / 2 * (width / 2) + length / 2 * (length / 2))'], {}), '(width / 2 * (width / 2) + length / 2 * (length / 2))\n', (3886, 3939), False, 'import math\n'), ((3945, 3969), 'math.asin', 
'math.asin', (['(width / 2 / h)'], {}), '(width / 2 / h)\n', (3954, 3969), False, 'import math\n'), ((5075, 5092), 'numpy.hstack', 'np.hstack', (['(M, B)'], {}), '((M, B))\n', (5084, 5092), True, 'import numpy as np\n'), ((7250, 7267), 'numpy.hstack', 'np.hstack', (['(M, B)'], {}), '((M, B))\n', (7259, 7267), True, 'import numpy as np\n'), ((10416, 10436), 'numpy.matrix', 'np.matrix', (['prior_var'], {}), '(prior_var)\n', (10425, 10436), True, 'import numpy as np\n'), ((10454, 10475), 'numpy.matrix', 'np.matrix', (['prior_mean'], {}), '(prior_mean)\n', (10463, 10475), True, 'import numpy as np\n'), ((10490, 10504), 'numpy.matrix', 'np.matrix', (['var'], {}), '(var)\n', (10499, 10504), True, 'import numpy as np\n'), ((10518, 10533), 'numpy.matrix', 'np.matrix', (['mean'], {}), '(mean)\n', (10527, 10533), True, 'import numpy as np\n'), ((11736, 11758), 'numpy.array', 'np.array', (['self.weights'], {}), '(self.weights)\n', (11744, 11758), True, 'import numpy as np\n'), ((11765, 11784), 'numpy.array', 'np.array', (['self.bias'], {}), '(self.bias)\n', (11773, 11784), True, 'import numpy as np\n'), ((11879, 11898), 'numpy.array', 'np.array', (['prior_var'], {}), '(prior_var)\n', (11887, 11898), True, 'import numpy as np\n'), ((11916, 11936), 'numpy.array', 'np.array', (['prior_mean'], {}), '(prior_mean)\n', (11924, 11936), True, 'import numpy as np\n'), ((15558, 15562), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (15560, 15562), False, 'from gaussianMixtures import GM\n'), ((16343, 16347), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (16345, 16347), False, 'from gaussianMixtures import GM\n'), ((16773, 16809), 'numpy.exp', 'np.exp', (['(topIn + self.bias[softClass])'], {}), '(topIn + self.bias[softClass])\n', (16779, 16809), True, 'import numpy as np\n'), ((17895, 17912), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (17904, 17912), True, 'import numpy as np\n'), ((19244, 19256), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19254, 
19256), True, 'import matplotlib.pyplot as plt\n'), ((20071, 20081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20079, 20081), True, 'import matplotlib.pyplot as plt\n'), ((20162, 20227), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""newton-cg"""', 'multi_class': '"""multinomial"""'}), "(solver='newton-cg', multi_class='multinomial')\n", (20180, 20227), False, 'from sklearn.linear_model import LogisticRegression\n'), ((21012, 21029), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (21021, 21029), True, 'import numpy as np\n'), ((21764, 21768), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (21766, 21768), False, 'from gaussianMixtures import GM\n'), ((21825, 21829), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (21827, 21829), False, 'from gaussianMixtures import GM\n'), ((21887, 21907), 'numpy.zeros', 'np.zeros', (['numSamples'], {}), '(numSamples)\n', (21895, 21907), True, 'import numpy as np\n'), ((22752, 22756), 'gaussianMixtures.GM', 'GM', ([], {}), '()\n', (22754, 22756), False, 'from gaussianMixtures import GM\n'), ((23271, 23286), 'numpy.log', 'np.log', (['sumSamp'], {}), '(sumSamp)\n', (23277, 23286), True, 'import numpy as np\n'), ((23370, 23387), 'numpy.exp', 'np.exp', (['logWeight'], {}), '(logWeight)\n', (23376, 23387), True, 'import numpy as np\n'), ((23400, 23431), 'gaussianMixtures.Gaussian', 'Gaussian', (['muHat', 'varHat', 'weight'], {}), '(muHat, varHat, weight)\n', (23408, 23431), False, 'from gaussianMixtures import Gaussian\n'), ((25356, 25393), 'gaussianMixtures.Gaussian', 'Gaussian', (['[2, 4]', '[[1, 0], [0, 1]]', '(1)'], {}), '([2, 4], [[1, 0], [0, 1]], 1)\n', (25364, 25393), False, 'from gaussianMixtures import Gaussian\n'), ((25403, 25440), 'gaussianMixtures.Gaussian', 'Gaussian', (['[4, 2]', '[[1, 0], [0, 1]]', '(1)'], {}), '([4, 2], [[1, 0], [0, 1]], 1)\n', (25411, 25440), False, 'from gaussianMixtures import Gaussian\n'), ((25450, 25487), 'gaussianMixtures.Gaussian', 
'Gaussian', (['[1, 3]', '[[1, 0], [0, 1]]', '(1)'], {}), '([1, 3], [[1, 0], [0, 1]], 1)\n', (25458, 25487), False, 'from gaussianMixtures import Gaussian\n'), ((29082, 29230), 'numpy.matrix', 'np.matrix', (['[-1, -1, 0.5, -1, -1, 1, 0.5, -1, 1, 1, 0.5, -1, 1, -1, 0.5, -1, -1, -1, -\n 0.5, -1, -1, 1, -0.5, -1, 1, 1, -0.5, -1, 1, -1, -0.5, -1]'], {}), '([-1, -1, 0.5, -1, -1, 1, 0.5, -1, 1, 1, 0.5, -1, 1, -1, 0.5, -1, \n -1, -1, -0.5, -1, -1, 1, -0.5, -1, 1, 1, -0.5, -1, 1, -1, -0.5, -1])\n', (29091, 29230), True, 'import numpy as np\n'), ((29332, 29352), 'copy.deepcopy', 'deepcopy', (['pz.weights'], {}), '(pz.weights)\n', (29340, 29352), False, 'from copy import deepcopy\n'), ((29353, 29370), 'copy.deepcopy', 'deepcopy', (['pz.bias'], {}), '(pz.bias)\n', (29361, 29370), False, 'from copy import deepcopy\n'), ((29388, 29408), 'copy.deepcopy', 'deepcopy', (['pz.weights'], {}), '(pz.weights)\n', (29396, 29408), False, 'from copy import deepcopy\n'), ((29409, 29426), 'copy.deepcopy', 'deepcopy', (['pz.bias'], {}), '(pz.bias)\n', (29417, 29426), False, 'from copy import deepcopy\n'), ((29444, 29464), 'copy.deepcopy', 'deepcopy', (['pz.weights'], {}), '(pz.weights)\n', (29452, 29464), False, 'from copy import deepcopy\n'), ((29465, 29482), 'copy.deepcopy', 'deepcopy', (['pz.bias'], {}), '(pz.bias)\n', (29473, 29482), False, 'from copy import deepcopy\n'), ((31996, 32039), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[i][0]', 'X[i][1]'], {'c': 'cols[t[i]]'}), '(X[i][0], X[i][1], c=cols[t[i]])\n', (32007, 32039), True, 'import matplotlib.pyplot as plt\n'), ((33298, 33309), 'numpy.amax', 'np.amax', (['c2'], {}), '(c2)\n', (33305, 33309), True, 'import numpy as np\n'), ((33320, 33334), 'numpy.amax', 'np.amax', (['diffs'], {}), '(diffs)\n', (33327, 33334), True, 'import numpy as np\n'), ((33345, 33359), 'numpy.amin', 'np.amin', (['diffs'], {}), '(diffs)\n', (33352, 33359), True, 'import numpy as np\n'), ((2269, 2381), 'numpy.matrix', 'np.matrix', (['[-1, 0, recBounds[0][0], 
1, 0, -recBounds[1][0], 0, 1, -recBounds[1][1], 0,\n -1, recBounds[0][1]]'], {}), '([-1, 0, recBounds[0][0], 1, 0, -recBounds[1][0], 0, 1, -recBounds\n [1][1], 0, -1, recBounds[0][1]])\n', (2278, 2381), True, 'import numpy as np\n'), ((3983, 4008), 'math.sin', 'math.sin', (['(theta1 + theta2)'], {}), '(theta1 + theta2)\n', (3991, 4008), False, 'import math\n'), ((4018, 4043), 'math.cos', 'math.cos', (['(theta1 + theta2)'], {}), '(theta1 + theta2)\n', (4026, 4043), False, 'import math\n'), ((4054, 4079), 'math.sin', 'math.sin', (['(theta1 - theta2)'], {}), '(theta1 - theta2)\n', (4062, 4079), False, 'import math\n'), ((4089, 4114), 'math.cos', 'math.cos', (['(theta1 - theta2)'], {}), '(theta1 - theta2)\n', (4097, 4114), False, 'import math\n'), ((6398, 6468), 'numpy.matrix', 'np.matrix', (['[[p1[0], p1[1], 1], [p2[0], p2[1], 1], [mid[0], mid[1], 1]]'], {}), '([[p1[0], p1[1], 1], [p2[0], p2[1], 1], [mid[0], mid[1], 1]])\n', (6407, 6468), True, 'import numpy as np\n'), ((6865, 6877), 'numpy.matrix', 'np.matrix', (['B'], {}), '(B)\n', (6874, 6877), True, 'import numpy as np\n'), ((11589, 11606), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'prod'], {}), '(x, prod)\n', (11597, 11606), True, 'import matplotlib.pyplot as plt\n'), ((11611, 11621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11619, 11621), True, 'import matplotlib.pyplot as plt\n'), ((12935, 12949), 'numpy.linalg.inv', 'inv', (['prior_var'], {}), '(prior_var)\n', (12938, 12949), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((13200, 13208), 'numpy.linalg.inv', 'inv', (['K_l'], {}), '(K_l)\n', (13203, 13208), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((13297, 13308), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (13305, 13308), True, 'import numpy as np\n'), ((13327, 13338), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (13335, 13338), True, 'import numpy as np\n'), ((16974, 17005), 'numpy.exp', 'np.exp', (['(bottomIn + self.bias[i])'], {}), '(bottomIn + 
self.bias[i])\n', (16980, 17005), True, 'import numpy as np\n'), ((17602, 17620), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (17610, 17620), True, 'import matplotlib.pyplot as plt\n'), ((17623, 17644), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[low, high]'], {}), '([low, high])\n', (17631, 17644), True, 'import matplotlib.pyplot as plt\n'), ((17700, 17710), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17708, 17710), True, 'import matplotlib.pyplot as plt\n'), ((18776, 18815), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'dom'], {'cmap': '"""inferno"""'}), "(x, y, dom, cmap='inferno')\n", (18788, 18815), True, 'import matplotlib.pyplot as plt\n'), ((18830, 18842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18840, 18842), True, 'import matplotlib.pyplot as plt\n'), ((19148, 19158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19156, 19158), True, 'import matplotlib.pyplot as plt\n'), ((19685, 19713), 'numpy.zeros', 'np.zeros', ([], {'shape': '(21, 21, 21)'}), '(shape=(21, 21, 21))\n', (19693, 19713), True, 'import numpy as np\n'), ((21780, 21814), 'gaussianMixtures.Gaussian', 'Gaussian', (['prior.mean', 'prior.var', '(1)'], {}), '(prior.mean, prior.var, 1)\n', (21788, 21814), False, 'from gaussianMixtures import Gaussian\n'), ((22424, 22440), 'numpy.asarray', 'np.asarray', (['x[i]'], {}), '(x[i])\n', (22434, 22440), True, 'import numpy as np\n'), ((22507, 22529), 'numpy.outer', 'np.outer', (['muHat', 'muHat'], {}), '(muHat, muHat)\n', (22515, 22529), True, 'import numpy as np\n'), ((22770, 22804), 'gaussianMixtures.Gaussian', 'Gaussian', (['prior.mean', 'prior.var', '(1)'], {}), '(prior.mean, prior.var, 1)\n', (22778, 22804), False, 'from gaussianMixtures import Gaussian\n'), ((23303, 23323), 'numpy.log', 'np.log', (['prior.weight'], {}), '(prior.weight)\n', (23309, 23323), True, 'import numpy as np\n'), ((27557, 27600), 'numpy.matrix', 'np.matrix', (['[-1, 1, -1, 1, 1, -1, 0, -1, 
-1]'], {}), '([-1, 1, -1, 1, 1, -1, 0, -1, -1])\n', (27566, 27600), True, 'import numpy as np\n'), ((3650, 3658), 'random.random', 'random', ([], {}), '()\n', (3656, 3658), False, 'from random import random\n'), ((5634, 5642), 'random.random', 'random', ([], {}), '()\n', (5640, 5642), False, 'from random import random\n'), ((7839, 7847), 'random.random', 'random', ([], {}), '()\n', (7845, 7847), False, 'from random import random\n'), ((9994, 10036), 'math.sqrt', 'math.sqrt', (['(yc2[c] + a ** 2 - 2 * a * yc[c])'], {}), '(yc2[c] + a ** 2 - 2 * a * yc[c])\n', (10003, 10036), False, 'import math\n'), ((15151, 15185), 'numpy.abs', 'np.abs', (['(log_c_hat - prev_log_c_hat)'], {}), '(log_c_hat - prev_log_c_hat)\n', (15157, 15185), True, 'import numpy as np\n'), ((17353, 17400), 'math.exp', 'math.exp', (['(self.weights[j] * x[i] + self.bias[j])'], {}), '(self.weights[j] * x[i] + self.bias[j])\n', (17361, 17400), False, 'import math\n'), ((17574, 17597), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'softmax[i]'], {}), '(x, softmax[i])\n', (17582, 17597), True, 'import matplotlib.pyplot as plt\n'), ((17676, 17694), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {}), '(labels)\n', (17686, 17694), True, 'import matplotlib.pyplot as plt\n'), ((22297, 22315), 'numpy.dot', 'np.dot', (['x[i]', 'w[i]'], {}), '(x[i], w[i])\n', (22303, 22315), True, 'import numpy as np\n'), ((22892, 22905), 'numpy.zeros', 'np.zeros', (['(500)'], {}), '(500)\n', (22900, 22905), True, 'import numpy as np\n'), ((26841, 26878), 'gaussianMixtures.Gaussian', 'Gaussian', (['[i, j]', '[[1, 0], [0, 1]]', '(1)'], {}), '([i, j], [[1, 0], [0, 1]], 1)\n', (26849, 26878), False, 'from gaussianMixtures import Gaussian\n'), ((27761, 27798), 'gaussianMixtures.Gaussian', 'Gaussian', (['[i, j]', '[[1, 0], [0, 1]]', '(1)'], {}), '([i, j], [[1, 0], [0, 1]], 1)\n', (27769, 27798), False, 'from gaussianMixtures import Gaussian\n'), ((31197, 31234), 'gaussianMixtures.Gaussian', 'Gaussian', (['[i, j]', '[[1, 0], [0, 
1]]', '(1)'], {}), '([i, j], [[1, 0], [0, 1]], 1)\n', (31205, 31234), False, 'from gaussianMixtures import Gaussian\n'), ((1508, 1516), 'random.random', 'random', ([], {}), '()\n', (1514, 1516), False, 'from random import random\n'), ((3191, 3209), 'scipy.linalg.lstsq', 'linalg.lstsq', (['M', 'B'], {}), '(M, B)\n', (3203, 3209), True, 'import scipy.linalg as linalg\n'), ((3416, 3432), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (3424, 3432), True, 'import numpy as np\n'), ((3466, 3480), 'numpy.array', 'np.array', (['bias'], {}), '(bias)\n', (3474, 3480), True, 'import numpy as np\n'), ((5105, 5123), 'scipy.linalg.lstsq', 'linalg.lstsq', (['M', 'B'], {}), '(M, B)\n', (5117, 5123), True, 'import scipy.linalg as linalg\n'), ((5400, 5416), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (5408, 5416), True, 'import numpy as np\n'), ((5450, 5464), 'numpy.array', 'np.array', (['bias'], {}), '(bias)\n', (5458, 5464), True, 'import numpy as np\n'), ((7351, 7369), 'scipy.linalg.lstsq', 'linalg.lstsq', (['M', 'B'], {}), '(M, B)\n', (7363, 7369), True, 'import scipy.linalg as linalg\n'), ((7605, 7621), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (7613, 7621), True, 'import numpy as np\n'), ((7655, 7669), 'numpy.array', 'np.array', (['bias'], {}), '(bias)\n', (7663, 7669), True, 'import numpy as np\n'), ((12883, 12903), 'numpy.outer', 'np.outer', (['w[c]', 'w[c]'], {}), '(w[c], w[c])\n', (12891, 12903), True, 'import numpy as np\n'), ((13167, 13175), 'numpy.linalg.inv', 'inv', (['K_l'], {}), '(K_l)\n', (13170, 13175), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((13915, 13974), 'numpy.sqrt', 'np.sqrt', (['(y_cs_squared[c] + alpha ** 2 - 2 * alpha * y_cs[c])'], {}), '(y_cs_squared[c] + alpha ** 2 - 2 * alpha * y_cs[c])\n', (13922, 13974), True, 'import numpy as np\n'), ((17460, 17507), 'math.exp', 'math.exp', (['(self.weights[j] * x[i] + self.bias[j])'], {}), '(self.weights[j] * x[i] + self.bias[j])\n', (17468, 17507), 
False, 'import math\n'), ((18990, 19008), 'numpy.array', 'np.array', (['model[i]'], {}), '(model[i])\n', (18998, 19008), True, 'import numpy as np\n'), ((20574, 20591), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (20582, 20591), True, 'import numpy as np\n'), ((20629, 20643), 'numpy.array', 'np.array', (['bias'], {}), '(bias)\n', (20637, 20643), True, 'import numpy as np\n'), ((20855, 20863), 'random.random', 'random', ([], {}), '()\n', (20861, 20863), False, 'from random import random\n'), ((22469, 22485), 'numpy.outer', 'np.outer', (['xi', 'xi'], {}), '(xi, xi)\n', (22477, 22485), True, 'import numpy as np\n'), ((10300, 10315), 'numpy.exp', 'np.exp', (['(-zeta_c)'], {}), '(-zeta_c)\n', (10306, 10315), True, 'import numpy as np\n'), ((11107, 11124), 'numpy.exp', 'np.exp', (['zeta_c[c]'], {}), '(zeta_c[c])\n', (11113, 11124), True, 'import numpy as np\n'), ((12974, 13010), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * prior_var)'], {}), '(2 * np.pi * prior_var)\n', (12987, 13010), True, 'import numpy as np\n'), ((16524, 16539), 'numpy.exp', 'np.exp', (['logCHat'], {}), '(logCHat)\n', (16530, 16539), True, 'import numpy as np\n'), ((18356, 18428), 'numpy.exp', 'np.exp', (['(self.weights[k][0] * xx + self.weights[k][1] * yy + self.bias[k])'], {}), '(self.weights[k][0] * xx + self.weights[k][1] * yy + self.bias[k])\n', (18362, 18428), True, 'import numpy as np\n'), ((18448, 18520), 'numpy.exp', 'np.exp', (['(self.weights[m][0] * xx + self.weights[m][1] * yy + self.bias[m])'], {}), '(self.weights[m][0] * xx + self.weights[m][1] * yy + self.bias[m])\n', (18454, 18520), True, 'import numpy as np\n'), ((21422, 21494), 'numpy.exp', 'np.exp', (['(self.weights[k][0] * xx + self.weights[k][1] * yy + self.bias[k])'], {}), '(self.weights[k][0] * xx + self.weights[k][1] * yy + self.bias[k])\n', (21428, 21494), True, 'import numpy as np\n'), ((21517, 21589), 'numpy.exp', 'np.exp', (['(self.weights[m][0] * xx + self.weights[m][1] * yy + self.bias[m])'], 
{}), '(self.weights[m][0] * xx + self.weights[m][1] * yy + self.bias[m])\n', (21523, 21589), True, 'import numpy as np\n'), ((10676, 10690), 'numpy.linalg.det', 'det', (['prior_var'], {}), '(prior_var)\n', (10679, 10690), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((10693, 10705), 'numpy.linalg.det', 'det', (['var_hat'], {}), '(var_hat)\n', (10696, 10705), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((10795, 10809), 'numpy.linalg.inv', 'inv', (['prior_var'], {}), '(prior_var)\n', (10798, 10809), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((12497, 12511), 'numpy.exp', 'np.exp', (['xis[c]'], {}), '(xis[c])\n', (12503, 12511), True, 'import numpy as np\n'), ((14988, 15002), 'numpy.exp', 'np.exp', (['xis[c]'], {}), '(xis[c])\n', (14994, 15002), True, 'import numpy as np\n'), ((32884, 32902), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (32900, 32902), True, 'import numpy as np\n'), ((32907, 32925), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (32923, 32925), True, 'import numpy as np\n'), ((8008, 8029), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8020, 8029), False, 'import math\n'), ((8062, 8083), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8074, 8083), False, 'import math\n'), ((8117, 8138), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8129, 8138), False, 'import math\n'), ((8170, 8191), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8182, 8191), False, 'import math\n'), ((8281, 8302), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8293, 8302), False, 'import math\n'), ((8339, 8360), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8351, 8360), False, 'import math\n'), ((8400, 8421), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8412, 8421), False, 'import math\n'), ((8459, 8480), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', 
(8471, 8480), False, 'import math\n'), ((8515, 8536), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8527, 8536), False, 'import math\n'), ((8569, 8590), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8581, 8590), False, 'import math\n'), ((8624, 8645), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8636, 8645), False, 'import math\n'), ((8677, 8698), 'math.radians', 'math.radians', (['pose[2]'], {}), '(pose[2])\n', (8689, 8698), False, 'import math\n'), ((10725, 10739), 'numpy.linalg.inv', 'inv', (['prior_var'], {}), '(prior_var)\n', (10728, 10739), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((14566, 14580), 'numpy.linalg.det', 'det', (['prior_var'], {}), '(prior_var)\n', (14569, 14580), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((14583, 14595), 'numpy.linalg.det', 'det', (['var_hat'], {}), '(var_hat)\n', (14586, 14595), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((14703, 14717), 'numpy.linalg.inv', 'inv', (['prior_var'], {}), '(prior_var)\n', (14706, 14717), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((14624, 14638), 'numpy.linalg.inv', 'inv', (['prior_var'], {}), '(prior_var)\n', (14627, 14638), False, 'from numpy.linalg import inv, det, svd, solve\n'), ((13467, 13493), 'numpy.outer', 'np.outer', (['mu_hat', 'mu_hat.T'], {}), '(mu_hat, mu_hat.T)\n', (13475, 13493), True, 'import numpy as np\n'), ((16186, 16201), 'numpy.exp', 'np.exp', (['logCHat'], {}), '(logCHat)\n', (16192, 16201), True, 'import numpy as np\n')] |
# map-ephys interative shell module
import os
import sys
import logging
from code import interact
import time
import numpy as np
import pandas as pd
import datetime
import datajoint as dj
from pipeline import lab
from pipeline import experiment
from pipeline import ccf
from pipeline import ephys
from pipeline import histology
from pipeline import tracking
from pipeline import psth
from pipeline import report
from pipeline import export
from pipeline import publication
pipeline_modules = [lab, ccf, experiment, ephys, histology, tracking, psth]
log = logging.getLogger(__name__)
def usage_exit():
    """Print a usage summary listing every available action, then exit."""
    prog = os.path.basename(sys.argv[0])
    cmds = '|'.join(actions)
    print("usage: {p} [{c}] <args>".format(p=prog, c=cmds))
    sys.exit(0)
def logsetup(*args):
    """Configure logging for the map pipeline.

    Parameters
    ----------
    args : optional
        If given, ``args[0]`` is a level name string ('CRITICAL', 'ERROR',
        'WARNING', 'INFO', 'DEBUG', 'NOTSET'); otherwise INFO is used.

    A file handler is added when ``dj.config['custom']['logfile']`` is set.
    """
    level_map = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    level = level_map[args[0]] if args else logging.INFO

    logfile = dj.config.get('custom', {'logfile': None}).get('logfile', None)

    handlers = [logging.StreamHandler()]
    if logfile:
        handlers.append(logging.FileHandler(logfile))

    # Root logger stays at ERROR; the pipeline loggers below are opted in
    # individually at the requested level.
    logging.basicConfig(level=logging.ERROR, handlers=handlers)

    log.setLevel(level)
    # Single loop instead of one copy-pasted setLevel line per module.
    for name in ('pipeline',
                 'pipeline.psth',
                 'pipeline.ccf',
                 'pipeline.report',
                 'pipeline.publication',
                 'pipeline.ingest.behavior',
                 'pipeline.ingest.ephys',
                 'pipeline.ingest.tracking',
                 'pipeline.ingest.histology'):
        logging.getLogger(name).setLevel(level)
def ingest_behavior(*args):
    """Run BehaviorIngest.populate() with a progress display."""
    from pipeline.ingest import behavior as behavior_ingest
    ingest_job = behavior_ingest.BehaviorIngest()
    ingest_job.populate(display_progress=True)
def ingest_ephys(*args):
    """Run EphysIngest.populate() with a progress display."""
    from pipeline.ingest import ephys as ephys_ingest
    ingest_job = ephys_ingest.EphysIngest()
    ingest_job.populate(display_progress=True)
def ingest_tracking(*args):
    """Run TrackingIngest.populate() with a progress display."""
    from pipeline.ingest import tracking as tracking_ingest
    ingest_job = tracking_ingest.TrackingIngest()
    ingest_job.populate(display_progress=True)
def ingest_histology(*args):
    """Run HistologyIngest.populate() with a progress display."""
    from pipeline.ingest import histology as histology_ingest
    ingest_job = histology_ingest.HistologyIngest()
    ingest_job.populate(display_progress=True)
def load_animal(excel_fp, sheet_name='Sheet1'):
    """Insert subjects and water restrictions from an Excel sheet.

    Each row must provide subject_id, username, cage_number, date_of_birth,
    sex, animal_source, water_restriction_number, wr_start_date and
    wr_start_weight columns (spaces in headers are normalized to underscores).
    Rows whose subject_id is already seen, or already present in lab.Subject,
    are skipped.
    """
    df = pd.read_excel(excel_fp, sheet_name)
    df.columns = [cname.lower().replace(' ', '_') for cname in df.columns]

    subjects, water_restrictions, subject_ids = [], [], []
    for _, row in df.iterrows():
        # Short-circuit: only hit the database when the id is new to this run.
        if row.subject_id in subject_ids or {'subject_id': row.subject_id} in lab.Subject.proj():
            continue
        subject_ids.append(row.subject_id)
        subjects.append({
            'subject_id': row.subject_id, 'username': row.username,
            'cage_number': row.cage_number, 'date_of_birth': row.date_of_birth.date(),
            'sex': row.sex, 'animal_source': row.animal_source})
        water_restrictions.append({
            'subject_id': row.subject_id, 'water_restriction_number': row.water_restriction_number,
            'cage_number': row.cage_number, 'wr_start_date': row.wr_start_date.date(),
            'wr_start_weight': row.wr_start_weight})

    lab.Subject.insert(subjects)
    lab.WaterRestriction.insert(water_restrictions)

    log.info('Inserted {} subjects'.format(len(subjects)))
    log.info('Water restriction number: {}'.format([s['water_restriction_number'] for s in water_restrictions]))
def load_insertion_location(excel_fp, sheet_name='Sheet1'):
    """Insert probe insertion locations and recordable brain regions from an Excel sheet.

    Each row identifies a session by subject_id, session_date, the animal's
    water_restriction_number and behaviour_time (matched against the ingested
    behavior file name), plus an insertion_number. For every matched probe
    insertion that does not yet have them, the row's stereotactic coordinates
    and brain-region fields are inserted.
    """
    from pipeline.ingest import behavior as behav_ingest
    df = pd.read_excel(excel_fp, sheet_name)
    # Normalize column headers: lowercase, spaces -> underscores.
    df.columns = [cname.lower().replace(' ', '_') for cname in df.columns]

    insertion_locations = []
    recordable_brain_regions = []
    for i, row in df.iterrows():
        # Locate the session whose ingested behavior file name embeds this
        # animal's water-restriction number, session date and behavior time.
        sess_key = experiment.Session & (behav_ingest.BehaviorIngest.BehaviorFile
                                          & {'subject_id': row.subject_id, 'session_date': row.session_date.date()}
                                          & 'behavior_file LIKE "%{}%{}_{}%"'.format(row.water_restriction_number,
                                                                                     row.session_date.date().strftime('%Y%m%d'),
                                                                                     row.behaviour_time))
        if sess_key:
            pinsert_key = dict(sess_key.fetch1('KEY'), insertion_number=row.insertion_number)
            if pinsert_key in ephys.ProbeInsertion.proj():
                # Only stage records that do not already exist for this insertion.
                if not (ephys.ProbeInsertion.InsertionLocation & pinsert_key):
                    insertion_locations.append(dict(pinsert_key, skull_reference=row.skull_reference,
                                                    ap_location=row.ap_location, ml_location=row.ml_location,
                                                    depth=row.depth, theta=row.theta, phi=row.phi, beta=row.beta))
                if not (ephys.ProbeInsertion.RecordableBrainRegion & pinsert_key):
                    recordable_brain_regions.append(dict(pinsert_key, brain_area=row.brain_area,
                                                         hemisphere=row.hemisphere))

    log.debug('InsertionLocation: {}'.format(insertion_locations))
    log.debug('RecordableBrainRegion: {}'.format(recordable_brain_regions))

    ephys.ProbeInsertion.InsertionLocation.insert(insertion_locations)
    ephys.ProbeInsertion.RecordableBrainRegion.insert(recordable_brain_regions)

    log.info('load_insertion_location - Number of insertions: {}'.format(len(insertion_locations)))
def populate_ephys(populate_settings=None):
    """Populate the ephys-related computed tables.

    Parameters
    ----------
    populate_settings : dict, optional
        Keyword arguments forwarded to each table's ``populate()`` call.
        Defaults to ``{'reserve_jobs': True, 'display_progress': True}``.
    """
    # None-default instead of a shared mutable default dict.
    if populate_settings is None:
        populate_settings = {'reserve_jobs': True, 'display_progress': True}

    # (log message, table) pairs, populated in order.
    steps = (
        ('experiment.PhotostimBrainRegion.populate()', experiment.PhotostimBrainRegion),
        ('ephys.UnitCoarseBrainLocation.populate()', ephys.UnitCoarseBrainLocation),
        ('ephys.UnitStat.populate()', ephys.UnitStat),
        ('ephys.UnitCellType.populate()', ephys.UnitCellType),
    )
    for msg, tbl in steps:
        log.info(msg)
        tbl.populate(**populate_settings)
def populate_psth(populate_settings=None):
    """Populate the PSTH-related computed tables.

    Parameters
    ----------
    populate_settings : dict, optional
        Keyword arguments forwarded to each table's ``populate()`` call.
        Defaults to ``{'reserve_jobs': True, 'display_progress': True}``.
    """
    # None-default instead of a shared mutable default dict.
    if populate_settings is None:
        populate_settings = {'reserve_jobs': True, 'display_progress': True}

    # (log message, table) pairs, populated in order.
    steps = (
        ('psth.UnitPsth.populate()', psth.UnitPsth),
        ('psth.PeriodSelectivity.populate()', psth.PeriodSelectivity),
        ('psth.UnitSelectivity.populate()', psth.UnitSelectivity),
    )
    for msg, tbl in steps:
        log.info(msg)
        tbl.populate(**populate_settings)
def generate_report(populate_settings=None):
    """Populate every report table.

    Parameters
    ----------
    populate_settings : dict, optional
        Keyword arguments forwarded to each table's ``populate()`` call.
        Defaults to ``{'reserve_jobs': True, 'display_progress': True}``.
    """
    # None-default instead of a shared mutable default dict.
    if populate_settings is None:
        populate_settings = {'reserve_jobs': True, 'display_progress': True}
    from pipeline import report
    for report_tbl in report.report_tables:
        log.info(f'Populate: {report_tbl.full_table_name}')
        report_tbl.populate(**populate_settings)
def sync_report():
    """Fetch every report table, pulling content from the external store to the stage."""
    from pipeline import report
    for tbl in report.report_tables:
        log.info(f'Sync: {tbl.full_table_name} - From {report.store_location} - To {report.store_stage}')
        tbl.fetch()
def nuke_all():
    """Drop all map-specific schemas (ingest first, then pipeline modules).

    Guarded by the 'nuclear_option' key in dj.config; the shared lab schema
    is deliberately left untouched.
    """
    if 'nuclear_option' not in dj.config:
        raise RuntimeError('nuke_all() function not enabled')

    from pipeline.ingest import behavior as behavior_ingest
    from pipeline.ingest import ephys as ephys_ingest
    from pipeline.ingest import tracking as tracking_ingest
    from pipeline.ingest import histology as histology_ingest

    ingest_modules = [behavior_ingest, ephys_ingest, tracking_ingest,
                      histology_ingest]

    # Drop in reverse declaration order so dependents go before dependencies.
    for mod in reversed(ingest_modules):
        mod.schema.drop()

    # production lab schema is not map project specific, so keep it.
    downstream = [mod for mod in pipeline_modules if mod is not lab]
    for mod in reversed(downstream):
        mod.schema.drop()
def publish(*args):
    """Archive raw ephys and tracking-video data to publication storage."""
    from pipeline import publication  # triggers ingest, so skipped
    for archive_tbl in (publication.ArchivedRawEphys,
                        publication.ArchivedTrackingVideo):
        archive_tbl.populate()
def export_recording(*args):
    """Export a recording specified by a ProbeInsertion key.

    Parameters
    ----------
    args[0] : str
        A python dict literal specifying a ProbeInsertion key,
        e.g. ``"{'subject_id': 1, ...}"``.
    args[1] : str, optional
        Output file name; the exporter's default is used when omitted.

    Prints a usage message and returns when no arguments are given.
    """
    import ast
    if not args:
        # The original usage string left its {} placeholder unfilled.
        print("usage: {} export-recording \"probe key\"\n"
              "  where \"probe key\" specifies a ProbeInsertion"
              .format(os.path.basename(sys.argv[0])))
        return
    # literal_eval instead of eval: the key string comes from the command
    # line, so only a plain literal (dict) should be accepted.
    ik = ast.literal_eval(args[0])  # "{k: v}" -> {k: v}
    fn = args[1] if len(args) > 1 else None
    export.export_recording(ik, fn)
def shell(*args):
    """Start an interactive shell with the pipeline schema modules in scope."""
    # Strip the leading 'pipeline.' package component from each module name.
    module_names = ('.'.join(m.__name__.split('.')[1:])
                    for m in pipeline_modules)
    banner = ('map shell.\n\nschema modules:\n\n - {m}\n'
              .format(m='\n - '.join(module_names)))
    interact(banner, local=globals())
def ccfload(*args):
    """Load the CCF r3 20-um annotation volume into the ccf schema."""
    ccf.CCFAnnotation.load_ccf_r3_20um()
def erd(*args):
    """Save an entity-relationship diagram for each schema module to images/."""
    mods = (ephys, lab, experiment, tracking, psth, ccf, histology,
            report, publication)
    for mod in mods:
        # Collapse the dotted module path, dropping the leading package name.
        modname = ''.join(mod.__name__.split('.')[1:])
        out_path = os.path.join('images', '{}.png'.format(modname))
        print('saving', out_path)
        diagram = dj.ERD(mod, context={modname: mod})
        diagram.save(out_path)
def automate_computation():
    """Loop forever: populate ephys, psth and report tables, then sleep."""
    from pipeline import report
    populate_settings = {'reserve_jobs': True, 'suppress_errors': True, 'display_progress': True}
    while True:
        log.info('Populate for: Ephys - PSTH - Report')
        for populate_step in (populate_ephys, populate_psth, generate_report):
            populate_step(populate_settings)

        log.info('report.delete_outdated_probe_tracks()')
        report.delete_outdated_probe_tracks()

        # random sleep time between 5 to 10 minutes
        sleep_time = np.random.randint(300, 600)
        log.info('Sleep: {} minutes'.format(sleep_time / 60))
        time.sleep(sleep_time)
def sync_and_external_cleanup():
    """Continuously sync report tables and purge orphaned external files.

    Runs only when ``dj.config['custom']['allow_external_cleanup']`` is
    truthy; otherwise returns immediately.
    """
    # .get() on the 'custom' level too: dj.config may lack a 'custom' key
    # entirely (logsetup() guards the same way), so avoid a KeyError here.
    if dj.config.get('custom', {}).get('allow_external_cleanup', False):
        from pipeline import report
        while True:
            sync_report()
            report.schema.external['report_store'].delete(delete_external_files=True)
            time.sleep(3600)  # once every hour
# CLI dispatch table: action name -> handler. usage_exit() derives the
# usage string from these keys, so every public command must appear here.
actions = {
    'ingest-behavior': ingest_behavior,
    'ingest-ephys': ingest_ephys,
    'ingest-tracking': ingest_tracking,
    'ingest-histology': ingest_histology,
    # populate-ephys was defined but previously missing from the table,
    # making it unreachable from the command line.
    'populate-ephys': populate_ephys,
    'populate-psth': populate_psth,
    'publish': publish,
    'export-recording': export_recording,
    'generate-report': generate_report,
    'sync-report': sync_report,
    'shell': shell,
    'erd': erd,
    'ccfload': ccfload,
    'automate-computation': automate_computation,
    # sync-and-external-cleanup was likewise defined but not exposed.
    'sync-and-external-cleanup': sync_and_external_cleanup,
    'load-insertion-location': load_insertion_location,
    'load-animal': load_animal
}
| [
"logging.getLogger",
"pipeline.ephys.ProbeInsertion.InsertionLocation.insert",
"logging.StreamHandler",
"pipeline.psth.UnitSelectivity.populate",
"pipeline.lab.Subject.proj",
"pipeline.ephys.ProbeInsertion.RecordableBrainRegion.insert",
"pipeline.ingest.tracking.TrackingIngest",
"time.sleep",
"pipel... | [((561, 588), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (578, 588), False, 'import logging\n'), ((753, 764), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (761, 764), False, 'import sys\n'), ((1302, 1361), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR', 'handlers': 'handlers'}), '(level=logging.ERROR, handlers=handlers)\n', (1321, 1361), False, 'import logging\n'), ((2613, 2648), 'pandas.read_excel', 'pd.read_excel', (['excel_fp', 'sheet_name'], {}), '(excel_fp, sheet_name)\n', (2626, 2648), True, 'import pandas as pd\n'), ((3564, 3592), 'pipeline.lab.Subject.insert', 'lab.Subject.insert', (['subjects'], {}), '(subjects)\n', (3582, 3592), False, 'from pipeline import lab\n'), ((3597, 3644), 'pipeline.lab.WaterRestriction.insert', 'lab.WaterRestriction.insert', (['water_restrictions'], {}), '(water_restrictions)\n', (3624, 3644), False, 'from pipeline import lab\n'), ((3947, 3982), 'pandas.read_excel', 'pd.read_excel', (['excel_fp', 'sheet_name'], {}), '(excel_fp, sheet_name)\n', (3960, 3982), True, 'import pandas as pd\n'), ((5693, 5759), 'pipeline.ephys.ProbeInsertion.InsertionLocation.insert', 'ephys.ProbeInsertion.InsertionLocation.insert', (['insertion_locations'], {}), '(insertion_locations)\n', (5738, 5759), False, 'from pipeline import ephys\n'), ((5764, 5839), 'pipeline.ephys.ProbeInsertion.RecordableBrainRegion.insert', 'ephys.ProbeInsertion.RecordableBrainRegion.insert', (['recordable_brain_regions'], {}), '(recordable_brain_regions)\n', (5813, 5839), False, 'from pipeline import ephys\n'), ((6095, 6156), 'pipeline.experiment.PhotostimBrainRegion.populate', 'experiment.PhotostimBrainRegion.populate', ([], {}), '(**populate_settings)\n', (6135, 6156), False, 'from pipeline import experiment\n'), ((6219, 6278), 'pipeline.ephys.UnitCoarseBrainLocation.populate', 'ephys.UnitCoarseBrainLocation.populate', ([], {}), '(**populate_settings)\n', (6257, 6278), False, 'from pipeline 
import ephys\n'), ((6326, 6370), 'pipeline.ephys.UnitStat.populate', 'ephys.UnitStat.populate', ([], {}), '(**populate_settings)\n', (6349, 6370), False, 'from pipeline import ephys\n'), ((6422, 6470), 'pipeline.ephys.UnitCellType.populate', 'ephys.UnitCellType.populate', ([], {}), '(**populate_settings)\n', (6449, 6470), False, 'from pipeline import ephys\n'), ((6606, 6649), 'pipeline.psth.UnitPsth.populate', 'psth.UnitPsth.populate', ([], {}), '(**populate_settings)\n', (6628, 6649), False, 'from pipeline import psth\n'), ((6705, 6757), 'pipeline.psth.PeriodSelectivity.populate', 'psth.PeriodSelectivity.populate', ([], {}), '(**populate_settings)\n', (6736, 6757), False, 'from pipeline import psth\n'), ((6811, 6861), 'pipeline.psth.UnitSelectivity.populate', 'psth.UnitSelectivity.populate', ([], {}), '(**populate_settings)\n', (6840, 6861), False, 'from pipeline import psth\n'), ((8167, 8206), 'pipeline.publication.ArchivedRawEphys.populate', 'publication.ArchivedRawEphys.populate', ([], {}), '()\n', (8204, 8206), False, 'from pipeline import publication\n'), ((8211, 8255), 'pipeline.publication.ArchivedTrackingVideo.populate', 'publication.ArchivedTrackingVideo.populate', ([], {}), '()\n', (8253, 8255), False, 'from pipeline import publication\n'), ((8538, 8569), 'pipeline.export.export_recording', 'export.export_recording', (['ik', 'fn'], {}), '(ik, fn)\n', (8561, 8569), False, 'from pipeline import export\n'), ((8841, 8877), 'pipeline.ccf.CCFAnnotation.load_ccf_r3_20um', 'ccf.CCFAnnotation.load_ccf_r3_20um', ([], {}), '()\n', (8875, 8877), False, 'from pipeline import ccf\n'), ((9653, 9690), 'pipeline.report.delete_outdated_probe_tracks', 'report.delete_outdated_probe_tracks', ([], {}), '()\n', (9688, 9690), False, 'from pipeline import report\n'), ((9765, 9792), 'numpy.random.randint', 'np.random.randint', (['(300)', '(600)'], {}), '(300, 600)\n', (9782, 9792), True, 'import numpy as np\n'), ((9863, 9885), 'time.sleep', 'time.sleep', (['sleep_time'], {}), 
'(sleep_time)\n', (9873, 9885), False, 'import time\n'), ((1086, 1128), 'datajoint.config.get', 'dj.config.get', (['"""custom"""', "{'logfile': None}"], {}), "('custom', {'logfile': None})\n", (1099, 1128), True, 'import datajoint as dj\n'), ((1187, 1210), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1208, 1210), False, 'import logging\n'), ((1212, 1240), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (1231, 1240), False, 'import logging\n'), ((1272, 1295), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1293, 1295), False, 'import logging\n'), ((1392, 1421), 'logging.getLogger', 'logging.getLogger', (['"""pipeline"""'], {}), "('pipeline')\n", (1409, 1421), False, 'import logging\n'), ((1442, 1476), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.psth"""'], {}), "('pipeline.psth')\n", (1459, 1476), False, 'import logging\n'), ((1497, 1530), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.ccf"""'], {}), "('pipeline.ccf')\n", (1514, 1530), False, 'import logging\n'), ((1551, 1587), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.report"""'], {}), "('pipeline.report')\n", (1568, 1587), False, 'import logging\n'), ((1608, 1649), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.publication"""'], {}), "('pipeline.publication')\n", (1625, 1649), False, 'import logging\n'), ((1670, 1715), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.ingest.behavior"""'], {}), "('pipeline.ingest.behavior')\n", (1687, 1715), False, 'import logging\n'), ((1736, 1778), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.ingest.ephys"""'], {}), "('pipeline.ingest.ephys')\n", (1753, 1778), False, 'import logging\n'), ((1799, 1844), 'logging.getLogger', 'logging.getLogger', (['"""pipeline.ingest.tracking"""'], {}), "('pipeline.ingest.tracking')\n", (1816, 1844), False, 'import logging\n'), ((1865, 1911), 'logging.getLogger', 'logging.getLogger', 
(['"""pipeline.ingest.histology"""'], {}), "('pipeline.ingest.histology')\n", (1882, 1911), False, 'import logging\n'), ((2022, 2054), 'pipeline.ingest.behavior.BehaviorIngest', 'behavior_ingest.BehaviorIngest', ([], {}), '()\n', (2052, 2054), True, 'from pipeline.ingest import behavior as behavior_ingest\n'), ((2172, 2198), 'pipeline.ingest.ephys.EphysIngest', 'ephys_ingest.EphysIngest', ([], {}), '()\n', (2196, 2198), True, 'from pipeline.ingest import ephys as ephys_ingest\n'), ((2325, 2357), 'pipeline.ingest.tracking.TrackingIngest', 'tracking_ingest.TrackingIngest', ([], {}), '()\n', (2355, 2357), True, 'from pipeline.ingest import tracking as tracking_ingest\n'), ((2487, 2521), 'pipeline.ingest.histology.HistologyIngest', 'histology_ingest.HistologyIngest', ([], {}), '()\n', (2519, 2521), True, 'from pipeline.ingest import histology as histology_ingest\n'), ((10167, 10183), 'time.sleep', 'time.sleep', (['(3600)'], {}), '(3600)\n', (10177, 10183), False, 'import time\n'), ((665, 694), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (681, 694), False, 'import os\n'), ((2904, 2922), 'pipeline.lab.Subject.proj', 'lab.Subject.proj', ([], {}), '()\n', (2920, 2922), False, 'from pipeline import lab\n'), ((4844, 4871), 'pipeline.ephys.ProbeInsertion.proj', 'ephys.ProbeInsertion.proj', ([], {}), '()\n', (4869, 4871), False, 'from pipeline import ephys\n'), ((9180, 9215), 'datajoint.ERD', 'dj.ERD', (['mod'], {'context': '{modname: mod}'}), '(mod, context={modname: mod})\n', (9186, 9215), True, 'import datajoint as dj\n')] |
import numpy as np
import rllab.misc.logger as logger
from rllab.misc import special2 as special
class SimpleReplayPool(object):
def __init__(
self,
max_pool_size,
observation_dim,
action_dim,
replacement_policy='stochastic',
replacement_prob=1.0,
max_skip_episode=10,
env=None):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_pool_size = max_pool_size
self._replacement_policy = replacement_policy
self._replacement_prob = replacement_prob
self._max_skip_episode = max_skip_episode
self._observations = np.zeros((max_pool_size, observation_dim),)
if env is not None and env.action_space.is_discrete:
self._actions = np.zeros((max_pool_size,),dtype=np.int64)
self._n = env.action_space.n
self._is_action_discrete = True
else:
self._actions = np.zeros((max_pool_size, action_dim),)
self._is_action_discrete = False
self._rewards = np.zeros(max_pool_size)
self._terminals = np.zeros(max_pool_size, dtype='uint8')
self._initials = np.zeros(max_pool_size, dtype='uint8')
self._observations.fill(0) # pre-allocate
self._actions.fill(0) # pre-allocate
self._terminals.fill(0) # pre-allocate
self._initials.fill(0) # pre-allocate
self._rewards.fill(0) # pre-allocate
# Bottom pointer
self._bottom = 0
# Top pointer
self._top = 0
# Size of the replay buffer
self._size = 0
def add_sample(self, observation, action, reward, terminal, initial):
"""
Add a sample to current replay buffer.
Parameters
----------
observation (np.array):
# TODO (ewei)
"""
self.check_replacement()
self._observations[self._top] = observation
if self._is_action_discrete and not isinstance(action,
(int, np.int64)):
action = special.from_onehot(action)
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._initials[self._top] = initial
self.advance()
def advance(self):
"""
Update the top pointer, bottom pointer, and size of the replay buffer.
"""
self._top = (self._top + 1) % self._max_pool_size
if self._size >= self._max_pool_size:
self._bottom = (self._bottom + 1) % self._max_pool_size
else:
self._size += 1
def check_replacement(self):
if self._replacement_prob < 1.0:
if self._size < self._max_pool_size or \
not self._initials[self._top]: return
self.advance_until_terminate()
def get_skip_flag(self):
"""
"""
if self._replacement_policy == 'full':
skip = False
elif self._replacement_policy == 'stochastic':
skip = np.random.uniform() > self._replacement_prob
else:
raise NotImplementedError
return skip
def advance_until_terminate(self):
skip = self.get_skip_flag()
n_skips = 0
old_top = self._top
new_top = (old_top + 1) % self._max_pool_size
while skip and old_top != new_top and n_skips < self._max_skip_episode:
n_skips += 1
self.advance()
while not self._initials[self._top]:
self.advance()
skip = self.get_skip_flag()
new_top = self._top
logger.log("add_sample, skipped %d episodes, top=%d->%d"%(
n_skips, old_top, new_top))
def last_batch(self, batch_size):
assert self._size >= batch_size
if self._top >= batch_size:
observations=self._observations[self._top-batch_size:self._top]
else:
assert self._size == self._max_pool_size
obs1 = self._observations[self._max_pool_size+
self._top-batch_size:]
obs2 = self._observations[:self._top]
observations = np.concatenate((obs1, obs2), axis=0)
return dict(
observations = observations,
)
    def random_batch(self, batch_size):
        """
        Draw a random batch of transitions from the replay buffer.

        Indices are rejection-sampled until ``batch_size`` valid ones are
        found: a candidate is discarded when it is the newest sample (its
        successor is not written yet) or when stepping to the successor
        would cross a horizon-triggered reset (next slot starts a new
        episode without the current one being terminal).

        Parameters
        ----------
        batch_size (int): The size of the batch.

        Returns
        -------
        sample_batch (dict): A dict with keys 'observations', 'actions',
            'rewards', 'terminals', 'initials', 'next_observations'.
        """
        assert self._size >= batch_size
        indices = np.zeros(batch_size, dtype='uint64')
        transition_indices = np.zeros(batch_size, dtype='uint64')
        count = 0
        while count < batch_size:
            # Sample uniformly over the currently filled window of the
            # circular buffer.
            index = np.random.randint(self._bottom, self._bottom + self._size) % self._max_pool_size
            # make sure that the transition is valid: if we are at the end of the pool, we need to discard
            # this sample
            # NOTE(review): the second clause is always true because
            # _size never exceeds _max_pool_size; only the
            # "newest sample" check appears to matter — verify intent.
            if index == self._size - 1 and self._size <= self._max_pool_size:
                continue
            # if self._terminals[index]:
            #     continue
            transition_index = (index + 1) % self._max_pool_size
            # make sure that the transition is valid: discard the transition if it crosses horizon-triggered resets
            if not self._terminals[index] and self._initials[transition_index]:
                continue
            indices[count] = index
            transition_indices[count] = transition_index
            count += 1
        actions = self._actions[indices]
        if self._is_action_discrete:
            # Stored as integer indices; expand to one-hot for consumers.
            actions = special.to_onehot_n(actions, self._n)
        return dict(
            observations=self._observations[indices],
            actions=actions,
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            initials=self._initials[indices],
            next_observations=self._observations[transition_indices]
        )
    @property
    def size(self):
        """Number of samples currently stored in the pool."""
        return self._size
| [
"rllab.misc.special2.to_onehot_n",
"rllab.misc.special2.from_onehot",
"rllab.misc.logger.log",
"numpy.zeros",
"numpy.random.randint",
"numpy.concatenate",
"numpy.random.uniform"
] | [((698, 740), 'numpy.zeros', 'np.zeros', (['(max_pool_size, observation_dim)'], {}), '((max_pool_size, observation_dim))\n', (706, 740), True, 'import numpy as np\n'), ((1108, 1131), 'numpy.zeros', 'np.zeros', (['max_pool_size'], {}), '(max_pool_size)\n', (1116, 1131), True, 'import numpy as np\n'), ((1158, 1196), 'numpy.zeros', 'np.zeros', (['max_pool_size'], {'dtype': '"""uint8"""'}), "(max_pool_size, dtype='uint8')\n", (1166, 1196), True, 'import numpy as np\n'), ((1222, 1260), 'numpy.zeros', 'np.zeros', (['max_pool_size'], {'dtype': '"""uint8"""'}), "(max_pool_size, dtype='uint8')\n", (1230, 1260), True, 'import numpy as np\n'), ((3679, 3770), 'rllab.misc.logger.log', 'logger.log', (["('add_sample, skipped %d episodes, top=%d->%d' % (n_skips, old_top, new_top))"], {}), "('add_sample, skipped %d episodes, top=%d->%d' % (n_skips,\n old_top, new_top))\n", (3689, 3770), True, 'import rllab.misc.logger as logger\n'), ((4731, 4767), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': '"""uint64"""'}), "(batch_size, dtype='uint64')\n", (4739, 4767), True, 'import numpy as np\n'), ((4797, 4833), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': '"""uint64"""'}), "(batch_size, dtype='uint64')\n", (4805, 4833), True, 'import numpy as np\n'), ((831, 873), 'numpy.zeros', 'np.zeros', (['(max_pool_size,)'], {'dtype': 'np.int64'}), '((max_pool_size,), dtype=np.int64)\n', (839, 873), True, 'import numpy as np\n'), ((1000, 1037), 'numpy.zeros', 'np.zeros', (['(max_pool_size, action_dim)'], {}), '((max_pool_size, action_dim))\n', (1008, 1037), True, 'import numpy as np\n'), ((2100, 2127), 'rllab.misc.special2.from_onehot', 'special.from_onehot', (['action'], {}), '(action)\n', (2119, 2127), True, 'from rllab.misc import special2 as special\n'), ((4221, 4257), 'numpy.concatenate', 'np.concatenate', (['(obs1, obs2)'], {'axis': '(0)'}), '((obs1, obs2), axis=0)\n', (4235, 4257), True, 'import numpy as np\n'), ((5792, 5829), 'rllab.misc.special2.to_onehot_n', 
'special.to_onehot_n', (['actions', 'self._n'], {}), '(actions, self._n)\n', (5811, 5829), True, 'from rllab.misc import special2 as special\n'), ((4906, 4964), 'numpy.random.randint', 'np.random.randint', (['self._bottom', '(self._bottom + self._size)'], {}), '(self._bottom, self._bottom + self._size)\n', (4923, 4964), True, 'import numpy as np\n'), ((3092, 3111), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3109, 3111), True, 'import numpy as np\n')] |
# Freddy @DC, uWaterloo, ON, Canada
# Nov 13, 2017
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
import pandas as pd
import sys
import math
import time
from tqdm import *
from data_preprocess import *
from utils import *
from logger import Logger
# ----- Hyper-parameters -----
num_epochs = 12
batch_size = 256
learning_rate = 2e-4

# ----- Command line -----
# usage: <script> DATA_DIR DEBUG(0/1) LOAD_PREV(0/1) DIRECT_TEST(0/1)
data_dir = sys.argv[1]
debug = sys.argv[2] == '1'
load_prev_model = sys.argv[3] == '1'
direct_test = sys.argv[4] == '1'
use_gpu = torch.cuda.is_available()
# ----- Data -----
def _make_split(name):
    """Build the stock-image dataset for one sample split
    ('train' / 'test' / 'validation')."""
    return stock_img_dataset(
        csv_file=data_dir + '/sample/label_table_' + name + '.csv',
        root_dir=data_dir + '/sample/' + name,
        transform=transforms.Compose([
            Rescale(64),   # resize images to 64x64
            ToTensor()
        ]))

train_set = _make_split('train')
test_set = _make_split('test')
validation_set = _make_split('validation')

# Input pipelines: shuffle only the training stream.
train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                            batch_size=batch_size,
                                            shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                           batch_size=batch_size,
                                           shuffle=False)
val_loader = torch.utils.data.DataLoader(dataset=validation_set,
                                          batch_size=batch_size,
                                          shuffle=False)
# ----- Models -----
# Residual CNN
class res_cnn(nn.Module):
    """Convolutional regression net with one long-range skip connection.

    Expects 4-channel square inputs sized so that four 2x2 max-pools
    leave a 4x4 map (i.e. 64x64 images -- the fc layer is sized for
    4*4*256 features); emits one regression score per sample.
    Attribute names (layer1..layer10, fc) are kept as-is so checkpoints
    saved as 'cnn.pkl' remain loadable.
    """

    def __init__(self):
        super(res_cnn, self).__init__()
        # Trunk, stage 1: 4 -> 64 channels, halve the spatial size.
        self.layer1 = nn.Sequential(
            nn.Conv2d(4, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU())
        # Stage 2: 64 -> 128 channels, halve again.
        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer4 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU())
        self.layer6 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU())
        # Stage 3: 128 -> 256 channels, halve again (no ReLU here).
        self.layer7 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(2))
        # Skip projection: lift the stage-1 map to 256 channels...
        self.layer8 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU())
        self.layer9 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer10 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU())
        # ...and downsample twice so it matches the trunk after layer7.
        self.layer5 = nn.Sequential(
            nn.MaxPool2d(2),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(4 * 4 * 256, 1)

    def forward(self, x):
        """Run the trunk, add the projected skip, and regress one score."""
        stage1 = self.layer1(x)
        h = self.layer2(stage1)
        h = self.layer3(h)
        h = self.layer4(h)
        h = self.layer6(h)
        h = self.layer7(h)
        shortcut = self.layer5(self.layer8(stage1))
        h = self.layer9(h + shortcut)
        h = self.layer10(h)
        flat = h.view(h.size(0), -1)
        return self.fc(flat)
# NOTE(review): dead code below — an unfinished GoogLeNet-style model is
# parked inside a string literal (never executed); safe to delete once
# confirmed unused.
'''
# GoogLeNet
class google_net(nn.Module):
	def __init__(self):
		super(google_net, self).__init__()
		self.conv2d_1x1_a = nn.Conv2d(4,64,kernel_size=1),
		self.conv2d_3x3_a = nn.Conv2d(4,64,kernel_size=3,padding=1),
		self.conv2d_5x5_a = nn.Conv2d(4,64,kernel_size=5,padding=2),
		self.conv2d_1x1_b = nn.Conv2d(64,128,kernel_size=1)
		self.conv2d_3x3_b = nn.Conv2d(64,128,kernel_size=3,padding=1)
		self.conv2d_5x5_b = nn.Conv2d(64,128,kernel_size=5,padding=2)
		self.max_pool = nn.MaxPool2d(kernel_size=3,stride=1,padding=1)
	def forward(self, x):
		# inception 1
		# inception 2
		return out7
'''
# ----- Train-and-test setup -----
cnn = res_cnn().double()
if use_gpu:
    cnn.cuda()
if load_prev_model:
    print('Loading previous model...')
    cnn.load_state_dict(torch.load('cnn.pkl'))

criterion = nn.MSELoss()  # regression loss on the day-1 score
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
logger = Logger('./logs')  # TensorBoard-style summary writer
def test_module(train_size, epoch, data_loader, write=False):
    """Evaluate the global ``cnn`` on ``data_loader`` and report sign accuracy.

    Parameters
    ----------
    train_size (int): number of training samples seen so far; recorded
        in the CSV row when ``write`` is True.
    epoch (int): epoch index for TensorBoard logging; pass -1 to skip it.
    data_loader: DataLoader yielding dicts with 'image' and 'labels'.
    write (bool): append an accuracy row to ./accuracy_records.csv.
    """
    cnn.eval()  # BN layers use running statistics during evaluation.
    correct_d1 = 0
    total = 0
    n_batches = 0
    for sample in tqdm(data_loader):
        if debug and n_batches >= 3:
            break
        n_batches += 1
        images = Variable(sample['image'])
        labels = Variable(sample['labels'])
        if use_gpu:
            images = images.cuda()
            labels = labels.cuda()
        outputs = cnn(images)
        labels_np = to_np(labels)
        outputs_np = to_np(outputs)
        # The prediction is the sign of the day-1 regression output.
        correct_d1 += (np.sign(outputs_np[:, 0]) == labels_np[:, 0]).sum()
        total += labels_np.shape[0]
    acc1 = correct_d1 / total
    # Day-2/day-3 heads are disabled; keep zero placeholders for the CSV.
    acc2 = 0
    acc3 = 0
    acc_total = acc1
    print('Test Accuracy of the model on the %d test images, Day 11: %.4f %%' % (total, 100 * acc1))
    test_size = total
    if write:
        row = pd.DataFrame([[train_size, test_size, acc1, acc2, acc3, acc_total]])
        row.to_csv('./accuracy_records.csv', mode='a', header=False)
    # ---- TensorBoard logging of validation accuracy ----
    if epoch != -1:
        info = {
            'acc_d1': acc1 * 100,
        }
        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch)
# ----- Training loop -----
counter = 0   # batches processed (drives the debug early exit)
total = 0     # training samples seen so far
for epoch in range(num_epochs):
    if direct_test:
        # Evaluation-only mode: test the (loaded) model once and stop.
        print('direc _test...')
        test_module(-1, -1, test_loader, False)
        break
    if debug and counter >= 3:
        break
    prev_i = len(train_loader) * epoch  # global batch offset for logging
    if epoch == 0:
        # Baseline validation accuracy before any training.
        test_module(total, epoch, val_loader, False)
    for i, sample in enumerate(train_loader):
        if debug and counter >= 3:
            break
        counter += 1
        images = Variable(sample['image'])
        labels = Variable(sample['labels']).float()
        if use_gpu:
            images = images.cuda()
            labels = labels.cuda()
        # Forward + backward + optimize on the day-1 target only.
        optimizer.zero_grad()
        outputs = cnn(images).float()
        labels = labels[:, 0]
        outputs = outputs[:, 0]
        loss = criterion(outputs.float(), labels)
        # Append the per-batch loss to a CSV (per-step file I/O is costly).
        pd.DataFrame([[i + 1 + prev_i, to_np(loss)[0]]]).to_csv(
            './training_loss_records.csv', mode='a', header=False)
        loss.backward()
        optimizer.step()
        total += to_np(labels).shape[0]
        if (i + 1) % 1 == 0:  # log every batch; raise the modulus to thin out
            print('Epoch [%d/%d], Batch [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1,
                     math.ceil(len(train_set) / batch_size), loss.data[0]))
            # TensorBoard: scalar loss.
            info = {
                'loss': loss.data[0]
            }
            for tag, value in info.items():
                logger.scalar_summary(tag, value, i + 1 + prev_i)
            # TensorBoard: parameter and gradient histograms.
            for tag, value in cnn.named_parameters():
                tag = tag.replace('.', '/')
                logger.histo_summary(tag, to_np(value), i + 1 + prev_i)
                logger.histo_summary(tag + '/grad', to_np(value.grad), i + 1 + prev_i)
    # Evaluate at the end of every epoch: final test on the last epoch,
    # validation otherwise.
    if epoch + 1 == num_epochs:
        print('final test:')
        test_module(total, -1, test_loader, True)
        print('Traine data size: ' + str(total))
    else:
        test_module(total, epoch + 1, val_loader, False)
    # Save the trained model after each (non-debug) epoch.
    if not debug:
        torch.save(cnn.state_dict(), 'cnn.pkl')
# (An optional cool-down sleep between epochs was sketched here; disabled.)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"pandas.DataFrame",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.MaxPool2d",
"logger.Logger",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"numpy.sign",
"torch.autograd.Variable"
] | [((698, 723), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (721, 723), False, 'import torch\n'), ((1519, 1606), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset=train_set, batch_size=batch_size,\n shuffle=True)\n', (1546, 1606), False, 'import torch\n'), ((1704, 1791), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset=test_set, batch_size=batch_size,\n shuffle=False)\n', (1731, 1791), False, 'import torch\n'), ((1886, 1979), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'validation_set', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset=validation_set, batch_size=batch_size,\n shuffle=False)\n', (1913, 1979), False, 'import torch\n'), ((5404, 5416), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5414, 5416), True, 'import torch.nn as nn\n'), ((5492, 5508), 'logger.Logger', 'Logger', (['"""./logs"""'], {}), "('./logs')\n", (5498, 5508), False, 'from logger import Logger\n'), ((3792, 3817), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 256)', '(1)'], {}), '(4 * 4 * 256, 1)\n', (3801, 3817), True, 'import torch.nn as nn\n'), ((5312, 5333), 'torch.load', 'torch.load', (['"""cnn.pkl"""'], {}), "('cnn.pkl')\n", (5322, 5333), False, 'import torch\n'), ((5888, 5913), 'torch.autograd.Variable', 'Variable', (["sample['image']"], {}), "(sample['image'])\n", (5896, 5913), False, 'from torch.autograd import Variable\n'), ((5931, 5957), 'torch.autograd.Variable', 'Variable', (["sample['labels']"], {}), "(sample['labels'])\n", (5939, 5957), False, 'from torch.autograd import Variable\n'), ((6603, 6625), 'numpy.sign', 'np.sign', (['outputs[:, 0]'], {}), '(outputs[:, 0])\n', (6610, 6625), True, 'import numpy as np\n'), ((7830, 7898), 'pandas.DataFrame', 'pd.DataFrame', (['[[train_size, test_size, 
acc1, acc2, acc3, acc_total]]'], {}), '([[train_size, test_size, acc1, acc2, acc3, acc_total]])\n', (7842, 7898), True, 'import pandas as pd\n'), ((8879, 8904), 'torch.autograd.Variable', 'Variable', (["sample['image']"], {}), "(sample['image'])\n", (8887, 8904), False, 'from torch.autograd import Variable\n'), ((2234, 2276), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(4, 64, kernel_size=3, padding=1)\n', (2243, 2276), True, 'import torch.nn as nn\n'), ((2290, 2308), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2304, 2308), True, 'import torch.nn as nn\n'), ((2322, 2331), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2329, 2331), True, 'import torch.nn as nn\n'), ((2345, 2360), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2357, 2360), True, 'import torch.nn as nn\n'), ((2411, 2454), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 64, kernel_size=3, padding=1)\n', (2420, 2454), True, 'import torch.nn as nn\n'), ((2468, 2486), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2482, 2486), True, 'import torch.nn as nn\n'), ((2500, 2509), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2507, 2509), True, 'import torch.nn as nn\n'), ((2560, 2604), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 128, kernel_size=3, padding=1)\n', (2569, 2604), True, 'import torch.nn as nn\n'), ((2618, 2637), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2632, 2637), True, 'import torch.nn as nn\n'), ((2651, 2660), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2658, 2660), True, 'import torch.nn as nn\n'), ((2674, 2689), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2686, 2689), True, 'import torch.nn as nn\n'), ((2740, 2785), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 128, 
kernel_size=3, padding=1)\n', (2749, 2785), True, 'import torch.nn as nn\n'), ((2799, 2818), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2813, 2818), True, 'import torch.nn as nn\n'), ((2832, 2841), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2839, 2841), True, 'import torch.nn as nn\n'), ((2892, 2937), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 128, kernel_size=3, padding=1)\n', (2901, 2937), True, 'import torch.nn as nn\n'), ((2951, 2970), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2965, 2970), True, 'import torch.nn as nn\n'), ((2984, 2993), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2991, 2993), True, 'import torch.nn as nn\n'), ((3044, 3089), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 256, kernel_size=3, padding=1)\n', (3053, 3089), True, 'import torch.nn as nn\n'), ((3103, 3122), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3117, 3122), True, 'import torch.nn as nn\n'), ((3136, 3151), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3148, 3151), True, 'import torch.nn as nn\n'), ((3202, 3246), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 256, kernel_size=3, padding=1)\n', (3211, 3246), True, 'import torch.nn as nn\n'), ((3260, 3279), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3274, 3279), True, 'import torch.nn as nn\n'), ((3293, 3302), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3300, 3302), True, 'import torch.nn as nn\n'), ((3353, 3398), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (3362, 3398), True, 'import torch.nn as nn\n'), ((3412, 3431), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3426, 3431), True, 'import torch.nn as nn\n'), 
((3445, 3454), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3452, 3454), True, 'import torch.nn as nn\n'), ((3468, 3483), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3480, 3483), True, 'import torch.nn as nn\n'), ((3535, 3580), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (3544, 3580), True, 'import torch.nn as nn\n'), ((3594, 3613), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3608, 3613), True, 'import torch.nn as nn\n'), ((3627, 3636), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3634, 3636), True, 'import torch.nn as nn\n'), ((3687, 3702), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3699, 3702), True, 'import torch.nn as nn\n'), ((3716, 3731), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3728, 3731), True, 'import torch.nn as nn\n'), ((8922, 8948), 'torch.autograd.Variable', 'Variable', (["sample['labels']"], {}), "(sample['labels'])\n", (8930, 8948), False, 'from torch.autograd import Variable\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.