code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import unittest
import numpy as np
from rastervision.core.raster_transformer import RasterTransformer
from rastervision.core.raster_stats import RasterStats
class TestRasterTransformer(unittest.TestCase):
    """Unit tests for RasterTransformer's channel_order / raster_stats handling."""

    def test_no_channel_order_no_stats(self):
        tf = RasterTransformer()
        raster = np.ones((2, 2, 3)).astype(np.uint8)
        np.testing.assert_equal(raster, tf.transform(raster))
        # Non-uint8 chips require raster_stats to be supplied.
        raster = np.ones((2, 2, 3))
        with self.assertRaises(ValueError):
            tf.transform(raster)

    def test_no_channel_order_has_stats(self):
        stats = RasterStats()
        stats.means = np.ones((4, ))
        stats.stds = 2 * np.ones((4, ))
        # Every value has a z-score of 1, which maps to the uint8 value 170.
        tf = RasterTransformer(raster_stats=stats)
        raster = 3 * np.ones((2, 2, 4))
        expected = 170 * np.ones((2, 2, 4))
        np.testing.assert_equal(tf.transform(raster), expected)

    def test_has_channel_order_no_stats(self):
        tf = RasterTransformer(channel_order=[0, 1, 2])
        raster = np.ones((2, 2, 4)).astype(np.uint8)
        raster[:, :, :] *= np.array([0, 1, 2, 3]).astype(np.uint8)
        expected = np.ones((2, 2, 3)).astype(np.uint8)
        expected[:, :, :] *= np.array([0, 1, 2]).astype(np.uint8)
        np.testing.assert_equal(tf.transform(raster), expected)

    def test_has_channel_order_has_stats(self):
        stats = RasterStats()
        stats.means = np.ones((4, ))
        stats.stds = 2 * np.ones((4, ))
        tf = RasterTransformer(raster_stats=stats, channel_order=[0, 1, 2])
        raster = np.ones((2, 2, 4)) * [3, 3, 3, 0]
        expected = 170 * np.ones((2, 2, 3))
        np.testing.assert_equal(tf.transform(raster), expected)
        # Also test when the chip has the same number of channels as
        # channel_order but fewer channels than the stats cover.
        raster = np.ones((2, 2, 3)) * [3, 3, 3]
        np.testing.assert_equal(tf.transform(raster), expected)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"rastervision.core.raster_transformer.RasterTransformer",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_equal",
"rastervision.core.raster_stats.RasterStats"
] | [((2581, 2596), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2594, 2596), False, 'import unittest\n'), ((277, 296), 'rastervision.core.raster_transformer.RasterTransformer', 'RasterTransformer', ([], {}), '()\n', (294, 296), False, 'from rastervision.core.raster_transformer import RasterTransformer\n'), ((403, 442), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['chip', 'out_chip'], {}), '(chip, out_chip)\n', (426, 442), True, 'import numpy as np\n'), ((518, 536), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (525, 536), True, 'import numpy as np\n'), ((703, 716), 'rastervision.core.raster_stats.RasterStats', 'RasterStats', ([], {}), '()\n', (714, 716), False, 'from rastervision.core.raster_stats import RasterStats\n'), ((746, 759), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (753, 759), True, 'import numpy as np\n'), ((921, 965), 'rastervision.core.raster_transformer.RasterTransformer', 'RasterTransformer', ([], {'raster_stats': 'raster_stats'}), '(raster_stats=raster_stats)\n', (938, 965), False, 'from rastervision.core.raster_transformer import RasterTransformer\n'), ((1112, 1164), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out_chip', 'expected_out_chip'], {}), '(out_chip, expected_out_chip)\n', (1135, 1164), True, 'import numpy as np\n'), ((1269, 1315), 'rastervision.core.raster_transformer.RasterTransformer', 'RasterTransformer', ([], {'channel_order': 'channel_order'}), '(channel_order=channel_order)\n', (1286, 1315), False, 'from rastervision.core.raster_transformer import RasterTransformer\n'), ((1626, 1678), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out_chip', 'expected_out_chip'], {}), '(out_chip, expected_out_chip)\n', (1649, 1678), True, 'import numpy as np\n'), ((1751, 1764), 'rastervision.core.raster_stats.RasterStats', 'RasterStats', ([], {}), '()\n', (1762, 1764), False, 'from rastervision.core.raster_stats import RasterStats\n'), ((1794, 1807), 'numpy.ones', 
'np.ones', (['(4,)'], {}), '((4,))\n', (1801, 1807), True, 'import numpy as np\n'), ((1912, 1985), 'rastervision.core.raster_transformer.RasterTransformer', 'RasterTransformer', ([], {'raster_stats': 'raster_stats', 'channel_order': 'channel_order'}), '(raster_stats=raster_stats, channel_order=channel_order)\n', (1929, 1985), False, 'from rastervision.core.raster_transformer import RasterTransformer\n'), ((2157, 2209), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out_chip', 'expected_out_chip'], {}), '(out_chip, expected_out_chip)\n', (2180, 2209), True, 'import numpy as np\n'), ((2495, 2547), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out_chip', 'expected_out_chip'], {}), '(out_chip, expected_out_chip)\n', (2518, 2547), True, 'import numpy as np\n'), ((789, 802), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (796, 802), True, 'import numpy as np\n'), ((981, 999), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (988, 999), True, 'import numpy as np\n'), ((1079, 1097), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (1086, 1097), True, 'import numpy as np\n'), ((1837, 1850), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (1844, 1850), True, 'import numpy as np\n'), ((2015, 2033), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (2022, 2033), True, 'import numpy as np\n'), ((2124, 2142), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (2131, 2142), True, 'import numpy as np\n'), ((2356, 2374), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (2363, 2374), True, 'import numpy as np\n'), ((2462, 2480), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (2469, 2480), True, 'import numpy as np\n'), ((312, 330), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (319, 330), True, 'import numpy as np\n'), ((1331, 1349), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {}), '((2, 2, 4))\n', (1338, 1349), True, 'import numpy as np\n'), ((1392, 
1414), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (1400, 1414), True, 'import numpy as np\n'), ((1507, 1525), 'numpy.ones', 'np.ones', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (1514, 1525), True, 'import numpy as np\n'), ((1581, 1600), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1589, 1600), True, 'import numpy as np\n')] |
# Regresion polinomial
import matplotlib.pyplot as plt
import numpy as np
def createMatrix(m, n, valor=0):
    """Return an m x n matrix (list of row lists) with every entry set to `valor`."""
    # Each iteration builds a fresh row list, so rows are independent.
    return [[valor] * n for _ in range(m)]
def getDimensions(A):
    """Return (rows, cols) of matrix A; A must have at least one row."""
    rows = len(A)
    cols = len(A[0])
    return (rows, cols)
def copyMatrix(B):
    """Return a new matrix with the same entries as B (rows copied one by one)."""
    return [[entry for entry in row] for row in B]
def sumMatrix(A, B):
    """Return the element-wise sum of A and B, or [] if shapes differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if (Am, An) != (Bm, Bn):
        print("Error, matrix of diferent size")
        return []
    return [[A[i][j] + B[i][j] for j in range(An)] for i in range(Am)]
def restaMatrix(A, B):
    """Return the element-wise difference A - B, or [] if shapes differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if (Am, An) != (Bm, Bn):
        print("Error, matrix of diferent size")
        return []
    return [[A[i][j] - B[i][j] for j in range(An)] for i in range(Am)]
def multMatrix(A, B):
    """Return the matrix product of A and B, or [] if the inner dimensions differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if An != Bm:
        print("Error multiplicacion # columnas y # renglos no son iguales")
        return []
    C = createMatrix(Am, Bn)
    # Removed unused `counter` variable; accumulate each dot product directly.
    for i in range(Am):
        for j in range(Bn):
            C[i][j] = sum(A[i][k] * B[k][j] for k in range(An))
    return C
def getAdyacente(A, r, c):
    """Return the minor of A: a copy with row r and column c removed."""
    C = []
    for i, row in enumerate(A):
        if i == r:
            continue
        C.append([value for j, value in enumerate(row) if j != c])
    return C
def detMatrix(A):
    """Determinant of square A by cofactor expansion along the first row.

    Prints a message and returns [] when A is not square.
    """
    m, n = getDimensions(A)
    if m != n:
        print("Matriz no es cuadrada")
        return []
    if n == 1:
        return A[0][0]
    if n == 2:
        return A[0][0] * A[1][1] - A[1][0] * A[0][1]
    total = 0
    for j in range(m):
        sign = (-1) ** j
        total += sign * A[0][j] * detMatrix(getAdyacente(A, 0, j))
    return total
def getMatrizTranspuesta(A):
    """Return the transpose of matrix A as a new list-of-lists matrix."""
    return [list(column) for column in zip(*A)]
def getMatrizAdjunta(A):
    """Return the cofactor matrix of square A, or [] if A is not square."""
    m, n = getDimensions(A)
    if m != n:
        print("La matriz no es cuadrada")
        return []
    return [[(-1) ** (i + j) * detMatrix(getAdyacente(A, i, j))
             for j in range(n)] for i in range(m)]
def getMatrizInversa(A):
    """Invert square A via the adjugate formula; [] if A is singular or not square."""
    m, n = getDimensions(A)
    if m != n:
        print("La matriz no es cuadrada")
        return []
    detA = detMatrix(A)
    if detA == 0:
        print("La matriz no tiene inversa")
        return []
    # inverse = adj(A^T) / det(A)
    adjA = getMatrizAdjunta(getMatrizTranspuesta(A))
    scale = 1 / detA
    return [[scale * adjA[i][j] for j in range(n)] for i in range(m)]
def regPolinomial(grado, x, y):
    """Least-squares polynomial fit of degree `grado` via the normal equations.

    Returns the coefficients as a column matrix (list of 1-element rows),
    ordered from the constant term upward.
    """
    size = grado + 1
    # Normal-equation matrix: A[i][j] = sum of x**(i+j)
    A = [[sum(xi ** (i + j) for xi in x) for j in range(size)]
         for i in range(size)]
    # Right-hand side: C[i] = sum of (x**i) * y
    C = [[sum(xi ** i * yi for xi, yi in zip(x, y))] for i in range(size)]
    return multMatrix(getMatrizInversa(A), C)
def evalPolinomio(coef, x):
    """Evaluate the polynomial with coefficients `coef` at every point in x."""
    coef = np.asarray(coef)
    y = []
    for xi in x:
        total = 0
        for power in range(len(coef)):
            total += xi ** power * coef[power]
        y.append(total)
    return y
# Sample data; the maximum fit degree is len(x) - 1
x = [8, 16, 24, 32]
y = [4.1, 4.5, 5.1, 6.1]
plt.plot(x,y,'rx')
#plt.show()
# Change the first argument to fit a polynomial of a different degree.
coef = regPolinomial(3, x, y)
print(coef)
# Evaluate the fitted polynomial on a dense grid and plot it.
x2 = np.linspace(5,35,100)
y2 = evalPolinomio(coef,x2)
plt.plot(x2,y2)
plt.show()
# Predict the value at x = 7 (expected weight at 7 weeks).
yesp = evalPolinomio(coef,[7])
print("peso esperado 7 semanas --> ", yesp[0][0])
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
# Regresion polinomial
import matplotlib.pyplot as plt
import numpy as np
def createMatrix(m, n, valor=0):
    """Return an m x n matrix (list of row lists) with every entry set to `valor`."""
    # Each iteration builds a fresh row list, so rows are independent.
    return [[valor] * n for _ in range(m)]
def getDimensions(A):
    """Return (rows, cols) of matrix A; A must have at least one row."""
    rows = len(A)
    cols = len(A[0])
    return (rows, cols)
def copyMatrix(B):
    """Return a new matrix with the same entries as B (rows copied one by one)."""
    return [[entry for entry in row] for row in B]
def sumMatrix(A, B):
    """Return the element-wise sum of A and B, or [] if shapes differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if (Am, An) != (Bm, Bn):
        print("Error, matrix of diferent size")
        return []
    return [[A[i][j] + B[i][j] for j in range(An)] for i in range(Am)]
def restaMatrix(A, B):
    """Return the element-wise difference A - B, or [] if shapes differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if (Am, An) != (Bm, Bn):
        print("Error, matrix of diferent size")
        return []
    return [[A[i][j] - B[i][j] for j in range(An)] for i in range(Am)]
def multMatrix(A, B):
    """Return the matrix product of A and B, or [] if the inner dimensions differ."""
    Am, An = getDimensions(A)
    Bm, Bn = getDimensions(B)
    if An != Bm:
        print("Error multiplicacion # columnas y # renglos no son iguales")
        return []
    C = createMatrix(Am, Bn)
    # Removed unused `counter` variable; accumulate each dot product directly.
    for i in range(Am):
        for j in range(Bn):
            C[i][j] = sum(A[i][k] * B[k][j] for k in range(An))
    return C
def getAdyacente(A, r, c):
    """Return the minor of A: a copy with row r and column c removed."""
    C = []
    for i, row in enumerate(A):
        if i == r:
            continue
        C.append([value for j, value in enumerate(row) if j != c])
    return C
def detMatrix(A):
    """Determinant of square A by cofactor expansion along the first row.

    Prints a message and returns [] when A is not square.
    """
    m, n = getDimensions(A)
    if m != n:
        print("Matriz no es cuadrada")
        return []
    if n == 1:
        return A[0][0]
    if n == 2:
        return A[0][0] * A[1][1] - A[1][0] * A[0][1]
    total = 0
    for j in range(m):
        sign = (-1) ** j
        total += sign * A[0][j] * detMatrix(getAdyacente(A, 0, j))
    return total
def getMatrizTranspuesta(A):
    """Return the transpose of matrix A as a new list-of-lists matrix."""
    return [list(column) for column in zip(*A)]
def getMatrizAdjunta(A):
    """Return the cofactor matrix of square A, or [] if A is not square."""
    m, n = getDimensions(A)
    if m != n:
        print("La matriz no es cuadrada")
        return []
    return [[(-1) ** (i + j) * detMatrix(getAdyacente(A, i, j))
             for j in range(n)] for i in range(m)]
def getMatrizInversa(A):
    """Invert square A via the adjugate formula; [] if A is singular or not square."""
    m, n = getDimensions(A)
    if m != n:
        print("La matriz no es cuadrada")
        return []
    detA = detMatrix(A)
    if detA == 0:
        print("La matriz no tiene inversa")
        return []
    # inverse = adj(A^T) / det(A)
    adjA = getMatrizAdjunta(getMatrizTranspuesta(A))
    scale = 1 / detA
    return [[scale * adjA[i][j] for j in range(n)] for i in range(m)]
def regPolinomial(grado, x, y):
    """Least-squares polynomial fit of degree `grado` via the normal equations.

    Returns the coefficients as a column matrix (list of 1-element rows),
    ordered from the constant term upward.
    """
    size = grado + 1
    # Normal-equation matrix: A[i][j] = sum of x**(i+j)
    A = [[sum(xi ** (i + j) for xi in x) for j in range(size)]
         for i in range(size)]
    # Right-hand side: C[i] = sum of (x**i) * y
    C = [[sum(xi ** i * yi for xi, yi in zip(x, y))] for i in range(size)]
    return multMatrix(getMatrizInversa(A), C)
def evalPolinomio(coef, x):
    """Evaluate the polynomial with coefficients `coef` at every point in x."""
    coef = np.asarray(coef)
    y = []
    for xi in x:
        total = 0
        for power in range(len(coef)):
            total += xi ** power * coef[power]
        y.append(total)
    return y
# Sample data; the maximum fit degree is len(x) - 1
x = [1, 2, 3, 4, 5]
y = [88, 87, 84, 82, 79]
plt.plot(x,y,'rx')
#plt.show()
# Change the first argument to fit a polynomial of a different degree.
coef = regPolinomial(2, x, y)
print(coef)
# Evaluate the fitted polynomial on a dense grid and plot it.
x2 = np.linspace(0,8,100)
y2 = evalPolinomio(coef,x2)
plt.plot(x2,y2)
plt.show()
# Predict the value at x = 7.
yesp = evalPolinomio(coef,[7])
print("peso esperado 7 semanas --> ", yesp[0][0])
| [
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
] | [((3884, 3904), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""rx"""'], {}), "(x, y, 'rx')\n", (3892, 3904), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4040), 'numpy.linspace', 'np.linspace', (['(5)', '(35)', '(100)'], {}), '(5, 35, 100)\n', (4028, 4040), True, 'import numpy as np\n'), ((4067, 4083), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {}), '(x2, y2)\n', (4075, 4083), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4091, 4093), True, 'import matplotlib.pyplot as plt\n'), ((8415, 8435), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""rx"""'], {}), "(x, y, 'rx')\n", (8423, 8435), True, 'import matplotlib.pyplot as plt\n'), ((8548, 8570), 'numpy.linspace', 'np.linspace', (['(0)', '(8)', '(100)'], {}), '(0, 8, 100)\n', (8559, 8570), True, 'import numpy as np\n'), ((8597, 8613), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {}), '(x2, y2)\n', (8605, 8613), True, 'import matplotlib.pyplot as plt\n'), ((8613, 8623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8621, 8623), True, 'import matplotlib.pyplot as plt\n'), ((3670, 3686), 'numpy.asarray', 'np.asarray', (['coef'], {}), '(coef)\n', (3680, 3686), True, 'import numpy as np\n'), ((8201, 8217), 'numpy.asarray', 'np.asarray', (['coef'], {}), '(coef)\n', (8211, 8217), True, 'import numpy as np\n')] |
"""
The environment for grasp exploration. Defines states, actions, rewards
and steps forward based on the reward and toppling transitions.
"""
import numpy as np
class StablePoseEnv(object):
    """Bandit-style environment for grasp exploration over stable poses.

    The object rests in one of ``num_poses`` stable poses.  Pulling an arm
    succeeds with a pose-dependent probability (``arm_means``); success
    re-drops the object (pose resampled from ``pose_probs``), while failure
    may topple it into another pose according to ``topple_matrix``.
    """

    def __init__(self,
                 arm_means,
                 pose_probs,
                 topple_matrix):
        """
        Parameters
        ----------
        arm_means : ndarray, shape (num_poses, num_arms)
            Success probability of each arm in each pose.
        pose_probs : ndarray, shape (num_poses,)
            (Possibly unnormalized) probability of landing in each pose.
        topple_matrix : ndarray, shape (num_poses, num_poses)
            Row-stochastic topple transition probabilities.
        """
        self.arm_means = arm_means
        self.num_poses, self.num_arms = self.arm_means.shape
        # Normalize so the pose distribution sums to 1.
        self.pose_probs = pose_probs / pose_probs.sum()
        # Store each row's cumulative distribution for inverse-CDF sampling.
        self.topple_matrix = topple_matrix.cumsum(axis=1)
        # Draw pose from pose distribution
        self.start_pose = np.random.choice(np.arange(self.num_poses),
                                           p=self.pose_probs)
        self.pose = self.start_pose

    def step(self, arm):
        """Pull `arm`; return the binary reward and update the current pose.

        Raises IndexError if `arm` is not a valid arm index.
        """
        if arm < self.num_arms:
            reward = np.random.random() < self.arm_means[self.pose, arm]
        else:
            raise IndexError("Arm idx out of bounds")
        # Update pose: re-drop on success, topple on failure.
        if reward:
            self.pose = np.random.choice(np.arange(self.num_poses),
                                         p=self.pose_probs)
        else:
            # Inverse-CDF sample from the cumulative topple row.
            self.pose = (np.random.random() < self.topple_matrix[self.pose]).argmax()
        return reward

    def reset(self, start_pose=None):
        """Reset the pose to `start_pose` if given, else resample it.

        Bug fix: the original ignored the `start_pose` argument and always
        reset to the pose drawn at construction time.
        """
        if start_pose is not None:
            self.pose = start_pose
        else:
            self.pose = np.random.choice(np.arange(self.num_poses),
                                         p=self.pose_probs)
| [
"numpy.random.random",
"numpy.arange"
] | [((606, 631), 'numpy.arange', 'np.arange', (['self.num_poses'], {}), '(self.num_poses)\n', (615, 631), True, 'import numpy as np\n'), ((811, 829), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (827, 829), True, 'import numpy as np\n'), ((1042, 1067), 'numpy.arange', 'np.arange', (['self.num_poses'], {}), '(self.num_poses)\n', (1051, 1067), True, 'import numpy as np\n'), ((1429, 1454), 'numpy.arange', 'np.arange', (['self.num_poses'], {}), '(self.num_poses)\n', (1438, 1454), True, 'import numpy as np\n'), ((1168, 1186), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1184, 1186), True, 'import numpy as np\n')] |
import collections
import copy
from logging import getLogger
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from pfrl.agent import AttributeSavingMixin
from pfrl.agent import BatchAgent
from pfrl.utils.batch_states import batch_states
from pfrl.utils.contexts import evaluating
from pfrl.utils.copy_param import synchronize_parameters
from pfrl.replay_buffer import batch_experiences
from pfrl.replay_buffer import ReplayUpdater
def _mean_or_nan(xs):
"""Return its mean a non-empty sequence, numpy.nan for a empty one."""
return np.mean(xs) if xs else np.nan
class DDPG(AttributeSavingMixin, BatchAgent):
    """Deep Deterministic Policy Gradients.
    This can be used as SVG(0) by specifying a Gaussian policy instead of a
    deterministic policy.
    Args:
        policy (torch.nn.Module): Policy
        q_func (torch.nn.Module): Q-function
        actor_optimizer (Optimizer): Optimizer setup with the policy
        critic_optimizer (Optimizer): Optimizer setup with the Q-function
        replay_buffer (ReplayBuffer): Replay buffer
        gamma (float): Discount factor
        explorer (Explorer): Explorer that specifies an exploration strategy.
        gpu (int): GPU device id if not None nor negative.
        replay_start_size (int): if the replay buffer's size is less than
            replay_start_size, skip update
        minibatch_size (int): Minibatch size
        update_interval (int): Model update interval in step
        target_update_interval (int): Target model update interval in step
        phi (callable): Feature extractor applied to observations
        target_update_method (str): 'hard' or 'soft'.
        soft_update_tau (float): Tau of soft target update.
        n_times_update (int): Number of repetition of update
        batch_accumulator (str): 'mean' or 'sum'
        episodic_update (bool): Use full episodes for update if set True
        episodic_update_len (int or None): Subsequences of this length are used
            for update if set int and episodic_update=True
        logger (Logger): Logger used
        batch_states (callable): method which makes a batch of observations.
            default is `pfrl.utils.batch_states.batch_states`
        burnin_action_func (callable or None): If not None, this callable
            object is used to select actions before the model is updated
            one or more times during training.
    """
    saved_attributes = ("model", "target_model", "actor_optimizer", "critic_optimizer")
    def __init__(
        self,
        policy,
        q_func,
        actor_optimizer,
        critic_optimizer,
        replay_buffer,
        gamma,
        explorer,
        gpu=None,
        replay_start_size=50000,
        minibatch_size=32,
        update_interval=1,
        target_update_interval=10000,
        phi=lambda x: x,
        target_update_method="hard",
        soft_update_tau=1e-2,
        n_times_update=1,
        recurrent=False,
        episodic_update_len=None,
        # NOTE: these defaults are evaluated once, at class-definition time.
        logger=getLogger(__name__),
        batch_states=batch_states,
        burnin_action_func=None,
    ):
        # Policy and Q-function are bundled so they are saved/moved together.
        self.model = nn.ModuleList([policy, q_func])
        if gpu is not None and gpu >= 0:
            assert torch.cuda.is_available()
            self.device = torch.device("cuda:{}".format(gpu))
            self.model.to(self.device)
        else:
            self.device = torch.device("cpu")
        self.replay_buffer = replay_buffer
        self.gamma = gamma
        self.explorer = explorer
        self.gpu = gpu
        self.target_update_interval = target_update_interval
        self.phi = phi
        self.target_update_method = target_update_method
        self.soft_update_tau = soft_update_tau
        self.logger = logger
        self.actor_optimizer = actor_optimizer
        self.critic_optimizer = critic_optimizer
        self.recurrent = recurrent
        assert not self.recurrent, "recurrent=True is not yet implemented"
        if self.recurrent:
            update_func = self.update_from_episodes
        else:
            update_func = self.update
        self.replay_updater = ReplayUpdater(
            replay_buffer=replay_buffer,
            update_func=update_func,
            batchsize=minibatch_size,
            episodic_update=recurrent,
            episodic_update_len=episodic_update_len,
            n_times_update=n_times_update,
            replay_start_size=replay_start_size,
            update_interval=update_interval,
        )
        self.batch_states = batch_states
        self.burnin_action_func = burnin_action_func
        self.t = 0
        self.last_state = None
        self.last_action = None
        # Target networks start as an exact copy and are kept in eval mode.
        self.target_model = copy.deepcopy(self.model)
        self.target_model.eval()
        # Rolling statistics reported by get_statistics().
        self.q_record = collections.deque(maxlen=1000)
        self.actor_loss_record = collections.deque(maxlen=100)
        self.critic_loss_record = collections.deque(maxlen=100)
        self.n_updates = 0
        # Aliases for convenience
        self.policy, self.q_function = self.model
        self.target_policy, self.target_q_function = self.target_model
        self.sync_target_network()
    def sync_target_network(self):
        """Synchronize target network with current network."""
        synchronize_parameters(
            src=self.model,
            dst=self.target_model,
            method=self.target_update_method,
            tau=self.soft_update_tau,
        )
    # Update Q-function
    def compute_critic_loss(self, batch):
        """Compute loss for critic."""
        batch_next_state = batch["next_state"]
        batch_rewards = batch["reward"]
        batch_terminal = batch["is_state_terminal"]
        batch_state = batch["state"]
        batch_actions = batch["action"]
        batchsize = len(batch_rewards)
        # TD target is computed without gradients from the target networks.
        with torch.no_grad():
            assert not self.recurrent
            next_actions = self.target_policy(batch_next_state).sample()
            next_q = self.target_q_function((batch_next_state, next_actions))
            target_q = batch_rewards + self.gamma * (
                1.0 - batch_terminal
            ) * next_q.reshape((batchsize,))
        predict_q = self.q_function((batch_state, batch_actions)).reshape((batchsize,))
        loss = F.mse_loss(target_q, predict_q)
        # Update stats
        self.critic_loss_record.append(float(loss.detach().cpu().numpy()))
        return loss
    def compute_actor_loss(self, batch):
        """Compute loss for actor."""
        batch_state = batch["state"]
        # rsample() keeps the action differentiable w.r.t. policy parameters.
        onpolicy_actions = self.policy(batch_state).rsample()
        q = self.q_function((batch_state, onpolicy_actions))
        # Maximize Q by minimizing its negation.
        loss = -q.mean()
        # Update stats
        self.q_record.extend(q.detach().cpu().numpy())
        self.actor_loss_record.append(float(loss.detach().cpu().numpy()))
        return loss
    def update(self, experiences, errors_out=None):
        """Update the model from experiences"""
        batch = batch_experiences(experiences, self.device, self.phi, self.gamma)
        self.critic_optimizer.zero_grad()
        self.compute_critic_loss(batch).backward()
        self.critic_optimizer.step()
        self.actor_optimizer.zero_grad()
        self.compute_actor_loss(batch).backward()
        self.actor_optimizer.step()
        self.n_updates += 1
    def update_from_episodes(self, episodes, errors_out=None):
        raise NotImplementedError
        # NOTE: everything below is unreachable dead code, kept as a sketch
        # of the recurrent update (it uses an outdated batch_experiences
        # signature and untested model APIs).
        # Sort episodes desc by their lengths
        sorted_episodes = list(reversed(sorted(episodes, key=len)))
        max_epi_len = len(sorted_episodes[0])
        # Precompute all the input batches
        batches = []
        for i in range(max_epi_len):
            transitions = []
            for ep in sorted_episodes:
                if len(ep) <= i:
                    break
                transitions.append([ep[i]])
            batch = batch_experiences(
                transitions, xp=self.device, phi=self.phi, gamma=self.gamma
            )
            batches.append(batch)
        with self.model.state_reset(), self.target_model.state_reset():
            # Since the target model is evaluated one-step ahead,
            # its internal states need to be updated
            self.target_q_function.update_state(
                batches[0]["state"], batches[0]["action"]
            )
            self.target_policy(batches[0]["state"])
            # Update critic through time
            critic_loss = 0
            for batch in batches:
                critic_loss += self.compute_critic_loss(batch)
            self.critic_optimizer.update(lambda: critic_loss / max_epi_len)
        with self.model.state_reset():
            # Update actor through time
            actor_loss = 0
            for batch in batches:
                actor_loss += self.compute_actor_loss(batch)
            self.actor_optimizer.update(lambda: actor_loss / max_epi_len)
    def batch_act(self, batch_obs):
        if self.training:
            return self._batch_act_train(batch_obs)
        else:
            return self._batch_act_eval(batch_obs)
    def batch_observe(self, batch_obs, batch_reward, batch_done, batch_reset):
        if self.training:
            self._batch_observe_train(batch_obs, batch_reward, batch_done, batch_reset)
    def _batch_select_greedy_actions(self, batch_obs):
        # Greedy = the policy's own sample, evaluated without gradients.
        with torch.no_grad(), evaluating(self.policy):
            batch_xs = self.batch_states(batch_obs, self.device, self.phi)
            batch_action = self.policy(batch_xs).sample()
            return batch_action.cpu().numpy()
    def _batch_act_eval(self, batch_obs):
        assert not self.training
        return self._batch_select_greedy_actions(batch_obs)
    def _batch_act_train(self, batch_obs):
        assert self.training
        # Before the first parameter update, optionally act via the burn-in
        # function instead of the (untrained) policy.
        if self.burnin_action_func is not None and self.n_updates == 0:
            batch_action = [self.burnin_action_func() for _ in range(len(batch_obs))]
        else:
            batch_greedy_action = self._batch_select_greedy_actions(batch_obs)
            batch_action = [
                self.explorer.select_action(self.t, lambda: batch_greedy_action[i])
                for i in range(len(batch_greedy_action))
            ]
        self.batch_last_obs = list(batch_obs)
        self.batch_last_action = list(batch_action)
        return batch_action
    def _batch_observe_train(self, batch_obs, batch_reward, batch_done, batch_reset):
        assert self.training
        for i in range(len(batch_obs)):
            self.t += 1
            # Update the target network
            if self.t % self.target_update_interval == 0:
                self.sync_target_network()
            if self.batch_last_obs[i] is not None:
                assert self.batch_last_action[i] is not None
                # Add a transition to the replay buffer
                self.replay_buffer.append(
                    state=self.batch_last_obs[i],
                    action=self.batch_last_action[i],
                    reward=batch_reward[i],
                    next_state=batch_obs[i],
                    next_action=None,
                    is_state_terminal=batch_done[i],
                    env_id=i,
                )
                if batch_reset[i] or batch_done[i]:
                    self.batch_last_obs[i] = None
                    self.batch_last_action[i] = None
                    self.replay_buffer.stop_current_episode(env_id=i)
        self.replay_updater.update_if_necessary(self.t)
    def get_statistics(self):
        return [
            ("average_q", _mean_or_nan(self.q_record)),
            ("average_actor_loss", _mean_or_nan(self.actor_loss_record)),
            ("average_critic_loss", _mean_or_nan(self.critic_loss_record)),
            ("n_updates", self.n_updates),
        ]
| [
"copy.deepcopy",
"pfrl.utils.copy_param.synchronize_parameters",
"torch.nn.ModuleList",
"collections.deque",
"torch.nn.functional.mse_loss",
"pfrl.replay_buffer.batch_experiences",
"pfrl.utils.contexts.evaluating",
"pfrl.replay_buffer.ReplayUpdater",
"numpy.mean",
"torch.cuda.is_available",
"tor... | [((584, 595), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (591, 595), True, 'import numpy as np\n'), ((3051, 3070), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (3060, 3070), False, 'from logging import getLogger\n'), ((3169, 3200), 'torch.nn.ModuleList', 'nn.ModuleList', (['[policy, q_func]'], {}), '([policy, q_func])\n', (3182, 3200), False, 'from torch import nn\n'), ((4159, 4433), 'pfrl.replay_buffer.ReplayUpdater', 'ReplayUpdater', ([], {'replay_buffer': 'replay_buffer', 'update_func': 'update_func', 'batchsize': 'minibatch_size', 'episodic_update': 'recurrent', 'episodic_update_len': 'episodic_update_len', 'n_times_update': 'n_times_update', 'replay_start_size': 'replay_start_size', 'update_interval': 'update_interval'}), '(replay_buffer=replay_buffer, update_func=update_func,\n batchsize=minibatch_size, episodic_update=recurrent,\n episodic_update_len=episodic_update_len, n_times_update=n_times_update,\n replay_start_size=replay_start_size, update_interval=update_interval)\n', (4172, 4433), False, 'from pfrl.replay_buffer import ReplayUpdater\n'), ((4734, 4759), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (4747, 4759), False, 'import copy\n'), ((4817, 4847), 'collections.deque', 'collections.deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (4834, 4847), False, 'import collections\n'), ((4881, 4910), 'collections.deque', 'collections.deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4898, 4910), False, 'import collections\n'), ((4945, 4974), 'collections.deque', 'collections.deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4962, 4974), False, 'import collections\n'), ((5301, 5427), 'pfrl.utils.copy_param.synchronize_parameters', 'synchronize_parameters', ([], {'src': 'self.model', 'dst': 'self.target_model', 'method': 'self.target_update_method', 'tau': 'self.soft_update_tau'}), '(src=self.model, dst=self.target_model, method=self.\n target_update_method, 
tau=self.soft_update_tau)\n', (5323, 5427), False, 'from pfrl.utils.copy_param import synchronize_parameters\n'), ((6305, 6336), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['target_q', 'predict_q'], {}), '(target_q, predict_q)\n', (6315, 6336), True, 'from torch.nn import functional as F\n'), ((7015, 7080), 'pfrl.replay_buffer.batch_experiences', 'batch_experiences', (['experiences', 'self.device', 'self.phi', 'self.gamma'], {}), '(experiences, self.device, self.phi, self.gamma)\n', (7032, 7080), False, 'from pfrl.replay_buffer import batch_experiences\n'), ((3261, 3286), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3284, 3286), False, 'import torch\n'), ((3428, 3447), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3440, 3447), False, 'import torch\n'), ((5858, 5873), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5871, 5873), False, 'import torch\n'), ((7921, 7999), 'pfrl.replay_buffer.batch_experiences', 'batch_experiences', (['transitions'], {'xp': 'self.device', 'phi': 'self.phi', 'gamma': 'self.gamma'}), '(transitions, xp=self.device, phi=self.phi, gamma=self.gamma)\n', (7938, 7999), False, 'from pfrl.replay_buffer import batch_experiences\n'), ((9393, 9408), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9406, 9408), False, 'import torch\n'), ((9410, 9433), 'pfrl.utils.contexts.evaluating', 'evaluating', (['self.policy'], {}), '(self.policy)\n', (9420, 9433), False, 'from pfrl.utils.contexts import evaluating\n')] |
""" Module for calculations related to FRB experiments
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from pkg_resources import resource_filename
from astropy import units as u
from frb import utils
class Experiment(object):
    """An FRB observing experiment whose characteristics are read from YAML.

    The instrument parameters used by signal_to_noise() -- gain 'G',
    receiver temperature 'Trec', bandwidth 'Dnu', channel count 'Channels'
    and number of polarizations 'np' -- are loaded into ``self.data``.
    """
    def __init__(self, name):
        """
        Parameters
        ----------
        name : str
            See YAML files in data/experiment
        """
        self.name = name
        # Load the experiment's characteristics immediately.
        self.setup()
    def setup(self):
        """ Load the characteristics of the experiment
        """
        # The YAML file is looked up by the lower-cased experiment name.
        self.data_file=resource_filename('frb', 'data/experiments/{:s}.yaml'.format(
            self.name.lower()))
        self.data = utils.loadyaml(self.data_file)
    def signal_to_noise(self, frb, beta=1., T_Sky=None, t_scatt=None):
        """
        Follows Cordes & McLaughlin 2003
        Parameters
        ----------
        frb : FRB
        beta : float, optional
            Factor for digitization losses
        T_Sky : Quantity, optional
            Sky temperature; derived from frb.nu_c when not given.
        t_scatt : Quantity, optional
            Scattering time; taken from frb.t_scatt (0 if absent) when
            not given.
        Returns
        -------
        s2n : float
        """
        # TODO -- Add t_samp to experiment data
        t_samp = 0 * u.s
        # t_scatt
        if t_scatt is None:
            try:
                t_scatt = frb.t_scatt
            except AttributeError:
                t_scatt = 0.*u.s
        # t_chan (Lorimer & Kramer 2005)
        # Intra-channel dispersion smearing time for the given DM.
        t_chan = 8.3e-6*u.s * (self.data['Dnu'].to('MHz').value/self.data['Channels']) * (
            frb.nu_c.to('GHz').value)**(-3) * frb.DM.to('pc/cm**3').value
        # Wb: effective (broadened) pulse width -- quadrature sum of
        # intrinsic width, channel smearing, sampling and scattering times.
        Wb = np.sqrt(frb.Wi**2 + t_chan**2 + t_samp**2 + t_scatt**2)
        # T_Sky
        if T_Sky is None:
            T_Sky = utils.Tsky(frb.nu_c)
        # Radiometer-equation denominator term.
        sqrt_term = np.sqrt(Wb/(self.data['np']*self.data['Dnu']))
        # Here we go
        s2n = frb.S * self.data['G'] * frb.Wi / beta / (
            self.data['Trec'] + T_Sky) / sqrt_term
        # Return
        return s2n.decompose()
    def __repr__(self):
        txt = '<{:s}: name={:s} data={}'.format(
            self.__class__.__name__, self.name, self.data)
        # Finish
        txt = txt + '>'
        return (txt)
| [
"frb.utils.Tsky",
"numpy.sqrt",
"frb.utils.loadyaml"
] | [((743, 773), 'frb.utils.loadyaml', 'utils.loadyaml', (['self.data_file'], {}), '(self.data_file)\n', (757, 773), False, 'from frb import utils\n'), ((1633, 1696), 'numpy.sqrt', 'np.sqrt', (['(frb.Wi ** 2 + t_chan ** 2 + t_samp ** 2 + t_scatt ** 2)'], {}), '(frb.Wi ** 2 + t_chan ** 2 + t_samp ** 2 + t_scatt ** 2)\n', (1640, 1696), True, 'import numpy as np\n'), ((1802, 1852), 'numpy.sqrt', 'np.sqrt', (["(Wb / (self.data['np'] * self.data['Dnu']))"], {}), "(Wb / (self.data['np'] * self.data['Dnu']))\n", (1809, 1852), True, 'import numpy as np\n'), ((1751, 1771), 'frb.utils.Tsky', 'utils.Tsky', (['frb.nu_c'], {}), '(frb.nu_c)\n', (1761, 1771), False, 'from frb import utils\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.optimize import curve_fit
from pylab import cm as plcm # colors for plotting
import pickle # for storing calculations and reading again while editing plot details
import math
import random
from scipy.stats import sem
plt.rcParams.update({'font.size': 14})
# ------------------------- Set global values
n = 10  # grid has (n+1) x (n+1) nodes
tolerance = 0.000001  # relative per-node convergence threshold for relaxation
Nlist = [10, 50, 100, 500, 1000, 2500, 5000, 7500, 10000]  # walker sample sizes
x = 5  # grid point at which the random-walk potential is estimated
y = 6
V = 0  # global accumulator of boundary potentials collected by walkers
# ------------------------- Define functions
def initiateVMatrices():
    """Initialise the potential grids for the relaxation solver.

    Boundary conditions: V = 10 on the top and bottom edges, V = 5 on the
    left and right edges (corners stay 0).  Interior points get an initial
    guess of 7.5, the mean of the two boundary values.

    Sets the globals:
        v    -- (n+1, n+1) working potential grid
        vNew -- scratch copy of v used by relax()

    Returns
    -------
    vNew : numpy.ndarray
    """
    global v, vNew
    # Initialize the grid to 0 (index i: row, j: column)
    v = np.zeros((n+1, n+1))
    # Boundary conditions (vectorised; corners untouched, as before)
    v[0, 1:n] = 10
    v[n, 1:n] = 10
    v[1:n, 0] = 5
    v[1:n, n] = 5
    # Initial guess for the interior
    v[1:n, 1:n] = 7.5
    vNew = np.copy(v)
    return vNew
def relax():
    """Perform one Jacobi relaxation sweep.

    Each interior node of ``vNew`` becomes the average of its four nearest
    neighbours in ``v``; the updated values are then copied back into ``v``.
    """
    global v, vNew, n
    # Compute the new interior values from the *old* grid only
    for i in range(1, n):
        for j in range(1, n):
            vNew[i, j] = 0.25 * (v[i-1][j] + v[i+1][j] + v[i][j-1] + v[i][j+1])
    # Commit the sweep back into the working grid
    for i in range(1, n):
        for j in range(1, n):
            v[i, j] = vNew[i, j]
def randomwalk(x, y):
    """Run one random walker from grid point (x, y) until it hits a boundary.

    The walker takes up to 100 unit steps, each in a uniformly random
    cardinal direction.  If it reaches a boundary node (potential 5 or 10),
    that boundary potential is added to the global accumulator ``V``;
    a walk that never reaches the boundary contributes nothing.

    Parameters
    ----------
    x, y : int
        Starting grid indices of the walker.
    """
    global v, V
    for _ in range(100):
        # BUG FIX: randint(0, 4) is inclusive, giving 5 outcomes for 4
        # directions -- 1 in 5 steps was a silent no-op that wasted the
        # 100-step budget.  randint(0, 3) picks each direction with equal
        # probability and always moves the walker.
        val = random.randint(0, 3)
        if val == 0:
            x += 1
        elif val == 1:
            y += 1
        elif val == 2:
            x -= 1
        else:
            y -= 1
        # Stop as soon as a boundary node is reached
        if v[x, y] == 5 or v[x, y] == 10:
            V += v[x, y]
            break
def calculate1():
    """Relax the grid to convergence (first boundary conditions).

    Assumes initiateVMatrices() has been called.  Repeatedly applies
    relax() until the relative change of *every* interior node between
    successive sweeps is below the global ``tolerance``.  Prints the
    number of sweeps and stores the converged grid in the global ``v1``.
    """
    global v, vNew, n, v1
    step = 0
    toleranceAcquired = False
    while not toleranceAcquired:
        step += 1
        vOld = np.copy(v)
        relax()
        # Assume convergence; falsify on the first offending node and
        # stop scanning immediately (the original kept checking every
        # remaining node for no benefit).
        toleranceAcquired = True
        for i in range(1, n):
            for j in range(1, n):
                if np.abs((v[i, j] - vOld[i, j]) / vOld[i, j]) > tolerance:
                    toleranceAcquired = False
                    break
            if not toleranceAcquired:
                break
    print('Tolerance was met after', step, 'steps.')
    v1 = np.copy(v)
# ----------------------- Plot
def calculate2():
    """Monte-Carlo (random-walk) estimate of the potential at (x, y).

    For each sample size in ``Nlist``, averages the boundary potential
    collected by that many walkers, then plots (1) the estimates against
    the relaxation solution and a large-N reference, and (2) the standard
    error of the mean (SEM) of 10 repeated estimates per sample size.
    Assumes calculate1() has already converged the global grid ``v``.
    """
    def walk(N):
        # Average boundary potential over N walkers; each randomwalk()
        # call adds its result to the global accumulator V.
        global V, v
        for i in range(N):
            randomwalk(x,y)
        V1 = V
        V = 0  # reset the accumulator for the next batch
        return V1/N
    Vlist = []
    semlist = []  # NOTE(review): never used below -- candidate for removal
    errorlist = []
    # Horizontal reference lines, repeated to match Nlist's length
    exactlist = [v[x,y]]*len(Nlist)
    convergencelist = [walk(Nlist[-1])]*len(Nlist)
    for N in Nlist:
        Vlist.append(walk(N))
        # SEM from 10 independent repeated estimates at this sample size
        sample = []
        for _ in range(10):
            sample.append(walk(N))
        errorlist.append(sem(sample))
    # --- Convergence plot
    plt.figure()
    plt.title('Convergence for (x,y) = ('
              + str(x) + ', ' + str(y) + ')',
              fontsize = 18)
    plt.plot(Nlist, Vlist, marker = '.',
             color = 'm', markersize = 10)
    plt.plot(Nlist, exactlist, linestyle = '--',
             color = 'r', markersize = 10)
    plt.plot(Nlist, convergencelist, linestyle = '--',
             color = 'b', markersize = 10)
    plt.legend(['Random-walk solution',
                'Relaxation solution: V ≈ ' + str(round(v[x,y],1)),
                'Convergence: V ≈ ' + str(round(walk(Nlist[-1]),1))])
    plt.xlabel('Walkers (n)', fontsize = 18)
    plt.ylabel('Potential (V)', fontsize = 18)
    plt.show()
    # --- SEM plot
    plt.figure()
    plt.title('Standard error of the mean for (x,y) = ('
              + str(x) + ', ' + str(y) + ')',
              fontsize = 18)
    plt.plot(Nlist, errorlist, marker = '.',
             color = 'tab:green', markersize = 10)
    plt.xlabel('Walkers (n)', fontsize = 18)
    plt.ylabel('SEM', fontsize = 18)
    plt.show()
# Run the full experiment: build the grid, relax it to the "exact"
# solution, then compare with the random-walk Monte Carlo estimate.
initiateVMatrices()
calculate1()
calculate2()
print(v[x,y])
| [
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.plot",
"numpy.copy",
"numpy.abs",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"scipy.stats.sem",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((349, 387), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (368, 387), True, 'import matplotlib.pyplot as plt\n'), ((954, 978), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (962, 978), True, 'import numpy as np\n'), ((1271, 1281), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (1278, 1281), True, 'import numpy as np\n'), ((2799, 2809), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (2806, 2809), True, 'import numpy as np\n'), ((3337, 3349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3472, 3532), 'matplotlib.pyplot.plot', 'plt.plot', (['Nlist', 'Vlist'], {'marker': '"""."""', 'color': '"""m"""', 'markersize': '(10)'}), "(Nlist, Vlist, marker='.', color='m', markersize=10)\n", (3480, 3532), True, 'import matplotlib.pyplot as plt\n'), ((3556, 3624), 'matplotlib.pyplot.plot', 'plt.plot', (['Nlist', 'exactlist'], {'linestyle': '"""--"""', 'color': '"""r"""', 'markersize': '(10)'}), "(Nlist, exactlist, linestyle='--', color='r', markersize=10)\n", (3564, 3624), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3723), 'matplotlib.pyplot.plot', 'plt.plot', (['Nlist', 'convergencelist'], {'linestyle': '"""--"""', 'color': '"""b"""', 'markersize': '(10)'}), "(Nlist, convergencelist, linestyle='--', color='b', markersize=10)\n", (3657, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3927, 3965), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Walkers (n)"""'], {'fontsize': '(18)'}), "('Walkers (n)', fontsize=18)\n", (3937, 3965), True, 'import matplotlib.pyplot as plt\n'), ((3972, 4012), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Potential (V)"""'], {'fontsize': '(18)'}), "('Potential (V)', fontsize=18)\n", (3982, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4019, 4029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4027, 4029), True, 'import matplotlib.pyplot as 
plt\n'), ((4035, 4047), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4045, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4185, 4257), 'matplotlib.pyplot.plot', 'plt.plot', (['Nlist', 'errorlist'], {'marker': '"""."""', 'color': '"""tab:green"""', 'markersize': '(10)'}), "(Nlist, errorlist, marker='.', color='tab:green', markersize=10)\n", (4193, 4257), True, 'import matplotlib.pyplot as plt\n'), ((4281, 4319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Walkers (n)"""'], {'fontsize': '(18)'}), "('Walkers (n)', fontsize=18)\n", (4291, 4319), True, 'import matplotlib.pyplot as plt\n'), ((4326, 4356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SEM"""'], {'fontsize': '(18)'}), "('SEM', fontsize=18)\n", (4336, 4356), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4371, 4373), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1727), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1721, 1727), False, 'import random\n'), ((2420, 2430), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (2427, 2430), True, 'import numpy as np\n'), ((3310, 3321), 'scipy.stats.sem', 'sem', (['sample'], {}), '(sample)\n', (3313, 3321), False, 'from scipy.stats import sem\n'), ((2638, 2681), 'numpy.abs', 'np.abs', (['((v[i, j] - vOld[i, j]) / vOld[i, j])'], {}), '((v[i, j] - vOld[i, j]) / vOld[i, j])\n', (2644, 2681), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interpolate
from scipy.optimize import curve_fit
import sys
class AsymmetricData:
    """A value with asymmetric (unequal negative/positive) uncertainties.

    The distribution is a Gaussian whose width varies linearly with the
    distance from the mode ``mu`` (see ``pdf``), a common parameterisation
    of asymmetric errors.  An instance can be built either from
    (mu, sigma_n, sigma_p) via ``creation_type='by_constructor'`` -- in
    which case a random sample is drawn by inverse-CDF sampling -- or from
    an existing sample via ``creation_type='by_operation'``, in which case
    the parameters are recovered by fitting a histogram of ``data``.
    Arithmetic operators (+, -, *, /, **) propagate uncertainties by
    combining the Monte Carlo samples element-wise and refitting.
    """
    def __init__(self, mu=10.0, sigma_n=1.0, sigma_p=1.0, N=10000, confidence=1.0, creation_type='by_constructor', data=[]):
        """
        :param mu: Mode of the distribution, the most probable value
        :param sigma_n: Negative sigma
        :param sigma_p: Positive sigma
        :param N: Sample size
        :param confidence: Sigma level (1.0, 2.0 or 3.0) at which sigma_n/sigma_p are quoted
        :param creation_type: 'by_constructor' or 'by_operation' (fit parameters from `data`)
        :param data: Existing sample, used when creation_type == 'by_operation'
        """
        # NOTE(review): `data=[]` is a mutable default argument; it is only
        # read (never mutated) here, but `data=None` would be safer.
        self.mu = mu
        self.confidence = confidence  # sigma
        self.sigma_n, self.sigma_p = sigma_n, sigma_p
        self.sigma2_n, self.sigma2_p = None, None
        self.sigma3_n, self.sigma3_p = None, None
        self.N = int(N)
        self.creation_type = creation_type
        # any(data) is False for an empty sequence (and also for all-zero
        # data -- presumably samples are never identically zero; TODO confirm)
        if not any(data):
            self.data = np.asarray([])
        else:
            self.data = np.asarray(data)
            #self.creation_type = 'by_operation'
        self.bin_value = 50  # default number of histogram bins for plotting
        if str(self.creation_type) == 'by_constructor':
            # Translate the quoted uncertainties into all three sigma levels.
            if confidence == 1.0:
                self.sigma_n, self.sigma_p = sigma_n, sigma_p
                self.sigma2_n, self.sigma2_p = self.convert_from_1_sigma(self.mu, sigma_n, sigma_p, 2.0)
                self.sigma3_n, self.sigma3_p = self.convert_from_1_sigma(self.mu, sigma_n, sigma_p, 3.0)
            elif confidence == 2.0:
                self.sigma_n, self.sigma_p = self.convert_to_1_sigma(self.mu, sigma_n, sigma_p, 2.0)
                self.sigma2_n, self.sigma2_p = sigma_n, sigma_p
                self.sigma3_n, self.sigma3_p = self.convert_from_1_sigma(self.mu, self.sigma_n, self.sigma_p, 3.0)
            elif confidence == 3.0:
                self.sigma_n, self.sigma_p = self.convert_to_1_sigma(self.mu, sigma_n, sigma_p, 3.0)
                self.sigma2_n, self.sigma2_p = self.convert_from_1_sigma(self.mu, self.sigma_n, self.sigma_p, 2.0)
                self.sigma3_n, self.sigma3_p = sigma_n, sigma_p
            else:
                raise ValueError
            # Evaluate the distribution on [mu - 5*sigma_n, mu + 5*sigma_p]
            self.x_limits = [self.mu - 5.0*self.sigma_n, self.mu + 5.0*self.sigma_p]
            self.x_values = np.linspace(self.x_limits[0], self.x_limits[1], self.N)
            # norm must exist before calculate_norm() evaluates the pdf
            self.norm = 1.0
            self.norm = self.calculate_norm()
            self.pdf_values = np.asarray(self.pdf(self.x_values))
            self.cdf_values = self.calculate_cdf_values()
            self.cdf = self.calculate_cdf()
            self.inverse_cdf = self.calculate_inverse_cdf()
            self.log_likelihood_values = self.log_likelihood(self.x_values)
            self.generate()
        elif str(self.creation_type) == 'by_operation':
            # Recover (mu, sigma_n, sigma_p) by fitting the supplied sample.
            self.N = self.data.size
            self.fit()
            #self.sigma_n, self.sigma_p = self.estimate()
            self.sigma2_n, self.sigma2_p = self.convert_from_1_sigma(self.mu, self.sigma_n, self.sigma_p, 2.0)
            self.sigma3_n, self.sigma3_p = self.convert_from_1_sigma(self.mu, self.sigma_n, self.sigma_p, 3.0)
            self.x_limits = [self.mu - 5.0*self.sigma_n, self.mu + 5.0*self.sigma_p]
            self.x_values = np.linspace(self.x_limits[0], self.x_limits[1], self.N)
            self.norm = 1.0
            self.norm = self.calculate_norm()
            self.pdf_values = np.asarray(self.pdf(self.x_values))
            self.cdf_values = self.calculate_cdf_values()
            self.cdf = self.calculate_cdf()
            self.inverse_cdf = self.calculate_inverse_cdf()
    def __str__(self):
        """Human-readable value with its 1-, 2- and 3-sigma intervals."""
        output = f"Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)"
        output2 = f"Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)"
        output3 = f"Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)"
        result = "{}\n{}\n{}".format(output, output2, output3)
        return result
    @classmethod
    def new(cls, mu=10.0, sigma_n=1.0, sigma_p=1.0, N=10000):
        """Alternate constructor with 1-sigma defaults."""
        return cls(mu, sigma_n, sigma_p, N)
    def integrate(self):
        """Trapezoid-style numerical integral of the (unnormalised) pdf over x_limits."""
        delta_x = self.x_limits[1] - self.x_limits[0]
        c = delta_x / (self.N - 1)
        # x_values = np.linspace(self.x_limits[0], self.x_limits[1], self.N, dtype=float)
        area = np.sum(c * self.pdf(self.x_values))
        return area
    def calculate_norm(self):
        """Normalisation constant so that the pdf integrates to 1 over x_limits."""
        area = self.integrate()
        norm = 1/area
        return norm
    def pdf(self, x):
        """Variable-width Gaussian pdf: sigma interpolates between sigma_n and sigma_p."""
        par_1 = (2.0 * self.sigma_p * self.sigma_n) / (self.sigma_p + self.sigma_n)
        par_2 = (self.sigma_p - self.sigma_n) / (self.sigma_p + self.sigma_n)
        par_3 = (-1.0/2.0) * ((self.mu - x)/(par_1 + par_2*(x - self.mu)))**2.0
        par_4 = self.norm / (2.0 * np.pi)**0.5
        value = par_4 * np.exp(par_3)
        return value
    def log_likelihood(self, x):
        """Log of the pdf (up to the normalisation constant)."""
        par_1 = (2.0 * self.sigma_p * self.sigma_n) / (self.sigma_p + self.sigma_n)
        par_2 = (self.sigma_p - self.sigma_n) / (self.sigma_p + self.sigma_n)
        value = (-1.0/2.0) * ((self.mu - x)/(par_1 + par_2*(x - self.mu)))**2.0
        return value
    def calculate_cdf_values(self):
        """Cumulative integral of pdf_values on the x grid (running Riemann sum)."""
        delta_x = self.x_limits[1] - self.x_limits[0]
        c = delta_x / (self.N - 1)
        area = 0.0
        cdf_values = np.asarray([])
        for i in range(self.N):
            area += self.pdf_values[i] * c
            cdf_values = np.append(cdf_values, area)
        return cdf_values
    def calculate_cdf(self):
        """Interpolated CDF as a callable x -> probability."""
        cdf = interpolate.interp1d(self.x_values, self.cdf_values, kind='nearest')
        return cdf
    def calculate_inverse_cdf(self):
        """Interpolated inverse CDF (quantile function) as a callable probability -> x."""
        inverse_cdf = interpolate.interp1d(self.cdf_values, self.x_values, kind='nearest')
        return inverse_cdf
    def generate(self):
        """Draw N samples by inverse-transform sampling and store them in self.data."""
        rnd_prob = np.random.uniform(0, 1, self.N)
        self.data = self.inverse_cdf(rnd_prob)
    @staticmethod
    def fit_func(x, norm, mu, sigma_n, sigma_p):
        """Same variable-width Gaussian as pdf(), parameterised for curve_fit."""
        par_1 = (2.0 * sigma_p * sigma_n) / (sigma_p + sigma_n)
        par_2 = (sigma_p - sigma_n) / (sigma_p + sigma_n)
        par_3 = (-1.0 / 2.0) * ((mu - x) / (par_1 + par_2 * (x - mu))) ** 2.0
        par_4 = norm / (2.0 * np.pi) ** 0.5
        value = par_4 * np.exp(par_3)
        return value
    def fit(self, expected_values=None):
        """Fit (norm, mu, sigma_n, sigma_p) to a histogram of self.data.

        :param expected_values: optional initial guess (norm, mu, sigma_n, sigma_p)
        """
        # Histogram the sample (~250 points per bin) to get the curve to fit
        y, x, _ = plt.hist(self.data, bins=int(self.N/250))
        plt.clf()
        x = (x[1:] + x[:-1]) / 2  # bin centres, so that len(x)==len(y)
        # Mode estimate: the centre of the tallest bin
        mod = None
        max_y = max(y)
        for i in range(len(y)):
            if y[i] == max_y:
                mod = x[i]
        #print("mod", mod)
        #print(len(self.data))
        min_data = min(self.data)
        max_data = max(self.data)
        norm = 1000.0
        if not expected_values:
            expected_values = norm, mod, (mod - min_data) * 0.1, (max_data - mod) * 0.1
        expected = (expected_values[0], expected_values[1], expected_values[2], expected_values[3])
        params, cov = curve_fit(self.fit_func, x, y, expected, method='trf')
        self.norm = params[0]
        self.mu = params[1]
        #print("params", params)
        # Keep the sigmas positive; swap if the fit flipped their signs
        if params[2] > 0.0:
            self.sigma_n = (params[2])
            self.sigma_p = (params[3])
        else:
            self.sigma_n = (params[3])
            self.sigma_p = (params[2])
    def estimate(self, confidence=1.0):
        """Scan the log-likelihood outward from mu for the confidence interval.

        Steps right then left from mu until |ln L| exceeds the target drop
        (0.5 * confidence) and returns [negative_offset, positive_offset].
        """
        target_likelihood = -0.5 * float(confidence)
        delta_steps = 1e-5
        current_value = self.mu
        # NOTE(review): the step size scales with |mu - sigma|, not with
        # sigma itself -- looks suspicious; verify this is intended.
        delta = abs(self.mu - self.sigma_p) * delta_steps
        current_likelihood = self.log_likelihood(current_value)
        while abs(current_likelihood) < abs(target_likelihood):
            current_value += delta
            current_likelihood = self.log_likelihood(current_value)
        positive_limit = current_value
        current_value = self.mu
        delta = abs(self.mu - self.sigma_n) * delta_steps
        current_likelihood = self.log_likelihood(current_value)
        while abs(current_likelihood) < abs(target_likelihood):
            current_value -= delta
            current_likelihood = self.log_likelihood(current_value)
        negative_limit = current_value
        print("interval found")
        return [self.mu - negative_limit, positive_limit - self.mu]
    def plot_pdf(self, show=True, save=False):
        """Plot the pdf; optionally save to plot_pdf.png."""
        plt.clf()
        plt.plot(self.x_values, self.pdf_values, color="blue")
        plt.xlabel("x")
        plt.ylabel("prob")
        if save:
            plt.savefig("plot_pdf.png", dpi=300)
        if show:
            plt.show()
    def plot_log_likelihood(self, show=True, save=False):
        """Plot ln L with dashed lines at the 1/2/3-sigma drops (-0.5, -2.0, -4.5)."""
        plt.clf()
        plt.plot(self.x_values, self.log_likelihood(self.x_values))
        plt.ylim([-5, 1.5])
        plt.xlabel("x")
        plt.ylabel("ln L")
        plt.axhline(y=-0.5, color="black", ls="--", lw="2.0", label=f"Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)")
        plt.axhline(y=-2.0, color="black", ls="--", lw="1.5", label=f"Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)")
        plt.axhline(y=-4.5, color="black", ls="--", lw="1.0", label=f"Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)")
        plt.legend()
        if save:
            plt.savefig("plot_log_likelihood.png", dpi=300)
        if show:
            plt.show()
    def plot_cdf(self, show=True, save=False):
        """Plot the interpolated CDF."""
        plt.plot(self.x_values, self.cdf(self.x_values))
        if save:
            plt.savefig("plot_cdf.png", dpi=300)
        if show:
            plt.show()
    def plot_data(self, bins=None, show=True, save=False):
        """Histogram the Monte Carlo sample."""
        if not bins:
            bins = self.bin_value
        plt.clf()
        plt.hist(self.data, bins=bins, density=True, color="green", alpha=0.7)
        if save:
            plt.savefig("plot_data.png", dpi=300)
        if show:
            plt.show()
    def plot_data_and_pdf(self, bins=None, show=True, save=False):
        """Overlay the sample histogram, the pdf, and the 1/2/3-sigma limits."""
        if not bins:
            bins = self.bin_value
        plt.clf()
        plt.hist(self.data, bins=bins, density=True, color="green", alpha=0.6)
        plt.plot(self.x_values, self.pdf_values, color="blue")
        plt.xlabel("x")
        plt.ylabel("Prob.")
        plt.axvline(x=self.mu - self.sigma_n, color="black", ls="--", lw="1.5",
                    label=f"Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)")
        plt.axvline(x=self.mu + self.sigma_p, color="black", ls="--", lw="1.5")
        plt.axvline(x=self.mu - self.sigma2_n, color="black", ls="--", lw="1.0",
                    label=f"Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)")
        plt.axvline(x=self.mu + self.sigma2_p, color="black", ls="--", lw="1.0")
        plt.axvline(x=self.mu - self.sigma3_n, color="black", ls="--", lw="0.5",
                    label=f"Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)")
        plt.axvline(x=self.mu + self.sigma3_p, color="black", ls="--", lw="0.5")
        plt.legend()
        if save:
            plt.savefig("plot_data_and_pdf.png", dpi=300)
        if show:
            plt.show()
    def plot_pdf_cdf(self, show=True, save=False):
        """Plot the CDF and the pdf on the same axes."""
        plt.plot(self.x_values, self.cdf_values)
        plt.plot(self.x_values, self.pdf_values)
        if save:
            plt.savefig("plot_pdf_cdf.png", dpi=300)
        if show:
            plt.show()
    # The arithmetic operators below all follow the same pattern: combine
    # the Monte Carlo samples element-wise (or with a scalar) and build a
    # new AsymmetricData by refitting the resulting sample.
    def __add__(self, other):
        if isinstance(other, self.__class__):
            add = self.data + other.data
            print(len(add))
        elif isinstance(other, (int, float)):
            add = self.data + float(other)
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __radd__(self, other):
        if isinstance(other, self.__class__):
            add = other.data + self.data
        elif isinstance(other, (int, float)):
            add = float(other) + self.data
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __sub__(self, other):
        if isinstance(other, self.__class__):
            add = self.data - other.data
        elif isinstance(other, (int, float)):
            add = self.data - float(other)
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __rsub__(self, other):
        if isinstance(other, self.__class__):
            add = other.data - self.data
        elif isinstance(other, (int, float)):
            add = float(other) - self.data
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __mul__(self, other):
        if isinstance(other, self.__class__):
            add = self.data * other.data
        elif isinstance(other, (int, float)):
            add = self.data * float(other)
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __rmul__(self, other):
        if isinstance(other, self.__class__):
            add = other.data * self.data
        elif isinstance(other, (int, float)):
            add = float(other) * self.data
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __truediv__(self, other):
        if isinstance(other, self.__class__):
            add = self.data / other.data
        elif isinstance(other, (int, float)):
            add = self.data / float(other)
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __rtruediv__(self, other):
        if isinstance(other, self.__class__):
            add = other.data / self.data
        elif isinstance(other, (int, float)):
            add = float(other) / self.data
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __pow__(self, other):
        if isinstance(other, self.__class__):
            add = self.data ** other.data
        elif isinstance(other, (int, float)):
            add = self.data ** float(other)
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    def __rpow__(self, other):
        if isinstance(other, self.__class__):
            add = other.data ** self.data
        elif isinstance(other, (int, float)):
            add = float(other) ** self.data
        else:
            print("Unindentified input type! ({}, {})".format(other, type(other)))
            sys.exit()
        temp_obj = AsymmetricData(creation_type='by_operation', data=add)
        return temp_obj
    @staticmethod
    def lnL(x, mu, sigma_n, sigma_p):
        """Standalone log-likelihood of the variable-width Gaussian (no self state)."""
        par_1 = (2.0 * sigma_p * sigma_n) / (sigma_p + sigma_n)
        par_2 = (sigma_p - sigma_n) / (sigma_p + sigma_n)
        value = (-1.0 / 2.0) * ((mu - x) / (par_1 + par_2 * (x - mu))) ** 2.0
        return value
    @staticmethod
    def residual(params1, mu, n3, p3, confidence):
        """Squared mismatch between lnL at mu-n3/mu+p3 and the target drop.

        Used to find the 1-sigma (n1, p1) that reproduce quoted
        higher-confidence limits; targets are the lnL drops at
        1/2/3 sigma (-0.5, -2.0, -4.5).
        """
        n1, p1 = params1
        if confidence == 1.0:
            target_likelihood = -0.5
        elif confidence == 2.0:
            target_likelihood = -2.0
        elif confidence == 3.0:
            target_likelihood = -4.5
        else:
            target_likelihood = -0.5
            print("Something went wrong!")
        resid = (AsymmetricData.lnL(mu - n3, mu, n1, p1) - target_likelihood) ** 2.0 + (
                AsymmetricData.lnL(mu + p3, mu, n1, p1) - target_likelihood) ** 2.0
        return resid
    @staticmethod
    def convert_to_1_sigma(mu, n, p, confidence):
        """Brute-force grid search for the 1-sigma pair matching quoted n/p at `confidence` sigma."""
        N = 500
        n_range = np.linspace(1e-5, n, N)
        p_range = np.linspace(1e-5, p, N)
        np_matrix = np.zeros([n_range.shape[0], p_range.shape[0]])
        # Evaluate the (log) residual on the 500x500 grid and pick the minimum
        for i in range(n_range.shape[0]):
            for j in range(p_range.shape[0]):
                np_matrix[i, j] = np.log(AsymmetricData.residual([n_range[i], p_range[j]], mu, n, p, confidence))
        min_val = np_matrix.min()
        index_n, index_p = np.where(np_matrix == min_val)
        n_new, p_new = n_range[index_n[0]], p_range[index_p[0]]
        #print("")
        #print("# Converting to 1 sigma")
        #print("# {} (-{},+{}) ({} sigma) -> {} (-{},+{}) ({} sigma)".format(mu, n, p, confidence, mu, n_new, p_new, 1.0))
        return [n_new, p_new]
    @staticmethod
    def convert_from_1_sigma(mu, sigma_n, sigma_p, confidence):
        """Scan lnL outward from mu to find the limits at `confidence` sigma.

        Returns [negative_offset, positive_offset] where lnL first drops
        below the target (-0.5, -2.0 or -4.5).
        """
        if confidence == 1.0:
            target_likelihood = -0.5
        elif confidence == 2.0:
            target_likelihood = -2.0
        elif confidence == 3.0:
            target_likelihood = -4.5
        else:
            target_likelihood = -0.5
        delta_steps = 1e-4
        current_value = mu
        delta = abs(mu - sigma_p) * delta_steps
        current_likelihood = AsymmetricData.lnL(current_value, mu, sigma_n, sigma_p)
        while abs(current_likelihood) < abs(target_likelihood):
            current_value += delta
            current_likelihood = AsymmetricData.lnL(current_value, mu, sigma_n, sigma_p)
        positive_limit = current_value
        current_value = mu
        delta = abs(mu - sigma_n) * delta_steps
        current_likelihood = AsymmetricData.lnL(current_value, mu, sigma_n, sigma_p)
        while abs(current_likelihood) < abs(target_likelihood):
            current_value -= delta
            current_likelihood = AsymmetricData.lnL(current_value, mu, sigma_n, sigma_p)
        negative_limit = current_value
        n_new, p_new = mu - negative_limit, positive_limit - mu
        #print("")
        #print("# Converting from 1 sigma")
        #print("# {} (-{},+{}) ({} sigma) -> {} (-{},+{}) ({} sigma)".format(mu, sigma_n, sigma_p, 1.0, mu, n_new, p_new, confidence))
        return [n_new, p_new]
| [
"matplotlib.pyplot.clf",
"numpy.exp",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.axvline",
"numpy.append",
"numpy.linspace",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.asarray",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fit",
"matplo... | [((5218, 5232), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (5228, 5232), True, 'import numpy as np\n'), ((5431, 5499), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.x_values', 'self.cdf_values'], {'kind': '"""nearest"""'}), "(self.x_values, self.cdf_values, kind='nearest')\n", (5451, 5499), True, 'import scipy.interpolate as interpolate\n'), ((5579, 5647), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.cdf_values', 'self.x_values'], {'kind': '"""nearest"""'}), "(self.cdf_values, self.x_values, kind='nearest')\n", (5599, 5647), True, 'import scipy.interpolate as interpolate\n'), ((5719, 5750), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.N'], {}), '(0, 1, self.N)\n', (5736, 5750), True, 'import numpy as np\n'), ((6280, 6289), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6287, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6870, 6924), 'scipy.optimize.curve_fit', 'curve_fit', (['self.fit_func', 'x', 'y', 'expected'], {'method': '"""trf"""'}), "(self.fit_func, x, y, expected, method='trf')\n", (6879, 6924), False, 'from scipy.optimize import curve_fit\n'), ((8215, 8224), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8222, 8224), True, 'import matplotlib.pyplot as plt\n'), ((8233, 8287), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_values', 'self.pdf_values'], {'color': '"""blue"""'}), "(self.x_values, self.pdf_values, color='blue')\n", (8241, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8297, 8312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8307, 8312), True, 'import matplotlib.pyplot as plt\n'), ((8321, 8339), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""prob"""'], {}), "('prob')\n", (8331, 8339), True, 'import matplotlib.pyplot as plt\n'), ((8515, 8524), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8522, 8524), True, 'import matplotlib.pyplot as plt\n'), ((8601, 8620), 'matplotlib.pyplot.ylim', 'plt.ylim', 
(['[-5, 1.5]'], {}), '([-5, 1.5])\n', (8609, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8630, 8645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8640, 8645), True, 'import matplotlib.pyplot as plt\n'), ((8654, 8672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ln L"""'], {}), "('ln L')\n", (8664, 8672), True, 'import matplotlib.pyplot as plt\n'), ((8682, 8830), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(-0.5)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""2.0"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)"""'}), "(y=-0.5, color='black', ls='--', lw='2.0', label=\n f'Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)'\n )\n", (8693, 8830), True, 'import matplotlib.pyplot as plt\n'), ((8829, 8979), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(-2.0)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.5"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)"""'}), "(y=-2.0, color='black', ls='--', lw='1.5', label=\n f'Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)'\n )\n", (8840, 8979), True, 'import matplotlib.pyplot as plt\n'), ((8978, 9128), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(-4.5)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.0"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)"""'}), "(y=-4.5, color='black', ls='--', lw='1.0', label=\n f'Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)'\n )\n", (8989, 9128), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((9597, 9606), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9604, 9606), True, 'import matplotlib.pyplot as plt\n'), ((9615, 9685), 
'matplotlib.pyplot.hist', 'plt.hist', (['self.data'], {'bins': 'bins', 'density': '(True)', 'color': '"""green"""', 'alpha': '(0.7)'}), "(self.data, bins=bins, density=True, color='green', alpha=0.7)\n", (9623, 9685), True, 'import matplotlib.pyplot as plt\n'), ((9927, 9936), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9934, 9936), True, 'import matplotlib.pyplot as plt\n'), ((9945, 10015), 'matplotlib.pyplot.hist', 'plt.hist', (['self.data'], {'bins': 'bins', 'density': '(True)', 'color': '"""green"""', 'alpha': '(0.6)'}), "(self.data, bins=bins, density=True, color='green', alpha=0.6)\n", (9953, 10015), True, 'import matplotlib.pyplot as plt\n'), ((10024, 10078), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_values', 'self.pdf_values'], {'color': '"""blue"""'}), "(self.x_values, self.pdf_values, color='blue')\n", (10032, 10078), True, 'import matplotlib.pyplot as plt\n'), ((10088, 10103), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (10098, 10103), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10131), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prob."""'], {}), "('Prob.')\n", (10122, 10131), True, 'import matplotlib.pyplot as plt\n'), ((10141, 10311), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu - self.sigma_n)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.5"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)"""'}), "(x=self.mu - self.sigma_n, color='black', ls='--', lw='1.5',\n label=\n f'Value = {self.mu:.4f} (-{self.sigma_n:.4f}, +{self.sigma_p:.4f}) (1 sigma)'\n )\n", (10152, 10311), True, 'import matplotlib.pyplot as plt\n'), ((10326, 10397), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu + self.sigma_p)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.5"""'}), "(x=self.mu + self.sigma_p, color='black', ls='--', lw='1.5')\n", (10337, 10397), True, 'import matplotlib.pyplot as plt\n'), ((10407, 10580), 
'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu - self.sigma2_n)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.0"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)"""'}), "(x=self.mu - self.sigma2_n, color='black', ls='--', lw='1.0',\n label=\n f'Value = {self.mu:.4f} (-{self.sigma2_n:.4f}, +{self.sigma2_p:.4f}) (2 sigma)'\n )\n", (10418, 10580), True, 'import matplotlib.pyplot as plt\n'), ((10595, 10667), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu + self.sigma2_p)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""1.0"""'}), "(x=self.mu + self.sigma2_p, color='black', ls='--', lw='1.0')\n", (10606, 10667), True, 'import matplotlib.pyplot as plt\n'), ((10677, 10850), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu - self.sigma3_n)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""0.5"""', 'label': 'f"""Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)"""'}), "(x=self.mu - self.sigma3_n, color='black', ls='--', lw='0.5',\n label=\n f'Value = {self.mu:.4f} (-{self.sigma3_n:.4f}, +{self.sigma3_p:.4f}) (3 sigma)'\n )\n", (10688, 10850), True, 'import matplotlib.pyplot as plt\n'), ((10865, 10937), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(self.mu + self.sigma3_p)', 'color': '"""black"""', 'ls': '"""--"""', 'lw': '"""0.5"""'}), "(x=self.mu + self.sigma3_p, color='black', ls='--', lw='0.5')\n", (10876, 10937), True, 'import matplotlib.pyplot as plt\n'), ((10947, 10959), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10957, 10959), True, 'import matplotlib.pyplot as plt\n'), ((11137, 11177), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_values', 'self.cdf_values'], {}), '(self.x_values, self.cdf_values)\n', (11145, 11177), True, 'import matplotlib.pyplot as plt\n'), ((11186, 11226), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_values', 'self.pdf_values'], {}), '(self.x_values, self.pdf_values)\n', 
(11194, 11226), True, 'import matplotlib.pyplot as plt\n'), ((16610, 16634), 'numpy.linspace', 'np.linspace', (['(1e-05)', 'n', 'N'], {}), '(1e-05, n, N)\n', (16621, 16634), True, 'import numpy as np\n'), ((16652, 16676), 'numpy.linspace', 'np.linspace', (['(1e-05)', 'p', 'N'], {}), '(1e-05, p, N)\n', (16663, 16676), True, 'import numpy as np\n'), ((16697, 16743), 'numpy.zeros', 'np.zeros', (['[n_range.shape[0], p_range.shape[0]]'], {}), '([n_range.shape[0], p_range.shape[0]])\n', (16705, 16743), True, 'import numpy as np\n'), ((17009, 17039), 'numpy.where', 'np.where', (['(np_matrix == min_val)'], {}), '(np_matrix == min_val)\n', (17017, 17039), True, 'import numpy as np\n'), ((830, 844), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (840, 844), True, 'import numpy as np\n'), ((883, 899), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (893, 899), True, 'import numpy as np\n'), ((2138, 2193), 'numpy.linspace', 'np.linspace', (['self.x_limits[0]', 'self.x_limits[1]', 'self.N'], {}), '(self.x_limits[0], self.x_limits[1], self.N)\n', (2149, 2193), True, 'import numpy as np\n'), ((4718, 4731), 'numpy.exp', 'np.exp', (['par_3'], {}), '(par_3)\n', (4724, 4731), True, 'import numpy as np\n'), ((5333, 5360), 'numpy.append', 'np.append', (['cdf_values', 'area'], {}), '(cdf_values, area)\n', (5342, 5360), True, 'import numpy as np\n'), ((6134, 6147), 'numpy.exp', 'np.exp', (['par_3'], {}), '(par_3)\n', (6140, 6147), True, 'import numpy as np\n'), ((8370, 8406), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_pdf.png"""'], {'dpi': '(300)'}), "('plot_pdf.png', dpi=300)\n", (8381, 8406), True, 'import matplotlib.pyplot as plt\n'), ((8437, 8447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8445, 8447), True, 'import matplotlib.pyplot as plt\n'), ((9171, 9218), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_log_likelihood.png"""'], {'dpi': '(300)'}), "('plot_log_likelihood.png', dpi=300)\n", (9182, 9218), True, 'import 
matplotlib.pyplot as plt\n'), ((9249, 9259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9257, 9259), True, 'import matplotlib.pyplot as plt\n'), ((9395, 9431), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_cdf.png"""'], {'dpi': '(300)'}), "('plot_cdf.png', dpi=300)\n", (9406, 9431), True, 'import matplotlib.pyplot as plt\n'), ((9462, 9472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9470, 9472), True, 'import matplotlib.pyplot as plt\n'), ((9716, 9753), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_data.png"""'], {'dpi': '(300)'}), "('plot_data.png', dpi=300)\n", (9727, 9753), True, 'import matplotlib.pyplot as plt\n'), ((9784, 9794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9792, 9794), True, 'import matplotlib.pyplot as plt\n'), ((10990, 11035), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_data_and_pdf.png"""'], {'dpi': '(300)'}), "('plot_data_and_pdf.png', dpi=300)\n", (11001, 11035), True, 'import matplotlib.pyplot as plt\n'), ((11066, 11076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11074, 11076), True, 'import matplotlib.pyplot as plt\n'), ((11257, 11297), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_pdf_cdf.png"""'], {'dpi': '(300)'}), "('plot_pdf_cdf.png', dpi=300)\n", (11268, 11297), True, 'import matplotlib.pyplot as plt\n'), ((11328, 11338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11336, 11338), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3171), 'numpy.linspace', 'np.linspace', (['self.x_limits[0]', 'self.x_limits[1]', 'self.N'], {}), '(self.x_limits[0], self.x_limits[1], self.N)\n', (3127, 3171), True, 'import numpy as np\n'), ((11683, 11693), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11691, 11693), False, 'import sys\n'), ((12109, 12119), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12117, 12119), False, 'import sys\n'), ((12534, 12544), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12542, 12544), False, 'import sys\n'), ((12960, 
12970), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12968, 12970), False, 'import sys\n'), ((13385, 13395), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13393, 13395), False, 'import sys\n'), ((13811, 13821), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13819, 13821), False, 'import sys\n'), ((14240, 14250), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14248, 14250), False, 'import sys\n'), ((14670, 14680), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14678, 14680), False, 'import sys\n'), ((15097, 15107), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15105, 15107), False, 'import sys\n'), ((15525, 15535), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15533, 15535), False, 'import sys\n')] |
from ..utils import *
from ..motion import blockMotion
import numpy as np
import scipy.ndimage
import scipy.fftpack
import scipy.stats
import scipy.io
import sys
from os.path import dirname
from os.path import join
def motion_feature_extraction(frames):
    """Compute motion-based features of a video: mean motion coherence and
    a global-motion measure.

    Parameters
    ----------
    frames : ndarray
        Video frames; converted to float32 internally.

    Returns
    -------
    ndarray, shape (2,)
        [meanCoh10x10, G] where meanCoh10x10 is the mean 10x10-block motion
        coherence and G is the mean |mode - mean| motion amplitude normalized
        by (1 + mean mode amplitude).
    """
    # setup
    frames = frames.astype(np.float32)
    mblock = 10
    h = gen_gauss_window(2, 0.5)
    # step 1: motion vector calculation
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    motion_vectors = blockMotion(frames, method='N3SS', mbSize=mblock, p=int(1.5*mblock))
    motion_vectors = motion_vectors.astype(np.float32)
    # step 2: compute coherency from the eigenvalues of the smoothed local
    # structure tensor of the motion field
    Eigens = np.zeros((motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.shape[2], 2), dtype=np.float32)
    for i in range(motion_vectors.shape[0]):
        motion_frame = motion_vectors[i]
        upper_left = np.zeros_like(motion_frame[:, :, 0])
        lower_right = np.zeros_like(motion_frame[:, :, 0])
        off_diag = np.zeros_like(motion_frame[:, :, 0])
        # separable Gaussian smoothing of each tensor component
        scipy.ndimage.correlate1d(motion_frame[:, :, 0]**2, h, 0, upper_left, mode='reflect')
        scipy.ndimage.correlate1d(upper_left, h, 1, upper_left, mode='reflect')
        scipy.ndimage.correlate1d(motion_frame[:, :, 1]**2, h, 0, lower_right, mode='reflect')
        scipy.ndimage.correlate1d(lower_right, h, 1, lower_right, mode='reflect')
        scipy.ndimage.correlate1d(motion_frame[:, :, 1]*motion_frame[:, :, 0], h, 0, off_diag, mode='reflect')
        scipy.ndimage.correlate1d(off_diag, h, 1, off_diag, mode='reflect')
        for y in range(motion_vectors.shape[1]):
            for x in range(motion_vectors.shape[2]):
                mat = np.array([
                    [upper_left[y, x], off_diag[y, x]],
                    [off_diag[y, x], lower_right[y, x]],
                ])
                w, _ = np.linalg.eig(mat)
                Eigens[i, y, x] = w
    # coherence: normalized squared difference of the two eigenvalues
    num = (Eigens[:, :, :, 0] - Eigens[:, :, :, 1])**2
    den = (Eigens[:, :, :, 0] + Eigens[:, :, :, 1])**2
    Coh10x10 = np.zeros_like(num)
    Coh10x10[den != 0] = num[den != 0] / den[den != 0]  # guard division by zero
    meanCoh10x10 = np.mean(Coh10x10)
    # step 3: global motion (mode vs. mean of per-frame motion amplitude)
    mode10x10 = np.zeros((motion_vectors.shape[0]), dtype=np.float32)
    mean10x10 = np.zeros((motion_vectors.shape[0]), dtype=np.float32)
    for i in range(motion_vectors.shape[0]):
        motion_amplitude = np.sqrt(motion_vectors[i, :, :, 0]**2 + motion_vectors[i, :, :, 1]**2)
        mode10x10[i] = scipy.stats.mode(motion_amplitude, axis=None)[0]
        mean10x10[i] = np.mean(motion_amplitude)
    motion_diff = np.abs(mode10x10 - mean10x10)
    G = np.mean(motion_diff) / (1 + np.mean(mode10x10))
    return np.array([meanCoh10x10, G])
def _extract_subband_feats(mscncoefs):
    """Return two feature arrays for one MSCN coefficient map.

    The first array holds the AGGD shape parameter of the coefficients and
    the mean of its two scale parameters; the second holds the four AGGD
    parameters (alpha, N, left scale, right scale) for each of the four
    paired products: vertical, horizontal, and the two diagonals.
    """
    alpha_m, _, bl, br, _, _ = aggd_features(mscncoefs.copy())
    mscn_feats = np.array([alpha_m, (bl + br) / 2.0])
    pp_feats = []
    # V, H, D1, D2 paired products, in that order
    for pp in paired_product(mscncoefs):
        alpha, N, b_left, b_right, _, _ = aggd_features(pp)
        pp_feats.extend([alpha, N, b_left, b_right])
    return mscn_feats, np.array(pp_feats)
def extract_on_patches(img, blocksizerow, blocksizecol):
    """Split img into non-overlapping blocks and compute per-block NSS features.

    Parameters
    ----------
    img : ndarray, shape (h, w)
        MSCN coefficient map.
    blocksizerow, blocksizecol : int or float
        Block dimensions; truncated to int internally.

    Returns
    -------
    ndarray
        Stacked per-patch feature vectors, one row per patch.
    """
    h, w = img.shape
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    blocksizerow = int(blocksizerow)
    blocksizecol = int(blocksizecol)
    patches = []
    for j in range(0, h - blocksizerow + 1, blocksizerow):
        for i in range(0, w - blocksizecol + 1, blocksizecol):
            patches.append(img[j:j + blocksizerow, i:i + blocksizecol])
    patch_features = []
    for p in patches:
        mscn_features, pp_features = _extract_subband_feats(p)
        patch_features.append(np.hstack((mscn_features, pp_features)))
    return np.array(patch_features)
def computequality(img, blocksizerow, blocksizecol, mu_prisparam, cov_prisparam):
    """Compute the NIQE feature vector and quality score for one frame.

    Parameters
    ----------
    img : ndarray
        Frame with a trailing channel axis; only channel 0 (luminance) is used.
    blocksizerow, blocksizecol : int
        Patch dimensions used for feature extraction.
    mu_prisparam, cov_prisparam : ndarray
        Mean vector and covariance of the pristine model parameters.

    Returns
    -------
    ndarray
        The per-frame mean feature vector with the NIQE quality score
        appended as the last element.
    """
    img = img[:, :, 0]
    h, w = img.shape
    if (h < blocksizerow) or (w < blocksizecol):
        print("Input frame is too small")
        # NOTE(review): exits with status 0 (success) on an error condition -
        # consider raising an exception instead.
        exit(0)
    # ensure that the patch divides evenly into img
    hoffset = (h % blocksizerow)
    woffset = (w % blocksizecol)
    if hoffset > 0:
        img = img[:-hoffset, :]
    if woffset > 0:
        img = img[:, :-woffset]
    img = img.astype(np.float32)
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 - this call
    # only works with an old SciPy (plus Pillow); verify the pinned version.
    img2 = scipy.misc.imresize(img, 0.5, interp='bicubic', mode='F')
    mscn1, var, mu = compute_image_mscn_transform(img, extend_mode='nearest')
    mscn1 = mscn1.astype(np.float32)
    mscn2, _, _ = compute_image_mscn_transform(img2, extend_mode='nearest')
    mscn2 = mscn2.astype(np.float32)
    # features at the original scale and at half scale
    feats_lvl1 = extract_on_patches(mscn1, blocksizerow, blocksizecol)
    feats_lvl2 = extract_on_patches(mscn2, blocksizerow/2, blocksizecol/2)
    # stack the scale features
    feats = np.hstack((feats_lvl1, feats_lvl2))# feats_lvl3))
    mu_distparam = np.mean(feats, axis=0)
    cov_distparam = np.cov(feats.T)
    # Mahalanobis-like distance between pristine and distorted statistics
    invcov_param = np.linalg.pinv((cov_prisparam + cov_distparam)/2)
    xd = mu_prisparam - mu_distparam
    quality = np.sqrt(np.dot(np.dot(xd, invcov_param), xd.T))[0][0]
    return np.hstack((mu_distparam, [quality]))
def compute_niqe_features(frames):
    """Average the per-frame NIQE feature vectors over a video's interior frames.

    The first five and last five frames are skipped; each remaining frame
    contributes a 37-element vector (36 features plus the quality score),
    and the vectors are averaged element-wise.
    """
    blocksizerow = 96
    blocksizecol = 96
    T, M, N, C = frames.shape
    assert ((M >= blocksizerow*2) & (N >= blocksizecol*2)), "Video too small for NIQE extraction"
    # load the pristine model parameters shipped alongside this module
    module_path = dirname(__file__)
    params = scipy.io.loadmat(join(module_path, 'data', 'frames_modelparameters.mat'))
    mu_prisparam = params['mu_prisparam']
    cov_prisparam = params['cov_prisparam']
    per_frame = np.array([
        computequality(frames[i], blocksizerow, blocksizecol, mu_prisparam, cov_prisparam)
        for i in range(5, frames.shape[0] - 5)
    ])
    return np.mean(per_frame, axis=0)
def temporal_dc_variation_feature_extraction(frames):
    """Compute the temporal variation of motion-compensated DC DCT differences.

    For each pair of consecutive frames, motion vectors are estimated on
    16x16 blocks; the DC coefficient of the 2-D DCT of each motion-compensated
    block difference is collected. The feature is the mean absolute
    frame-to-frame change of the per-frame standard deviation of those DC
    values.

    Returns
    -------
    ndarray, shape (1,)
    """
    frames = frames.astype(np.float32)
    mblock = 16
    # (removed unused ih/iw/mbsize locals; they were computed but never read)
    # step 1: motion vector calculation
    motion_vectors = blockMotion(frames, method='N3SS', mbSize=mblock, p=7)
    # step 2: motion-compensated temporal DCT differences (DC term only)
    dct_motion_comp_diff = np.zeros((motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.shape[2]), dtype=np.float32)
    for i in range(motion_vectors.shape[0]):
        for y in range(motion_vectors.shape[1]):
            for x in range(motion_vectors.shape[2]):
                patchP = frames[i+1, y*mblock:(y+1)*mblock, x*mblock:(x+1)*mblock, 0].astype(np.float32)
                # corresponding block in the previous frame, shifted by the motion vector
                patchI = frames[i, y*mblock+motion_vectors[i, y, x, 0]:(y+1)*mblock+motion_vectors[i, y, x, 0], x*mblock+motion_vectors[i, y, x, 1]:(x+1)*mblock+motion_vectors[i, y, x, 1], 0].astype(np.float32)
                diff = patchP - patchI
                t = scipy.fftpack.dct(scipy.fftpack.dct(diff, axis=1, norm='ortho'), axis=0, norm='ortho')
                dct_motion_comp_diff[i, y, x] = t[0, 0]  # DC coefficient only
    dct_motion_comp_diff = dct_motion_comp_diff.reshape(motion_vectors.shape[0], -1)
    std_dc = np.std(dct_motion_comp_diff, axis=1)
    dt_dc_temp = np.abs(std_dc[1:] - std_dc[:-1])
    dt_dc_measure1 = np.mean(dt_dc_temp)
    return np.array([dt_dc_measure1])
def NSS_spectral_ratios_feature_extraction(frames):
    """Compute natural video statistics from 5x5 DCT coefficients of frame
    differences.

    Parameters
    ----------
    frames : ndarray, shape (T, M, N)
        Luminance frames.

    Returns
    -------
    ndarray, shape (6,)
        [dt_dc_measure2, geo_HL_ratio, geo_HM_ratio, geo_hh_ratio,
         geo_high_ratio, geo_low_ratio]
    """
    def zigzag(data):
        # traverse a square matrix in zig-zag (JPEG) order
        nrows, ncols = data.shape
        d = sum([list(data[::-1, :].diagonal(i)[::(i+nrows+1) % 2*-2+1]) for i in range(-nrows, nrows+len(data[0]))], [])
        return np.array(d)
    mblock = 5
    # step 1: compute local dct frame differences
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    dct_diff5x5 = np.zeros((frames.shape[0]-1, int(frames.shape[1]/mblock), int(frames.shape[2]/mblock), mblock**2), dtype=np.float32)
    for i in range(dct_diff5x5.shape[0]):
        for y in range(dct_diff5x5.shape[1]):
            for x in range(dct_diff5x5.shape[2]):
                diff = frames[i+1, y*mblock:(y+1)*mblock, x*mblock:(x+1)*mblock].astype(np.float32) - frames[i, y*mblock:(y+1)*mblock, x*mblock:(x+1)*mblock].astype(np.float32)
                t = scipy.fftpack.dct(scipy.fftpack.dct(diff, axis=1, norm='ortho'), axis=0, norm='ortho')
                dct_diff5x5[i, y, x] = t.ravel()
    dct_diff5x5 = dct_diff5x5.reshape(dct_diff5x5.shape[0], dct_diff5x5.shape[1] * dct_diff5x5.shape[2], -1)
    # step 2: per-band GGD shape parameter (gamma) via table lookup on the
    # variance / mean-absolute-deviation ratio rho
    g = np.arange(0.03, 10+0.001, 0.001)
    r = (scipy.special.gamma(1/g) * scipy.special.gamma(3/g)) / (scipy.special.gamma(2/g)**2)
    gamma_matrix = np.zeros((dct_diff5x5.shape[0], mblock**2), dtype=np.float32)
    for i in range(dct_diff5x5.shape[0]):
        for s in range(mblock**2):
            temp = dct_diff5x5[i, :, s]
            mean_gauss = np.mean(temp)
            var_gauss = np.var(temp, ddof=1)
            mean_abs = np.mean(np.abs(temp - mean_gauss))**2
            rho = var_gauss/(mean_abs + 1e-7)
            gamma_gauss = 11  # fallback when rho is outside the table range
            for x in range(len(g)-1):
                if (rho <= r[x]) and (rho > r[x+1]):
                    gamma_gauss = g[x]
                    break
            gamma_matrix[i, s] = gamma_gauss
    gamma_matrix = gamma_matrix.reshape(dct_diff5x5.shape[0], mblock, mblock)
    # step 3: group gammas into low/mid/high frequency bands in zig-zag order
    freq_bands = np.zeros((dct_diff5x5.shape[0], mblock**2))
    for i in range(dct_diff5x5.shape[0]):
        freq_bands[i] = zigzag(gamma_matrix[i])
    lf_gamma5x5 = freq_bands[:, 1:int((mblock**2-1)/3)+1]
    mf_gamma5x5 = freq_bands[:, int((mblock**2-1)/3)+1:2*int((mblock**2-1)/3)+1]
    hf_gamma5x5 = freq_bands[:, int(2*(mblock**2-1)/3)+1:]
    geomean_lf_gam = scipy.stats.mstats.gmean(lf_gamma5x5.T)
    geomean_mf_gam = scipy.stats.mstats.gmean(mf_gamma5x5.T)
    geomean_hf_gam = scipy.stats.mstats.gmean(hf_gamma5x5.T)
    geo_high_ratio = scipy.stats.mstats.gmean(geomean_hf_gam/(0.1 + (geomean_mf_gam + geomean_lf_gam)/2))
    geo_low_ratio = scipy.stats.mstats.gmean(geomean_mf_gam/(0.1 + geomean_lf_gam))
    geo_HL_ratio = scipy.stats.mstats.gmean(geomean_hf_gam/(0.1 + geomean_lf_gam))
    geo_HM_ratio = scipy.stats.mstats.gmean(geomean_hf_gam/(0.1 + geomean_mf_gam))
    geo_hh_ratio = scipy.stats.mstats.gmean(((geomean_hf_gam + geomean_mf_gam)/2)/(0.1 + geomean_lf_gam))
    # step 4: temporal variation of the per-frame mean DC coefficient
    mean_dc = np.mean(dct_diff5x5[:, :, 0], axis=1)
    dt_dc_measure2 = np.mean(np.abs(mean_dc[1:] - mean_dc[:-1]))
    return np.array([dt_dc_measure2, geo_HL_ratio, geo_HM_ratio, geo_hh_ratio, geo_high_ratio, geo_low_ratio])
def videobliinds_features(videoData):
    """Compute the raw Video BLIINDS features. [#f1]_

    This is a no-reference (blind) quality algorithm, so only one video is
    required.

    Parameters
    ----------
    videoData : ndarray
        Input video of dimension (T, M, N, C), (T, M, N), (M, N, C), or
        (M, N), where T is the number of frames, M the height, N the width,
        and C the number of channels. Only the luminance channel (C == 1)
        is supported.

    Returns
    -------
    features : ndarray, shape (46,)
        | The individual features of the algorithm, arranged as follows:
        |
        | features[:36]   : spatial niqe vector averaged over the video
        | features[36]    : niqe naturalness score
        | features[37:39] : DC measurements between frames
        | features[39:44] : natural video statistics
        | features[44]    : motion coherence
        | features[45]    : global motion

    References
    ----------
    .. [#f1] "Blind prediction of natural video quality," IEEE Transactions
             on Image Processing, December 2013.
    """
    videoData = vshape(videoData)
    T, M, N, C = videoData.shape
    assert C == 1, "videobliinds called with video having %d channels. Please supply only the luminance channel." % (C,)
    dt_dc_measure1 = temporal_dc_variation_feature_extraction(videoData)
    spectral_features = NSS_spectral_ratios_feature_extraction(videoData)
    temporal_features = motion_feature_extraction(videoData)
    niqe_features = compute_niqe_features(videoData)
    # log-compress everything except the NIQE block before concatenating
    log_blocks = [
        np.log(1 + dt_dc_measure1),
        np.log(1 + spectral_features),
        np.log(1 + temporal_features),
    ]
    return np.hstack([niqe_features] + log_blocks)
| [
"numpy.zeros_like",
"numpy.abs",
"os.path.join",
"numpy.log",
"numpy.std",
"os.path.dirname",
"numpy.zeros",
"numpy.var",
"numpy.hstack",
"numpy.linalg.eig",
"numpy.mean",
"numpy.array",
"numpy.int",
"numpy.arange",
"numpy.dot",
"numpy.cov",
"numpy.linalg.pinv",
"numpy.sqrt"
] | [((588, 699), 'numpy.zeros', 'np.zeros', (['(motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.shape[2], 2)'], {'dtype': 'np.float32'}), '((motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.\n shape[2], 2), dtype=np.float32)\n', (596, 699), True, 'import numpy as np\n'), ((1905, 1923), 'numpy.zeros_like', 'np.zeros_like', (['num'], {}), '(num)\n', (1918, 1923), True, 'import numpy as np\n'), ((1993, 2010), 'numpy.mean', 'np.mean', (['Coh10x10'], {}), '(Coh10x10)\n', (2000, 2010), True, 'import numpy as np\n'), ((2056, 2107), 'numpy.zeros', 'np.zeros', (['motion_vectors.shape[0]'], {'dtype': 'np.float32'}), '(motion_vectors.shape[0], dtype=np.float32)\n', (2064, 2107), True, 'import numpy as np\n'), ((2126, 2177), 'numpy.zeros', 'np.zeros', (['motion_vectors.shape[0]'], {'dtype': 'np.float32'}), '(motion_vectors.shape[0], dtype=np.float32)\n', (2134, 2177), True, 'import numpy as np\n'), ((2497, 2526), 'numpy.abs', 'np.abs', (['(mode10x10 - mean10x10)'], {}), '(mode10x10 - mean10x10)\n', (2503, 2526), True, 'import numpy as np\n'), ((2595, 2622), 'numpy.array', 'np.array', (['[meanCoh10x10, G]'], {}), '([meanCoh10x10, G])\n', (2603, 2622), True, 'import numpy as np\n'), ((3496, 3516), 'numpy.int', 'np.int', (['blocksizerow'], {}), '(blocksizerow)\n', (3502, 3516), True, 'import numpy as np\n'), ((3536, 3556), 'numpy.int', 'np.int', (['blocksizecol'], {}), '(blocksizecol)\n', (3542, 3556), True, 'import numpy as np\n'), ((3829, 3846), 'numpy.array', 'np.array', (['patches'], {}), '(patches)\n', (3837, 3846), True, 'import numpy as np\n'), ((4053, 4077), 'numpy.array', 'np.array', (['patch_features'], {}), '(patch_features)\n', (4061, 4077), True, 'import numpy as np\n'), ((5089, 5124), 'numpy.hstack', 'np.hstack', (['(feats_lvl1, feats_lvl2)'], {}), '((feats_lvl1, feats_lvl2))\n', (5098, 5124), True, 'import numpy as np\n'), ((5159, 5181), 'numpy.mean', 'np.mean', (['feats'], {'axis': '(0)'}), '(feats, axis=0)\n', (5166, 5181), True, 
'import numpy as np\n'), ((5202, 5217), 'numpy.cov', 'np.cov', (['feats.T'], {}), '(feats.T)\n', (5208, 5217), True, 'import numpy as np\n'), ((5238, 5289), 'numpy.linalg.pinv', 'np.linalg.pinv', (['((cov_prisparam + cov_distparam) / 2)'], {}), '((cov_prisparam + cov_distparam) / 2)\n', (5252, 5289), True, 'import numpy as np\n'), ((5407, 5443), 'numpy.hstack', 'np.hstack', (['(mu_distparam, [quality])'], {}), '((mu_distparam, [quality]))\n', (5416, 5443), True, 'import numpy as np\n'), ((5673, 5690), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (5680, 5690), False, 'from os.path import dirname\n'), ((5885, 5921), 'numpy.zeros', 'np.zeros', (['(frames.shape[0] - 10, 37)'], {}), '((frames.shape[0] - 10, 37))\n', (5893, 5921), True, 'import numpy as np\n'), ((6120, 6150), 'numpy.mean', 'np.mean', (['niqe_features'], {'axis': '(0)'}), '(niqe_features, axis=0)\n', (6127, 6150), True, 'import numpy as np\n'), ((6586, 6694), 'numpy.zeros', 'np.zeros', (['(motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.shape[2])'], {'dtype': 'np.float32'}), '((motion_vectors.shape[0], motion_vectors.shape[1], motion_vectors.\n shape[2]), dtype=np.float32)\n', (6594, 6694), True, 'import numpy as np\n'), ((7507, 7543), 'numpy.std', 'np.std', (['dct_motion_comp_diff'], {'axis': '(1)'}), '(dct_motion_comp_diff, axis=1)\n', (7513, 7543), True, 'import numpy as np\n'), ((7561, 7593), 'numpy.abs', 'np.abs', (['(std_dc[1:] - std_dc[:-1])'], {}), '(std_dc[1:] - std_dc[:-1])\n', (7567, 7593), True, 'import numpy as np\n'), ((7616, 7635), 'numpy.mean', 'np.mean', (['dt_dc_temp'], {}), '(dt_dc_temp)\n', (7623, 7635), True, 'import numpy as np\n'), ((7647, 7673), 'numpy.array', 'np.array', (['[dt_dc_measure1]'], {}), '([dt_dc_measure1])\n', (7655, 7673), True, 'import numpy as np\n'), ((8718, 8752), 'numpy.arange', 'np.arange', (['(0.03)', '(10 + 0.001)', '(0.001)'], {}), '(0.03, 10 + 0.001, 0.001)\n', (8727, 8752), True, 'import numpy as np\n'), ((8865, 8928), 
'numpy.zeros', 'np.zeros', (['(dct_diff5x5.shape[0], mblock ** 2)'], {'dtype': 'np.float32'}), '((dct_diff5x5.shape[0], mblock ** 2), dtype=np.float32)\n', (8873, 8928), True, 'import numpy as np\n'), ((9608, 9653), 'numpy.zeros', 'np.zeros', (['(dct_diff5x5.shape[0], mblock ** 2)'], {}), '((dct_diff5x5.shape[0], mblock ** 2))\n', (9616, 9653), True, 'import numpy as np\n'), ((10614, 10651), 'numpy.mean', 'np.mean', (['dct_diff5x5[:, :, 0]'], {'axis': '(1)'}), '(dct_diff5x5[:, :, 0], axis=1)\n', (10621, 10651), True, 'import numpy as np\n'), ((10734, 10837), 'numpy.array', 'np.array', (['[dt_dc_measure2, geo_HL_ratio, geo_HM_ratio, geo_hh_ratio, geo_high_ratio,\n geo_low_ratio]'], {}), '([dt_dc_measure2, geo_HL_ratio, geo_HM_ratio, geo_hh_ratio,\n geo_high_ratio, geo_low_ratio])\n', (10742, 10837), True, 'import numpy as np\n'), ((799, 835), 'numpy.zeros_like', 'np.zeros_like', (['motion_frame[:, :, 0]'], {}), '(motion_frame[:, :, 0])\n', (812, 835), True, 'import numpy as np\n'), ((855, 891), 'numpy.zeros_like', 'np.zeros_like', (['motion_frame[:, :, 0]'], {}), '(motion_frame[:, :, 0])\n', (868, 891), True, 'import numpy as np\n'), ((909, 945), 'numpy.zeros_like', 'np.zeros_like', (['motion_frame[:, :, 0]'], {}), '(motion_frame[:, :, 0])\n', (922, 945), True, 'import numpy as np\n'), ((2289, 2363), 'numpy.sqrt', 'np.sqrt', (['(motion_vectors[i, :, :, 0] ** 2 + motion_vectors[i, :, :, 1] ** 2)'], {}), '(motion_vectors[i, :, :, 0] ** 2 + motion_vectors[i, :, :, 1] ** 2)\n', (2296, 2363), True, 'import numpy as np\n'), ((2452, 2477), 'numpy.mean', 'np.mean', (['motion_amplitude'], {}), '(motion_amplitude)\n', (2459, 2477), True, 'import numpy as np\n'), ((2535, 2555), 'numpy.mean', 'np.mean', (['motion_diff'], {}), '(motion_diff)\n', (2542, 2555), True, 'import numpy as np\n'), ((3180, 3216), 'numpy.array', 'np.array', (['[alpha_m, (bl + br) / 2.0]'], {}), '([alpha_m, (bl + br) / 2.0])\n', (3188, 3216), True, 'import numpy as np\n'), ((3214, 3316), 'numpy.array', 
'np.array', (['[alpha1, N1, bl1, br1, alpha2, N2, bl2, br2, alpha3, N3, bl3, br3, alpha4,\n N4, bl4, br4]'], {}), '([alpha1, N1, bl1, br1, alpha2, N2, bl2, br2, alpha3, N3, bl3, br3,\n alpha4, N4, bl4, br4])\n', (3222, 3316), True, 'import numpy as np\n'), ((3596, 3624), 'numpy.int', 'np.int', (['(h - blocksizerow + 1)'], {}), '(h - blocksizerow + 1)\n', (3602, 3624), True, 'import numpy as np\n'), ((3622, 3642), 'numpy.int', 'np.int', (['blocksizerow'], {}), '(blocksizerow)\n', (3628, 3642), True, 'import numpy as np\n'), ((5721, 5776), 'os.path.join', 'join', (['module_path', '"""data"""', '"""frames_modelparameters.mat"""'], {}), "(module_path, 'data', 'frames_modelparameters.mat')\n", (5725, 5776), False, 'from os.path import join\n'), ((6307, 6339), 'numpy.int', 'np.int', (['(frames.shape[1] / mbsize)'], {}), '(frames.shape[1] / mbsize)\n', (6313, 6339), True, 'import numpy as np\n'), ((6354, 6386), 'numpy.int', 'np.int', (['(frames.shape[2] / mbsize)'], {}), '(frames.shape[2] / mbsize)\n', (6360, 6386), True, 'import numpy as np\n'), ((7907, 7918), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (7915, 7918), True, 'import numpy as np\n'), ((10682, 10716), 'numpy.abs', 'np.abs', (['(mean_dc[1:] - mean_dc[:-1])'], {}), '(mean_dc[1:] - mean_dc[:-1])\n', (10688, 10716), True, 'import numpy as np\n'), ((467, 487), 'numpy.int', 'np.int', (['(1.5 * mblock)'], {}), '(1.5 * mblock)\n', (473, 487), True, 'import numpy as np\n'), ((2563, 2581), 'numpy.mean', 'np.mean', (['mode10x10'], {}), '(mode10x10)\n', (2570, 2581), True, 'import numpy as np\n'), ((3671, 3699), 'numpy.int', 'np.int', (['(w - blocksizecol + 1)'], {}), '(w - blocksizecol + 1)\n', (3677, 3699), True, 'import numpy as np\n'), ((3697, 3717), 'numpy.int', 'np.int', (['blocksizecol'], {}), '(blocksizecol)\n', (3703, 3717), True, 'import numpy as np\n'), ((3991, 4030), 'numpy.hstack', 'np.hstack', (['(mscn_features, pp_features)'], {}), '((mscn_features, pp_features))\n', (4000, 4030), True, 'import numpy 
as np\n'), ((8031, 8063), 'numpy.int', 'np.int', (['(frames.shape[1] / mblock)'], {}), '(frames.shape[1] / mblock)\n', (8037, 8063), True, 'import numpy as np\n'), ((8063, 8095), 'numpy.int', 'np.int', (['(frames.shape[2] / mblock)'], {}), '(frames.shape[2] / mblock)\n', (8069, 8095), True, 'import numpy as np\n'), ((9060, 9073), 'numpy.mean', 'np.mean', (['temp'], {}), '(temp)\n', (9067, 9073), True, 'import numpy as np\n'), ((9094, 9114), 'numpy.var', 'np.var', (['temp'], {'ddof': '(1)'}), '(temp, ddof=1)\n', (9100, 9114), True, 'import numpy as np\n'), ((12565, 12591), 'numpy.log', 'np.log', (['(1 + dt_dc_measure1)'], {}), '(1 + dt_dc_measure1)\n', (12571, 12591), True, 'import numpy as np\n'), ((12597, 12626), 'numpy.log', 'np.log', (['(1 + spectral_features)'], {}), '(1 + spectral_features)\n', (12603, 12626), True, 'import numpy as np\n'), ((12632, 12661), 'numpy.log', 'np.log', (['(1 + temporal_features)'], {}), '(1 + temporal_features)\n', (12638, 12661), True, 'import numpy as np\n'), ((1590, 1678), 'numpy.array', 'np.array', (['[[upper_left[y, x], off_diag[y, x]], [off_diag[y, x], lower_right[y, x]]]'], {}), '([[upper_left[y, x], off_diag[y, x]], [off_diag[y, x], lower_right[\n y, x]]])\n', (1598, 1678), True, 'import numpy as np\n'), ((1728, 1746), 'numpy.linalg.eig', 'np.linalg.eig', (['mat'], {}), '(mat)\n', (1741, 1746), True, 'import numpy as np\n'), ((5356, 5380), 'numpy.dot', 'np.dot', (['xd', 'invcov_param'], {}), '(xd, invcov_param)\n', (5362, 5380), True, 'import numpy as np\n'), ((9142, 9167), 'numpy.abs', 'np.abs', (['(temp - mean_gauss)'], {}), '(temp - mean_gauss)\n', (9148, 9167), True, 'import numpy as np\n'), ((9776, 9805), 'numpy.int', 'np.int', (['((mblock ** 2 - 1) / 3)'], {}), '((mblock ** 2 - 1) / 3)\n', (9782, 9805), True, 'import numpy as np\n'), ((9835, 9864), 'numpy.int', 'np.int', (['((mblock ** 2 - 1) / 3)'], {}), '((mblock ** 2 - 1) / 3)\n', (9841, 9864), True, 'import numpy as np\n'), ((9922, 9955), 'numpy.int', 'np.int', 
(['(2 * (mblock ** 2 - 1) / 3)'], {}), '(2 * (mblock ** 2 - 1) / 3)\n', (9928, 9955), True, 'import numpy as np\n'), ((9863, 9892), 'numpy.int', 'np.int', (['((mblock ** 2 - 1) / 3)'], {}), '((mblock ** 2 - 1) / 3)\n', (9869, 9892), True, 'import numpy as np\n')] |
import numpy as np
from numpy.random import binomial, multivariate_normal, uniform
from sklearn.model_selection import train_test_split
from ylearn.utils import to_df
TRAIN_SIZE = 1000       # default number of training samples
TEST_SIZE = 200         # default number of test samples
ADJUSTMENT_COUNT = 5    # default number of adjustment variables (w columns)
COVARIATE_COUNT = 3     # default number of covariate variables (v columns)
def generate_variates(n, d):
    """Draw n samples from a d-dimensional standard normal distribution."""
    mean = np.zeros(d)
    cov = np.diag(np.ones(d))
    return multivariate_normal(mean, cov, n)
def filter_columns(df, prefix):
    """Return the names of df's columns that start with the given prefix."""
    return [c for c in df.columns.tolist() if c.startswith(prefix)]
def generate_data(n_train, n_test, d_adjustment, d_covariate, fn_treatment, fn_outcome):
    """Generate a synthetic dataset from treatment and outcome functions.

    Parameters
    ----------
    n_train (int): train data size
    n_test (int): test data size (None skips the train/test split)
    d_adjustment (int): number of adjustment variables (w); 0/None disables them
    d_covariate (int): number of covariates (v); 0/None disables them
    fn_treatment (func<w> -> x): maps one row of w (or v) to a treatment vector
    fn_outcome (func<w, x> -> y): maps one row of w (or v) plus its treatment
        to an outcome vector

    Returns
    -------
    tuple: (data, test_data, outcome, treatment, adjustment, covariate)
    """
    assert d_adjustment is not None or d_covariate is not None
    total = n_train + n_test
    W = generate_variates(total, d_adjustment) if d_adjustment else None
    V = generate_variates(total, d_covariate) if d_covariate else None
    # treatment is generated row-by-row from w when present, otherwise from v
    fn_x = np.vectorize(fn_treatment, signature='(n)->(m)')
    X = fn_x(W) if W is not None else fn_x(V)
    # outcome follows the same convention
    fn_y = np.vectorize(fn_outcome, signature='(n),(m)->(k)')
    Y = fn_y(W, X) if W is not None else fn_y(V, X)
    data = to_df(w=W, x=X, y=Y, v=V)
    outcome = filter_columns(data, 'y')
    treatment = filter_columns(data, 'x')
    adjustment = filter_columns(data, 'w')
    # an empty covariate list is normalized to None
    covariate = filter_columns(data, 'v') or None
    if n_test is not None:
        data, test_data = train_test_split(data, test_size=n_test)
    else:
        test_data = None
    return data, test_data, outcome, treatment, adjustment, covariate
def binary_TE(w):
    """Binary treatment effect: 8 when w[1] exceeds 0.1, otherwise 0."""
    if w[1] > 0.1:
        return 8
    return 0
def multiclass_TE(w, wi=1):
    """Map w[wi] to one of five classes (0-4) via fixed cut points.

    The cut points are -1, -0.15, 0.15 and 1; values below -1 map to 0
    and values above 1 map to 4.
    """
    cut_points = [-1.0, -0.15, 0.15, 1.0]
    return np.searchsorted(cut_points, w[wi])
def generate_data_x1b_y1(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                         d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with a single binary treatment and a scalar outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        # higher treatment propensity inside the band -0.5 < w[2] < 0.5
        p = 0.8 if -0.5 < w[2] < 0.5 else 0.2
        return np.random.binomial(1, p, 1)

    def to_outcome(w, x):
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        return baseline + binary_TE(w) * x

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome)
def generate_data_x1b_y1_w5v0():
    """x1b_y1 variant: 5 adjustment variables, no covariates."""
    return generate_data_x1b_y1(d_covariate=0, d_adjustment=5)
def generate_data_x1b_y1_w0v5():
    """x1b_y1 variant: no adjustment variables, 5 covariates."""
    return generate_data_x1b_y1(d_covariate=5, d_adjustment=0)
def generate_data_x1b_y2(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                         d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with a single binary treatment and a 2-dimensional outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        p = 0.8 if -0.5 < w[2] < 0.5 else 0.2
        return np.random.binomial(1, p, 1)

    def to_outcome(w, x):
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        # adding w[:2] broadcasts the outcome to two components
        return baseline + binary_TE(w) * x + w[:2]

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome, )
def generate_data_x1b_y2_w5v0():
    """x1b_y2 variant: 5 adjustment variables, no covariates."""
    return generate_data_x1b_y2(d_covariate=0, d_adjustment=5)
def generate_data_x1b_y2_w0v5():
    """x1b_y2 variant: no adjustment variables, 5 covariates."""
    return generate_data_x1b_y2(d_covariate=5, d_adjustment=0)
def generate_data_x2b_y1(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                         d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with two binary treatments and a scalar outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        p = 0.8 if -0.5 < w[2] < 0.5 else 0.2
        return np.random.binomial(1, p, 2)

    def to_outcome(w, x):
        effect = 8 if w[1] > 0.1 else 0
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        # the effect is applied to the mean of the two treatments
        return np.array([baseline + effect * x.mean()])

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome)
def generate_data_x2b_y1_w5v0():
    """x2b_y1 variant: 5 adjustment variables, no covariates."""
    return generate_data_x2b_y1(d_covariate=0, d_adjustment=5)
def generate_data_x2b_y1_w0v5():
    """x2b_y1 variant: no adjustment variables, 5 covariates."""
    return generate_data_x2b_y1(d_covariate=5, d_adjustment=0)
def generate_data_x2b_y2(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                         d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with two binary treatments and a 2-dimensional outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        p = 0.8 if -0.5 < w[2] < 0.5 else 0.2
        return np.random.binomial(1, p, 2)

    def to_outcome(w, x):
        # each treatment has its own effect gated on a different variable
        effects = np.array([8 if w[0] > 0.0 else 0,
                            8 if w[1] > 0.1 else 0, ])
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        return baseline + effects * x

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome)
def generate_data_x2b_y2_w5v0():
    """x2b_y2 variant: 5 adjustment variables, no covariates."""
    return generate_data_x2b_y2(d_covariate=0, d_adjustment=5)
def generate_data_x2b_y2_w0v5():
    """x2b_y2 variant: no adjustment variables, 5 covariates."""
    return generate_data_x2b_y2(d_covariate=5, d_adjustment=0)
def generate_data_x1m_y1(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                         d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with a single multiclass treatment and a scalar outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        # multiclass treatment derived deterministically from the third variable
        return np.array([multiclass_TE(w, wi=2)])

    def to_outcome(w, x):
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        return baseline + multiclass_TE(w) * x

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome)
def generate_data_x1m_y1_w5v0():
    """x1m_y1 variant: 5 adjustment variables, no covariates."""
    return generate_data_x1m_y1(d_covariate=0, d_adjustment=5)
def generate_data_x1m_y1_w0v5():
    """x1m_y1 variant: no adjustment variables, 5 covariates."""
    return generate_data_x1m_y1(d_covariate=5, d_adjustment=0)
def generate_data_x2mb_y1(train_size=TRAIN_SIZE, test_size=TEST_SIZE,
                          d_adjustment=ADJUSTMENT_COUNT, d_covariate=COVARIATE_COUNT):
    """Dataset with one multiclass plus one binary treatment, scalar outcome."""
    dims = d_adjustment if d_adjustment else d_covariate
    beta = uniform(-3, 3, dims)

    def to_treatment(w):
        p = 0.8 if -0.5 < w[3] < 0.5 else 0.2
        binary_part = np.random.binomial(1, p, 1)[0]
        return np.array([multiclass_TE(w, wi=2), binary_part])

    def to_outcome(w, x):
        baseline = np.dot(w, beta) + np.random.normal(0, 1)
        # only the multiclass treatment (x[:1]) drives the effect
        return baseline + multiclass_TE(w) * x[:1]

    return generate_data(train_size, test_size, d_adjustment, d_covariate,
                         fn_treatment=to_treatment, fn_outcome=to_outcome)
def generate_data_x2mb_y1_w5v0():
    """x2mb_y1 variant: 5 adjustment variables, no covariates."""
    return generate_data_x2mb_y1(d_covariate=0, d_adjustment=5)
def generate_data_x2mb_y1_w0v5():
    """x2mb_y1 variant: no adjustment variables, 5 covariates."""
    return generate_data_x2mb_y1(d_covariate=5, d_adjustment=0)
| [
"numpy.random.uniform",
"numpy.vectorize",
"numpy.random.binomial",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.searchsorted",
"ylearn.utils.to_df",
"numpy.ones",
"numpy.array",
"numpy.random.normal",
"numpy.dot"
] | [((1350, 1398), 'numpy.vectorize', 'np.vectorize', (['fn_treatment'], {'signature': '"""(n)->(m)"""'}), "(fn_treatment, signature='(n)->(m)')\n", (1362, 1398), True, 'import numpy as np\n'), ((1481, 1531), 'numpy.vectorize', 'np.vectorize', (['fn_outcome'], {'signature': '"""(n),(m)->(k)"""'}), "(fn_outcome, signature='(n),(m)->(k)')\n", (1493, 1531), True, 'import numpy as np\n'), ((1604, 1629), 'ylearn.utils.to_df', 'to_df', ([], {'w': 'W', 'x': 'X', 'y': 'Y', 'v': 'V'}), '(w=W, x=X, y=Y, v=V)\n', (1609, 1629), False, 'from ylearn.utils import to_df\n'), ((2540, 2572), 'numpy.searchsorted', 'np.searchsorted', (['boundary', 'w[wi]'], {}), '(boundary, w[wi])\n', (2555, 2572), True, 'import numpy as np\n'), ((2741, 2802), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else d_covariate)\n', (2748, 2802), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((3627, 3688), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else d_covariate)\n', (3634, 3688), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((4523, 4584), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else d_covariate)\n', (4530, 4584), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((5438, 5499), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else d_covariate)\n', (5445, 5499), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((6409, 6470), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else 
d_covariate)\n', (6416, 6470), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((7357, 7418), 'numpy.random.uniform', 'uniform', (['(-3)', '(3)', '(d_adjustment if d_adjustment else d_covariate)'], {}), '(-3, 3, d_adjustment if d_adjustment else d_covariate)\n', (7364, 7418), False, 'from numpy.random import binomial, multivariate_normal, uniform\n'), ((306, 317), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (314, 317), True, 'import numpy as np\n'), ((2260, 2300), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': 'n_test'}), '(data, test_size=n_test)\n', (2276, 2300), False, 'from sklearn.model_selection import train_test_split\n'), ((2899, 2935), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'propensity', '(1)'], {}), '(1, propensity, 1)\n', (2917, 2935), True, 'import numpy as np\n'), ((3785, 3821), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'propensity', '(1)'], {}), '(1, propensity, 1)\n', (3803, 3821), True, 'import numpy as np\n'), ((4681, 4717), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'propensity', '(2)'], {}), '(1, propensity, 2)\n', (4699, 4717), True, 'import numpy as np\n'), ((4909, 4922), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (4917, 4922), True, 'import numpy as np\n'), ((5596, 5632), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'propensity', '(2)'], {}), '(1, propensity, 2)\n', (5614, 5632), True, 'import numpy as np\n'), ((5687, 5745), 'numpy.array', 'np.array', (['[8 if w[0] > 0.0 else 0, 8 if w[1] > 0.1 else 0]'], {}), '([8 if w[0] > 0.0 else 0, 8 if w[1] > 0.1 else 0])\n', (5695, 5745), True, 'import numpy as np\n'), ((327, 337), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (334, 337), True, 'import numpy as np\n'), ((3016, 3031), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (3022, 3031), True, 'import numpy as np\n'), ((3034, 3056), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), 
'(0, 1)\n', (3050, 3056), True, 'import numpy as np\n'), ((3902, 3917), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (3908, 3917), True, 'import numpy as np\n'), ((3920, 3942), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (3936, 3942), True, 'import numpy as np\n'), ((4808, 4823), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (4814, 4823), True, 'import numpy as np\n'), ((4826, 4848), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (4842, 4848), True, 'import numpy as np\n'), ((5798, 5813), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (5804, 5813), True, 'import numpy as np\n'), ((5816, 5838), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (5832, 5838), True, 'import numpy as np\n'), ((6744, 6759), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (6750, 6759), True, 'import numpy as np\n'), ((6762, 6784), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (6778, 6784), True, 'import numpy as np\n'), ((7727, 7742), 'numpy.dot', 'np.dot', (['w', 'beta'], {}), '(w, beta)\n', (7733, 7742), True, 'import numpy as np\n'), ((7745, 7767), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7761, 7767), True, 'import numpy as np\n'), ((7574, 7610), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'propensity', '(1)'], {}), '(1, propensity, 1)\n', (7592, 7610), True, 'import numpy as np\n')] |
"""
Molecule class designed for RESP Optimization
"""
import os, sys, copy
import numpy as np
import re
from collections import OrderedDict
from warnings import warn
try:
import openeye.oechem as oechem
except ImportError:
warn(' The Openeye module cannot be imported. ( Please provide equivGoups and listofpolar manually.)')
from respyte.molecule import *
from respyte.readinp_resp import Input
from pathlib import Path
from respyte.readmol import *
bohr2Ang = 0.52918825 # change unit from bohr to angstrom
class Molecule_respyte:
    """Base molecule container for RESP charge optimization.

    Collects, per molecule, the conformer coordinates (converted to Bohr),
    ESP/EF grid data read from .espf files, and the per-atom bookkeeping
    (atom ids, residue names/numbers, polar/buried atom lists and net-charge
    constraints) consumed by the RESP optimizer.
    """
    def __init__(self, gridxyzs=None, espvals=None, efvals=None, prnlev=0):
        # NOTE: the defaults were mutable [] literals; changed to None to avoid
        # the shared-mutable-default pitfall. Caller-visible behavior unchanged.
        # store molecule objects first
        self.mols = []
        self.inp = None
        self.xyzs = []
        self.nmols = []
        self.elems = []
        self.atomids = []
        self.atomidinfo = {}
        self.resnames = []
        self.atomnames = []
        self.resnumbers = []
        self.listofpolars = []
        self.listofburieds = []
        self.listofchargeinfo = []
        self.gridxyzs = []
        for gridxyz in (gridxyzs if gridxyzs is not None else []):
            self.addGridPoints(gridxyz)
        self.espvals = []
        for espval in (espvals if espvals is not None else []):
            self.addEspValues(espval)
        self.efvals = []
        for efval in (efvals if efvals is not None else []):
            self.addEfValues(efval)
        self.prnlev = prnlev

    def addXyzCoordinates(self, xyz):
        """Append one conformer's coordinates; expects an (N_atoms, 3) numpy array."""
        if not isinstance(xyz, np.ndarray) or len(xyz.shape) != 2 or xyz.shape[1] != 3:
            print("Problem with input:", xyz)
            raise RuntimeError('Please provide each xyz coordinate set as a numpy array with shape (N_atoms, 3)')
        self.xyzs.append(xyz.copy())
        if self.prnlev >= 1:
            print("Added xyz coordinates with shape %s, %i sets total" % (str(xyz.shape), len(self.xyzs)))

    def addGridPoints(self, gridxyz):
        """Append one conformer's ESP grid points; expects an (N_points, 3) numpy array."""
        if not isinstance(gridxyz, np.ndarray) or len(gridxyz.shape) != 2 or gridxyz.shape[1] != 3:
            # BUGFIX: a debug print of gridxyz.shape[1] used to run here and could
            # itself raise (AttributeError/IndexError) before the intended error.
            print("Problem with input:", gridxyz)
            raise RuntimeError('Please provide each set of grid points as a numpy array with shape (N_points, 3)')
        # LPW: Assume number of grid points can be different for multiple structures.
        self.gridxyzs.append(gridxyz.copy())
        if self.prnlev >= 1:
            print("Added grid points with shape %s, %i sets total" % (str(gridxyz.shape), len(self.gridxyzs)))

    def addEspValues(self, espval):
        """Append one conformer's ESP values; expects a 1-D numpy array."""
        if not isinstance(espval, np.ndarray) or len(espval.shape) != 1:
            print("Problem with input:", espval)
            raise RuntimeError('Please provide each set of ESP values as a 1D numpy array')
        self.espvals.append(espval.copy())
        if self.prnlev >= 1:
            print("Added ESP values with shape %s, %i sets total" % (str(espval.shape), len(self.espvals)))

    def addEfValues(self, efval):
        """Append one conformer's electric-field values; expects an (N_points, 3) numpy array."""
        if not isinstance(efval, np.ndarray) or len(efval.shape) != 2 or efval.shape[1] != 3:
            print("Problem with input:", efval)
            raise RuntimeError('Please provide each set of EF values as a nx3 numpy array')
        self.efvals.append(efval.copy())
        if self.prnlev >= 1:
            print("Added EF values with shape %s, %i sets total" % (str(efval.shape), len(self.efvals)))

    def addInp(self, inpcls):
        """Attach the parsed respyte input object (charge constraints, symmetry flag, ...)."""
        assert isinstance(inpcls, Input)
        self.inp = inpcls

    def changesymmetry(self, symmetry=False):
        """Override the symmetry flag of the attached input object."""
        print(self.inp.symmetry)
        self.inp.symmetry = symmetry
        print('After:', self.inp.symmetry)

    def removeSingleElemList(self, lst):
        """Remove, in place, sublists with fewer than two elements and return lst."""
        # iterate indices backwards so deletions do not shift unseen entries
        for idx in reversed(range(len(lst))):
            if len(lst[idx]) < 2:
                del lst[idx]
        return lst

    def set_listofpolar(self, listofpolar):
        """Manually assign polar atoms"""
        assert isinstance(listofpolar, (list,))
        self.listofpolars.append(listofpolar)

    def convert_charge_equal(self, charge_equal, atomidinfo):
        """
        Convert charge_equal, which lists sets of atoms forced to share a charge,
        into a list of groups of equivalent atom IDs.

        NOTE: the atomidinfo parameter is kept for interface compatibility but,
        as before, the lookup uses self.atomidinfo.
        """
        new_charge_equals = []
        for atmnms, resnms in charge_equal:
            new_charge_equal = []  # store atom ids set to be equivalent.
            # Case 1, when single or multiple atomnames are set to be equal in any residues.
            # BUGFIX: was compared with 'is', i.e. object identity, which is
            # unreliable for strings parsed from the input file.
            if resnms == '*':
                for atmnm in atmnms:
                    for atmid, info in self.atomidinfo.items():
                        if any(x['atomname'] == atmnm for x in info):
                            new_charge_equal.append(atmid)
            # Case 2, when single or multiple atomnames in specific residues are set to be equivalent.
            elif len(atmnms) > 1 or len(resnms) > 1:
                for i in atmnms:
                    for j in resnms:
                        val = {'resname': j, 'atomname': i}
                        for atmid, info in self.atomidinfo.items():
                            if val in info:
                                new_charge_equal.append(atmid)
            # BUGFIX: the deduplicated group used to be collected only inside the
            # Case-2 branch, silently dropping groups specified with resname '*'.
            new_charge_equal = list(set(new_charge_equal))
            new_charge_equals.append(new_charge_equal)
        # drop empty/singleton groups (no equivalence to enforce)
        new_charge_equals = self.removeSingleElemList(new_charge_equals)
        new_charge_equals.sort()
        return new_charge_equals

    def gen_chargeinfo(self, resChargeDict, atomid, atomidinfo, resnumber):
        """
        Build per-residue net-charge constraints.
        Output should be like [[[indices], resname, netcharge], ...]
        """
        idxof1statm = [0]
        resname = atomidinfo[atomid[0]][0]['resname'] # I think this is also problematic..
        resnmof1statm = [resname]
        check = [resnumber[0]]
        for idx, resn in enumerate(resnumber):
            if resn != check[-1]:
                check.append(resn)
                idxof1statm.append(idx)
                # NOTE(review): 'atomid[idx+2]' looks like an off-by-two; the first
                # atom of the new residue is atomid[idx]. Left unchanged -- confirm
                # against multi-residue inputs before fixing.
                resnmof1statm.append(atomidinfo[atomid[idx+2]][0]['resname'])
        chargeinfo = []
        idxof1statm.append(len(atomid))
        terminalidx = []
        for idx, resnm in enumerate(resnmof1statm):
            # cap residues (ACE/NME/NHE) are lumped into one 'terminal' group below
            if resnm == 'ACE' or resnm == 'NME' or resnm == 'NHE':
                for i in range(idxof1statm[idx], idxof1statm[idx+1]):
                    terminalidx.append(i)
            else:
                charge = resChargeDict[resnm]
                lstofidx = list(range(idxof1statm[idx], idxof1statm[idx+1]))
                chargeinf = [lstofidx, resnm, charge]
                chargeinfo.append(chargeinf)
        if len(terminalidx) != 0:
            terminalchginf = [terminalidx, 'terminal', 0]
            chargeinfo.append(terminalchginf) # Wonder if it makes sense., ......
        return chargeinfo

    def getidxof1statm(self, listofresid, listofresname):
        """Return the index of the first atom of each residue and the matching residue names."""
        idxof1statm = [0]
        resnameof1statm = [listofresname[0]]
        check = [listofresid[0]]
        for idx, resid in enumerate(listofresid):
            if resid != check[-1]:
                check.append(resid)
                idxof1statm.append(idx)
                resnameof1statm.append(listofresname[idx])
        return idxof1statm, resnameof1statm

    def addCoordFiles(self, *coordFiles):
        """Register conformer coordinate files (pdb/mol2/xyz) for one molecule and
        build the per-atom bookkeeping (elements, atom ids, residue info, charge
        constraints). With no file given, empty placeholder entries are appended.
        """
        if len(coordFiles) == 0:
            print('No conformer is given? ')
            self.mols.append(Molecule())
            self.atomids.append([])
            self.elems.append([])
            self.resnames.append([])
            self.atomnames.append([])
            self.resnumbers.append([])
            self.listofpolars.append([])
            xyzs = []
            self.nmols.append(len(xyzs))
            indices = []
            charge = 0
            number = len(self.elems)+1
            resname = 'mol%d' % (number)
            chargeinfo = [[indices, resname, charge]]
            self.listofchargeinfo.append(chargeinfo)
        else:
            xyzs = []
            firstconf = True
            for coordFile in coordFiles:
                fbmol = Molecule(coordFile)
                self.mols.append(fbmol)
                xyz = fbmol.xyzs[0]
                xyz = np.array(xyz)/bohr2Ang  # Angstrom -> Bohr
                xyzs.append(xyz)
                # per-molecule bookkeeping is derived from the first conformer only
                if firstconf:
                    # BUGFIX: this used to read 'if firstconf is True:' followed by
                    # 'firstconf is False' -- a no-op comparison instead of an
                    # assignment -- so the bookkeeping below re-ran for every
                    # conformer and kept growing atomidinfo with duplicate entries.
                    firstconf = False
                    atomicNum = []
                    elem = fbmol.elem
                    if 'resid' not in list(fbmol.Data.keys()):
                        print(' Are you using xyz file? will assing resid, resname,atomname for you!')
                        resnumber = [1 for i in elem]
                        resname = list('MOL' for i in elem)
                        atomname = ['%s%d' % (i, idx+1) for idx, i in enumerate(elem)]
                    else:
                        resnumber = fbmol.resid
                        resname = fbmol.resname
                        atomname = fbmol.atomname
                    for elm in elem:
                        atomicNum.append(list(PeriodicTable.keys()).index(elm) + 1)
                    atomid = []
                    if len(self.atomidinfo) == 0:
                        atmid = 1
                    else:
                        # BUGFIX: start one past the current maximum id; previously
                        # the largest existing id was reused for the first new atom
                        # (the sibling Molecule_OEMol/Molecule_RDMol versions
                        # already used '+ 1').
                        atmid = max(list(self.atomidinfo.keys())) + 1
                    # if resname is 'MOL', assign resname to be moli
                    if resname == list('MOL' for i in elem):
                        fnm = Path(coordFile).stem
                        newresname = fnm.split('_')[0]
                        print(' Is this coord file generated from esp_generator? The residue name is MOL.\n')
                        print(' It reassigns the residue name to %s not to confuse with other molecules while forcing symmetry.' % newresname)
                        resname = list(newresname for i in elem)
                        num = 1
                        for res, atom in zip(resname, elem):
                            val = {'resname': res, 'atomname': '%s%d' % (atom, num)}
                            atomid.append(atmid)
                            self.atomidinfo[atmid] = [val]
                            atmid += 1
                            num += 1
                    else:
                        for res, atom in zip(resname, atomname):
                            val = {'resname': res, 'atomname': atom}
                            if len(self.atomidinfo) == 0:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                            elif any(val in v for v in list(self.atomidinfo.values())):
                                # atom already known: reuse its existing id
                                for k, v in self.atomidinfo.items():
                                    if val in v:
                                        atomid.append(int(k))
                            else:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                    if self.inp is not None:
                        equiv_ids = self.convert_charge_equal(self.inp.charge_equal, self.atomidinfo)
                    else:
                        equiv_ids = []
                    # And modify atomid and atomidinfo so that equivalent atoms can have the same id.
                    newatomid = atomid.copy()
                    newatomidinfo = self.atomidinfo.copy()
                    for equiv_id in equiv_ids:
                        newid = equiv_id[0]
                        for i in equiv_id[1:]:
                            newatomid = [newid if x == i else x for x in newatomid]
                            for j in self.atomidinfo[i]:
                                newatomidinfo[newid].append(j)
                            del newatomidinfo[i]
                    self.atomids.append(newatomid)
                    self.atomidinfo = newatomidinfo
                    self.elems.append(atomicNum)
                    self.resnames.append(resname)
                    self.atomnames.append(atomname)
                    self.resnumbers.append(resnumber)
            self.nmols.append(len(xyzs))
            for xyz in xyzs:
                self.xyzs.append(xyz)
            if self.inp is not None:
                chargeinfo = self.gen_chargeinfo(self.inp.resChargeDict, newatomid, self.atomidinfo, resnumber)
            else:
                indices = list(range(len(elem)))
                charge = None
                number = len(self.elems)+1
                resname = 'mol%d' % (number)
                chargeinfo = [[indices, resname, charge]]
            self.listofchargeinfo.append(chargeinfo)
            # For now, when cheminformatics is not used, ignore polar atoms
            listofpolar = []
            self.listofpolars.append(listofpolar)
            listofburied = []
            self.listofburieds.append(listofburied)

    def addEspf(self, *espfFiles, selectedPts):
        """Read .espf files: 4-number lines are grid xyz (converted to Bohr) plus
        the ESP value; 3-number lines are EF components. selectedPts[idx] is
        either None (keep every point of file idx) or an iterable of point
        indices to keep (each point occupies two consecutive lines).
        """
        for idx, espfFile in enumerate(espfFiles):
            espval = []
            gridxyz = []
            efval = []
            with open(espfFile, 'r') as espff:
                # use a set for O(1) membership tests in the loop below
                if selectedPts[idx] is None:
                    selectedLines = set(range(len(espff.readlines())))
                else:
                    selectedLines = set()
                    for i in selectedPts[idx]:
                        selectedLines.add(int(i*2))
                        selectedLines.add(int(i*2+1))
            with open(espfFile, 'r') as espff:
                for i, line in enumerate(espff):
                    if i in selectedLines:
                        fields = line.strip().split()
                        numbers = [float(field) for field in fields]
                        if (len(numbers) == 4):
                            xyz = [x/bohr2Ang for x in numbers[0:3]]
                            gridxyz.append(xyz)
                            espval.append(numbers[3])
                        elif (len(numbers) == 3):
                            efval.append(numbers[0:3])
                        else:
                            print('Error ReadEspfFile: encountered line not having 3 or 4 numbers')
                            return False
            if self.prnlev >= 1:
                print()
                print('ReadEspfFile: % d ESP and % d EF points read in from file %s' % (len(espval), len(efval), espfFile))
                print()
            gridxyz = np.array(gridxyz)
            espval = np.array(espval)
            efval = np.array(efval)
            self.addGridPoints(gridxyz)
            self.addEspValues(espval)
            self.addEfValues(efval)
class Molecule_OEMol(Molecule_respyte):
    """Molecule container that uses the OpenEye toolkit to perceive atom
    symmetry and to classify polar and buried atoms when coordinate files
    are registered."""
    def addCoordFiles(self, *coordFiles):
        """Register conformer coordinate files for one molecule and derive the
        per-atom bookkeeping (atom ids, symmetry-equivalent id groups, polar
        and buried atom lists) with OpenEye. With no file given, empty
        placeholder entries are appended instead.
        """
        if len(coordFiles) == 0:
            print('Skip this molecule? Empty molecule object will be created since no conformers is provided.')
            self.mols.append(oechem.OEMol())
            self.atomids.append([])
            self.elems.append([])
            self.resnames.append([])
            self.atomnames.append([])
            self.resnumbers.append([])
            self.listofpolars.append([])
            self.listofburieds.append([])
            xyzs = []
            self.nmols.append(len(xyzs))
            indices = []
            charge = 0
            number = len(self.elems)
            resname = 'mol%d' %(number)
            chargeinfo = [[indices, resname, charge]]
            self.listofchargeinfo.append(chargeinfo)
        else:
            xyzs = []
            listofoemol = []
            firstconf = True
            for coordFile in coordFiles:
                fbmol = Molecule(coordFile)
                xyz = fbmol.xyzs[0]
                # coordinates are converted from Angstrom to Bohr
                xyz = np.array(xyz)/bohr2Ang
                xyzs.append(xyz)
                # Making oemol using openeye toolkit : for atomID(?), equivGroup and listofpolar
                ifs = oechem.oemolistream(coordFile)
                oemol = oechem.OEGraphMol()
                oechem.OEReadMolecule(ifs, oemol)
                listofoemol.append(oemol)
                oechem.OEPerceiveSymmetry(oemol)
                # per-molecule bookkeeping is derived from the first conformer only
                if firstconf == True:
                    firstconf = False
                    atomicNum = []
                    elem = fbmol.elem
                    if 'resid' not in list(fbmol.Data.keys()):
                        print(' Are you using xyz file? will assing resid, resname,atomname for you!')
                        resnumber = [1 for i in elem]
                        resname = list('MOL' for i in elem)
                        atomname = ['%s%d' % (i,idx+1)for idx, i in enumerate(elem)]
                    else:
                        resnumber = fbmol.resid
                        resname = fbmol.resname
                        atomname = fbmol.atomname
                    for elm in elem:
                        atomicNum.append(list(PeriodicTable.keys()).index(elm) + 1 )
                    atomid = []
                    if len(self.atomidinfo) == 0:
                        atmid = 1
                    else:
                        atmid = max(list(self.atomidinfo.keys())) +1
                    # if resname is 'MOL', assign resname to be moli
                    if resname == list('MOL' for i in elem):
                        fnm = Path(coordFile).stem
                        newresname = fnm.split('_')[0]
                        print(' Is this file generated from esp_generator? The residue name is MOL, which is a default residue name for small organic molecule.')
                        print(' It reassigns the name to %s not to confuse with other molecules while forcing symmetry.' % newresname)
                        resname = list(newresname for i in elem)
                        num = 1
                        for res, atom in zip(resname, elem):
                            val = {'resname': res, 'atomname':'%s%d' %(atom, num) }
                            atomid.append(atmid)
                            self.atomidinfo[atmid] = [val]
                            atmid += 1
                            num += 1
                    else:
                        for res, atom in zip(resname, atomname):
                            val = {'resname': res, 'atomname': atom}
                            if len(self.atomidinfo) == 0:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                            elif any(val in v for v in list(self.atomidinfo.values())):
                                # atom already known: reuse its existing id
                                for k, v in self.atomidinfo.items():
                                    if val in v:
                                        atomid.append(int(k))
                            else:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                    # Using openeye tool, make listofpolar,
                    symmetryClass = []
                    listofpolar = []
                    listofburied = []
                    oechem.OEAssignHybridization(oemol)
                    for atom in oemol.GetAtoms():
                        symmetryClass.append(atom.GetSymmetryClass())
                        # non-sp3 carbons are treated as polar; atoms with more
                        # than three bonds are treated as buried
                        if atom.IsCarbon() and int(atom.GetHyb()) != 3:
                            listofpolar.append(atom.GetIdx())
                        if len([bond for bond in atom.GetBonds()]) >3:
                            listofburied.append(atom.GetIdx())
                        # ispolar = False
                        # for bond in atom.GetBonds():
                        #     atom2 = bond.GetNbr(atom)
                        #     if bond.GetOrder() == 1 and ispolar == False:
                        #         continue
                        #     else:
                        #         ispolar = True
                        #         break
                        # if ispolar == True:
                        #     listofpolar.append(atom.GetIdx())
                    for atom in oemol.GetAtoms():
                        if atom.IsHydrogen():
                            for bond in atom.GetBonds():
                                atom2 = bond.GetNbr(atom)
                                if atom2.IsPolar():
                                    listofpolar.append(atom.GetIdx())
                                elif atom2.IsCarbon() and atom2.GetIdx() in listofpolar:
                                    listofpolar.append(atom.GetIdx())
                                if atom2.GetIdx() in listofburied:
                                    listofburied.append(atom.GetIdx()) # store hydrogens bonded to buried atoms
                        if atom.IsPolar():
                            listofpolar.append(atom.GetIdx())
                    listofpolar = sorted(set(listofpolar))
                    listofburied = sorted(set(listofburied))
                    # group atoms of identical symmetry class within each set of
                    # residues that share the same residue name
                    idxof1statm, resnameof1statm = self.getidxof1statm(resnumber, resname)
                    unique_resid = set(resnameof1statm)
                    sameresid = [[i for i, v in enumerate(resnameof1statm) if v == value] for value in unique_resid]
                    sameresid.sort()
                    #sameresid = self.removeSingleElemList(sameresid)
                    idxof1statm.append(len(resnumber))
                    equiv_ids = []
                    #print('symmetryClass', symmetryClass)
                    #print('sameresid', sameresid)
                    for equivresidgroup in sameresid:
                        resnum = equivresidgroup[0]
                        listofsym = symmetryClass[idxof1statm[resnum]: idxof1statm[resnum +1]]
                        #print(listofsym)
                        unique_sym = set(listofsym)
                        equiv_sym = [[i+idxof1statm[resnum] for i, v in enumerate(listofsym) if v == value] for value in unique_sym]
                        equiv_sym = self.removeSingleElemList(equiv_sym)
                        #print('equiv_sym', equiv_sym)
                        # change index to ID
                        equiv_ID = []
                        for lst in equiv_sym:
                            newlist = []
                            for item in lst:
                                newlist.append(atomid[item])
                            equiv_ID.append(newlist)
                        for i in equiv_ID:
                            i.sort()
                            equiv_ids.append(i) # weird:\
                    # drop groups whose members already share a single id
                    needtoremove = []
                    for idx, equiv_id in enumerate(equiv_ids):
                        if len(set(equiv_id)) == 1:
                            needtoremove.append(idx)
                    needtoremove.sort(reverse = True)
                    for i in needtoremove:
                        del equiv_ids[i]
                    if self.inp is not None:
                        new_charge_equals = self.convert_charge_equal(self.inp.charge_equal, self.atomidinfo)
                    else:
                        new_charge_equals = []
                    # combine symmetry-derived groups with user-specified ones
                    equiv_ids_comb = []
                    for i in equiv_ids:
                        equiv_ids_comb.append(i)
                    for i in new_charge_equals:
                        equiv_ids_comb.append(i)
                    for i in equiv_ids_comb:
                        i.sort()
                    equiv_ids_comb.sort()
            # merge equivalent atoms: all members of a group take the group's first id
            newatomid = atomid.copy()
            newatomidinfo = copy.deepcopy(self.atomidinfo)
            for equiv_id in equiv_ids_comb:
                newid = equiv_id[0]
                for i in equiv_id[1:]:
                    newatomid = [newid if x ==i else x for x in newatomid]
                    for j in self.atomidinfo[i]:
                        newatomidinfo[newid].append(j)
                    del newatomidinfo[i]
            # print('newatomidinfo', newatomidinfo)
            # print('oldatomidinfo', self.atomidinfo)
            # print('newatomid', newatomid)
            # print('oldatomid', atomid)
            # when symmetry is disabled in the input, keep the unmerged ids
            if self.inp is not None:
                print(self.inp.symmetry)
                if self.inp.symmetry == False:
                    self.atomids.append(atomid)
                    self.atomidinfo = self.atomidinfo
                else:
                    self.atomids.append(newatomid)
                    self.atomidinfo = newatomidinfo
            else:
                self.atomids.append(newatomid)
                self.atomidinfo = newatomidinfo
            self.elems.append(atomicNum)
            self.resnames.append(resname)
            self.atomnames.append(atomname)
            self.resnumbers.append(resnumber)
            self.listofpolars.append(listofpolar)
            self.listofburieds.append(listofburied)
            if self.inp is not None:
                chargeinfo = self.gen_chargeinfo(self.inp.resChargeDict, newatomid, self.atomidinfo, resnumber)
            else:
                indices = list(range(len(elem)))
                charge = None
                number = len(self.elems)+1
                resname = 'mol%d' %(number)
                chargeinfo = [[indices, resname, charge]]
            self.listofchargeinfo.append(chargeinfo)
            self.nmols.append(len(xyzs))
            for xyz in xyzs:
                self.xyzs.append(xyz)
            for oemol in listofoemol:
                self.mols.append(oemol)
class Molecule_RDMol(Molecule_respyte):
    """Molecule container that uses RDKit to perceive atom symmetry (CIP ranks)
    and to classify polar atoms when coordinate files are registered."""
    def addCoordFiles(self, *coordFiles):
        """Register conformer coordinate files for one molecule and derive the
        per-atom bookkeeping (atom ids, symmetry-equivalent id groups, polar
        atom list) with RDKit. With no file given, empty placeholder entries
        are appended instead.
        """
        #raise NotImplementedError('Will be implemented soon!')
        if len(coordFiles) == 0:
            print('Skip this molecule? Empty molecule object will be created since no conformers is provided.')
            self.mols.append(rdchem.Mol())
            self.atomids.append([])
            self.elems.append([])
            self.resnames.append([])
            self.atomnames.append([])
            self.resnumbers.append([])
            self.listofpolars.append([])
            xyzs = []
            self.nmols.append(len(xyzs))
            indices = []
            charge = 0
            number = len(self.elems)
            resname = 'mol%d' %(number)
            chargeinfo = [[indices, resname, charge]]
            self.listofchargeinfo.append(chargeinfo)
        else:
            xyzs = []
            listofrdmol = []
            firstconf = True
            for coordFile in coordFiles:
                fbmol = Molecule(coordFile)
                xyz = fbmol.xyzs[0]
                # coordinates are converted from Angstrom to Bohr
                xyz = np.array(xyz)/bohr2Ang
                xyzs.append(xyz)
                # Making rdmol using rdkit
                rdmol = ReadRdMolFromFile(coordFile)
                listofrdmol.append(rdmol)
                ##########################################################################
                ###     Below is the same with addCoordFiles in Molecule_OEMol        ###
                if firstconf == True:
                    firstconf = False
                    atomicNum = []
                    elem = fbmol.elem
                    if 'resid' not in list(fbmol.Data.keys()):
                        print(' Are you using xyz file? will assing resid, resname,atomname for you!')
                        resnumber = [1 for i in elem]
                        resname = list('MOL' for i in elem)
                        atomname = ['%s%d' % (i,idx+1)for idx, i in enumerate(elem)]
                    else:
                        resnumber = fbmol.resid
                        resname = fbmol.resname
                        atomname = fbmol.atomname
                    for elm in elem:
                        atomicNum.append(list(PeriodicTable.keys()).index(elm) + 1 )
                    atomid = []
                    if len(self.atomidinfo) == 0:
                        atmid = 1
                    else:
                        atmid = max(list(self.atomidinfo.keys())) +1
                    # if resname is 'MOL', assign resname to be moli
                    if resname == list('MOL' for i in elem):
                        fnm = Path(coordFile).stem
                        newresname = fnm.split('_')[0]
                        print(' Is this file generated from esp_generator? The residue name is MOL, which is a default residue name for small organic molecule.')
                        print(' It reassigns the name to %s not to confuse with other molecules while forcing symmetry.' % newresname)
                        resname = list(newresname for i in elem)
                        num = 1
                        for res, atom in zip(resname, elem):
                            val = {'resname': res, 'atomname':'%s%d' %(atom, num) }
                            atomid.append(atmid)
                            self.atomidinfo[atmid] = [val]
                            atmid += 1
                            num += 1
                    else:
                        for res, atom in zip(resname, atomname):
                            val = {'resname': res, 'atomname': atom}
                            if len(self.atomidinfo) == 0:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                            elif any(val in v for v in list(self.atomidinfo.values())):
                                # atom already known: reuse its existing id
                                for k, v in self.atomidinfo.items():
                                    if val in v:
                                        atomid.append(int(k))
                            else:
                                atomid.append(atmid)
                                self.atomidinfo[atmid] = [val]
                                atmid += 1
                    ###     Above is the same with addCoordFiles in Molecule_OEMol        ###
                ##########################################################################
                # Get symmetry class from rdkit
                rdchem.AssignStereochemistry(rdmol, cleanIt=True, force=True, flagPossibleStereoCenters=True)
                symmetryClass = []
                for atom in rdmol.GetAtoms():
                    symmetryClass.append(int(atom.GetProp('_CIPRank')))
                # Get a list of polar atoms from rdkit
                listofpolar = []
                for atom in rdmol.GetAtoms():
                    if (atom.GetSymbol() != 'C' and atom.GetSymbol() !='H') or atom.GetIsAromatic():
                        listofpolar.append(atom.GetIdx())
                        for bond in atom.GetBonds():
                            atom2 = bond.GetOtherAtom(atom)
                            if atom2.GetSymbol() == 'H':
                                listofpolar.append(atom2.GetIdx())
                            elif atom2.GetSymbol() == 'C' and str(bond.GetBondType()) != 'SINGLE':
                                listofpolar.append(atom2.GetIdx())
                listofpolar = sorted(set(listofpolar))
                ##########################################################################
                ###     Below is the same with addCoordFiles in Molecule_OEMol        ###
                # NOTE(review): unlike Molecule_OEMol, everything from here down runs
                # once per conformer (not once per molecule), so with several
                # conformers the per-molecule lists receive one entry per conformer.
                # Looks unintended -- confirm before changing.
                idxof1statm, resnameof1statm = self.getidxof1statm(resnumber, resname)
                unique_resid = set(resnameof1statm)
                sameresid = [[i for i, v in enumerate(resnameof1statm) if v == value] for value in unique_resid]
                sameresid.sort()
                #sameresid = self.removeSingleElemList(sameresid)
                idxof1statm.append(len(resnumber))
                equiv_ids = []
                #print('symmetryClass', symmetryClass)
                #print('sameresid', sameresid)
                for equivresidgroup in sameresid:
                    resnum = equivresidgroup[0]
                    listofsym = symmetryClass[idxof1statm[resnum]: idxof1statm[resnum +1]]
                    #print(listofsym)
                    unique_sym = set(listofsym)
                    equiv_sym = [[i+idxof1statm[resnum] for i, v in enumerate(listofsym) if v == value] for value in unique_sym]
                    equiv_sym = self.removeSingleElemList(equiv_sym)
                    #print('equiv_sym', equiv_sym)
                    # change index to ID
                    equiv_ID = []
                    for lst in equiv_sym:
                        newlist = []
                        for item in lst:
                            newlist.append(atomid[item])
                        equiv_ID.append(newlist)
                    for i in equiv_ID:
                        i.sort()
                        equiv_ids.append(i) # weird:\
                # drop groups whose members already share a single id
                needtoremove = []
                for idx, equiv_id in enumerate(equiv_ids):
                    if len(set(equiv_id)) == 1:
                        needtoremove.append(idx)
                needtoremove.sort(reverse = True)
                for i in needtoremove:
                    del equiv_ids[i]
                if self.inp is not None:
                    new_charge_equals = self.convert_charge_equal(self.inp.charge_equal, self.atomidinfo)
                else:
                    new_charge_equals = []
                # combine symmetry-derived groups with user-specified ones
                equiv_ids_comb = []
                for i in equiv_ids:
                    equiv_ids_comb.append(i)
                for i in new_charge_equals:
                    equiv_ids_comb.append(i)
                for i in equiv_ids_comb:
                    i.sort()
                equiv_ids_comb.sort()
                # merge equivalent atoms: all members of a group take the group's first id
                newatomid = atomid.copy()
                newatomidinfo = copy.deepcopy(self.atomidinfo)
                for equiv_id in equiv_ids_comb:
                    newid = equiv_id[0]
                    for i in equiv_id[1:]:
                        newatomid = [newid if x ==i else x for x in newatomid]
                        for j in self.atomidinfo[i]:
                            newatomidinfo[newid].append(j)
                        del newatomidinfo[i]
                # when symmetry is disabled in the input, keep the unmerged ids
                if self.inp is not None:
                    if self.inp.symmetry == False:
                        self.atomids.append(atomid)
                        self.atomidinfo = self.atomidinfo
                    else:
                        self.atomids.append(newatomid)
                        self.atomidinfo = newatomidinfo
                else:
                    self.atomids.append(newatomid)
                    self.atomidinfo = newatomidinfo
                self.elems.append(atomicNum)
                self.resnames.append(resname)
                self.atomnames.append(atomname)
                self.resnumbers.append(resnumber)
                self.listofpolars.append(listofpolar)
                if self.inp is not None:
                    chargeinfo = self.gen_chargeinfo(self.inp.resChargeDict, newatomid, self.atomidinfo, resnumber)
                else:
                    indices = list(range(len(elem)))
                    charge = None
                    number = len(self.elems)+1
                    resname = 'mol%d' %(number)
                    chargeinfo = [[indices, resname, charge]]
                self.listofchargeinfo.append(chargeinfo)
            self.nmols.append(len(xyzs))
            for xyz in xyzs:
                self.xyzs.append(xyz)
            for rdmol in listofrdmol:
                self.mols.append(rdmol)
        ###     Above is the same with addCoordFiles in Molecule_OEMol        ###
        ##########################################################################
def main():
    """Read the respyte input file, build the molecule object with the requested
    cheminformatics backend, register each molecule's conformers and print the
    polar-atom list of every molecule."""
    # cwd = current working directory in which input folder exists
    cwd = os.getcwd()
    # read respyte.yml
    inp = Input('%s/input/respyte.yml' % (cwd))
    # Create molecule object
    if inp.cheminformatics == 'openeye':
        molecule = Molecule_OEMol()
    elif inp.cheminformatics == 'rdkit':
        molecule = Molecule_RDMol()
    else:
        molecule = Molecule_respyte()
    molecule.addInp(inp)
    for idx, i in enumerate(inp.nmols):
        molN = 'mol%d' % (idx+1)
        wkd = '%s/input/molecules/%s' % (cwd, molN)
        coordfilepath = []
        espffilepath = []
        for j in range(i):
            confN = 'conf%d' % (j+1)
            path = wkd + '/%s' % (confN)
            pdbfile = path + '/%s_%s.pdb' % (molN, confN)
            mol2file = path + '/%s_%s.mol2' % (molN, confN)
            xyzfile = path + '/%s_%s.xyz' % (molN, confN)
            # take the first coordinate file found, preferring pdb > mol2 > xyz
            if os.path.isfile(pdbfile):
                coordfilepath.append(pdbfile)
            elif os.path.isfile(mol2file):
                coordfilepath.append(mol2file)
            elif os.path.isfile(xyzfile):
                coordfilepath.append(xyzfile)
                # BUGFIX: this branch used to print a typo'd message claiming the
                # folder contains no usable file even though the xyz file was
                # found and accepted right above.
                print('This folder does not contain a pdb or mol2 file; using the xyz file instead.')
            else:
                # BUGFIX: xyz files are accepted above, so the error mentions them too.
                raise RuntimeError(" Coordinate file should have pdb, mol2 or xyz file format! ")
            espfpath = path + '/%s_%s.espf' % (molN, confN)
            if not os.path.isfile(espfpath):
                raise RuntimeError('%s file doesnt exist!!! ' % espfpath)
            espffilepath.append(espfpath)
        molecule.addCoordFiles(*coordfilepath)
    for idx, i in enumerate(molecule.nmols):
        print('molecule %i' % (idx+1))
        print(molecule.listofpolars[idx])


if __name__ == '__main__':
    main()
| [
"copy.deepcopy",
"openeye.oechem.OEPerceiveSymmetry",
"os.getcwd",
"respyte.readinp_resp.Input",
"openeye.oechem.OEAssignHybridization",
"openeye.oechem.oemolistream",
"openeye.oechem.OEGraphMol",
"os.path.isfile",
"openeye.oechem.OEMol",
"numpy.array",
"pathlib.Path",
"openeye.oechem.OEReadMo... | [((37108, 37119), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37117, 37119), False, 'import os, sys, copy\n'), ((37153, 37188), 'respyte.readinp_resp.Input', 'Input', (["('%s/input/respyte.yml' % cwd)"], {}), "('%s/input/respyte.yml' % cwd)\n", (37158, 37188), False, 'from respyte.readinp_resp import Input\n'), ((233, 345), 'warnings.warn', 'warn', (['""" The Openeye module cannot be imported. ( Please provide equivGoups and listofpolar manually.)"""'], {}), "(\n ' The Openeye module cannot be imported. ( Please provide equivGoups and listofpolar manually.)'\n )\n", (237, 345), False, 'from warnings import warn\n'), ((15155, 15172), 'numpy.array', 'np.array', (['gridxyz'], {}), '(gridxyz)\n', (15163, 15172), True, 'import numpy as np\n'), ((15194, 15210), 'numpy.array', 'np.array', (['espval'], {}), '(espval)\n', (15202, 15210), True, 'import numpy as np\n'), ((15231, 15246), 'numpy.array', 'np.array', (['efval'], {}), '(efval)\n', (15239, 15246), True, 'import numpy as np\n'), ((37922, 37945), 'os.path.isfile', 'os.path.isfile', (['pdbfile'], {}), '(pdbfile)\n', (37936, 37945), False, 'import os, sys, copy\n'), ((15654, 15668), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (15666, 15668), True, 'import openeye.oechem as oechem\n'), ((16645, 16675), 'openeye.oechem.oemolistream', 'oechem.oemolistream', (['coordFile'], {}), '(coordFile)\n', (16664, 16675), True, 'import openeye.oechem as oechem\n'), ((16700, 16719), 'openeye.oechem.OEGraphMol', 'oechem.OEGraphMol', ([], {}), '()\n', (16717, 16719), True, 'import openeye.oechem as oechem\n'), ((16736, 16769), 'openeye.oechem.OEReadMolecule', 'oechem.OEReadMolecule', (['ifs', 'oemol'], {}), '(ifs, oemol)\n', (16757, 16769), True, 'import openeye.oechem as oechem\n'), ((16828, 16860), 'openeye.oechem.OEPerceiveSymmetry', 'oechem.OEPerceiveSymmetry', (['oemol'], {}), '(oemol)\n', (16853, 16860), True, 'import openeye.oechem as oechem\n'), ((38048, 38072), 'os.path.isfile', 
'os.path.isfile', (['mol2file'], {}), '(mol2file)\n', (38062, 38072), False, 'import os, sys, copy\n'), ((38556, 38580), 'os.path.isfile', 'os.path.isfile', (['espfpath'], {}), '(espfpath)\n', (38570, 38580), False, 'import os, sys, copy\n'), ((8501, 8514), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (8509, 8514), True, 'import numpy as np\n'), ((16470, 16483), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (16478, 16483), True, 'import numpy as np\n'), ((19872, 19907), 'openeye.oechem.OEAssignHybridization', 'oechem.OEAssignHybridization', (['oemol'], {}), '(oemol)\n', (19900, 19907), True, 'import openeye.oechem as oechem\n'), ((24336, 24366), 'copy.deepcopy', 'copy.deepcopy', (['self.atomidinfo'], {}), '(self.atomidinfo)\n', (24349, 24366), False, 'import os, sys, copy\n'), ((27657, 27670), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (27665, 27670), True, 'import numpy as np\n'), ((34939, 34969), 'copy.deepcopy', 'copy.deepcopy', (['self.atomidinfo'], {}), '(self.atomidinfo)\n', (34952, 34969), False, 'import os, sys, copy\n'), ((38176, 38199), 'os.path.isfile', 'os.path.isfile', (['xyzfile'], {}), '(xyzfile)\n', (38190, 38199), False, 'import os, sys, copy\n'), ((9735, 9750), 'pathlib.Path', 'Path', (['coordFile'], {}), '(coordFile)\n', (9739, 9750), False, 'from pathlib import Path\n'), ((18041, 18056), 'pathlib.Path', 'Path', (['coordFile'], {}), '(coordFile)\n', (18045, 18056), False, 'from pathlib import Path\n'), ((29212, 29227), 'pathlib.Path', 'Path', (['coordFile'], {}), '(coordFile)\n', (29216, 29227), False, 'from pathlib import Path\n')] |
import pandas as pd
import numpy as np
import re
from tensorflow import keras
from sklearn.feature_extraction.text import TfidfVectorizer
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import codecs
def read_glove_vecs(glove_file):
    """Load a GloVe embedding file.

    Returns a tuple of (vocabulary set, dict mapping each word to its
    float64 numpy vector).
    """
    vocab = set()
    vectors = {}
    with open(glove_file, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            parts = raw_line.strip().split()
            token = parts[0]
            vocab.add(token)
            vectors[token] = np.array(parts[1:], dtype=np.float64)
    return vocab, vectors
class GloveWrapper:
    """Compute tf-idf-weighted average GloVe embeddings for a list of documents."""

    def __init__(self, stop_words_l: list, glove_file: str = '../../glove.6B/glove.6B.50d.txt'):
        """
        :param stop_words_l: words to be cleaned in
        :param glove_file: path to a 50-dimensional GloVe embedding file.
            Generalized from a hard-coded path; the old path remains the
            default, so existing callers are unaffected.
        """
        self.stop_words_l = stop_words_l
        # reading Glove word embeddings into a dictionary with "word" as key and values as word vectors
        _, self.embeddings_index = read_glove_vecs(glove_file=glove_file)

    def get_embeddings(self, doc_list: list):
        """
        :param doc_list: list of document strings
        :return: (n_documents, 50) array of tf-idf-weighted sums of word vectors
        """
        documents_df = pd.DataFrame(doc_list, columns=['documents'])
        # lower-case, strip non-alphabetic characters and drop stop words
        documents_df['documents_cleaned'] = documents_df.documents.apply(lambda x: " ".join(
            re.sub(r'[^a-zA-Z]', ' ', w).lower() for w in x.split() if
            re.sub(r'[^a-zA-Z]', ' ', w).lower() not in self.stop_words_l))
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(documents_df.documents_cleaned)
        tokenized_documents = tokenizer.texts_to_sequences(documents_df.documents_cleaned)
        tokenized_paded_documents = pad_sequences(tokenized_documents, maxlen=64, padding='post')
        vocab_size = len(tokenizer.word_index) + 1
        # creating embedding matrix
        # every row is a vector representation from the vocabulary indexed by the tokenizer index.
        embedding_matrix = np.zeros((vocab_size, 50))
        for word, i in tokenizer.word_index.items():
            embedding_vector = self.embeddings_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        # prepare tfidf things
        tfidf_vectoriser = TfidfVectorizer(max_features=50)
        tfidf_vectoriser.fit(documents_df.documents_cleaned)
        tfidf_vectors = tfidf_vectoriser.transform(documents_df.documents_cleaned).toarray()
        # calculating average of word vectors of a document weighted by tf-idf
        document_embeddings = np.zeros((len(tokenized_paded_documents), 50))
        # NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
        # switch to get_feature_names_out() once the project's sklearn allows it.
        words = tfidf_vectoriser.get_feature_names()
        # instead of creating document-word embeddings, directly creating document embeddings
        for i in range(documents_df.shape[0]):
            for j in range(len(words)):
                document_embeddings[i] += embedding_matrix[tokenizer.word_index[words[j]]] * tfidf_vectors[i][j]
        return document_embeddings
| [
"pandas.DataFrame",
"tensorflow.keras.preprocessing.text.Tokenizer",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.zeros",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"numpy.array",
"re.sub"
] | [((1220, 1265), 'pandas.DataFrame', 'pd.DataFrame', (['doc_list'], {'columns': "['documents']"}), "(doc_list, columns=['documents'])\n", (1232, 1265), True, 'import pandas as pd\n'), ((1527, 1538), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (1536, 1538), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((1729, 1790), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['tokenized_documents'], {'maxlen': '(64)', 'padding': '"""post"""'}), "(tokenized_documents, maxlen=64, padding='post')\n", (1742, 1790), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2005, 2031), 'numpy.zeros', 'np.zeros', (['(vocab_size, 50)'], {}), '((vocab_size, 50))\n', (2013, 2031), True, 'import numpy as np\n'), ((2308, 2340), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(50)'}), '(max_features=50)\n', (2323, 2340), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((588, 624), 'numpy.array', 'np.array', (['line[1:]'], {'dtype': 'np.float64'}), '(line[1:], dtype=np.float64)\n', (596, 624), True, 'import numpy as np\n'), ((1371, 1398), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'w'], {}), "('[^a-zA-Z]', ' ', w)\n", (1377, 1398), False, 'import re\n'), ((1442, 1469), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'w'], {}), "('[^a-zA-Z]', ' ', w)\n", (1448, 1469), False, 'import re\n')] |
import os
import json
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose import with_setup
from nose.tools import assert_equal, assert_not_equal, assert_true
import nibabel
from nilearn.datasets import utils
from nilearn.datasets.tests import test_utils as tst
from nilearn._utils.niimg_conversions import check_niimg
from sammba.data_fetchers import atlas
from sammba import testing_data
def setup_mock():
    """Install the mocked URL opener so atlas fetchers do not hit the network."""
    mock_handle = tst.setup_mock(utils, atlas)
    return mock_handle
def teardown_mock():
    """Remove the mocked URL opener installed by ``setup_mock``."""
    restored = tst.teardown_mock(utils, atlas)
    return restored
@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_dorr_2008():
    """Check fetch_atlas_dorr_2008: download bookkeeping, returned file
    names, on-disk downsampling and the 'minc' image format."""
    # Fake labels CSV that the fetcher will parse: two structures, three
    # label values (bilateral amygdala 51/151, midline fourth ventricle 118).
    # NOTE(review): unlike test_fetch_masks_dorr_2008 below, no header row
    # is written here — confirm the fetcher tolerates both layouts.
    datadir = os.path.join(tst.tmpdir, 'dorr_2008')
    os.mkdir(datadir)
    dummy = open(os.path.join(
        datadir, 'c57_brain_atlas_labels.csv'), 'w')
    dummy.write("\n1,amygdala,51,151\n27,fourth ventricle,118,118")
    dummy.close()
    # Default resolution
    bunch = atlas.fetch_atlas_dorr_2008(data_dir=tst.tmpdir, verbose=0)
    # exactly two files (average image + labels image) should be requested
    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_equal(bunch['t2'],
                 os.path.join(datadir, 'Dorr_2008_average.nii.gz'))
    assert_equal(bunch['maps'],
                 os.path.join(datadir, 'Dorr_2008_labels.nii.gz'))
    # test resampling: overwrite the mocked (empty) downloads with a real
    # small image so the fetcher has something to downsample
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    anat_img = check_niimg(anat_file)
    anat_img.to_filename(bunch['t2'])
    anat_img = check_niimg(anat_file, dtype=int)
    anat_img.to_filename(bunch['maps'])
    bunch = atlas.fetch_atlas_dorr_2008(
        data_dir=tst.tmpdir, verbose=0, downsample='100')
    assert_equal(bunch['t2'],
                 os.path.join(datadir, 'Dorr_2008_average_100um.nii.gz'))
    assert_equal(bunch['maps'],
                 os.path.join(datadir, 'Dorr_2008_labels_100um.nii.gz'))
    # downsampled images must have 100um (.1 mm) isotropic voxels
    assert_array_almost_equal(nibabel.load(bunch['t2']).header.get_zooms(),
                              (.1, .1, .1))
    assert_array_almost_equal(nibabel.load(bunch['maps']).header.get_zooms(),
                              (.1, .1, .1))
    # the labels image must stay integer-typed after resampling
    assert_equal(nibabel.load(bunch['maps']).get_data().dtype, np.dtype(int))
    # url count unchanged: downsampling reuses the cached downloads
    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_equal(len(bunch['names']), 3)
    assert_equal(len(bunch['labels']), 3)
    # test with 'minc' format
    bunch = atlas.fetch_atlas_dorr_2008(data_dir=tst.tmpdir, verbose=0,
                                        image_format='minc')
    # two additional downloads for the minc-format files
    assert_equal(len(tst.mock_url_request.urls), 4)
    assert_equal(bunch['t2'],
                 os.path.join(datadir, 'male-female-mouse-atlas.mnc'))
    assert_equal(bunch['maps'],
                 os.path.join(datadir, 'c57_fixed_labels_resized.mnc'))
    assert_equal(len(bunch['names']), 3)
    assert_equal(len(bunch['labels']), 3)
    assert_not_equal(bunch.description, '')
@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_masks_dorr_2008():
    """Check fetch_masks_dorr_2008 derives brain / gm / cc / ventricles
    mask files from a synthetic DORR label image."""
    # create a dummy csv file for dorr labels
    datadir = os.path.join(tst.tmpdir, 'dorr_2008')
    os.mkdir(datadir)
    dummy_csv = open(os.path.join(
        datadir, 'c57_brain_atlas_labels.csv'), 'w')
    dummy_csv.write("Structure,right label,left label"
                    "\n1,amygdala,51,151\n19,corpus callosum,8,68"
                    "\n27,fourth ventricle,118,118"
                    "\n38,lateral ventricle,57,77")
    dummy_csv.close()
    dorr_atlas = atlas.fetch_atlas_dorr_2008(data_dir=tst.tmpdir, verbose=0)
    # create a dummy atlas image: voxel values are the label ids from the
    # CSV above (right/left pairs plus the midline fourth ventricle)
    dummy_atlas_data = np.zeros((100, 100, 100))
    dummy_atlas_data[:10, :10, :10] = 51            # right amygdala
    dummy_atlas_data[50:90, 50:90, 50:90] = 151     # left amygdala
    dummy_atlas_data[10:20, :10, 10:30] = 8         # right corpus callosum
    dummy_atlas_data[90:100, :10, 10:30] = 68       # left corpus callosum
    dummy_atlas_data[10:20, 50:90, 10:20] = 118     # fourth ventricle
    dummy_atlas_data[40:60, 30:40, 40:50] = 57      # right lateral ventricle
    dummy_atlas_data[60:70, 30:40, 40:50] = 77      # left lateral ventricle
    dummy_atlas_img = nibabel.Nifti1Image(dummy_atlas_data, np.eye(4))
    dummy_atlas_img.to_filename(dorr_atlas.maps)
    dorr_masks = atlas.fetch_masks_dorr_2008(data_dir=tst.tmpdir, verbose=0)
    # each expected mask file must have been written to disk
    assert_true(os.path.isfile(dorr_masks.brain))
    assert_true(os.path.isfile(dorr_masks.gm))
    assert_true(os.path.isfile(dorr_masks.cc))
    assert_true(os.path.isfile(dorr_masks.ventricles))
@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_waxholm_rat_2014():
    """Check fetch_atlas_waxholm_rat_2014 file naming for the default,
    '78' and '200' downsample settings, plus the on-disk resampling."""
    datadir = os.path.join(tst.tmpdir, 'waxholm_rat_2014')
    os.mkdir(datadir)
    # Create dummy json labels file
    json_filename = os.path.join(datadir, 'WHS_SD_rat_labels.json')
    with open(json_filename, 'w') as json_content:
        json.dump({"216": "Spinal cord"}, json_content)
    # default resolution -> *_downsample3 files
    bunch = atlas.fetch_atlas_waxholm_rat_2014(
        data_dir=tst.tmpdir, verbose=0)
    assert_equal(
        bunch['t2star'],
        os.path.join(datadir, 'WHS_SD_rat_T2star_v1_01_downsample3.nii.gz'))
    assert_equal(
        bunch['maps'],
        os.path.join(datadir, 'WHS_SD_rat_atlas_v1_01_downsample3.nii.gz'))
    # two downloads: T2* image + labels image
    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_not_equal(bunch.description, '')
    # Downsampled 2 times
    # NOTE(review): downsample='78' maps to the *_downsample2 files —
    # confirm this naming convention against the fetcher implementation.
    bunch = atlas.fetch_atlas_waxholm_rat_2014(
        data_dir=tst.tmpdir, verbose=0, downsample='78')
    assert_equal(
        bunch['t2star'],
        os.path.join(datadir, 'WHS_SD_rat_T2star_v1_01_downsample2.nii.gz'))
    assert_equal(
        bunch['maps'],
        os.path.join(datadir, 'WHS_SD_rat_atlas_v1_01_downsample2.nii.gz'))
    assert_equal(len(tst.mock_url_request.urls), 4)
    # test resampling: replace the mocked downloads with a real small image
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    anat_img = check_niimg(anat_file)
    anat_img.to_filename(bunch['t2star'])
    anat_img = check_niimg(anat_file, dtype=int)
    anat_img.to_filename(bunch['maps'])
    bunch = atlas.fetch_atlas_waxholm_rat_2014(
        data_dir=tst.tmpdir, verbose=0, downsample='200')
    # no new download: resampling reuses the cached files
    assert_equal(len(tst.mock_url_request.urls), 4)
    assert_equal(
        bunch['t2star'],
        os.path.join(datadir, 'WHS_SD_rat_T2star_v1_01_200um.nii.gz'))
    assert_equal(
        bunch['maps'],
        os.path.join(datadir, 'WHS_SD_rat_atlas_v1_01_200um.nii.gz'))
    # resampled images must have 200um (.2 mm) isotropic voxels
    assert_array_almost_equal(nibabel.load(bunch['t2star']).header.get_zooms(),
                              (.2, .2, .2))
    assert_array_almost_equal(nibabel.load(bunch['maps']).header.get_zooms(),
                              (.2, .2, .2))
| [
"os.mkdir",
"json.dump",
"sammba.data_fetchers.atlas.fetch_atlas_waxholm_rat_2014",
"nibabel.load",
"nilearn._utils.niimg_conversions.check_niimg",
"sammba.data_fetchers.atlas.fetch_masks_dorr_2008",
"os.path.dirname",
"numpy.dtype",
"numpy.zeros",
"nilearn.datasets.tests.test_utils.setup_mock",
... | [((548, 585), 'nose.with_setup', 'with_setup', (['setup_mock', 'teardown_mock'], {}), '(setup_mock, teardown_mock)\n', (558, 585), False, 'from nose import with_setup\n'), ((587, 638), 'nose.with_setup', 'with_setup', (['tst.setup_tmpdata', 'tst.teardown_tmpdata'], {}), '(tst.setup_tmpdata, tst.teardown_tmpdata)\n', (597, 638), False, 'from nose import with_setup\n'), ((2880, 2917), 'nose.with_setup', 'with_setup', (['setup_mock', 'teardown_mock'], {}), '(setup_mock, teardown_mock)\n', (2890, 2917), False, 'from nose import with_setup\n'), ((2919, 2970), 'nose.with_setup', 'with_setup', (['tst.setup_tmpdata', 'tst.teardown_tmpdata'], {}), '(tst.setup_tmpdata, tst.teardown_tmpdata)\n', (2929, 2970), False, 'from nose import with_setup\n'), ((4342, 4379), 'nose.with_setup', 'with_setup', (['setup_mock', 'teardown_mock'], {}), '(setup_mock, teardown_mock)\n', (4352, 4379), False, 'from nose import with_setup\n'), ((4381, 4432), 'nose.with_setup', 'with_setup', (['tst.setup_tmpdata', 'tst.teardown_tmpdata'], {}), '(tst.setup_tmpdata, tst.teardown_tmpdata)\n', (4391, 4432), False, 'from nose import with_setup\n'), ((450, 478), 'nilearn.datasets.tests.test_utils.setup_mock', 'tst.setup_mock', (['utils', 'atlas'], {}), '(utils, atlas)\n', (464, 478), True, 'from nilearn.datasets.tests import test_utils as tst\n'), ((513, 544), 'nilearn.datasets.tests.test_utils.teardown_mock', 'tst.teardown_mock', (['utils', 'atlas'], {}), '(utils, atlas)\n', (530, 544), True, 'from nilearn.datasets.tests import test_utils as tst\n'), ((687, 724), 'os.path.join', 'os.path.join', (['tst.tmpdir', '"""dorr_2008"""'], {}), "(tst.tmpdir, 'dorr_2008')\n", (699, 724), False, 'import os\n'), ((729, 746), 'os.mkdir', 'os.mkdir', (['datadir'], {}), '(datadir)\n', (737, 746), False, 'import os\n'), ((955, 1014), 'sammba.data_fetchers.atlas.fetch_atlas_dorr_2008', 'atlas.fetch_atlas_dorr_2008', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)'}), '(data_dir=tst.tmpdir, verbose=0)\n', (982, 
1014), False, 'from sammba.data_fetchers import atlas\n'), ((1415, 1437), 'nilearn._utils.niimg_conversions.check_niimg', 'check_niimg', (['anat_file'], {}), '(anat_file)\n', (1426, 1437), False, 'from nilearn._utils.niimg_conversions import check_niimg\n'), ((1491, 1524), 'nilearn._utils.niimg_conversions.check_niimg', 'check_niimg', (['anat_file'], {'dtype': 'int'}), '(anat_file, dtype=int)\n', (1502, 1524), False, 'from nilearn._utils.niimg_conversions import check_niimg\n'), ((1578, 1655), 'sammba.data_fetchers.atlas.fetch_atlas_dorr_2008', 'atlas.fetch_atlas_dorr_2008', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)', 'downsample': '"""100"""'}), "(data_dir=tst.tmpdir, verbose=0, downsample='100')\n", (1605, 1655), False, 'from sammba.data_fetchers import atlas\n'), ((2372, 2457), 'sammba.data_fetchers.atlas.fetch_atlas_dorr_2008', 'atlas.fetch_atlas_dorr_2008', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)', 'image_format': '"""minc"""'}), "(data_dir=tst.tmpdir, verbose=0, image_format='minc'\n )\n", (2399, 2457), False, 'from sammba.data_fetchers import atlas\n'), ((2837, 2876), 'nose.tools.assert_not_equal', 'assert_not_equal', (['bunch.description', '""""""'], {}), "(bunch.description, '')\n", (2853, 2876), False, 'from nose.tools import assert_equal, assert_not_equal, assert_true\n'), ((3065, 3102), 'os.path.join', 'os.path.join', (['tst.tmpdir', '"""dorr_2008"""'], {}), "(tst.tmpdir, 'dorr_2008')\n", (3077, 3102), False, 'import os\n'), ((3107, 3124), 'os.mkdir', 'os.mkdir', (['datadir'], {}), '(datadir)\n', (3115, 3124), False, 'import os\n'), ((3478, 3537), 'sammba.data_fetchers.atlas.fetch_atlas_dorr_2008', 'atlas.fetch_atlas_dorr_2008', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)'}), '(data_dir=tst.tmpdir, verbose=0)\n', (3505, 3537), False, 'from sammba.data_fetchers import atlas\n'), ((3595, 3620), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (3603, 3620), True, 'import numpy as np\n'), ((4080, 4139), 
'sammba.data_fetchers.atlas.fetch_masks_dorr_2008', 'atlas.fetch_masks_dorr_2008', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)'}), '(data_dir=tst.tmpdir, verbose=0)\n', (4107, 4139), False, 'from sammba.data_fetchers import atlas\n'), ((4488, 4532), 'os.path.join', 'os.path.join', (['tst.tmpdir', '"""waxholm_rat_2014"""'], {}), "(tst.tmpdir, 'waxholm_rat_2014')\n", (4500, 4532), False, 'import os\n'), ((4537, 4554), 'os.mkdir', 'os.mkdir', (['datadir'], {}), '(datadir)\n', (4545, 4554), False, 'import os\n'), ((4611, 4658), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_labels.json"""'], {}), "(datadir, 'WHS_SD_rat_labels.json')\n", (4623, 4658), False, 'import os\n'), ((4804, 4870), 'sammba.data_fetchers.atlas.fetch_atlas_waxholm_rat_2014', 'atlas.fetch_atlas_waxholm_rat_2014', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)'}), '(data_dir=tst.tmpdir, verbose=0)\n', (4838, 4870), False, 'from sammba.data_fetchers import atlas\n'), ((5174, 5213), 'nose.tools.assert_not_equal', 'assert_not_equal', (['bunch.description', '""""""'], {}), "(bunch.description, '')\n", (5190, 5213), False, 'from nose.tools import assert_equal, assert_not_equal, assert_true\n'), ((5253, 5340), 'sammba.data_fetchers.atlas.fetch_atlas_waxholm_rat_2014', 'atlas.fetch_atlas_waxholm_rat_2014', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)', 'downsample': '"""78"""'}), "(data_dir=tst.tmpdir, verbose=0,\n downsample='78')\n", (5287, 5340), False, 'from sammba.data_fetchers import atlas\n'), ((5786, 5808), 'nilearn._utils.niimg_conversions.check_niimg', 'check_niimg', (['anat_file'], {}), '(anat_file)\n', (5797, 5808), False, 'from nilearn._utils.niimg_conversions import check_niimg\n'), ((5866, 5899), 'nilearn._utils.niimg_conversions.check_niimg', 'check_niimg', (['anat_file'], {'dtype': 'int'}), '(anat_file, dtype=int)\n', (5877, 5899), False, 'from nilearn._utils.niimg_conversions import check_niimg\n'), ((5952, 6040), 
'sammba.data_fetchers.atlas.fetch_atlas_waxholm_rat_2014', 'atlas.fetch_atlas_waxholm_rat_2014', ([], {'data_dir': 'tst.tmpdir', 'verbose': '(0)', 'downsample': '"""200"""'}), "(data_dir=tst.tmpdir, verbose=0,\n downsample='200')\n", (5986, 6040), False, 'from sammba.data_fetchers import atlas\n'), ((764, 815), 'os.path.join', 'os.path.join', (['datadir', '"""c57_brain_atlas_labels.csv"""'], {}), "(datadir, 'c57_brain_atlas_labels.csv')\n", (776, 815), False, 'import os\n'), ((1114, 1163), 'os.path.join', 'os.path.join', (['datadir', '"""Dorr_2008_average.nii.gz"""'], {}), "(datadir, 'Dorr_2008_average.nii.gz')\n", (1126, 1163), False, 'import os\n'), ((1214, 1262), 'os.path.join', 'os.path.join', (['datadir', '"""Dorr_2008_labels.nii.gz"""'], {}), "(datadir, 'Dorr_2008_labels.nii.gz')\n", (1226, 1262), False, 'import os\n'), ((1316, 1354), 'os.path.dirname', 'os.path.dirname', (['testing_data.__file__'], {}), '(testing_data.__file__)\n', (1331, 1354), False, 'import os\n'), ((1712, 1767), 'os.path.join', 'os.path.join', (['datadir', '"""Dorr_2008_average_100um.nii.gz"""'], {}), "(datadir, 'Dorr_2008_average_100um.nii.gz')\n", (1724, 1767), False, 'import os\n'), ((1818, 1872), 'os.path.join', 'os.path.join', (['datadir', '"""Dorr_2008_labels_100um.nii.gz"""'], {}), "(datadir, 'Dorr_2008_labels_100um.nii.gz')\n", (1830, 1872), False, 'import os\n'), ((2179, 2192), 'numpy.dtype', 'np.dtype', (['int'], {}), '(int)\n', (2187, 2192), True, 'import numpy as np\n'), ((2592, 2644), 'os.path.join', 'os.path.join', (['datadir', '"""male-female-mouse-atlas.mnc"""'], {}), "(datadir, 'male-female-mouse-atlas.mnc')\n", (2604, 2644), False, 'import os\n'), ((2695, 2748), 'os.path.join', 'os.path.join', (['datadir', '"""c57_fixed_labels_resized.mnc"""'], {}), "(datadir, 'c57_fixed_labels_resized.mnc')\n", (2707, 2748), False, 'import os\n'), ((3146, 3197), 'os.path.join', 'os.path.join', (['datadir', '"""c57_brain_atlas_labels.csv"""'], {}), "(datadir, 
'c57_brain_atlas_labels.csv')\n", (3158, 3197), False, 'import os\n'), ((4002, 4011), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4008, 4011), True, 'import numpy as np\n'), ((4156, 4188), 'os.path.isfile', 'os.path.isfile', (['dorr_masks.brain'], {}), '(dorr_masks.brain)\n', (4170, 4188), False, 'import os\n'), ((4206, 4235), 'os.path.isfile', 'os.path.isfile', (['dorr_masks.gm'], {}), '(dorr_masks.gm)\n', (4220, 4235), False, 'import os\n'), ((4253, 4282), 'os.path.isfile', 'os.path.isfile', (['dorr_masks.cc'], {}), '(dorr_masks.cc)\n', (4267, 4282), False, 'import os\n'), ((4300, 4337), 'os.path.isfile', 'os.path.isfile', (['dorr_masks.ventricles'], {}), '(dorr_masks.ventricles)\n', (4314, 4337), False, 'import os\n'), ((4718, 4765), 'json.dump', 'json.dump', (["{'216': 'Spinal cord'}", 'json_content'], {}), "({'216': 'Spinal cord'}, json_content)\n", (4727, 4765), False, 'import json\n'), ((4931, 4998), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_T2star_v1_01_downsample3.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_T2star_v1_01_downsample3.nii.gz')\n", (4943, 4998), False, 'import os\n'), ((5049, 5115), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_atlas_v1_01_downsample3.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_atlas_v1_01_downsample3.nii.gz')\n", (5061, 5115), False, 'import os\n'), ((5397, 5464), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_T2star_v1_01_downsample2.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_T2star_v1_01_downsample2.nii.gz')\n", (5409, 5464), False, 'import os\n'), ((5515, 5581), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_atlas_v1_01_downsample2.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_atlas_v1_01_downsample2.nii.gz')\n", (5527, 5581), False, 'import os\n'), ((5687, 5725), 'os.path.dirname', 'os.path.dirname', (['testing_data.__file__'], {}), '(testing_data.__file__)\n', (5702, 5725), False, 'import os\n'), ((6149, 6210), 'os.path.join', 'os.path.join', (['datadir', 
'"""WHS_SD_rat_T2star_v1_01_200um.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_T2star_v1_01_200um.nii.gz')\n", (6161, 6210), False, 'import os\n'), ((6261, 6321), 'os.path.join', 'os.path.join', (['datadir', '"""WHS_SD_rat_atlas_v1_01_200um.nii.gz"""'], {}), "(datadir, 'WHS_SD_rat_atlas_v1_01_200um.nii.gz')\n", (6273, 6321), False, 'import os\n'), ((1904, 1929), 'nibabel.load', 'nibabel.load', (["bunch['t2']"], {}), "(bunch['t2'])\n", (1916, 1929), False, 'import nibabel\n'), ((2024, 2051), 'nibabel.load', 'nibabel.load', (["bunch['maps']"], {}), "(bunch['maps'])\n", (2036, 2051), False, 'import nibabel\n'), ((2133, 2160), 'nibabel.load', 'nibabel.load', (["bunch['maps']"], {}), "(bunch['maps'])\n", (2145, 2160), False, 'import nibabel\n'), ((6353, 6382), 'nibabel.load', 'nibabel.load', (["bunch['t2star']"], {}), "(bunch['t2star'])\n", (6365, 6382), False, 'import nibabel\n'), ((6477, 6504), 'nibabel.load', 'nibabel.load', (["bunch['maps']"], {}), "(bunch['maps'])\n", (6489, 6504), False, 'import nibabel\n')] |
# encoding: utf-8
"""
Description: A python 2.7 implementation of gcForest proposed in [1]. A demo implementation of gcForest library as well as some demo client scripts to demonstrate how to use the code. The implementation is flexible enough for modifying the model or
fit your own datasets.
Reference: [1] <NAME> and <NAME>. Deep Forest: Towards an Alternative to Deep Neural Networks. In IJCAI-2017. (https://arxiv.org/abs/1702.08835v2 )
Requirements: This package is developed with Python 2.7, please make sure all the dependencies are installed, which is specified in requirements.txt
ATTN: This package is free for academic usage. You can run it at your own risk. For other purposes, please contact Prof. <NAME>(<EMAIL>)
ATTN2: This package was developed by <NAME>(<EMAIL>). The readme file and demo roughly explains how to use the codes. For any problem concerning the codes, please feel free to contact Mr.Feng.
"""
import numpy as np
from keras.datasets import imdb
from keras.preprocessing import sequence
from .ds_base import ds_base
"""
X_train.len: min,mean,max=11,238,2494
X_test.len: min,mean,max=7,230,2315
"""
class IMDB(ds_base):
    """Keras IMDB movie-review dataset wrapper for gcForest.

    Loads the word-index IMDB data and converts each review into one of
    three feature layouts selected by ``feature``:

    * ``'origin'``    -- the raw word-id sequence, padded to length 400;
    * ``'tfidf'``     -- a 5000-dim bag-of-words tf-idf vector per review;
    * ``'tfidf_seq'`` -- the padded id sequence with every position
      replaced by the tf-idf weight of the word at that position.

    The final array is reshaped to (n, 1, d, 1) before being handed to
    ``init_layout_X`` / ``init_layout_y`` from ``ds_base``.
    """

    def __init__(self, feature='tfidf', **kwargs):
        super(IMDB, self).__init__(**kwargs)
        # a configuration dict (if provided) overrides the keyword argument
        if self.conf is not None:
            feature = self.conf.get('feature', 'tfidf')
        if feature.startswith('tfidf'):
            # cap the vocabulary so the tf-idf vectors stay 5000-dimensional
            max_features = 5000
            (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
        else:
            # full vocabulary; the Keras defaults are spelled out explicitly
            (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=None,
                    skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
        # pick train / test / all according to self.data_set (from ds_base)
        X, y = self.get_data_by_imageset(X_train, y_train, X_test, y_test)
        print('data_set={}, Average sequence length: {}'.format(self.data_set, np.mean(list(map(len, X)))))
        #feature
        if feature == 'origin':
            maxlen = 400
            X = sequence.pad_sequences(X, maxlen=maxlen)
        elif feature == 'tfidf':
            from sklearn.feature_extraction.text import TfidfTransformer
            transformer = TfidfTransformer(smooth_idf=False)
            #transformer = TfidfTransformer(smooth_idf=True)
            # bag-of-words count matrices; the idf weights are fitted on the
            # training split only, then applied to the selected split
            X_train_bin = np.zeros((len(X_train), max_features), dtype=np.int16)
            X_bin = np.zeros((len(X), max_features), dtype=np.int16)
            for i, X_i in enumerate(X_train):
                X_train_bin[i, :] = np.bincount(X_i, minlength=max_features)
            for i, X_i in enumerate(X):
                X_bin[i, :] = np.bincount(X_i, minlength=max_features)
            transformer.fit(X_train_bin)
            X = transformer.transform(X_bin)
            X = np.asarray(X.todense())
        elif feature == 'tfidf_seq':
            from sklearn.feature_extraction.text import TfidfTransformer
            transformer = TfidfTransformer(smooth_idf=False)
            maxlen = 400
            N = len(X)
            X_bin = np.zeros((N, max_features), dtype=np.int16)
            for i, X_i in enumerate(X):
                X_bin_i = np.bincount(X_i)
                X_bin[i, :len(X_bin_i)] = X_bin_i
            # NOTE(review): unlike the 'tfidf' branch, idf is fitted on the
            # selected split itself rather than on X_train — confirm this
            # asymmetry is intended.
            tfidf = transformer.fit_transform(X_bin)
            tfidf = np.asarray(tfidf.todense())
            X_id = sequence.pad_sequences(X, maxlen=maxlen)
            X = np.zeros(X_id.shape, dtype=np.float32)
            for i in range(N):
                # look up each padded word-id's tf-idf weight for review i
                X[i, :] = tfidf[i][X_id[i]]
        else:
            raise ValueError('Unkown feature: ', feature)
        # layout expected downstream: (n, 1, d, 1)
        X = X[:,np.newaxis,:,np.newaxis]
        self.X = self.init_layout_X(X)
        self.y = self.init_layout_y(y)
| [
"keras.preprocessing.sequence.pad_sequences",
"numpy.zeros",
"numpy.bincount",
"sklearn.feature_extraction.text.TfidfTransformer",
"keras.datasets.imdb.load_data"
] | [((1462, 1499), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'nb_words': 'max_features'}), '(nb_words=max_features)\n', (1476, 1499), False, 'from keras.datasets import imdb\n'), ((1565, 1674), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'nb_words': 'None', 'skip_top': '(0)', 'maxlen': 'None', 'seed': '(113)', 'start_char': '(1)', 'oov_char': '(2)', 'index_from': '(3)'}), '(nb_words=None, skip_top=0, maxlen=None, seed=113, start_char\n =1, oov_char=2, index_from=3)\n', (1579, 1674), False, 'from keras.datasets import imdb\n'), ((1965, 2005), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X'], {'maxlen': 'maxlen'}), '(X, maxlen=maxlen)\n', (1987, 2005), False, 'from keras.preprocessing import sequence\n'), ((2138, 2172), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(False)'}), '(smooth_idf=False)\n', (2154, 2172), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((2466, 2506), 'numpy.bincount', 'np.bincount', (['X_i'], {'minlength': 'max_features'}), '(X_i, minlength=max_features)\n', (2477, 2506), True, 'import numpy as np\n'), ((2577, 2617), 'numpy.bincount', 'np.bincount', (['X_i'], {'minlength': 'max_features'}), '(X_i, minlength=max_features)\n', (2588, 2617), True, 'import numpy as np\n'), ((2880, 2914), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(False)'}), '(smooth_idf=False)\n', (2896, 2914), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((2983, 3026), 'numpy.zeros', 'np.zeros', (['(N, max_features)'], {'dtype': 'np.int16'}), '((N, max_features), dtype=np.int16)\n', (2991, 3026), True, 'import numpy as np\n'), ((3280, 3320), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X'], {'maxlen': 'maxlen'}), '(X, maxlen=maxlen)\n', (3302, 3320), False, 'from keras.preprocessing import sequence\n'), ((3337, 3375), 'numpy.zeros', 
'np.zeros', (['X_id.shape'], {'dtype': 'np.float32'}), '(X_id.shape, dtype=np.float32)\n', (3345, 3375), True, 'import numpy as np\n'), ((3093, 3109), 'numpy.bincount', 'np.bincount', (['X_i'], {}), '(X_i)\n', (3104, 3109), True, 'import numpy as np\n')] |
import runway
import tensorflow as tf
import numpy as np
import sys
import json
import sys
import os
from glob import glob
from lm.modeling import GroverModel, GroverConfig, _top_p_sample, sample
from sample.encoder import get_encoder, format_context, _tokenize_article_pieces, extract_generated_target
import argparse
# Command-line interface kept from the original Grover generation script.
# NOTE(review): several options (metadata_fn, out_fn, input, num_folds,
# fold, samples) do not appear to be used in this Runway app — confirm
# before removing them.
parser = argparse.ArgumentParser(description='Contextual generation (aka given some metadata we will generate articles')
parser.add_argument(
    '-metadata_fn',
    dest='metadata_fn',
    type=str,
    help='Path to a JSONL containing metadata',
)
parser.add_argument(
    '-out_fn',
    dest='out_fn',
    type=str,
    help='Out jsonl, which will contain the completed jsons',
)
parser.add_argument(
    '-input',
    dest='input',
    type=str,
    help='Text to complete',
)
parser.add_argument(
    '-model_config_fn',
    dest='model_config_fn',
    default='lm/configs/mega.json',
    type=str,
    help='Configuration JSON for the model',
)
parser.add_argument(
    '-target',
    dest='target',
    default='article',
    type=str,
    help='What to generate for each item in metadata_fn. can be article (body), title, etc.',
)
parser.add_argument(
    '-batch_size',
    dest='batch_size',
    default=1,
    type=int,
    help='How many things to generate per context. will split into chunks if need be',
)
parser.add_argument(
    '-num_folds',
    dest='num_folds',
    default=1,
    type=int,
    help='Number of folds. useful if we want to split up a big file into multiple jobs.',
)
parser.add_argument(
    '-fold',
    dest='fold',
    default=0,
    type=int,
    help='which fold we are on. useful if we want to split up a big file into multiple jobs.'
)
parser.add_argument(
    '-max_batch_size',
    dest='max_batch_size',
    default=None,
    type=int,
    help='max batch size. You can leave this out and we will infer one based on the number of hidden layers',
)
parser.add_argument(
    '-top_p',
    dest='top_p',
    default=0.95,
    type=float,
    help='p to use for top p sampling. if this isn\'t none, use this for everthing'
)
parser.add_argument(
    '-samples',
    dest='samples',
    default=1,
    type=int,
    help='num_samples',
)
args = parser.parse_args()
# Tokenizer and model configuration shared by setup() and generate().
encoder = get_encoder()
news_config = GroverConfig.from_json_file(args.model_config_fn)
# We might have to split the batch into multiple chunks if the batch size is too large
# (per-chunk cap chosen by model depth: 12/24/48 hidden layers)
default_mbs = {12: 32, 24: 16, 48: 3}
max_batch_size = args.max_batch_size if args.max_batch_size is not None else default_mbs[news_config.num_hidden_layers]
# factorize args.batch_size = (num_chunks * batch_size_per_chunk) s.t. batch_size_per_chunk < max_batch_size
num_chunks = int(np.ceil(args.batch_size / max_batch_size))
batch_size_per_chunk = int(np.ceil(args.batch_size / num_chunks))
print("\n~~\nbatch size={}, max batch size={}, num chunks={}, batch size per chunk={}\n~~\n".format(
    args.batch_size, max_batch_size, num_chunks, batch_size_per_chunk), flush=True)
# This controls the top p for each generation.
top_p = np.ones((num_chunks, batch_size_per_chunk), dtype=np.float32) * args.top_p
# Global TF1 session used by both setup() and generate().
tf_config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.InteractiveSession(config=tf_config)
def glob_dir(path):
    """Return the entries directly inside *path* (non-recursive glob)."""
    pattern = os.path.join(path, '*')
    entries = glob(pattern)
    return entries
@runway.setup(options={'checkpoint_dir': runway.file(is_directory=True)})
def setup(opts):
    """Build the Grover sampling graph and restore the checkpoint.

    Parameters
    ----------
    opts : dict
        Runway options; ``opts['checkpoint_dir']`` is a directory whose
        first sub-folder holds the TF checkpoint files.

    Returns
    -------
    dict
        Handles to the graph tensors used later by ``generate``.
    """
    initial_context = tf.placeholder(tf.int32, [batch_size_per_chunk, None])
    p_for_topp = tf.placeholder(tf.float32, [batch_size_per_chunk])
    eos_token = tf.placeholder(tf.int32, [])
    # nucleus (top-p) sampling graph; top-k disabled
    tokens, probs = sample(news_config=news_config, initial_context=initial_context,
                           eos_token=eos_token, ignore_ids=None, p_for_topp=p_for_topp,
                           do_topk=False)
    saver = tf.train.Saver()
    # Checkpoint prefix = first file of the first sub-folder with its
    # extension stripped (TF expects the path without .index/.data suffix).
    checkpoint_folder = glob_dir(opts['checkpoint_dir'])[0]
    checkpoint_path = '.'.join(glob_dir(checkpoint_folder)[0].split('.')[:-1])
    saver.restore(sess, checkpoint_path)
    return {
        'tokens': tokens,
        'probs': probs,
        'initial_context': initial_context,
        'eos_token': eos_token,
        'p_for_topp': p_for_topp
    }
@runway.command('generate', inputs={'prompt': runway.text}, outputs={'text': runway.text})
def generate(model, inputs):
    """Generate a continuation of the prompt with the Grover model.

    Parameters
    ----------
    model : dict
        Graph handles returned by ``setup`` (tokens/probs outputs plus
        the context, eos and top-p placeholders).
    inputs : dict
        Runway inputs; ``inputs['prompt']`` is the seed text.

    Returns
    -------
    str
        The first generated sample, with the field selected by
        ``args.target`` extracted from the model output.
    """
    text = inputs['prompt']
    # Tokenize the prompt and drop the trailing end token so the model
    # continues the piece instead of starting a fresh one.
    encoded = _tokenize_article_pieces(encoder, text)
    context_formatted = []
    context_formatted.extend(encoded[:-1])
    # Dead code removed: ignore_ids_np, gens_raw and gen_probs were
    # computed/declared but never used (sample() is built with
    # ignore_ids=None in setup()).
    gens = []
    for chunk_i in range(num_chunks):
        # One sess.run per chunk; each chunk samples
        # batch_size_per_chunk continuations in parallel.
        tokens_out, probs_out = sess.run(
            [model['tokens'], model['probs']],
            feed_dict={model['initial_context']: [context_formatted] * batch_size_per_chunk,
                       model['eos_token']: 60000,
                       model['p_for_topp']: top_p[chunk_i]})
        for t_i, p_i in zip(tokens_out, probs_out):
            extraction = extract_generated_target(output_tokens=t_i, encoder=encoder, target=args.target)
            gens.append(extraction['extraction'])
    # Only the first sample is returned to Runway.
    return gens[0]
if __name__ == "__main__":
runway.run() | [
"runway.run",
"argparse.ArgumentParser",
"numpy.ceil",
"tensorflow.train.Saver",
"os.path.join",
"numpy.ones",
"lm.modeling.sample",
"sample.encoder.extract_generated_target",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"numpy.array",
"tensorflow.InteractiveSession",
"sample.encoder.... | [((331, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Contextual generation (aka given some metadata we will generate articles"""'}), "(description=\n 'Contextual generation (aka given some metadata we will generate articles')\n", (354, 447), False, 'import argparse\n'), ((2239, 2252), 'sample.encoder.get_encoder', 'get_encoder', ([], {}), '()\n', (2250, 2252), False, 'from sample.encoder import get_encoder, format_context, _tokenize_article_pieces, extract_generated_target\n'), ((2267, 2316), 'lm.modeling.GroverConfig.from_json_file', 'GroverConfig.from_json_file', (['args.model_config_fn'], {}), '(args.model_config_fn)\n', (2294, 2316), False, 'from lm.modeling import GroverModel, GroverConfig, _top_p_sample, sample\n'), ((3128, 3169), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (3142, 3169), True, 'import tensorflow as tf\n'), ((3178, 3217), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (3199, 3217), True, 'import tensorflow as tf\n'), ((4171, 4264), 'runway.command', 'runway.command', (['"""generate"""'], {'inputs': "{'prompt': runway.text}", 'outputs': "{'text': runway.text}"}), "('generate', inputs={'prompt': runway.text}, outputs={'text':\n runway.text})\n", (4185, 4264), False, 'import runway\n'), ((2690, 2731), 'numpy.ceil', 'np.ceil', (['(args.batch_size / max_batch_size)'], {}), '(args.batch_size / max_batch_size)\n', (2697, 2731), True, 'import numpy as np\n'), ((2760, 2797), 'numpy.ceil', 'np.ceil', (['(args.batch_size / num_chunks)'], {}), '(args.batch_size / num_chunks)\n', (2767, 2797), True, 'import numpy as np\n'), ((3040, 3101), 'numpy.ones', 'np.ones', (['(num_chunks, batch_size_per_chunk)'], {'dtype': 'np.float32'}), '((num_chunks, batch_size_per_chunk), dtype=np.float32)\n', (3047, 3101), True, 'import numpy as np\n'), ((3394, 3448), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size_per_chunk, None]'], {}), '(tf.int32, [batch_size_per_chunk, None])\n', (3408, 3448), True, 'import tensorflow as tf\n'), ((3466, 3516), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size_per_chunk]'], {}), '(tf.float32, [batch_size_per_chunk])\n', (3480, 3516), True, 'import tensorflow as tf\n'), ((3533, 3561), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (3547, 3561), True, 'import tensorflow as tf\n'), ((3582, 3727), 'lm.modeling.sample', 'sample', ([], {'news_config': 'news_config', 'initial_context': 'initial_context', 'eos_token': 'eos_token', 'ignore_ids': 'None', 'p_for_topp': 'p_for_topp', 'do_topk': '(False)'}), '(news_config=news_config, initial_context=initial_context, eos_token=\n eos_token, ignore_ids=None, p_for_topp=p_for_topp, do_topk=False)\n', (3588, 3727), False, 'from lm.modeling import GroverModel, GroverConfig, _top_p_sample, sample\n'), ((3789, 3805), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3803, 3805), True, 'import tensorflow as tf\n'), ((4332, 4371), 'sample.encoder._tokenize_article_pieces', '_tokenize_article_pieces', (['encoder', 'text'], {}), '(encoder, text)\n', (4356, 4371), False, 'from sample.encoder import get_encoder, format_context, _tokenize_article_pieces, extract_generated_target\n'), ((4462, 4501), 'numpy.array', 'np.array', (['encoder.special_tokens_onehot'], {}), '(encoder.special_tokens_onehot)\n', (4470, 4501), True, 'import numpy as np\n'), ((5263, 5275), 'runway.run', 'runway.run', ([], {}), '()\n', (5273, 5275), False, 'import runway\n'), ((3255, 3278), 'os.path.join', 'os.path.join', (['path', '"""*"""'], {}), "(path, '*')\n", (3267, 3278), False, 'import os\n'), ((3322, 3352), 'runway.file', 'runway.file', ([], {'is_directory': '(True)'}), '(is_directory=True)\n', (3333, 3352), False, 'import runway\n'), ((5079, 5164), 
'sample.encoder.extract_generated_target', 'extract_generated_target', ([], {'output_tokens': 't_i', 'encoder': 'encoder', 'target': 'args.target'}), '(output_tokens=t_i, encoder=encoder, target=args.target\n )\n', (5103, 5164), False, 'from sample.encoder import get_encoder, format_context, _tokenize_article_pieces, extract_generated_target\n')] |
# Project hiatus
# script used to evaluate our models and analyse the results
# 16/11/2020
# <NAME>
# loading required packages
import os
import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
import torch
from sklearn.linear_model import LinearRegression
import sklearn
import random
# for manual visualisation
from rasterio.plot import show
# putting the right work directory
# machine-specific working directory for the project
os.chdir("/home/adminlocal/Bureau/GIT/hiatus_change_detection")
# importing the project's helper modules
import utils as fun
import train as train
import evaluate as eval_model
import metrics as fun_metrics
import warnings
# silence library warnings so the console output stays readable
warnings.filterwarnings('ignore')
print(
"""
Loading the model and the data
""")
# loading the dataset, getting a raster for later data visualisation
# after every epoch
import frejus_dataset
# load training data, ground-truth change maps and per-year rasters
train_data, gt_change, numpy_rasters = frejus_dataset.get_datasets(["1954","1966","1970", "1978", "1989"])
## loading the trained model checkpoint (multimodal + DAN + split codes)
name_model = "AE_Mmodal+DAN+split"
dict_model = torch.load("evaluation_models/"+name_model)
args = dict_model["args"]
trained_model = fun.load_model_from_dict(dict_model)
# setting the seed for reproducibility
fun.set_seed(args.seed, args.cuda)
## we take a test set of the gt_change for evaluation (20%)
# creating a new dict for gt test
# NOTE(review): gt_change_test is created here but never filled in this
# section -- the loop below keeps only the train split inside gt_change.
gt_change_test = {}
# single subset index list shared by all years (fixed random_state so the
# same split is drawn at every run)
train_idx, val_idx = train_test_split(list(range(len(gt_change["1970"]))), test_size=0.20, random_state=1)
# keep only the train indices of every year's ground-truth change maps
for year in gt_change:
    gt_change[year] = Subset(gt_change[year], train_idx)
print(
"""
Checking performance on ground truth change maps
We output the code subtraction with the model and on the baseline (simple
rasters subtraction)
""")
## generating prediction for the model (code subtraction)
pred, y, classes = eval_model.generate_prediction_model(gt_change, trained_model,
                                                 args)
## evaluate the baseline
# get prediction and targets with the baseline (altitude and radiometry
# raster subtraction, evaluated separately)
pred_alt, pred_rad, y = eval_model.generate_prediction_baseline(gt_change)
## ROC curve for the altitude baseline; the returned threshold is the
## operating point used by the IoU/accuracy evaluation just below
threshold=fun_metrics.visualize_roc(y, pred_alt, return_thresh=True)
fun_metrics.iou_accuracy(pred_alt, threshold, y, classes)
# same for the radiometry baseline
threshold=fun_metrics.visualize_roc(y, pred_rad, return_thresh=True)
fun_metrics.iou_accuracy(pred_rad, threshold, y, classes)
# ROC for the model
threshold=fun_metrics.visualize_roc(y, pred, return_thresh = True)
## getting the IoU and the accuracy of the model at that threshold
fun_metrics.iou_accuracy(pred, threshold, y, classes)
print(
"""
Visualizing change detection on the ground truth
""")
for i in range(30,40):
    # loading the 1954 and 1970 rasters (channels 1: drop the GT channel 0)
    nb = i
    rast1 = gt_change["1954"][nb][None,1:,:,:]
    rast2 = gt_change["1970"][nb][None,1:,:,:]
    # loading the gt (channel 0 of each sample)
    gts = [gt_change["1954"][nb][None,0,:,:].squeeze(),
           gt_change["1970"][nb][None,0,:,:].squeeze()]
    cmap, dccode, code1, code2 = fun.change_detection(rast1, rast2, trained_model,
                                                    args,
                                                    visualization=True,
                                                    threshold=threshold, gts=gts)
print(
"""
Performing normalized mutual information for continuous variables
""")
# load the per-pixel codes produced by the model and the two baseline
# inputs (channel 1 = DEM, channel 2 = radiometry)
codes_clean, labels_clean = fun.prepare_codes_metrics(gt_change, args, trained_model)
dem_clean = fun.prepare_data_metrics(gt_change, 1)
rad_clean = fun.prepare_data_metrics(gt_change, 2)
## getting the number of pixels per class (1=build, 2=road, 3=field)
nb_build = np.count_nonzero(labels_clean == 1)
nb_road = np.count_nonzero(labels_clean == 2)
nb_field = np.count_nonzero(labels_clean == 3)
nb_classes = (nb_build, nb_road, nb_field)
## splitting the dataset according to the class
# boolean masks, one per class label
buildings_idx = labels_clean == 1
roads_idx = labels_clean == 2
fields_idx = labels_clean == 3
# putting them into a list (same order as nb_classes)
classes_idx = [buildings_idx, roads_idx, fields_idx]
# calculating the NMI for the codes
fun_metrics.NMI_continuous_discrete(labels_clean, codes_clean,
                                    nb_classes, [1,2,3], classes_idx)
# calculating the NMI for the dem
fun_metrics.NMI_continuous_discrete(labels_clean, dem_clean[:,None],
                                    nb_classes, [1,2,3], classes_idx)
# calculating the NMI for the rad
fun_metrics.NMI_continuous_discrete(labels_clean, rad_clean[:,None],
                                    nb_classes, [1,2,3], classes_idx)
# calculating the NMI for both inputs stacked together
dem_rad = np.concatenate((rad_clean[:,None], dem_clean[:,None]), axis=1)
fun_metrics.NMI_continuous_discrete(labels_clean, dem_rad,
                                    nb_classes, [1,2,3], classes_idx)
print(
"""
Making a linear SVM
""")
## linear svm classifying pixels from the model's codes
conf_mat_model, class_report_model, scores_cv = fun_metrics.svm_accuracy_estimation(codes_clean,
                                                             labels_clean)
## linear svm with the dem alone
conf_mat_dem, class_report_dem, scores_cv = fun_metrics.svm_accuracy_estimation(dem_clean,
                                                             labels_clean)
## linear svm with the rad alone
conf_mat_rad, class_report_rad, scores_cv = fun_metrics.svm_accuracy_estimation(rad_clean,
                                                             labels_clean)
### Linear svm but distinct geographical locations
# getting ids for training and validation sets (no fixed seed here, so a
# different split is drawn at every run)
train_idx, val_idx = train_test_split(list(range(len(gt_change["1954"]))), test_size=0.25)
# loading two dictionaries for cross-validation
gt_change_train = {}
gt_change_test = {}
# creating test and train data on distinct locations
for year in gt_change:
    gt_change_train[year] = Subset(gt_change[year], train_idx)
    gt_change_test[year] = Subset(gt_change[year], val_idx)
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train= fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
## linear svm with the model
conf_mat_model, class_report_model, scores_cv_model = fun_metrics.svm_accuracy_estimation_2(codes_train, codes_test, labels_train, labels_test, cv=False)
## linear svm with the dem
conf_mat_dem, class_report_dem, scores_cv_dem = fun_metrics.svm_accuracy_estimation_2(dem_train, dem_test, labels_train, labels_test, cv=False)
## linear svm with the rad
conf_mat_rad, class_report_rad, scores_cv_rad = fun_metrics.svm_accuracy_estimation_2(rad_train, rad_test, labels_train, labels_test, cv=False)
## testing with only one year for train
# 1970 is kept for training, every other year goes to testing
gt_change_train = {}
gt_change_test = {}
for year in gt_change:
    if year == "1970":
        gt_change_train[year] =gt_change[year]
    else:
        gt_change_test[year] = gt_change[year]
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train= fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
## linear svm with the model
conf_mat_model, class_report_model, scores_cv_model = fun_metrics.svm_accuracy_estimation_2(codes_train, codes_test, labels_train, labels_test, cv=False)
## linear svm with the dem
conf_mat_dem, class_report_dem, scores_cv_dem = fun_metrics.svm_accuracy_estimation_2(dem_train, dem_test, labels_train, labels_test, cv=False)
## linear svm with the rad
conf_mat_rad, class_report_rad, scores_cv_rad = fun_metrics.svm_accuracy_estimation_2(rad_train, rad_test, labels_train, labels_test, cv=False)
print("""
Now we do transfer learning (bayesian model)
""")
## loading the pre-trained model and overriding its training settings
## before fine-tuning it (1 epoch, defiance head on, no checkpointing)
dict_model = torch.load("evaluation_models/test_transfer_aleo")
dict_model["args"].epochs = 1
dict_model["args"].defiance = 1
dict_model["args"].save = 0
dict_model["args"].load_best_model = 1
dict_model["args"].grad_clip = 0
dict_model["args"].name_model = "bayesian_model"
# updating the args
args = dict_model["args"]
# fine-tune the loaded model on the training data
trained_model = train.train_full(args, train_data, gt_change, dict_model)
print("""
Performing change detection with the alternative model (training the model
and then assessing the result)
""")
# list of years
years = ["1954","1966", "1970", "1978", "1989"]
# loading the data
import frejus_dataset
train_data, gt_change, numpy_rasters = frejus_dataset.get_datasets(["1954","1966","1970", "1978", "1989"])
# loading the args of the pre-trained model
dict_model = torch.load("evaluation_models/pre_trained_baseline")
args = dict_model["args"]
# setting the number of epochs
args.epochs = 5
args.save = 0
# train one alternative model per ordered pair of distinct years
# getting the year for the first raster
for year1 in years:
    # getting the year for the second raster
    for year2 in years:
        # only keep each pair once, in chronological order
        if year1 != year2 and year2 > year1:
            # naming the model after the pair of years it covers
            args.name_model = year1+"to"+year2+"_baseline"
            # loading the data for these two years only
            train_data, _, numpy_rasters = frejus_dataset.get_datasets([year1,year2])
            # taking the two years and converting into torch tensors
            numpy_rasters[year1] = [fun.torch_raster(raster, cuda=False) for raster in numpy_rasters[year1]]
            numpy_rasters[year2] = [fun.torch_raster(raster, cuda=False) for raster in numpy_rasters[year2]]
            # training and saving the model
            _ = train.train_full_alternative_model(args, numpy_rasters, dict_model)
## evaluating the model on the ground-truth change maps
pred, y, classes = eval_model.generate_prediction_baseline_model(gt_change, args)
# ROC
threshold=fun_metrics.visualize_roc(y, pred, return_thresh=False)
# accuracy and IoU (fixed 0.69 threshold)
fun_metrics.iou_accuracy(pred, 0.69, y, classes)
print("""
Visualizing change detection on the ground truth
""")
for i in range(10):
    # loading the 1954 and 1970 rasters (channels 1: drop the GT channel 0)
    nb = i
    rast1 = gt_change["1954"][nb][None,1:,:,:]
    rast2 = gt_change["1970"][nb][None,1:,:,:]
    # loading the gt (channel 0 of each sample)
    gts = [gt_change["1954"][nb][None,0,:,:].squeeze(),
           gt_change["1970"][nb][None,0,:,:].squeeze()]
    fun.change_detection_baseline(rast1, rast2, ["1954", "1970"], args,
                                  visualization=True,
                                  threshold=1.3, gts=gts)
print("""
Estimating correlation between codes, DEM and rad
""")
# getting the index split for cross-validation (distinct locations)
train_idx, val_idx = train_test_split(list(range(len(gt_change["1954"]))), test_size=0.25)
# empty dicts to store train and test sets
gt_change_train = {}
gt_change_test = {}
# loading train and test sets
for year in gt_change:
    gt_change_train[year] = Subset(gt_change[year], train_idx)
    gt_change_test[year] = Subset(gt_change[year], val_idx)
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train= fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
# linear regression predicting the DEM from the codes
lr_dem = LinearRegression()
lr_dem.fit(codes_train, dem_train)
pred_dem = lr_dem.predict(codes_test)
# mean absolute error and R2 on the held-out set
mae_dem = sum(abs(pred_dem - dem_test)) / dem_test.shape[0]
r2_dem = sklearn.metrics.r2_score(dem_test, pred_dem)
#print(mae_dem)
print("R2 for dem is %1.2f" % (r2_dem))
print("\n")
print(abs(lr_dem.coef_).mean())
# Linear regression predicting the radiometry from the model's codes,
# mirroring the DEM regression above.
lr_rad = LinearRegression()
lr_rad.fit(codes_train, rad_train)
pred_rad = lr_rad.predict(codes_test)
# mean absolute error on the held-out set
# (fixed: the denominator previously used dem_test.shape[0] by
# copy-paste; the radiometry test set is the correct reference)
mae_rad = sum(abs(pred_rad - rad_test)) / rad_test.shape[0]
r2_rad = sklearn.metrics.r2_score(rad_test, pred_rad)
#print(mae_rad)
print("R2 for rad is %1.2f" % (r2_rad))
print("\n")
print(abs(lr_rad.coef_).mean())
### computing the MI
# stack train and test back together so the MI uses every pixel
codes_train = np.concatenate((codes_train, codes_test), axis=0)
dem_train = np.concatenate((dem_train, dem_test), axis=None)
rad_train = np.concatenate((rad_train, rad_test), axis=None)
## binning the continuous DEM and radiometry into deciles
# getting the value of the quantiles (9 inner decile edges)
values_dem_cut = np.quantile(dem_train, [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
values_rad_cut = np.quantile(rad_train, [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
# binning the data with the quantiles (bin ids 0..9)
dem_discrete = np.digitize(dem_train,bins=values_dem_cut)
rad_discrete = np.digitize(rad_train,bins=values_rad_cut)
# lists to store class related indexes
classes_dem_idx = []
classes_rad_idx = []
# lists to store the number of samples per class
nb_classes_dem = []
nb_classes_rad = []
for i in range(10):
    ## class related data for DEM
    # boolean mask of the pixels falling in decile i
    class_idx = dem_discrete == i
    classes_dem_idx.append(class_idx)
    # number of samples in the class
    nb_classes_dem.append(np.count_nonzero(dem_discrete == i))
    # same operation, for the radiometry
    class_idx = rad_discrete == i
    classes_rad_idx.append(class_idx)
    nb_classes_rad.append(np.count_nonzero(rad_discrete == i))
# calculating the NMI between the binned DEM and the codes
mi_dem = fun_metrics.NMI_continuous_discrete(dem_discrete, codes_train,
                                             nb_classes_dem, list(range(10)), classes_dem_idx)
print("%1.2f" % (mi_dem))
# calculating the NMI between the binned radiometry and the codes
mi_rad = fun_metrics.NMI_continuous_discrete(rad_discrete, codes_train,
                                             nb_classes_rad, list(range(10)), classes_rad_idx)
print("%1.2f" % (mi_rad))
print("""
calculating the MI per raster
""")
# getting a random raster from the GT
nb = random.randint(0, 40)
raster = gt_change["1970"][nb]
# getting the MI per raster
print("rad")
fun.MI_raster(raster, "AE_rad")
print("\n")
print("Mmodal")
fun.MI_raster(raster, "AE_Mmodal", visu=True)
print("\n")
print("DAN")
fun.MI_raster(raster, "AE_Mmodal+DAN")
print("\n")
print("""
Doing tsne visualization on the ground truth
""")
# tsne on a single raster with different models
nb = random.randint(0, 40)
raster = gt_change["1970"][nb]
fun.tsne_visualization(raster, trained_model, "AE_rad")
fun.tsne_visualization(raster, trained_model, "AE_rad+DAN")
fun.tsne_visualization(raster, trained_model, "AE_Mmodal")
fun.tsne_visualization(raster, trained_model, "AE_Mmodal+DAN")
# tsne on the whole dataset with different model
fun.tsne_dataset(gt_change, "AE_rad")
fun.tsne_dataset(gt_change, "AE_rad+DAN")
fun.tsne_dataset(gt_change, "AE_Mmodal")
fun.tsne_dataset(gt_change, "AE_Mmodal+DAN")
fun.tsne_dataset(gt_change, "AE_Mmodal+DAN+split")
print(
"""
We now test the results for several models
""")
# Evaluate every saved model on the ground-truth change maps; each model
# name is printed right before its evaluation so the console output is
# easy to follow.
for model_name in (
        "AE_rad",
        "AE_rad+DAN",
        "AE_Mmodal",
        "AE_Mmodal+DAN",
        "AE_Mmodal+DAN+split",
        "AE_alt+DAN",
        "bayesian_model",
):
    print(model_name)
    eval_model.evaluate_model(model_name, gt_change)
print(
"""
Visualizing some predictions for the autoencoder
""")
# removing the year vectors (keep only the raster part of each sample)
datasets = [raster[0] for raster in train_data]
for i in range(10,15):
    # visualizing the training raster
    raster = datasets[i]
    fun.visualize(raster, third_dim=False)
    # visualizing the model's reconstruction of the same raster
    pred = trained_model.predict(raster[None,:,:,:].float().cuda(), args)[0].cpu()
    pred = fun.numpy_raster(pred)
    fun.visualize(pred, third_dim=False, defiance=args.defiance)
    # scatter plot for the defiance (uncertainty head), when enabled
    if args.defiance:
        fun.scatter_aleo(raster[1,:,:], pred[1,:,:], pred[2,:,:])
print(
'''
Now we are going to visualize various embeddings in the model itself
''')
# visualizing the inner embeddings for a random training raster
fun.view_u(datasets, trained_model, args, random.randint(0, 900))
# visualizing the embeddings inside the model, for the same random index
# taken on two different years
nb = random.randint(0, 900)
print(nb)
fun.view_u(numpy_rasters["1989"], trained_model, args, nb)
fun.view_u(numpy_rasters["1970"], trained_model, args, nb)
# (removed a stray bare literal `137` left over from an interactive
# session; it evaluated to nothing and was dead code)
print(
"""
Performing change detection analysis on some examples
""")
# drawing one random sample index and loading the two rasters to compare
# (fixed: this section previously printed and indexed with the stale loop
# variable `i` from an earlier loop instead of the freshly drawn `nb`)
nb = random.randint(0, 900)
print(nb)
rast1 = numpy_rasters["1954"][nb][None,:,:,:]
rast2 = numpy_rasters["1989"][nb][None,:,:,:]
# computing the change raster between the two dates
cmap, dccode, code1, code2 = fun.change_detection(rast1, rast2, trained_model, args,
                                       threshold=threshold, visualization=True)
| [
"utils.visualize",
"evaluate.evaluate_model",
"frejus_dataset.get_datasets",
"utils.set_seed",
"evaluate.generate_prediction_baseline",
"sklearn.metrics.r2_score",
"utils.load_model_from_dict",
"metrics.svm_accuracy_estimation_2",
"utils.change_detection",
"utils.scatter_aleo",
"evaluate.generat... | [((436, 499), 'os.chdir', 'os.chdir', (['"""/home/adminlocal/Bureau/GIT/hiatus_change_detection"""'], {}), "('/home/adminlocal/Bureau/GIT/hiatus_change_detection')\n", (444, 499), False, 'import os\n'), ((648, 681), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (671, 681), False, 'import warnings\n'), ((904, 973), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (["['1954', '1966', '1970', '1978', '1989']"], {}), "(['1954', '1966', '1970', '1978', '1989'])\n", (931, 973), False, 'import frejus_dataset\n'), ((1042, 1087), 'torch.load', 'torch.load', (["('evaluation_models/' + name_model)"], {}), "('evaluation_models/' + name_model)\n", (1052, 1087), False, 'import torch\n'), ((1128, 1164), 'utils.load_model_from_dict', 'fun.load_model_from_dict', (['dict_model'], {}), '(dict_model)\n', (1152, 1164), True, 'import utils as fun\n'), ((1185, 1219), 'utils.set_seed', 'fun.set_seed', (['args.seed', 'args.cuda'], {}), '(args.seed, args.cuda)\n', (1197, 1219), True, 'import utils as fun\n'), ((1817, 1885), 'evaluate.generate_prediction_model', 'eval_model.generate_prediction_model', (['gt_change', 'trained_model', 'args'], {}), '(gt_change, trained_model, args)\n', (1853, 1885), True, 'import evaluate as eval_model\n'), ((2029, 2079), 'evaluate.generate_prediction_baseline', 'eval_model.generate_prediction_baseline', (['gt_change'], {}), '(gt_change)\n', (2068, 2079), True, 'import evaluate as eval_model\n'), ((2115, 2173), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred_alt'], {'return_thresh': '(True)'}), '(y, pred_alt, return_thresh=True)\n', (2140, 2173), True, 'import metrics as fun_metrics\n'), ((2174, 2231), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred_alt', 'threshold', 'y', 'classes'], {}), '(pred_alt, threshold, y, classes)\n', (2198, 2231), True, 'import metrics as fun_metrics\n'), ((2242, 2300), 'metrics.visualize_roc', 
'fun_metrics.visualize_roc', (['y', 'pred_rad'], {'return_thresh': '(True)'}), '(y, pred_rad, return_thresh=True)\n', (2267, 2300), True, 'import metrics as fun_metrics\n'), ((2301, 2358), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred_rad', 'threshold', 'y', 'classes'], {}), '(pred_rad, threshold, y, classes)\n', (2325, 2358), True, 'import metrics as fun_metrics\n'), ((2390, 2444), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred'], {'return_thresh': '(True)'}), '(y, pred, return_thresh=True)\n', (2415, 2444), True, 'import metrics as fun_metrics\n'), ((2484, 2537), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred', 'threshold', 'y', 'classes'], {}), '(pred, threshold, y, classes)\n', (2508, 2537), True, 'import metrics as fun_metrics\n'), ((3354, 3411), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change', 'args', 'trained_model'], {}), '(gt_change, args, trained_model)\n', (3379, 3411), True, 'import utils as fun\n'), ((3424, 3462), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change', '(1)'], {}), '(gt_change, 1)\n', (3448, 3462), True, 'import utils as fun\n'), ((3475, 3513), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change', '(2)'], {}), '(gt_change, 2)\n', (3499, 3513), True, 'import utils as fun\n'), ((3570, 3605), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 1)'], {}), '(labels_clean == 1)\n', (3586, 3605), True, 'import numpy as np\n'), ((3616, 3651), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 2)'], {}), '(labels_clean == 2)\n', (3632, 3651), True, 'import numpy as np\n'), ((3663, 3698), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 3)'], {}), '(labels_clean == 3)\n', (3679, 3698), True, 'import numpy as np\n'), ((4018, 4120), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'codes_clean', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, 
codes_clean, nb_classes,\n [1, 2, 3], classes_idx)\n', (4053, 4120), True, 'import metrics as fun_metrics\n'), ((4185, 4294), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'dem_clean[:, None]', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, dem_clean[:, None],\n nb_classes, [1, 2, 3], classes_idx)\n', (4220, 4294), True, 'import metrics as fun_metrics\n'), ((4359, 4468), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'rad_clean[:, None]', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, rad_clean[:, None],\n nb_classes, [1, 2, 3], classes_idx)\n', (4394, 4468), True, 'import metrics as fun_metrics\n'), ((4551, 4615), 'numpy.concatenate', 'np.concatenate', (['(rad_clean[:, None], dem_clean[:, None])'], {'axis': '(1)'}), '((rad_clean[:, None], dem_clean[:, None]), axis=1)\n', (4565, 4615), True, 'import numpy as np\n'), ((4614, 4713), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'dem_rad', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, dem_rad, nb_classes, [1, \n 2, 3], classes_idx)\n', (4649, 4713), True, 'import metrics as fun_metrics\n'), ((4863, 4925), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['codes_clean', 'labels_clean'], {}), '(codes_clean, labels_clean)\n', (4898, 4925), True, 'import metrics as fun_metrics\n'), ((5071, 5131), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['dem_clean', 'labels_clean'], {}), '(dem_clean, labels_clean)\n', (5106, 5131), True, 'import metrics as fun_metrics\n'), ((5277, 5337), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['rad_clean', 'labels_clean'], {}), '(rad_clean, labels_clean)\n', (5312, 5337), True, 'import metrics as fun_metrics\n'), ((5935, 5998), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_train', 'args', 
'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (5960, 5998), True, 'import utils as fun\n'), ((6011, 6055), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (6035, 6055), True, 'import utils as fun\n'), ((6067, 6111), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (6091, 6111), True, 'import utils as fun\n'), ((6155, 6217), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (6180, 6217), True, 'import utils as fun\n'), ((6229, 6272), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (6253, 6272), True, 'import utils as fun\n'), ((6284, 6327), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (6308, 6327), True, 'import utils as fun\n'), ((6412, 6515), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['codes_train', 'codes_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(codes_train, codes_test, labels_train,\n labels_test, cv=False)\n', (6449, 6515), True, 'import metrics as fun_metrics\n'), ((6588, 6687), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['dem_train', 'dem_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(dem_train, dem_test, labels_train,\n labels_test, cv=False)\n', (6625, 6687), True, 'import metrics as fun_metrics\n'), ((6760, 6859), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['rad_train', 'rad_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(rad_train, rad_test, labels_train,\n labels_test, cv=False)\n', (6797, 6859), True, 'import metrics as fun_metrics\n'), ((7182, 7245), 'utils.prepare_codes_metrics', 
'fun.prepare_codes_metrics', (['gt_change_train', 'args', 'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (7207, 7245), True, 'import utils as fun\n'), ((7258, 7302), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (7282, 7302), True, 'import utils as fun\n'), ((7314, 7358), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (7338, 7358), True, 'import utils as fun\n'), ((7402, 7464), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (7427, 7464), True, 'import utils as fun\n'), ((7476, 7519), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (7500, 7519), True, 'import utils as fun\n'), ((7531, 7574), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (7555, 7574), True, 'import utils as fun\n'), ((7659, 7762), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['codes_train', 'codes_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(codes_train, codes_test, labels_train,\n labels_test, cv=False)\n', (7696, 7762), True, 'import metrics as fun_metrics\n'), ((7835, 7934), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['dem_train', 'dem_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(dem_train, dem_test, labels_train,\n labels_test, cv=False)\n', (7872, 7934), True, 'import metrics as fun_metrics\n'), ((8007, 8106), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['rad_train', 'rad_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(rad_train, rad_test, labels_train,\n labels_test, cv=False)\n', (8044, 8106), True, 'import metrics as fun_metrics\n'), ((8218, 
8268), 'torch.load', 'torch.load', (['"""evaluation_models/test_transfer_aleo"""'], {}), "('evaluation_models/test_transfer_aleo')\n", (8228, 8268), False, 'import torch\n'), ((8564, 8621), 'train.train_full', 'train.train_full', (['args', 'train_data', 'gt_change', 'dict_model'], {}), '(args, train_data, gt_change, dict_model)\n', (8580, 8621), True, 'import train as train\n'), ((8902, 8971), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (["['1954', '1966', '1970', '1978', '1989']"], {}), "(['1954', '1966', '1970', '1978', '1989'])\n", (8929, 8971), False, 'import frejus_dataset\n'), ((9028, 9080), 'torch.load', 'torch.load', (['"""evaluation_models/pre_trained_baseline"""'], {}), "('evaluation_models/pre_trained_baseline')\n", (9038, 9080), False, 'import torch\n'), ((10150, 10212), 'evaluate.generate_prediction_baseline_model', 'eval_model.generate_prediction_baseline_model', (['gt_change', 'args'], {}), '(gt_change, args)\n', (10195, 10212), True, 'import evaluate as eval_model\n'), ((10230, 10285), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred'], {'return_thresh': '(False)'}), '(y, pred, return_thresh=False)\n', (10255, 10285), True, 'import metrics as fun_metrics\n'), ((10306, 10354), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred', '(0.69)', 'y', 'classes'], {}), '(pred, 0.69, y, classes)\n', (10330, 10354), True, 'import metrics as fun_metrics\n'), ((11503, 11566), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_train', 'args', 'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (11528, 11566), True, 'import utils as fun\n'), ((11579, 11623), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (11603, 11623), True, 'import utils as fun\n'), ((11635, 11679), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (11659, 11679), True, 'import 
utils as fun\n'), ((11723, 11785), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (11748, 11785), True, 'import utils as fun\n'), ((11797, 11840), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (11821, 11840), True, 'import utils as fun\n'), ((11852, 11895), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (11876, 11895), True, 'import utils as fun\n'), ((11935, 11953), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11951, 11953), False, 'from sklearn.linear_model import LinearRegression\n'), ((12104, 12148), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['dem_test', 'pred_dem'], {}), '(dem_test, pred_dem)\n', (12128, 12148), False, 'import sklearn\n'), ((12289, 12307), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12305, 12307), False, 'from sklearn.linear_model import LinearRegression\n'), ((12458, 12502), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['rad_test', 'pred_rad'], {}), '(rad_test, pred_rad)\n', (12482, 12502), False, 'import sklearn\n'), ((12677, 12726), 'numpy.concatenate', 'np.concatenate', (['(codes_train, codes_test)'], {'axis': '(0)'}), '((codes_train, codes_test), axis=0)\n', (12691, 12726), True, 'import numpy as np\n'), ((12739, 12787), 'numpy.concatenate', 'np.concatenate', (['(dem_train, dem_test)'], {'axis': 'None'}), '((dem_train, dem_test), axis=None)\n', (12753, 12787), True, 'import numpy as np\n'), ((12800, 12848), 'numpy.concatenate', 'np.concatenate', (['(rad_train, rad_test)'], {'axis': 'None'}), '((rad_train, rad_test), axis=None)\n', (12814, 12848), True, 'import numpy as np\n'), ((12924, 12993), 'numpy.quantile', 'np.quantile', (['dem_train', '[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]'], {}), 
'(dem_train, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n', (12935, 12993), True, 'import numpy as np\n'), ((13003, 13072), 'numpy.quantile', 'np.quantile', (['rad_train', '[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]'], {}), '(rad_train, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n', (13014, 13072), True, 'import numpy as np\n'), ((13119, 13162), 'numpy.digitize', 'np.digitize', (['dem_train'], {'bins': 'values_dem_cut'}), '(dem_train, bins=values_dem_cut)\n', (13130, 13162), True, 'import numpy as np\n'), ((13177, 13220), 'numpy.digitize', 'np.digitize', (['rad_train'], {'bins': 'values_rad_cut'}), '(rad_train, bins=values_rad_cut)\n', (13188, 13220), True, 'import numpy as np\n'), ((14386, 14407), 'random.randint', 'random.randint', (['(0)', '(40)'], {}), '(0, 40)\n', (14400, 14407), False, 'import random\n'), ((14481, 14512), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_rad"""'], {}), "(raster, 'AE_rad')\n", (14494, 14512), True, 'import utils as fun\n'), ((14541, 14586), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_Mmodal"""'], {'visu': '(True)'}), "(raster, 'AE_Mmodal', visu=True)\n", (14554, 14586), True, 'import utils as fun\n'), ((14612, 14650), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_Mmodal+DAN"""'], {}), "(raster, 'AE_Mmodal+DAN')\n", (14625, 14650), True, 'import utils as fun\n'), ((14792, 14813), 'random.randint', 'random.randint', (['(0)', '(40)'], {}), '(0, 40)\n', (14806, 14813), False, 'import random\n'), ((14845, 14900), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_rad"""'], {}), "(raster, trained_model, 'AE_rad')\n", (14867, 14900), True, 'import utils as fun\n'), ((14901, 14960), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_rad+DAN"""'], {}), "(raster, trained_model, 'AE_rad+DAN')\n", (14923, 14960), True, 'import utils as fun\n'), ((14961, 15019), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 
'trained_model', '"""AE_Mmodal"""'], {}), "(raster, trained_model, 'AE_Mmodal')\n", (14983, 15019), True, 'import utils as fun\n'), ((15020, 15082), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_Mmodal+DAN"""'], {}), "(raster, trained_model, 'AE_Mmodal+DAN')\n", (15042, 15082), True, 'import utils as fun\n'), ((15133, 15170), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_rad"""'], {}), "(gt_change, 'AE_rad')\n", (15149, 15170), True, 'import utils as fun\n'), ((15171, 15212), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_rad+DAN"""'], {}), "(gt_change, 'AE_rad+DAN')\n", (15187, 15212), True, 'import utils as fun\n'), ((15213, 15253), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal"""'], {}), "(gt_change, 'AE_Mmodal')\n", (15229, 15253), True, 'import utils as fun\n'), ((15254, 15298), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal+DAN"""'], {}), "(gt_change, 'AE_Mmodal+DAN')\n", (15270, 15298), True, 'import utils as fun\n'), ((15299, 15349), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal+DAN+split"""'], {}), "(gt_change, 'AE_Mmodal+DAN+split')\n", (15315, 15349), True, 'import utils as fun\n'), ((15428, 15474), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad"""', 'gt_change'], {}), "('AE_rad', gt_change)\n", (15453, 15474), True, 'import evaluate as eval_model\n'), ((15495, 15545), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad+DAN"""', 'gt_change'], {}), "('AE_rad+DAN', gt_change)\n", (15520, 15545), True, 'import evaluate as eval_model\n'), ((15565, 15614), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal"""', 'gt_change'], {}), "('AE_Mmodal', gt_change)\n", (15590, 15614), True, 'import evaluate as eval_model\n'), ((15638, 15691), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN"""', 'gt_change'], {}), "('AE_Mmodal+DAN', 
gt_change)\n", (15663, 15691), True, 'import evaluate as eval_model\n'), ((15722, 15781), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN+split"""', 'gt_change'], {}), "('AE_Mmodal+DAN+split', gt_change)\n", (15747, 15781), True, 'import evaluate as eval_model\n'), ((15802, 15852), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_alt+DAN"""', 'gt_change'], {}), "('AE_alt+DAN', gt_change)\n", (15827, 15852), True, 'import evaluate as eval_model\n'), ((15877, 15931), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""bayesian_model"""', 'gt_change'], {}), "('bayesian_model', gt_change)\n", (15902, 15931), True, 'import evaluate as eval_model\n'), ((16845, 16867), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (16859, 16867), False, 'import random\n'), ((16879, 16937), 'utils.view_u', 'fun.view_u', (["numpy_rasters['1989']", 'trained_model', 'args', 'nb'], {}), "(numpy_rasters['1989'], trained_model, args, nb)\n", (16889, 16937), True, 'import utils as fun\n'), ((16938, 16996), 'utils.view_u', 'fun.view_u', (["numpy_rasters['1970']", 'trained_model', 'args', 'nb'], {}), "(numpy_rasters['1970'], trained_model, args, nb)\n", (16948, 16996), True, 'import utils as fun\n'), ((17121, 17143), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (17135, 17143), False, 'import random\n'), ((17299, 17399), 'utils.change_detection', 'fun.change_detection', (['rast1', 'rast2', 'trained_model', 'args'], {'threshold': 'threshold', 'visualization': '(True)'}), '(rast1, rast2, trained_model, args, threshold=threshold,\n visualization=True)\n', (17319, 17399), True, 'import utils as fun\n'), ((1557, 1591), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (1563, 1591), False, 'from torch.utils.data import Subset\n'), ((2940, 3049), 'utils.change_detection', 'fun.change_detection', (['rast1', 'rast2', 'trained_model', 
'args'], {'visualization': '(True)', 'threshold': 'threshold', 'gts': 'gts'}), '(rast1, rast2, trained_model, args, visualization=True,\n threshold=threshold, gts=gts)\n', (2960, 3049), True, 'import utils as fun\n'), ((5794, 5828), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (5800, 5828), False, 'from torch.utils.data import Subset\n'), ((5856, 5888), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'val_idx'], {}), '(gt_change[year], val_idx)\n', (5862, 5888), False, 'from torch.utils.data import Subset\n'), ((10737, 10852), 'utils.change_detection_baseline', 'fun.change_detection_baseline', (['rast1', 'rast2', "['1954', '1970']", 'args'], {'visualization': '(True)', 'threshold': '(1.3)', 'gts': 'gts'}), "(rast1, rast2, ['1954', '1970'], args,\n visualization=True, threshold=1.3, gts=gts)\n", (10766, 10852), True, 'import utils as fun\n'), ((11358, 11392), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (11364, 11392), False, 'from torch.utils.data import Subset\n'), ((11420, 11452), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'val_idx'], {}), '(gt_change[year], val_idx)\n', (11426, 11452), False, 'from torch.utils.data import Subset\n'), ((16180, 16218), 'utils.visualize', 'fun.visualize', (['raster'], {'third_dim': '(False)'}), '(raster, third_dim=False)\n', (16193, 16218), True, 'import utils as fun\n'), ((16347, 16369), 'utils.numpy_raster', 'fun.numpy_raster', (['pred'], {}), '(pred)\n', (16363, 16369), True, 'import utils as fun\n'), ((16374, 16434), 'utils.visualize', 'fun.visualize', (['pred'], {'third_dim': '(False)', 'defiance': 'args.defiance'}), '(pred, third_dim=False, defiance=args.defiance)\n', (16387, 16434), True, 'import utils as fun\n'), ((16774, 16796), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (16788, 16796), False, 'import random\n'), ((13610, 13645), 
'numpy.count_nonzero', 'np.count_nonzero', (['(dem_discrete == i)'], {}), '(dem_discrete == i)\n', (13626, 13645), True, 'import numpy as np\n'), ((13792, 13827), 'numpy.count_nonzero', 'np.count_nonzero', (['(rad_discrete == i)'], {}), '(rad_discrete == i)\n', (13808, 13827), True, 'import numpy as np\n'), ((16506, 16569), 'utils.scatter_aleo', 'fun.scatter_aleo', (['raster[1, :, :]', 'pred[1, :, :]', 'pred[2, :, :]'], {}), '(raster[1, :, :], pred[1, :, :], pred[2, :, :])\n', (16522, 16569), True, 'import utils as fun\n'), ((9604, 9647), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (['[year1, year2]'], {}), '([year1, year2])\n', (9631, 9647), False, 'import frejus_dataset\n'), ((10021, 10088), 'train.train_full_alternative_model', 'train.train_full_alternative_model', (['args', 'numpy_rasters', 'dict_model'], {}), '(args, numpy_rasters, dict_model)\n', (10055, 10088), True, 'import train as train\n'), ((9753, 9789), 'utils.torch_raster', 'fun.torch_raster', (['raster'], {'cuda': '(False)'}), '(raster, cuda=False)\n', (9769, 9789), True, 'import utils as fun\n'), ((9862, 9898), 'utils.torch_raster', 'fun.torch_raster', (['raster'], {'cuda': '(False)'}), '(raster, cuda=False)\n', (9878, 9898), True, 'import utils as fun\n')] |
import numpy as np
class RosToRave(object):
    """Map ROS joint_states messages onto an OpenRAVE robot.

    Resolves the ROS joint names against the robot's joints once at
    construction time, then reorders incoming ROS joint values into
    OpenRAVE DOF order on demand.
    """
    def __init__(self, robot, ros_names):
        """
        robot: OpenRAVE robot used to resolve joint names to indices.
        ros_names: joint names in the order they appear in ROS messages.
        """
        self.initialized = False
        self.ros_names = ros_names
        # OpenRAVE joint index for each ROS joint name (-1 if the robot lacks it)
        inds_ros2rave = np.array([robot.GetJointIndex(name) for name in self.ros_names])
        # positions within the ROS message that have a matching rave joint
        self.good_ros_inds = np.flatnonzero(inds_ros2rave != -1)
        # openrave indices corresponding to those joints
        self.rave_inds = inds_ros2rave[self.good_ros_inds]
    def convert(self, ros_values):
        """Return the subset of ros_values with a matching robot joint,
        ordered to correspond element-wise with self.rave_inds."""
        return [ros_values[i_ros] for i_ros in self.good_ros_inds]
    def set_values(self, robot, ros_values):
        """Push the matching subset of ros_values onto the robot's DOFs."""
        # reuse convert() so the index-mapping logic lives in one place
        rave_values = self.convert(ros_values)
        robot.SetDOFValues(rave_values, self.rave_inds, 0)
| [
"numpy.flatnonzero"
] | [((372, 407), 'numpy.flatnonzero', 'np.flatnonzero', (['(inds_ros2rave != -1)'], {}), '(inds_ros2rave != -1)\n', (386, 407), True, 'import numpy as np\n')] |
from keras.models import Sequential
from keras.layers import Conv2D,MaxPool2D
from keras.layers import Activation,Dropout,Flatten,Dense
from keras.callbacks import EarlyStopping
from keras.models import model_from_json
from keras.models import load_model
import numpy as np
import csv
import cv2
import os
from keras.callbacks import ModelCheckpoint
from keras.layers.normalization import BatchNormalization
def CNN_Arch(image_size):
    """Assemble and compile the binary-classification CNN.

    image_size: input tensor shape (height, width, channels).
    Returns the compiled keras Sequential model (sigmoid output,
    binary cross-entropy loss, Adam optimizer).
    """
    layer_stack = [
        Conv2D(32, (3, 3), strides=(1, 1), activation='relu', input_shape=image_size),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Conv2D(32, (3, 3), strides=(1, 1), activation='relu'),
        MaxPool2D(pool_size=(2, 2), strides=(1, 1)),
        Conv2D(64, (3, 3), strides=(1, 1), activation='relu'),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Conv2D(32, (3, 3), strides=(1, 1), activation='relu'),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Flatten(),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ]
    net = Sequential()
    for layer in layer_stack:
        net.add(layer)
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    net.summary()
    return net
# Load the labelled training images listed in training_data.csv.
# Each row is (image_path, label); images are converted to 100x100
# single-channel grayscale arrays.
input_file = open('training_data.csv','r')
reader = csv.reader(input_file)
x_data = []
y_data = []
# per-class counters used below to report class balance
zero = 0
one = 0
for row in reader:
    image_name = row[0]
    #print(image_name)
    img = cv2.imread(image_name)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    r,c = img.shape
    img = cv2.resize(img,(100,100))
    imgray = img.reshape([100,100,1])
    #imgray = img.reshape([r, c, 1])
    # NOTE(review): `count` is assigned but never used
    count = 0
    x_data.append(imgray)
    y_data.append(int(row[1]))
    if int(row[1]) == 0:
        zero += 1
    else:
        one += 1
# callbacks we have to define
print(zero, one)
print("training data loaded.")
# weight class 0 heavier to compensate for class imbalance
class_weight = {0: 5.0, 1: 1.0}
# Extend the training set and build a held-out test set from the
# normals-vs-abnormals image folders: per directory, the first 49 files
# go to training and the rest to test. Labels come from the path name
# ("abnormalsJPG" -> 1, otherwise 0).
x_test = []
y_test = []
for root, dirs, files in os.walk('./normalsVsAbnormalsV1'):
    i = 0  # file counter, reset for each directory
    for file in files:
        i = i + 1
        image_name = os.path.join(root, file)
        img = cv2.imread(image_name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (100, 100))
        img = img.reshape([100, 100, 1])
        if i < 50:
            x_data.append(img)
            if "abnormalsJPG" in image_name:
                y_data.append(1)
            else:
                y_data.append(0)
        else:
            x_test.append(img)
            if "abnormalsJPG" in image_name:
                y_test.append(1)
            else:
                y_test.append(0)
# Convert the accumulated python lists into numpy arrays for keras.
x_data = np.array(x_data)
y_data = np.array(y_data)
x_test = np.array(x_test)
# BUG FIX: this line previously read `y_test - np.array(y_test)`, which
# computed a throwaway difference and left y_test as a plain list.
y_test = np.array(y_test)
BATCH_SIZE = 256
image_size = x_data[0].shape
#print(image_size)
model = CNN_Arch(image_size)
EPOCHS = 50
# Early stopping callback: halt when validation loss stops improving.
PATIENCE = 10
early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE, verbose=0, mode='auto')
callbacks = [early_stopping]
filepath = "./model_cnn_Avi/cnn.hdf5"
# NOTE(review): `checkpoint` is created but never added to `callbacks`,
# so the best-model snapshot is never written -- confirm whether it
# should be appended to the callback list.
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model.fit(x_data, y_data, epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callbacks, verbose=1, validation_data=(x_test, y_test), class_weight=class_weight)
model.save_weights("CNN_Model_Arch1.h5")
| [
"csv.reader",
"keras.callbacks.ModelCheckpoint",
"cv2.cvtColor",
"keras.layers.Dropout",
"os.walk",
"keras.layers.MaxPool2D",
"keras.layers.Flatten",
"cv2.imread",
"keras.callbacks.EarlyStopping",
"numpy.array",
"keras.layers.Conv2D",
"keras.layers.Dense",
"keras.models.Sequential",
"os.pa... | [((1303, 1325), 'csv.reader', 'csv.reader', (['input_file'], {}), '(input_file)\n', (1313, 1325), False, 'import csv\n'), ((1957, 1990), 'os.walk', 'os.walk', (['"""./normalsVsAbnormalsV1"""'], {}), "('./normalsVsAbnormalsV1')\n", (1964, 1990), False, 'import os\n'), ((2625, 2641), 'numpy.array', 'np.array', (['x_data'], {}), '(x_data)\n', (2633, 2641), True, 'import numpy as np\n'), ((2651, 2667), 'numpy.array', 'np.array', (['y_data'], {}), '(y_data)\n', (2659, 2667), True, 'import numpy as np\n'), ((2677, 2693), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2685, 2693), True, 'import numpy as np\n'), ((2886, 2962), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'PATIENCE', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='val_loss', patience=PATIENCE, verbose=0, mode='auto')\n", (2899, 2962), False, 'from keras.callbacks import EarlyStopping\n'), ((3044, 3136), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=1, save_best_only=True,\n mode='max')\n", (3059, 3136), False, 'from keras.callbacks import ModelCheckpoint\n'), ((448, 460), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (458, 460), False, 'from keras.models import Sequential\n'), ((1444, 1466), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (1454, 1466), False, 'import cv2\n'), ((1477, 1514), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1489, 1514), False, 'import cv2\n'), ((1544, 1571), 'cv2.resize', 'cv2.resize', (['img', '(100, 100)'], {}), '(img, (100, 100))\n', (1554, 1571), False, 'import cv2\n'), ((2703, 2719), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2711, 2719), True, 'import numpy as np\n'), ((475, 552), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 
3)'], {'strides': '(1, 1)', 'activation': '"""relu"""', 'input_shape': 'image_size'}), "(32, (3, 3), strides=(1, 1), activation='relu', input_shape=image_size)\n", (481, 552), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((566, 609), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (575, 609), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((623, 676), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(32, (3, 3), strides=(1, 1), activation='relu')\n", (629, 676), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((690, 733), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)'}), '(pool_size=(2, 2), strides=(1, 1))\n', (699, 733), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((747, 800), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(64, (3, 3), strides=(1, 1), activation='relu')\n", (753, 800), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((814, 857), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (823, 857), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((871, 924), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(32, (3, 3), strides=(1, 1), activation='relu')\n", (877, 924), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((938, 981), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (947, 981), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((995, 1004), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1002, 1004), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1020, 1048), 'keras.layers.Dense', 
'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1025, 1048), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1064, 1076), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1071, 1076), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1092, 1122), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1097, 1122), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2064, 2088), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2076, 2088), False, 'import os\n'), ((2103, 2125), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (2113, 2125), False, 'import cv2\n'), ((2140, 2177), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2152, 2177), False, 'import cv2\n'), ((2192, 2219), 'cv2.resize', 'cv2.resize', (['img', '(100, 100)'], {}), '(img, (100, 100))\n', (2202, 2219), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')  # non-interactive backend; must be set before importing pyplot
import matplotlib.pyplot as plt
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # address GPUs in PCI bus order
os.environ["CUDA_VISIBLE_DEVICES"] = '1'  # restrict TensorFlow to GPU 1
""" This file is trying to simulate the Rayleigh channel without any channel information"""
# fix both the TF graph-level seed and numpy's seed for reproducibility
tf.set_random_seed(100)
np.random.seed(100)
def generator_conditional(z, conditioning):
    """Conditional generator network.

    Concatenates the noise z with the conditioning vector and passes it
    through three ReLU fully-connected layers plus a linear output layer
    (weights G_W1..G_W4 / biases G_b1..G_b4 are module-level variables).
    Returns the generated 2-D sample (real/imag of the received symbol).
    """
    layer = tf.concat([z, conditioning], 1)
    layer = tf.nn.relu(tf.matmul(layer, G_W1) + G_b1)
    layer = tf.nn.relu(tf.matmul(layer, G_W2) + G_b2)
    layer = tf.nn.relu(tf.matmul(layer, G_W3) + G_b3)
    return tf.matmul(layer, G_W4) + G_b4
def discriminator_conditional(X, conditioning):
    """Conditional discriminator (critic) network.

    Concatenates the sample X with the conditioning vector, scales the
    input by 1/4, and passes it through three ReLU fully-connected
    layers plus a linear output layer (weights D_W1..D_W4 / biases
    D_b1..D_b4 are module-level variables).
    Returns (sigmoid probability, raw logit).
    """
    hidden = tf.concat([X, conditioning], 1)
    hidden = tf.nn.relu(tf.matmul(hidden / 4, D_W1) + D_b1)
    hidden = tf.nn.relu(tf.matmul(hidden, D_W2) + D_b2)
    hidden = tf.nn.relu(tf.matmul(hidden, D_W3) + D_b3)
    logit = tf.matmul(hidden, D_W4) + D_b4
    return tf.nn.sigmoid(logit), logit
def sample_Z(sample_size):
    """Draw generator input noise of the given shape from N(0, 1)."""
    return np.random.normal(loc=0.0, scale=1.0, size=sample_size)
def xavier_init(size):
    """Xavier/Glorot-style normal initialiser for a weight matrix.

    size: [fan_in, fan_out]. The standard deviation is 1/sqrt(fan_in/2).
    Returns a tf random-normal tensor of the requested shape.
    """
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
number = 200
# NOTE(review): these module-level channel draws appear unused -- both
# the sampling helper below and the plotting loop redefine h_r / h_i /
# h_complex locally. Confirm before removing.
h_r = np.random.normal(scale=np.sqrt(2) / 2, size=number)
h_i = np.random.normal(scale=np.sqrt(2) / 2, size=number)
h_complex = h_r + 1j * h_i
def generate_real_samples_with_labels_Rayleigh(number=100):
    """Draw `number` received symbols through random Rayleigh channels.

    Picks random 16-QAM symbols from the module-level `mean_set_QAM`,
    multiplies each by an independent complex channel coefficient, and
    adds Gaussian noise (variance 0.01 per component).

    Returns (received, conditioning):
      received     -- (number, 2) float array of real/imag parts of h*x + n
      conditioning -- (number, 4) array of (Re(x), Im(x), Re(h), Im(h)) / 3
    """
    # one complex Rayleigh coefficient per sample
    h_re = np.random.normal(scale=np.sqrt(2) / 2, size=number)
    h_im = np.random.normal(scale=np.sqrt(2) / 2, size=number)
    channel = h_re + 1j * h_im
    # random constellation symbols
    picks = np.random.choice(len(mean_set_QAM), number)
    symbols = mean_set_QAM[picks]
    faded = channel * symbols
    received = np.hstack(
        (np.real(faded).reshape(number, 1), np.imag(faded).reshape(number, 1)))
    noise = np.random.multivariate_normal([0, 0], [[0.01, 0], [0, 0.01]], number).astype(np.float32)
    received = received + noise
    conditioning = np.hstack((np.real(symbols).reshape(number, 1),
                              np.imag(symbols).reshape(number, 1),
                              h_re.reshape(number, 1),
                              h_im.reshape(number, 1))) / 3
    return received, conditioning
""" ==== Here is the main function ==== """
mean_set_QAM = np.asarray([-3 - 3j, -3 - 1j, -3 + 1j, -3 + 3j, -1 - 3j, -1 - 1j, -1 + 1j, -1 + 3j,
1 - 3j, 1 - 1j, 1 + 1j, 1 + 3j, 3 - 3j, 3 - 1j, 3 + 1j, 3 + 3j
], dtype=np.complex64)
batch_size = 512
condition_depth = 2
condition_dim = 4
Z_dim = 16
model = 'ChannelGAN_Rayleigh_'
data_size = 10000
data, one_hot_labels = generate_real_samples_with_labels_Rayleigh(data_size)
D_W1 = tf.Variable(xavier_init([2 + condition_dim, 32]))
D_b1 = tf.Variable(tf.zeros(shape=[32]))
D_W2 = tf.Variable(xavier_init([32, 32]))
D_b2 = tf.Variable(tf.zeros(shape=[32]))
D_W3 = tf.Variable(xavier_init([32, 32]))
D_b3 = tf.Variable(tf.zeros(shape=[32]))
D_W4 = tf.Variable(xavier_init([32, 1]))
D_b4 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3, D_W4, D_b4]
G_W1 = tf.Variable(xavier_init([Z_dim + condition_dim, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 128]))
G_b2 = tf.Variable(tf.zeros(shape=[128]))
G_W3 = tf.Variable(xavier_init([128, 128]))
G_b3 = tf.Variable(tf.zeros(shape=[128]))
G_W4 = tf.Variable(xavier_init([128, 2]))
G_b4 = tf.Variable(tf.zeros(shape=[2]))
theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3, G_W4, G_b4]
R_sample = tf.placeholder(tf.float32, shape=[None, 2])
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])
Condition = tf.placeholder(tf.float32, shape=[None, condition_dim])
G_sample = generator_conditional(Z, Condition)
D_prob_real, D_logit_real = discriminator_conditional(R_sample, Condition)
D_prob_fake, D_logit_fake = discriminator_conditional(G_sample, Condition)
D_loss = tf.reduce_mean(D_logit_fake) - tf.reduce_mean(D_logit_real)
G_loss = -1 * tf.reduce_mean(D_logit_fake)
lambdda = 5
alpha = tf.random_uniform(shape=tf.shape(R_sample), minval=0., maxval=1.)
differences = G_sample - R_sample
interpolates = R_sample + (alpha * differences)
_, D_inter = discriminator_conditional(interpolates, Condition)
gradients = tf.gradients(D_inter, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)
D_loss += lambdda * gradient_penalty
D_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(G_loss, var_list=theta_G)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
save_fig_path = model+"images"
if not os.path.exists(save_fig_path):
os.makedirs(save_fig_path)
i = 0
plt.figure(figsize=(5, 5))
plt.plot(data[:1000, 0], data[:1000, 1], 'b.')
# axes = plt.gca()
# axes.set_xlim([-4, 4])
# axes.set_ylim([-4, 4])
# plt.title('True data distribution')
# plt.savefig(save_fig_path + '/real.png', bbox_inches='tight')
np_samples = []
plot_every = 1000
plt.figure(figsize=(5, 5))
xmax = 4
saver = tf.train.Saver()
for it in range(750000):
    # walk through the dataset in contiguous minibatches
    start_idx = it * batch_size % data_size
    if start_idx + batch_size >= len(data):
        continue
    X_mb = data[start_idx:start_idx + batch_size, :]
    one_hot_labels_mb = one_hot_labels[start_idx:start_idx + batch_size, :]
    # train the critic several times per generator update (WGAN practice)
    for d_idx in range(10):
        _, D_loss_curr = sess.run([D_solver, D_loss],
                                  feed_dict={R_sample: X_mb, Z: sample_Z((batch_size, Z_dim)),
                                             Condition: one_hot_labels_mb})
        # print("Inner loop Losses: D are", D_loss_curr)
    # print("start_idx is", start_idx, "shape of one hot label", one_hot_labels_mb.shape, X_mb.shape )
    #_, D_loss_curr = sess.run([D_solver, D_loss],
    #                          feed_dict={R_sample: X_mb, Z: sample_Z((batch_size, Z_dim)), Condition: one_hot_labels_mb})
    # one generator update
    _, G_loss_curr = sess.run([G_solver, G_loss],
                              feed_dict={R_sample: X_mb, Z: sample_Z((batch_size, Z_dim)), Condition: one_hot_labels_mb})
    if (it + 1) % plot_every == 0:
        # periodically checkpoint the model and render constellation plots
        save_path = saver.save(sess, './Models/ChannelGAN_model_step_' + str(it) + '.ckpt')
        print("Start Plotting")
        colors = ['b.', 'r+', 'm.', 'c.', 'k.', 'g.', 'y.', 'm.', \
                  'bo', 'ro', 'mo', 'co', 'ko', 'go', 'yo', 'bo']
        colors = ['b.', 'b+', 'bx', 'b^', 'b^', 'bx', 'b+', 'b.', \
                  'b.', 'b+', 'bx', 'b^', 'b^', 'bx', 'b+', 'b.']
        plt.clf()
        samples = np.array([])
        for channel_idx in range(10):
            plt.clf()
            number = 20 #
            # draw one Rayleigh channel realisation and repeat it so all
            # constellation points share the same channel in this plot
            h_r = np.random.normal(scale=np.sqrt(2) / 2)
            h_i = np.random.normal(scale=np.sqrt(2) / 2)
            h_r = np.tile(h_r, number)
            h_i = np.tile(h_i, number)
            for idx in range(len(mean_set_QAM)):
                labels_index = np.tile(idx, number)
                h_complex = h_r + 1j * h_i
                # labels_index = np.random.choice(len(mean_set_QAM), number)
                data_t = mean_set_QAM[labels_index]
                transmit_data = h_complex * data_t
                # print("shapes", transmit_data.shape, h_complex.shape, data_t.shape)
                transmit_data = np.hstack((np.real(transmit_data).reshape(len(transmit_data), 1),
                                           np.imag(transmit_data).reshape(len(transmit_data), 1)))
                # NOTE(review): plotting uses noise variance 0.03, while the
                # training data generator uses 0.01 -- confirm intentional
                gaussion_random = np.random.multivariate_normal([0, 0], [[0.03, 0], [0, 0.03]], number).astype(
                    np.float32)
                received_data = transmit_data + gaussion_random
                conditioning = np.hstack(
                    (np.real(data_t).reshape(len(data_t), 1), np.imag(data_t).reshape(len(data_t), 1),
                     h_r.reshape(len(data_t), 1), h_i.reshape(len(data_t), 1))) /3
                # overlay generated samples with the true faded constellation
                samples_component = sess.run(G_sample, feed_dict={Z: sample_Z((number, Z_dim)), Condition: conditioning})
                plt.plot(samples_component[:, 0], samples_component[:, 1], colors[idx])
                plt.plot(transmit_data[:, 0], transmit_data[:, 1], colors[idx])
            #plt.plot(samples_component[:, 0], samples_component[:, 1], 'k.')
            #plt.plot(transmit_data[:, 0], transmit_data[:, 1], 'b*')
            axes = plt.gca()
            axes.set_xlim([-4, 4])
            axes.set_ylim([-4, 4])
            xlabel = r'$Re\{y_n\}$'
            ylabel = r'$Imag\{y_n\}$'
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.show()
            plt.savefig( save_fig_path + '/' + str(channel_idx) + '_{}_noise_1.eps'.format(str(i).zfill(3)),
                        bbox_inches='tight')
            plt.savefig(save_fig_path + '/' + str(channel_idx) + '_{}_noise_1.png'.format(str(i).zfill(3)),
                        bbox_inches='tight')
        axes.set_xlim([-4, 4])
        axes.set_ylim([-4, 4])
        plt.title('Iter: {}, loss(D): {:2.2f}, loss(G):{:2.2f}'.format(it + 1, D_loss_curr, G_loss_curr))
        plt.savefig(save_fig_path + '/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
        i += 1
| [
"numpy.random.seed",
"matplotlib.pyplot.clf",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"matplotlib.pyplot.figure",
"numpy.imag",
"numpy.tile",
"numpy.random.normal",
"matplotlib.pyplot.gca",
"tensorflow.sqrt",
"matplotlib.pyplot.xlabel",
"os.path.exists",
"tensorflow.concat",
... | [((61, 82), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (75, 82), False, 'import matplotlib\n'), ((308, 331), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(100)'], {}), '(100)\n', (326, 331), True, 'import tensorflow as tf\n'), ((332, 351), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (346, 351), True, 'import numpy as np\n'), ((2635, 2844), 'numpy.asarray', 'np.asarray', (['[-3 - 3.0j, -3 - 1.0j, -3 + 1.0j, -3 + 3.0j, -1 - 3.0j, -1 - 1.0j, -1 + \n 1.0j, -1 + 3.0j, 1 - 3.0j, 1 - 1.0j, 1 + 1.0j, 1 + 3.0j, 3 - 3.0j, 3 - \n 1.0j, 3 + 1.0j, 3 + 3.0j]'], {'dtype': 'np.complex64'}), '([-3 - 3.0j, -3 - 1.0j, -3 + 1.0j, -3 + 3.0j, -1 - 3.0j, -1 - \n 1.0j, -1 + 1.0j, -1 + 3.0j, 1 - 3.0j, 1 - 1.0j, 1 + 1.0j, 1 + 3.0j, 3 -\n 3.0j, 3 - 1.0j, 3 + 1.0j, 3 + 3.0j], dtype=np.complex64)\n', (2645, 2844), True, 'import numpy as np\n'), ((3883, 3926), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 2]'}), '(tf.float32, shape=[None, 2])\n', (3897, 3926), True, 'import tensorflow as tf\n'), ((3931, 3978), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, Z_dim]'}), '(tf.float32, shape=[None, Z_dim])\n', (3945, 3978), True, 'import tensorflow as tf\n'), ((3991, 4046), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, condition_dim]'}), '(tf.float32, shape=[None, condition_dim])\n', (4005, 4046), True, 'import tensorflow as tf\n'), ((4738, 4773), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((slopes - 1.0) ** 2)'], {}), '((slopes - 1.0) ** 2)\n', (4752, 4773), True, 'import tensorflow as tf\n'), ((5041, 5053), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5051, 5053), True, 'import tensorflow as tf\n'), ((5205, 5231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5215, 5231), True, 'import matplotlib.pyplot as plt\n'), ((5232, 5278), 'matplotlib.pyplot.plot', 'plt.plot', 
(['data[:1000, 0]', 'data[:1000, 1]', '"""b."""'], {}), "(data[:1000, 0], data[:1000, 1], 'b.')\n", (5240, 5278), True, 'import matplotlib.pyplot as plt\n'), ((5484, 5510), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5494, 5510), True, 'import matplotlib.pyplot as plt\n'), ((5528, 5544), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5542, 5544), True, 'import tensorflow as tf\n'), ((445, 476), 'tensorflow.concat', 'tf.concat', (['[z, conditioning]', '(1)'], {}), '([z, conditioning], 1)\n', (454, 476), True, 'import tensorflow as tf\n'), ((799, 830), 'tensorflow.concat', 'tf.concat', (['[X, conditioning]', '(1)'], {}), '([X, conditioning], 1)\n', (808, 830), True, 'import tensorflow as tf\n'), ((1185, 1207), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['D_logit'], {}), '(D_logit)\n', (1198, 1207), True, 'import tensorflow as tf\n'), ((1344, 1378), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'sample_size'}), '(size=sample_size)\n', (1360, 1378), True, 'import numpy as np\n'), ((1481, 1531), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'size', 'stddev': 'xavier_stddev'}), '(shape=size, stddev=xavier_stddev)\n', (1497, 1531), True, 'import tensorflow as tf\n'), ((3127, 3147), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[32]'}), '(shape=[32])\n', (3135, 3147), True, 'import tensorflow as tf\n'), ((3210, 3230), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[32]'}), '(shape=[32])\n', (3218, 3230), True, 'import tensorflow as tf\n'), ((3293, 3313), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[32]'}), '(shape=[32])\n', (3301, 3313), True, 'import tensorflow as tf\n'), ((3375, 3394), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[1]'}), '(shape=[1])\n', (3383, 3394), True, 'import tensorflow as tf\n'), ((3536, 3557), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[128]'}), '(shape=[128])\n', (3544, 3557), True, 'import tensorflow as tf\n'), ((3622, 3643), 
'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[128]'}), '(shape=[128])\n', (3630, 3643), True, 'import tensorflow as tf\n'), ((3708, 3729), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[128]'}), '(shape=[128])\n', (3716, 3729), True, 'import tensorflow as tf\n'), ((3792, 3811), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[2]'}), '(shape=[2])\n', (3800, 3811), True, 'import tensorflow as tf\n'), ((4254, 4282), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['D_logit_fake'], {}), '(D_logit_fake)\n', (4268, 4282), True, 'import tensorflow as tf\n'), ((4285, 4313), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['D_logit_real'], {}), '(D_logit_real)\n', (4299, 4313), True, 'import tensorflow as tf\n'), ((4328, 4356), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['D_logit_fake'], {}), '(D_logit_fake)\n', (4342, 4356), True, 'import tensorflow as tf\n'), ((4601, 4638), 'tensorflow.gradients', 'tf.gradients', (['D_inter', '[interpolates]'], {}), '(D_inter, [interpolates])\n', (4613, 4638), True, 'import tensorflow as tf\n'), ((5063, 5096), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5094, 5096), True, 'import tensorflow as tf\n'), ((5137, 5166), 'os.path.exists', 'os.path.exists', (['save_fig_path'], {}), '(save_fig_path)\n', (5151, 5166), False, 'import os\n'), ((5172, 5198), 'os.makedirs', 'os.makedirs', (['save_fig_path'], {}), '(save_fig_path)\n', (5183, 5198), False, 'import os\n'), ((652, 673), 'tensorflow.matmul', 'tf.matmul', (['G_h3', 'G_W4'], {}), '(G_h3, G_W4)\n', (661, 673), True, 'import tensorflow as tf\n'), ((1138, 1164), 'tensorflow.matmul', 'tf.matmul', (['D_h3_real', 'D_W4'], {}), '(D_h3_real, D_W4)\n', (1147, 1164), True, 'import tensorflow as tf\n'), ((1449, 1470), 'tensorflow.sqrt', 'tf.sqrt', (['(in_dim / 2.0)'], {}), '(in_dim / 2.0)\n', (1456, 1470), True, 'import tensorflow as tf\n'), ((4401, 4419), 'tensorflow.shape', 'tf.shape', (['R_sample'], {}), '(R_sample)\n', (4409, 4419), True, 
'import tensorflow as tf\n'), ((4673, 4693), 'tensorflow.square', 'tf.square', (['gradients'], {}), '(gradients)\n', (4682, 4693), True, 'import tensorflow as tf\n'), ((4822, 4888), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)', 'beta1': '(0.5)', 'beta2': '(0.9)'}), '(learning_rate=0.0001, beta1=0.5, beta2=0.9)\n', (4844, 4888), True, 'import tensorflow as tf\n'), ((4933, 4999), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)', 'beta1': '(0.5)', 'beta2': '(0.9)'}), '(learning_rate=0.0001, beta1=0.5, beta2=0.9)\n', (4955, 4999), True, 'import tensorflow as tf\n'), ((7002, 7011), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7009, 7011), True, 'import matplotlib.pyplot as plt\n'), ((7030, 7042), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7038, 7042), True, 'import numpy as np\n'), ((499, 525), 'tensorflow.matmul', 'tf.matmul', (['z_combine', 'G_W1'], {}), '(z_combine, G_W1)\n', (508, 525), True, 'import tensorflow as tf\n'), ((556, 577), 'tensorflow.matmul', 'tf.matmul', (['G_h1', 'G_W2'], {}), '(G_h1, G_W2)\n', (565, 577), True, 'import tensorflow as tf\n'), ((608, 629), 'tensorflow.matmul', 'tf.matmul', (['G_h2', 'G_W3'], {}), '(G_h2, G_W3)\n', (617, 629), True, 'import tensorflow as tf\n'), ((858, 888), 'tensorflow.matmul', 'tf.matmul', (['(z_combine / 4)', 'D_W1'], {}), '(z_combine / 4, D_W1)\n', (867, 888), True, 'import tensorflow as tf\n'), ((1027, 1053), 'tensorflow.matmul', 'tf.matmul', (['D_h1_real', 'D_W2'], {}), '(D_h1_real, D_W2)\n', (1036, 1053), True, 'import tensorflow as tf\n'), ((1089, 1115), 'tensorflow.matmul', 'tf.matmul', (['D_h2_real', 'D_W3'], {}), '(D_h2_real, D_W3)\n', (1098, 1115), True, 'import tensorflow as tf\n'), ((1576, 1586), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1583, 1586), True, 'import numpy as np\n'), ((1634, 1644), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1641, 1644), True, 'import numpy as np\n'), 
((2200, 2269), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[0, 0]', '[[0.01, 0], [0, 0.01]]', 'number'], {}), '([0, 0], [[0.01, 0], [0, 0.01]], number)\n', (2229, 2269), True, 'import numpy as np\n'), ((7093, 7102), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7100, 7102), True, 'import matplotlib.pyplot as plt\n'), ((7262, 7282), 'numpy.tile', 'np.tile', (['h_r', 'number'], {}), '(h_r, number)\n', (7269, 7282), True, 'import numpy as np\n'), ((7301, 7321), 'numpy.tile', 'np.tile', (['h_i', 'number'], {}), '(h_i, number)\n', (7308, 7321), True, 'import numpy as np\n'), ((8830, 8839), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8837, 8839), True, 'import matplotlib.pyplot as plt\n'), ((8996, 9014), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (9006, 9014), True, 'import matplotlib.pyplot as plt\n'), ((9027, 9045), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (9037, 9045), True, 'import matplotlib.pyplot as plt\n'), ((9058, 9068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9066, 9068), True, 'import matplotlib.pyplot as plt\n'), ((1785, 1795), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1792, 1795), True, 'import numpy as np\n'), ((1847, 1857), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1854, 1857), True, 'import numpy as np\n'), ((7402, 7422), 'numpy.tile', 'np.tile', (['idx', 'number'], {}), '(idx, number)\n', (7409, 7422), True, 'import numpy as np\n'), ((8503, 8574), 'matplotlib.pyplot.plot', 'plt.plot', (['samples_component[:, 0]', 'samples_component[:, 1]', 'colors[idx]'], {}), '(samples_component[:, 0], samples_component[:, 1], colors[idx])\n', (8511, 8574), True, 'import matplotlib.pyplot as plt\n'), ((8591, 8654), 'matplotlib.pyplot.plot', 'plt.plot', (['transmit_data[:, 0]', 'transmit_data[:, 1]', 'colors[idx]'], {}), '(transmit_data[:, 0], transmit_data[:, 1], colors[idx])\n', (8599, 8654), True, 'import matplotlib.pyplot as 
plt\n'), ((2085, 2107), 'numpy.real', 'np.real', (['received_data'], {}), '(received_data)\n', (2092, 2107), True, 'import numpy as np\n'), ((2131, 2153), 'numpy.imag', 'np.imag', (['received_data'], {}), '(received_data)\n', (2138, 2153), True, 'import numpy as np\n'), ((2371, 2384), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (2378, 2384), True, 'import numpy as np\n'), ((2408, 2421), 'numpy.imag', 'np.imag', (['data'], {}), '(data)\n', (2415, 2421), True, 'import numpy as np\n'), ((7171, 7181), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7178, 7181), True, 'import numpy as np\n'), ((7228, 7238), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7235, 7238), True, 'import numpy as np\n'), ((7963, 8032), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[0, 0]', '[[0.03, 0], [0, 0.03]]', 'number'], {}), '([0, 0], [[0.03, 0], [0, 0.03]], number)\n', (7992, 8032), True, 'import numpy as np\n'), ((7775, 7797), 'numpy.real', 'np.real', (['transmit_data'], {}), '(transmit_data)\n', (7782, 7797), True, 'import numpy as np\n'), ((7873, 7895), 'numpy.imag', 'np.imag', (['transmit_data'], {}), '(transmit_data)\n', (7880, 7895), True, 'import numpy as np\n'), ((8200, 8215), 'numpy.real', 'np.real', (['data_t'], {}), '(data_t)\n', (8207, 8215), True, 'import numpy as np\n'), ((8241, 8256), 'numpy.imag', 'np.imag', (['data_t'], {}), '(data_t)\n', (8248, 8256), True, 'import numpy as np\n')] |
# <markdowncell> Import scipy.io for matlab loading, numpy and teneto
# <codecell>
import scipy.io as sio
import scipy.stats as sps
import numpy as np
import teneto
import matplotlib.pyplot as plt
import plot_surf
import pandas as pd
import os
import markdown2
import tenetostats
coord = sio.matlab.loadmat('./data/coord_power264.mat')['coord']
netid=np.array(list(map(int,sio.loadmat('./data/networkassignment')['PowerNetClass'])))
netid[netid==-1]=13
network = np.array([1,3,4,5,7,8,9,10,11,12,13])
netlab = np.array(['SM','CO','AU','DM','V','FP','SA','Sub','VA','DA','U'])
plotnet = [5,1,7,8,9,3,4,10,11,12,13]
plotorder=[]
for n in plotnet:
fid = np.where(netid==n)[0]
plotorder=plotorder + list(fid)
plotorder = np.array(plotorder)
PowerInfo = pd.read_csv('./data/PowerInfo.csv')
plotcol = np.zeros([14,3])
plotcol[1,:] = [255,102,0]
plotcol[3,:] = [255,255,0]
plotcol[4,:] = [128,0,128]
plotcol[5,:] = [128,0,0]
plotcol[7,:] = [255,0,100]
plotcol[8,:] = [124,124,220]
plotcol[9,:] = [0,255,0]
plotcol[10,:] = [255,0,255]
plotcol[11,:] = [0,255,255]
plotcol[12,:] = [0,128,0]
plotcol[13,:] = [0,0,0]
plotcol=plotcol/255
colvec=np.zeros([264,3])
for n in range(0,264):
colvec[n,:]=plotcol[netid[n],:]
# <markdowncell> Set matplotlib color style
# <codecell>
plt.rcParams['image.cmap'] = 'gist_gray'
# <markdowncell> load data
# <codecell>
closeness_eo=np.load('./data/closeness_eo.npy')
closeness_ec=np.load('./data/closeness_ec.npy')
degree_eo=np.load('./data/degree_eo.npy')
degree_ec=np.load('./data/degree_ec.npy')
# Bec=np.load('./data/burst_ec.npy')
# Beo=np.load('./data/burst_eo.npy')
# CEeo=np.load('./data/efficiency_centrality_eo.npy')thon
# CEec=np.load('./data/efficiency_centrality_ec.npy')
closeness_eo = np.stack(closeness_eo)
closeness_ec = np.stack(closeness_ec)
degree_eo = np.stack(degree_eo)
degree_ec = np.stack(degree_ec)
# <markdowncell> Plot on brains
# <codecell>
plotvar=np.mean(degree_eo,axis=0)
plotvar=(plotvar-plotvar.min())/(plotvar.max()-plotvar.min())*5
plot_surf.plot_brain_surface(coord,plotvar,colvec,'./figures/tdegree_brain.png')
plotvar=np.mean(closeness_eo,axis=0)
plotvar=(plotvar-plotvar.min())/(plotvar.max()-plotvar.min())*5
plot_surf.plot_brain_surface(coord,plotvar,colvec,'./figures/closeness_brain.png')
plotvar=np.mean(degree_ec,axis=0)
plotvar=(plotvar-plotvar.min())/(plotvar.max()-plotvar.min())*5
plot_surf.plot_brain_surface(coord,plotvar,colvec,'./figures/tdegree_ec_brain.png')
plotvar=np.mean(closeness_ec,axis=0)
plotvar=(plotvar-plotvar.min())/(plotvar.max()-plotvar.min())*5
plot_surf.plot_brain_surface(coord,plotvar,colvec,'./figures/closeness_ec_brain.png')
# plotvar=np.mean(Beo,axis=0)
# plotvar=(plotvar-plotvar.min())/(plotvar.max()-plotvar.min())*5
# plot_surf.plot_brain_surface(coord,plotvar,colvec,'./figures/bursts_brain.png')
#
# <markdowncell> Plot on brains
# <codecell>
pth=np.zeros([1000,264])
np.random.seed(2017)
for p in range(0,1000):
porder= np.argsort(np.random.rand(264,46),axis=0)
pdeg = np.zeros(264)
for s in range(0,46):
pdeg += degree_eo[s,porder[:,s]]
pth[p,:]=pdeg/46
pth=np.sort(pth,axis=0)
thD=pth[950,:].max()
pth=np.zeros([1000,264])
np.random.seed(2017)
for p in range(0,1000):
porder= np.argsort(np.random.rand(264,46),axis=0)
pdeg = np.zeros(264)
for s in range(0,46):
pdeg += degree_ec[s,porder[:,s]]
pth[p,:]=pdeg/46
pth=np.sort(pth,axis=0)
thDec=pth[950,:].max()
pth=np.zeros([1000,264])
np.random.seed(2017)
for p in range(0,1000):
porder= np.argsort(np.random.rand(264,46),axis=0)
pdeg = np.zeros(264)
for s in range(0,46):
pdeg += closeness_ec[s,porder[:,s]]
pth[p,:]=pdeg/46
pth=np.sort(pth,axis=0)
thCec=pth[950,:].max()
pth=np.zeros([1000,264])
np.random.seed(2017)
for p in range(0,1000):
porder= np.argsort(np.random.rand(264,46),axis=0)
pdeg = np.zeros(264)
for s in range(0,46):
pdeg += closeness_eo[s,porder[:,s]]
pth[p,:]=pdeg/46
pth=np.sort(pth,axis=0)
thC=pth[950,:].max()
eD = np.mean(degree_eo,axis=0)
eC = np.mean(closeness_eo,axis=0)
eDec = np.mean(degree_ec,axis=0)
eCec = np.mean(closeness_ec,axis=0)
def print_table_of_node_and_all(val,valname,threshold,sname,title,tag='1'):
    """Write a markdown table (and pandoc PDF) of nodes where val > threshold.

    val       : per-node values, one per row of ./data/PowerInfo.csv
    valname   : column header used for the value column in the table
    threshold : nodes with val strictly above this are listed
    sname     : output basename; writes <sname>.md and <sname>.pdf
    title     : table caption appended after the table
    tag       : table number used in the caption
    """
    PowerInfo = pd.read_csv('./data/PowerInfo.csv')
    # Values of the supra-threshold nodes, in original node order.
    sigVals=val[np.where(val>threshold)[0]]
    results=PowerInfo.iloc[PowerInfo.index[val>threshold]]
    results=results[['coord_x','coord_y','coord_z','network','aal']]
    results.rename(columns={'coord_x':'x','coord_y':'y','coord_z':'z','aal':'AAL','network':'Network'},inplace=True)
    results[valname]=np.around(sigVals,3)
    results.sort_values(by=valname,ascending=False,inplace=True)
    # Get column names
    cols = results.columns
    # Create a new DataFrame with just the markdown
    # strings
    hdrresults = pd.DataFrame([['---',]*len(cols)], columns=cols)
    #Create a new concatenated DataFrame
    # (pipe-separated CSV + a '---' row emulates a markdown table for pandoc)
    results = pd.concat([hdrresults, results])
    results.to_csv(sname + '.md', sep="|", index=False)
    # NOTE(review): '\p' below is an invalid escape sequence (DeprecationWarning);
    # the intended LaTeX literal is '\\pagenumbering'.
    with open(sname + ".md", "a") as myfile:
        myfile.write("\n \pagenumbering{gobble} \n Table " + tag + ": " + title)
    os.system('pandoc ' + sname + '.md -o ' + sname + '.pdf')
print_table_of_node_and_all(eD,'D',thD,'degree_eo_top','Temporal Degree Centrality during eyes open condition. Nodes with degree centrality where p\<0.05, their designated network and corresponding AAL. XYZ are MNI cordinates.','1')
print_table_of_node_and_all(eC,'C',thC,'closeness_eo_top','Closeness Centrality during eyes open condition. Nodes with closeness centrality where p\<0.05, their designated network and corresponding AAL. XYZ are MNI cordinates.','3')
print_table_of_node_and_all(eDec,'D',thDec,'degree_ec_top','Temporal Degree Centrality during eyes closed condition. Nodes with degree centrality where p\<0.05, their designated network and corresponding AAL. XYZ are MNI cordinates.','2')
print_table_of_node_and_all(eCec,'C',thCec,'closeness_ec_top','Closeness Centrality during eyes closed condition. Nodes with closeness centrality where p\<0.05, their designated network and corresponding AAL. XYZ are MNI cordinates.','4')
# NOTE(review): Beo and Bec are never defined in this script -- the
# np.load('./data/burst_*.npy') calls that would create them are commented
# out above, so this section raises NameError as written. Re-enable those
# loads before running it.
# thB = the 27th-largest value, i.e. the top-10%% cutoff for 264 nodes.
B=np.mean(Beo,axis=0)
thB = np.sort(B)[-27]
print_table_of_node_and_all(B,'B',thB,'burstiness_eo_top','Burstiness during eyes open condition. Top 10 percent of nodes, their designated network and corresponding AAL. XYZ are MNI cordinates.')
B=np.mean(Bec,axis=0)
thB = np.sort(B)[-27]
print_table_of_node_and_all(B,'B',thB,'burstiness_ec_top','Burstiness during eyes closed condition. Top 10 percent of nodes, their designated network and corresponding AAL. XYZ are MNI cordinates.')
def spornlike_barplot(dat,col,ylim,th,ax):
    """Two-panel bar plot: sorted per-node values with a threshold line
    (bottom) and a strip of network-colour dots (top).

    dat  : 264 per-node values, expected pre-sorted descending by the caller
    col  : 264x3 RGB rows giving each node's network colour
    ylim : y-axis limits for the bar panel
    th   : significance threshold, drawn as a dashed line
    ax   : NOTE(review): immediately overwritten below, effectively unused

    NOTE(review): relies on a module-level `fig` existing at call time.
    """
    sig = np.where(dat>th)[0]
    ax=fig.add_subplot(212)
    ax.bar(range(0,264),dat,edgecolor=[0.45,0.45,0.45],facecolor=[0.45,0.45,0.45],width=1.0)
    # Highlight the first len(sig) bars in yellow; because dat is sorted
    # descending, these are exactly the supra-threshold nodes.
    ax.bar(range(0,len(sig)),dat[sig],edgecolor=[1,1,0],facecolor=[1,1,0],width=1.0)
    ax.set_xlim([-0.5,264.5])
    ax.set_ylim(ylim)
    # Dashed threshold line across the whole axis.
    ax.plot(range(-1,265),np.zeros(266)+th,linestyle='--',color='k')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Top panel: one coloured dot per node showing its network assignment.
    ax=fig.add_subplot(211)
    ax.scatter(range(0,264),np.zeros(264),s=30,edgecolor='none',c=col)
    ax.set_ylim([-1,1])
    ax.set_xlim([-0.5,264.5])
    #ax[0].set_aspect('equal')
    ax.axis('off')
    return ax
sorted_dat= np.array(list(reversed(np.sort(eC))))
sorted_net = netid[np.array(list(reversed(np.argsort(eC))))]
fig = plt.figure(figsize=(20,5))
spornlike_barplot(sorted_dat,plotcol[sorted_net,:],[.155,.18],thC,ax)
plt.tight_layout()
fig.show()
fig.savefig('./figures/closeness_eo_spornsplot.pdf',r=600)
sorted_dat= np.array(list(reversed(np.sort(eCec))))
sorted_net = netid[np.array(list(reversed(np.argsort(eCec))))]
fig = plt.figure(figsize=(20,5))
spornlike_barplot(sorted_dat,plotcol[sorted_net,:],[.145,.17],thCec,ax)
plt.tight_layout()
fig.show()
fig.savefig('./figures/closeness_ec_spornsplot.pdf',r=600)
sorted_dat= np.array(list(reversed(np.sort(eD))))
sorted_net = netid[np.array(list(reversed(np.argsort(eD))))]
fig = plt.figure(figsize=(20,5))
spornlike_barplot(sorted_dat,plotcol[sorted_net,:],[1800,1950],thD,ax)
plt.tight_layout()
fig.show()
fig.savefig('./figures/tdegree_eo_spornsplot.pdf',r=600)
sorted_dat= np.array(list(reversed(np.sort(eDec))))
sorted_net = netid[np.array(list(reversed(np.argsort(eDec))))]
fig = plt.figure(figsize=(20,5))
spornlike_barplot(sorted_dat,plotcol[sorted_net,:],[1800,1950],thDec,ax)
plt.tight_layout()
fig.show()
fig.savefig('./figures/tdegree_ec_spornsplot.pdf',r=600)
eC_net=np.zeros(11)
eCec_net=np.zeros(11)
for i,net in enumerate(plotnet):
eC_net[i] = np.mean(eC[netid==net])
eCec_net[i] = np.mean(eCec[netid==net])
pdegnode=np.zeros(264)
for r in range(0,264):
ptmp,tmp=tenetostats.shufflegroups(degree_eo[:,r],degree_ec[:,r],pnum=10000)
pdegnode[r]=ptmp
pdegnode_correct=teneto.misc.correct_pvalues_for_multiple_testing(pdegnode)
sig = np.where(pdegnode_correct<0.05)[0]
degree_eo[:,sig]-degree_ec[:,sig]
np.mean(degree_eo[:,sig],axis=0)-np.mean(degree_ec[:,sig],axis=0)
pclosenode=np.zeros(264)
for r in range(0,264):
ptmp,tmp=tenetostats.shufflegroups(closeness_eo[:,r],closeness_ec[:,r],pnum=10000)
pclosenode[r]=ptmp
pclosenode_correct=teneto.misc.correct_pvalues_for_multiple_testing(pclosenode)
sig = np.where(pclosenode_correct<0.05)[0]
np.mean(closeness_eo[:,sig],axis=0)-np.mean(closeness_ec[:,sig],axis=0)
| [
"numpy.load",
"numpy.random.seed",
"scipy.io.matlab.loadmat",
"scipy.io.loadmat",
"pandas.read_csv",
"numpy.argsort",
"numpy.around",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.tight_layout",
"tenetostats.shufflegroups",
"pandas.concat",
"teneto.misc.correct_pvalues_for_mul... | [((465, 512), 'numpy.array', 'np.array', (['[1, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]'], {}), '([1, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13])\n', (473, 512), True, 'import numpy as np\n'), ((512, 587), 'numpy.array', 'np.array', (["['SM', 'CO', 'AU', 'DM', 'V', 'FP', 'SA', 'Sub', 'VA', 'DA', 'U']"], {}), "(['SM', 'CO', 'AU', 'DM', 'V', 'FP', 'SA', 'Sub', 'VA', 'DA', 'U'])\n", (520, 587), True, 'import numpy as np\n'), ((727, 746), 'numpy.array', 'np.array', (['plotorder'], {}), '(plotorder)\n', (735, 746), True, 'import numpy as np\n'), ((759, 794), 'pandas.read_csv', 'pd.read_csv', (['"""./data/PowerInfo.csv"""'], {}), "('./data/PowerInfo.csv')\n", (770, 794), True, 'import pandas as pd\n'), ((806, 823), 'numpy.zeros', 'np.zeros', (['[14, 3]'], {}), '([14, 3])\n', (814, 823), True, 'import numpy as np\n'), ((1144, 1162), 'numpy.zeros', 'np.zeros', (['[264, 3]'], {}), '([264, 3])\n', (1152, 1162), True, 'import numpy as np\n'), ((1377, 1411), 'numpy.load', 'np.load', (['"""./data/closeness_eo.npy"""'], {}), "('./data/closeness_eo.npy')\n", (1384, 1411), True, 'import numpy as np\n'), ((1425, 1459), 'numpy.load', 'np.load', (['"""./data/closeness_ec.npy"""'], {}), "('./data/closeness_ec.npy')\n", (1432, 1459), True, 'import numpy as np\n'), ((1470, 1501), 'numpy.load', 'np.load', (['"""./data/degree_eo.npy"""'], {}), "('./data/degree_eo.npy')\n", (1477, 1501), True, 'import numpy as np\n'), ((1512, 1543), 'numpy.load', 'np.load', (['"""./data/degree_ec.npy"""'], {}), "('./data/degree_ec.npy')\n", (1519, 1543), True, 'import numpy as np\n'), ((1746, 1768), 'numpy.stack', 'np.stack', (['closeness_eo'], {}), '(closeness_eo)\n', (1754, 1768), True, 'import numpy as np\n'), ((1784, 1806), 'numpy.stack', 'np.stack', (['closeness_ec'], {}), '(closeness_ec)\n', (1792, 1806), True, 'import numpy as np\n'), ((1819, 1838), 'numpy.stack', 'np.stack', (['degree_eo'], {}), '(degree_eo)\n', (1827, 1838), True, 'import numpy as np\n'), ((1851, 1870), 
'numpy.stack', 'np.stack', (['degree_ec'], {}), '(degree_ec)\n', (1859, 1870), True, 'import numpy as np\n'), ((1925, 1951), 'numpy.mean', 'np.mean', (['degree_eo'], {'axis': '(0)'}), '(degree_eo, axis=0)\n', (1932, 1951), True, 'import numpy as np\n'), ((2015, 2102), 'plot_surf.plot_brain_surface', 'plot_surf.plot_brain_surface', (['coord', 'plotvar', 'colvec', '"""./figures/tdegree_brain.png"""'], {}), "(coord, plotvar, colvec,\n './figures/tdegree_brain.png')\n", (2043, 2102), False, 'import plot_surf\n'), ((2105, 2134), 'numpy.mean', 'np.mean', (['closeness_eo'], {'axis': '(0)'}), '(closeness_eo, axis=0)\n', (2112, 2134), True, 'import numpy as np\n'), ((2198, 2287), 'plot_surf.plot_brain_surface', 'plot_surf.plot_brain_surface', (['coord', 'plotvar', 'colvec', '"""./figures/closeness_brain.png"""'], {}), "(coord, plotvar, colvec,\n './figures/closeness_brain.png')\n", (2226, 2287), False, 'import plot_surf\n'), ((2290, 2316), 'numpy.mean', 'np.mean', (['degree_ec'], {'axis': '(0)'}), '(degree_ec, axis=0)\n', (2297, 2316), True, 'import numpy as np\n'), ((2380, 2470), 'plot_surf.plot_brain_surface', 'plot_surf.plot_brain_surface', (['coord', 'plotvar', 'colvec', '"""./figures/tdegree_ec_brain.png"""'], {}), "(coord, plotvar, colvec,\n './figures/tdegree_ec_brain.png')\n", (2408, 2470), False, 'import plot_surf\n'), ((2473, 2502), 'numpy.mean', 'np.mean', (['closeness_ec'], {'axis': '(0)'}), '(closeness_ec, axis=0)\n', (2480, 2502), True, 'import numpy as np\n'), ((2566, 2658), 'plot_surf.plot_brain_surface', 'plot_surf.plot_brain_surface', (['coord', 'plotvar', 'colvec', '"""./figures/closeness_ec_brain.png"""'], {}), "(coord, plotvar, colvec,\n './figures/closeness_ec_brain.png')\n", (2594, 2658), False, 'import plot_surf\n'), ((2884, 2905), 'numpy.zeros', 'np.zeros', (['[1000, 264]'], {}), '([1000, 264])\n', (2892, 2905), True, 'import numpy as np\n'), ((2905, 2925), 'numpy.random.seed', 'np.random.seed', (['(2017)'], {}), '(2017)\n', (2919, 2925), True, 
'import numpy as np\n'), ((3121, 3141), 'numpy.sort', 'np.sort', (['pth'], {'axis': '(0)'}), '(pth, axis=0)\n', (3128, 3141), True, 'import numpy as np\n'), ((3167, 3188), 'numpy.zeros', 'np.zeros', (['[1000, 264]'], {}), '([1000, 264])\n', (3175, 3188), True, 'import numpy as np\n'), ((3188, 3208), 'numpy.random.seed', 'np.random.seed', (['(2017)'], {}), '(2017)\n', (3202, 3208), True, 'import numpy as np\n'), ((3404, 3424), 'numpy.sort', 'np.sort', (['pth'], {'axis': '(0)'}), '(pth, axis=0)\n', (3411, 3424), True, 'import numpy as np\n'), ((3453, 3474), 'numpy.zeros', 'np.zeros', (['[1000, 264]'], {}), '([1000, 264])\n', (3461, 3474), True, 'import numpy as np\n'), ((3474, 3494), 'numpy.random.seed', 'np.random.seed', (['(2017)'], {}), '(2017)\n', (3488, 3494), True, 'import numpy as np\n'), ((3693, 3713), 'numpy.sort', 'np.sort', (['pth'], {'axis': '(0)'}), '(pth, axis=0)\n', (3700, 3713), True, 'import numpy as np\n'), ((3742, 3763), 'numpy.zeros', 'np.zeros', (['[1000, 264]'], {}), '([1000, 264])\n', (3750, 3763), True, 'import numpy as np\n'), ((3763, 3783), 'numpy.random.seed', 'np.random.seed', (['(2017)'], {}), '(2017)\n', (3777, 3783), True, 'import numpy as np\n'), ((3982, 4002), 'numpy.sort', 'np.sort', (['pth'], {'axis': '(0)'}), '(pth, axis=0)\n', (3989, 4002), True, 'import numpy as np\n'), ((4032, 4058), 'numpy.mean', 'np.mean', (['degree_eo'], {'axis': '(0)'}), '(degree_eo, axis=0)\n', (4039, 4058), True, 'import numpy as np\n'), ((4063, 4092), 'numpy.mean', 'np.mean', (['closeness_eo'], {'axis': '(0)'}), '(closeness_eo, axis=0)\n', (4070, 4092), True, 'import numpy as np\n'), ((4099, 4125), 'numpy.mean', 'np.mean', (['degree_ec'], {'axis': '(0)'}), '(degree_ec, axis=0)\n', (4106, 4125), True, 'import numpy as np\n'), ((4132, 4161), 'numpy.mean', 'np.mean', (['closeness_ec'], {'axis': '(0)'}), '(closeness_ec, axis=0)\n', (4139, 4161), True, 'import numpy as np\n'), ((6154, 6174), 'numpy.mean', 'np.mean', (['Beo'], {'axis': '(0)'}), '(Beo, 
axis=0)\n', (6161, 6174), True, 'import numpy as np\n'), ((6395, 6415), 'numpy.mean', 'np.mean', (['Bec'], {'axis': '(0)'}), '(Bec, axis=0)\n', (6402, 6415), True, 'import numpy as np\n'), ((7521, 7548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (7531, 7548), True, 'import matplotlib.pyplot as plt\n'), ((7618, 7636), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7634, 7636), True, 'import matplotlib.pyplot as plt\n'), ((7829, 7856), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (7839, 7856), True, 'import matplotlib.pyplot as plt\n'), ((7928, 7946), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7944, 7946), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8162), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (8145, 8162), True, 'import matplotlib.pyplot as plt\n'), ((8233, 8251), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8249, 8251), True, 'import matplotlib.pyplot as plt\n'), ((8442, 8469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (8452, 8469), True, 'import matplotlib.pyplot as plt\n'), ((8542, 8560), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8558, 8560), True, 'import matplotlib.pyplot as plt\n'), ((8639, 8651), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (8647, 8651), True, 'import numpy as np\n'), ((8661, 8673), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (8669, 8673), True, 'import numpy as np\n'), ((8802, 8815), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (8810, 8815), True, 'import numpy as np\n'), ((8959, 9017), 'teneto.misc.correct_pvalues_for_multiple_testing', 'teneto.misc.correct_pvalues_for_multiple_testing', (['pdegnode'], {}), '(pdegnode)\n', (9007, 9017), False, 'import teneto\n'), ((9173, 9186), 
'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (9181, 9186), True, 'import numpy as np\n'), ((9339, 9399), 'teneto.misc.correct_pvalues_for_multiple_testing', 'teneto.misc.correct_pvalues_for_multiple_testing', (['pclosenode'], {}), '(pclosenode)\n', (9387, 9399), False, 'import teneto\n'), ((290, 337), 'scipy.io.matlab.loadmat', 'sio.matlab.loadmat', (['"""./data/coord_power264.mat"""'], {}), "('./data/coord_power264.mat')\n", (308, 337), True, 'import scipy.io as sio\n'), ((3015, 3028), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (3023, 3028), True, 'import numpy as np\n'), ((3298, 3311), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (3306, 3311), True, 'import numpy as np\n'), ((3584, 3597), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (3592, 3597), True, 'import numpy as np\n'), ((3873, 3886), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (3881, 3886), True, 'import numpy as np\n'), ((4257, 4292), 'pandas.read_csv', 'pd.read_csv', (['"""./data/PowerInfo.csv"""'], {}), "('./data/PowerInfo.csv')\n", (4268, 4292), True, 'import pandas as pd\n'), ((4603, 4624), 'numpy.around', 'np.around', (['sigVals', '(3)'], {}), '(sigVals, 3)\n', (4612, 4624), True, 'import numpy as np\n'), ((4926, 4958), 'pandas.concat', 'pd.concat', (['[hdrresults, results]'], {}), '([hdrresults, results])\n', (4935, 4958), True, 'import pandas as pd\n'), ((5145, 5202), 'os.system', 'os.system', (["('pandoc ' + sname + '.md -o ' + sname + '.pdf')"], {}), "('pandoc ' + sname + '.md -o ' + sname + '.pdf')\n", (5154, 5202), False, 'import os\n'), ((6180, 6190), 'numpy.sort', 'np.sort', (['B'], {}), '(B)\n', (6187, 6190), True, 'import numpy as np\n'), ((6421, 6431), 'numpy.sort', 'np.sort', (['B'], {}), '(B)\n', (6428, 6431), True, 'import numpy as np\n'), ((8723, 8748), 'numpy.mean', 'np.mean', (['eC[netid == net]'], {}), '(eC[netid == net])\n', (8730, 8748), True, 'import numpy as np\n'), ((8765, 8792), 'numpy.mean', 'np.mean', (['eCec[netid == 
net]'], {}), '(eCec[netid == net])\n', (8772, 8792), True, 'import numpy as np\n'), ((8852, 8923), 'tenetostats.shufflegroups', 'tenetostats.shufflegroups', (['degree_eo[:, r]', 'degree_ec[:, r]'], {'pnum': '(10000)'}), '(degree_eo[:, r], degree_ec[:, r], pnum=10000)\n', (8877, 8923), False, 'import tenetostats\n'), ((9024, 9057), 'numpy.where', 'np.where', (['(pdegnode_correct < 0.05)'], {}), '(pdegnode_correct < 0.05)\n', (9032, 9057), True, 'import numpy as np\n'), ((9093, 9127), 'numpy.mean', 'np.mean', (['degree_eo[:, sig]'], {'axis': '(0)'}), '(degree_eo[:, sig], axis=0)\n', (9100, 9127), True, 'import numpy as np\n'), ((9126, 9160), 'numpy.mean', 'np.mean', (['degree_ec[:, sig]'], {'axis': '(0)'}), '(degree_ec[:, sig], axis=0)\n', (9133, 9160), True, 'import numpy as np\n'), ((9223, 9300), 'tenetostats.shufflegroups', 'tenetostats.shufflegroups', (['closeness_eo[:, r]', 'closeness_ec[:, r]'], {'pnum': '(10000)'}), '(closeness_eo[:, r], closeness_ec[:, r], pnum=10000)\n', (9248, 9300), False, 'import tenetostats\n'), ((9406, 9441), 'numpy.where', 'np.where', (['(pclosenode_correct < 0.05)'], {}), '(pclosenode_correct < 0.05)\n', (9414, 9441), True, 'import numpy as np\n'), ((9443, 9480), 'numpy.mean', 'np.mean', (['closeness_eo[:, sig]'], {'axis': '(0)'}), '(closeness_eo[:, sig], axis=0)\n', (9450, 9480), True, 'import numpy as np\n'), ((9479, 9516), 'numpy.mean', 'np.mean', (['closeness_ec[:, sig]'], {'axis': '(0)'}), '(closeness_ec[:, sig], axis=0)\n', (9486, 9516), True, 'import numpy as np\n'), ((657, 677), 'numpy.where', 'np.where', (['(netid == n)'], {}), '(netid == n)\n', (665, 677), True, 'import numpy as np\n'), ((2973, 2996), 'numpy.random.rand', 'np.random.rand', (['(264)', '(46)'], {}), '(264, 46)\n', (2987, 2996), True, 'import numpy as np\n'), ((3256, 3279), 'numpy.random.rand', 'np.random.rand', (['(264)', '(46)'], {}), '(264, 46)\n', (3270, 3279), True, 'import numpy as np\n'), ((3542, 3565), 'numpy.random.rand', 'np.random.rand', (['(264)', 
'(46)'], {}), '(264, 46)\n', (3556, 3565), True, 'import numpy as np\n'), ((3831, 3854), 'numpy.random.rand', 'np.random.rand', (['(264)', '(46)'], {}), '(264, 46)\n', (3845, 3854), True, 'import numpy as np\n'), ((6692, 6710), 'numpy.where', 'np.where', (['(dat > th)'], {}), '(dat > th)\n', (6700, 6710), True, 'import numpy as np\n'), ((7241, 7254), 'numpy.zeros', 'np.zeros', (['(264)'], {}), '(264)\n', (7249, 7254), True, 'import numpy as np\n'), ((4309, 4334), 'numpy.where', 'np.where', (['(val > threshold)'], {}), '(val > threshold)\n', (4317, 4334), True, 'import numpy as np\n'), ((6996, 7009), 'numpy.zeros', 'np.zeros', (['(266)'], {}), '(266)\n', (7004, 7009), True, 'import numpy as np\n'), ((7439, 7450), 'numpy.sort', 'np.sort', (['eC'], {}), '(eC)\n', (7446, 7450), True, 'import numpy as np\n'), ((7743, 7756), 'numpy.sort', 'np.sort', (['eCec'], {}), '(eCec)\n', (7750, 7756), True, 'import numpy as np\n'), ((8053, 8064), 'numpy.sort', 'np.sort', (['eD'], {}), '(eD)\n', (8060, 8064), True, 'import numpy as np\n'), ((8356, 8369), 'numpy.sort', 'np.sort', (['eDec'], {}), '(eDec)\n', (8363, 8369), True, 'import numpy as np\n'), ((375, 414), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/networkassignment"""'], {}), "('./data/networkassignment')\n", (386, 414), True, 'import scipy.io as sio\n'), ((7496, 7510), 'numpy.argsort', 'np.argsort', (['eC'], {}), '(eC)\n', (7506, 7510), True, 'import numpy as np\n'), ((7802, 7818), 'numpy.argsort', 'np.argsort', (['eCec'], {}), '(eCec)\n', (7812, 7818), True, 'import numpy as np\n'), ((8110, 8124), 'numpy.argsort', 'np.argsort', (['eD'], {}), '(eD)\n', (8120, 8124), True, 'import numpy as np\n'), ((8415, 8431), 'numpy.argsort', 'np.argsort', (['eDec'], {}), '(eDec)\n', (8425, 8431), True, 'import numpy as np\n')] |
# Smoke test for the libpclproc extension: build a tiny 3x3x3 float32 volume
# with three non-zero voxels and hand it to the native processing routine.
import libpclproc
import numpy
a = numpy.zeros((3,3,3),dtype='float32')
# One marked voxel per first-axis slice at index [i, 1, 2], with
# increasing intensities (axis semantics depend on libpclproc -- TODO confirm).
a[0,1,2] = 5
a[1,1,2] = 7
a[2,1,2] = 9
libpclproc.process(a)
| [
"numpy.zeros",
"libpclproc.process"
] | [((36, 75), 'numpy.zeros', 'numpy.zeros', (['(3, 3, 3)'], {'dtype': '"""float32"""'}), "((3, 3, 3), dtype='float32')\n", (47, 75), False, 'import numpy\n'), ((113, 134), 'libpclproc.process', 'libpclproc.process', (['a'], {}), '(a)\n', (131, 134), False, 'import libpclproc\n')] |
# -*- coding: utf-8 -*-
import copy
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def generate(x, y):
    """Create an empty board: a list of ``y`` rows, each ``x`` zeros wide."""
    return [[0] * x for _ in range(y)]
def get_new_piece(matrix, piece_size):
    """Grow one random piece of up to ``piece_size`` empty cells.

    Seeds the piece with the first empty (zero) cell of *matrix*, scanning
    columns left-to-right, then repeatedly attaches one random free
    neighbour. Returns a list of ``[x, y]`` cells; may be shorter than
    ``piece_size`` when no free neighbour is available (and empty when the
    board is full).

    Bug fix: the original read undefined module globals ``x_length`` /
    ``y_length``; the bounds are now derived from *matrix* itself.
    """
    n_rows = len(matrix)
    n_cols = len(matrix[0]) if n_rows else 0
    piece = []
    # Seed with the first empty cell (column-major scan, matching callers).
    for x in range(n_cols):
        for y in range(n_rows):
            if matrix[y][x] == 0:
                piece.append([x, y])
                break
        if piece:
            break
    # Attach up to piece_size - 1 random free neighbours.
    for _ in range(piece_size - 1):
        neighbors = get_rand_neighbor(piece, matrix)
        random.shuffle(neighbors)
        for x, y in neighbors:
            if [x, y] not in piece:
                piece.append([x, y])
                break
    return piece
def get_rand_neighbor(piece, matrix):
    """List the in-bounds, still-empty cells bordering each cell of *piece*.

    A cell is reported once per piece cell it touches, so duplicates (and
    cells that are themselves part of *piece*) can appear; callers filter.
    """
    found = []
    for cx, cy in piece:
        for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            nx = cx - dx
            ny = cy - dy
            if nx < 0 or nx >= len(matrix[0]):
                continue
            if ny < 0 or ny >= len(matrix):
                continue
            if matrix[ny][nx] == 0:
                found.append([nx, ny])
    return found
def depict(matrix):
    """Show the board as a colour image, one colour per piece id,
    with every row/column labelled by its index."""
    plt.imshow(matrix, interpolation="nearest", cmap=plt.cm.gist_ncar)
    # Rotate the x labels so all 0..n-1 indices stay readable.
    plt.xticks(np.arange(len(matrix[0])), range(len(matrix[0])), rotation=90)
    plt.yticks(np.arange(len(matrix)), range(len(matrix)))
    plt.tight_layout()
    plt.show()
def shape_key(piece):
    """Build a translation/rotation-invariant signature for a piece.

    Combines the sorted squared pairwise distances with the sorted per-cell
    adjacency degrees, so congruent placements of the same shape map to the
    same key (distinct shapes could in principle collide).
    """
    sq_dists = []
    degree = {}
    for i, (x1, y1) in enumerate(piece):
        for j in range(i + 1, len(piece)):
            x2, y2 = piece[j]
            d2 = (x1 - x2) ** 2 + (y1 - y2) ** 2
            sq_dists.append(d2)
            if d2 == 1:  # orthogonally touching cells
                degree.setdefault(i, []).append(j)
                degree.setdefault(j, []).append(i)
    dist_part = str(sorted(sq_dists))
    degree_part = str(sorted(len(v) for v in degree.values()))
    return dist_part + "_" + degree_part
def get_new_pieces(matrix, x_length, y_length, piece_size):
    """Enumerate every piece of exactly *piece_size* cells containing the
    first empty cell of *matrix* (columns scanned left-to-right).

    Returns a sorted list of sorted ``[x, y]`` coordinate lists.
    """
    # Seed: first empty cell, column-major scan.
    seed = []
    for col in range(x_length):
        for row in range(y_length):
            if matrix[row][col] == 0:
                seed.append([col, row])
                break
        if seed:
            break
    complete = []
    stack = [seed]
    # Depth-first growth: extend each partial piece by one free neighbour.
    while stack:
        partial = stack.pop()
        for cx, cy in get_rand_neighbor(partial, matrix):
            if [cx, cy] in partial:
                continue
            grown = copy.deepcopy(partial)
            grown.append([cx, cy])
            if len(grown) == piece_size:
                grown = sorted(grown)
                if grown not in complete:
                    complete.append(grown)
            else:
                stack.append(grown)
    return sorted(complete)
def get_connected_subgraphs(matrix):
    """Group the empty (zero) cells of *matrix* into 4-connected components.

    Cells are identified by ``"x,y"`` strings; returns one list of cell ids
    per component, discovered in row-major order.
    """
    # Adjacency list over empty cells only.
    adjacency = {}
    for row in range(len(matrix)):
        for col in range(len(matrix[row])):
            if matrix[row][col] != 0:
                continue
            cell_id = ",".join([str(col), str(row)])
            adjacency[cell_id] = []
            for dc, dr in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
                nc = col - dc
                nr = row - dr
                if nc < 0 or nc >= len(matrix[0]):
                    continue
                if nr < 0 or nr >= len(matrix):
                    continue
                if matrix[nr][nc] == 0:
                    adjacency[cell_id].append([nc, nr])
    # Depth-first flood fill from each not-yet-visited cell.
    components = []
    visited = []
    for start, first_neighbors in adjacency.items():
        if start in visited:
            continue
        component = [start]
        visited.append(start)
        pending = list(first_neighbors)
        while len(pending):
            px, py = pending.pop()
            pid = ",".join([str(px), str(py)])
            if pid in visited:
                continue
            component.append(pid)
            visited.append(pid)
            if pid in adjacency.keys():
                pending += adjacency[pid]
        components.append(component)
    return components
def get_face(piece, matrix):
    """Count, over all in-bounds 4-neighbours of the piece's cells, how many
    border an occupied cell (*interface*) versus an empty one (*surface*).

    Returns the tuple ``(interface, surface)``.
    """
    occupied_edges = 0
    open_edges = 0
    for cx, cy in piece:
        for dx, dy in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
            nx = cx - dx
            ny = cy - dy
            if nx < 0 or nx >= len(matrix[0]):
                continue
            if ny < 0 or ny >= len(matrix):
                continue
            if matrix[ny][nx] == 0:
                open_edges += 1
            else:
                occupied_edges += 1
    return occupied_edges, open_edges
def half_puzzle(x_length, y_length, piece_size, same_piece_limit):
    """Generator: depth-first search for tilings covering half the board.

    Grows pieces of ``piece_size`` cells, allowing at most
    ``same_piece_limit`` copies of any shape (per ``shape_key``), and yields
    the board matrix each time exactly half the cells are covered.
    """
    best_score = x_length * y_length          # fewest empty cells seen so far
    best_matrix = generate(x_length, y_length)
    n_depict = 0                              # NOTE(review): unused
    n_pieces = int(x_length * y_length / piece_size)  # NOTE(review): unused
    # DFS stack entries: [priority, next_piece_id, pieces, matrix, shape->count]
    waiting = []
    piece_id = 1
    matrix = generate(x_length, y_length)
    # Seed the stack with every possible first piece.
    for new_piece in get_new_pieces(matrix, x_length, y_length, piece_size):
        pieces2count = {}
        key = shape_key(new_piece)
        pieces2count[key] = 1
        new_matrix = copy.deepcopy(matrix)
        for x, y in new_piece:
            new_matrix[y][x] = piece_id
        pieces = [new_piece]
        waiting.append([0, piece_id + 1, pieces, new_matrix, pieces2count])
    trial = 0
    random.shuffle(waiting)
    while len(waiting) > 0:
        trial += 1
        if trial > 530000:  # hard cap on explored states
            break
        delta, piece_id, pieces, matrix, pieces2count = waiting.pop()
        # score = number of still-empty cells.
        score = sum([sum([1 if x == 0 else 0 for x in mat]) for mat in matrix])
        # Prune states whose empty region has split into disconnected islands.
        if len(get_connected_subgraphs(matrix)) > 1:
            continue
        if best_score >= score:
            best_score = score
            best_matrix = matrix
        if score == (x_length * y_length) / 2:
            # Exactly half the board is tiled: emit this partial solution.
            yield (best_matrix)
            continue
        new_pieces = get_new_pieces(matrix, x_length, y_length, piece_size)
        for new_piece in new_pieces:
            new_pieces2count = copy.deepcopy(pieces2count)
            key = shape_key(new_piece)
            if key not in new_pieces2count.keys():
                new_pieces2count[key] = 0
            new_pieces2count[key] += 1
            if new_pieces2count[key] > same_piece_limit:
                continue  # too many copies of this shape already
            new_pieces = copy.deepcopy(pieces)
            new_pieces.append(new_piece)
            new_matrix = copy.deepcopy(matrix)
            for x, y in new_piece:
                new_matrix[y][x] = piece_id
            face = get_face(new_piece, matrix)
            # NOTE(review): this re-checks the parent `matrix`, which already
            # passed the same test after the pop above, so it never triggers;
            # presumably `new_matrix` was intended.
            if len(get_connected_subgraphs(matrix)) > 1:
                continue
            waiting.append(
                [face[0], piece_id + 1, new_pieces, new_matrix, new_pieces2count]
            )
        # Mostly keep the stack sorted by interface count (pop() then expands
        # the state with the largest interface); occasionally shuffle instead.
        if random.random() < 0.05:
            random.shuffle(waiting)
        elif random.random() < 0.95:
            waiting = sorted(waiting)
    return matrix
def same_piece_within_limit(matrix, same_piece_limit):
    """Return True when no piece shape occurs more than *same_piece_limit*
    times in *matrix*, where a piece is the set of cells sharing one id."""
    # Gather each piece's cells, keyed by the id stored in the matrix.
    cells_by_id = {}
    for row_idx, row in enumerate(matrix):
        for col_idx, piece_id in enumerate(row):
            cells_by_id.setdefault(piece_id, []).append([col_idx, row_idx])
    # Count occurrences of each shape signature; fail fast on excess.
    shape_counts = {}
    for cells in cells_by_id.values():
        signature = shape_key(cells)
        shape_counts[signature] = shape_counts.get(signature, 0) + 1
        if shape_counts[signature] > same_piece_limit:
            return False
    return True
def find_some(x_length=8, y_length=5, piece_size=4, same_piece_limit=2, max_trial=5):
    """Generator: combine half-board tilings into full-board puzzles.

    Each new half from half_puzzle is 180-degree rotated and overlaid on
    every previously seen half; when the two halves exactly cover the board
    and the shape limit still holds, the combined matrix is yielded.
    """
    index = 0
    matrix_history = []
    # keta = id offset for the second half, so its piece ids don't collide.
    keta = int(x_length * y_length / piece_size / 2)
    for matrix in half_puzzle(x_length, y_length, piece_size, same_piece_limit):
        for prev_matrix in matrix_history:
            # Rotate the new half by 180 degrees (flip both axes).
            matrix3 = np.flipud(np.fliplr(np.array(matrix)))
            # min > 0 means every cell is covered by exactly one of the halves
            # (list + ndarray broadcasts to an ndarray here).
            if (prev_matrix + matrix3).min().min() > 0:
                # Shift the rotated half's ids past the first half's ids,
                # then restore its empty cells (0 + keta) back to zero.
                matrix3 += keta
                matrix3 = np.where(matrix3 == keta, 0, matrix3)
                combined_matrix = prev_matrix + matrix3
                if same_piece_within_limit(combined_matrix, same_piece_limit):
                    yield combined_matrix
                    index += 1
        matrix_history.append(matrix)
        if index > max_trial:
            break
    return True
| [
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"random.shuffle",
"random.random",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.tight_layout"
] | [((1170, 1236), 'matplotlib.pyplot.imshow', 'plt.imshow', (['matrix'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.gist_ncar'}), "(matrix, interpolation='nearest', cmap=plt.cm.gist_ncar)\n", (1180, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1396), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1394, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1409, 1411), True, 'import matplotlib.pyplot as plt\n'), ((5463, 5486), 'random.shuffle', 'random.shuffle', (['waiting'], {}), '(waiting)\n', (5477, 5486), False, 'import random\n'), ((564, 589), 'random.shuffle', 'random.shuffle', (['neighbors'], {}), '(neighbors)\n', (578, 589), False, 'import random\n'), ((5246, 5267), 'copy.deepcopy', 'copy.deepcopy', (['matrix'], {}), '(matrix)\n', (5259, 5267), False, 'import copy\n'), ((6147, 6174), 'copy.deepcopy', 'copy.deepcopy', (['pieces2count'], {}), '(pieces2count)\n', (6160, 6174), False, 'import copy\n'), ((6454, 6475), 'copy.deepcopy', 'copy.deepcopy', (['pieces'], {}), '(pieces)\n', (6467, 6475), False, 'import copy\n'), ((6542, 6563), 'copy.deepcopy', 'copy.deepcopy', (['matrix'], {}), '(matrix)\n', (6555, 6563), False, 'import copy\n'), ((6910, 6925), 'random.random', 'random.random', ([], {}), '()\n', (6923, 6925), False, 'import random\n'), ((6946, 6969), 'random.shuffle', 'random.shuffle', (['waiting'], {}), '(waiting)\n', (6960, 6969), False, 'import random\n'), ((2746, 2766), 'copy.deepcopy', 'copy.deepcopy', (['piece'], {}), '(piece)\n', (2759, 2766), False, 'import copy\n'), ((6983, 6998), 'random.random', 'random.random', ([], {}), '()\n', (6996, 6998), False, 'import random\n'), ((8114, 8151), 'numpy.where', 'np.where', (['(matrix3 == keta)', '(0)', 'matrix3'], {}), '(matrix3 == keta, 0, matrix3)\n', (8122, 8151), True, 'import numpy as np\n'), ((7980, 7996), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (7988, 7996), 
True, 'import numpy as np\n')] |
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from light_bulb import Camera, Controls, Lighting
import math
import numpy as np
# Profile curves as (x, y, z) points in the z = 0 plane; each is revolved
# around the y-axis by surface_revolution() to build one part of the bulb.
curve = np.array([
    [2, 2, 0], [1, 1, 0], [1, -1, 0], [2, -2, 0]
])  # NOTE: defined but never drawn by draw_surface()
black_down_tip = np.array([
    [1, -1, 0], [1, -.9, 0], [2, -.1, 0]
])
gray_base = np.array([
    [2, -.1, 0], [3, 1, 0],
    # dashed
    [2.8, 1.2, 0], [3, 1.4, 0],
    [2.8, 1.6, 0], [3, 1.8, 0],
    [2.8, 2., 0], [3, 2.2, 0],
    [2.8, 2.4, 0], [3, 2.6, 0],
    # end part
    [3, 3, 0]
])
# 5x3 grid of Bezier control points, registered as a GL evaluator in
# opengl_init() and rendered by draw_top_part().
ctrlpoints = [
    [[-5, -1., -5], [0., -1., -10], [5, -1., -5]],
    [[-7.5, -1., -2.5], [0., 5, -10], [7.5, -1., -2.5]],
    [[-7.5, -1, 0], [0., 30, 0], [7.5, -1, 0]], # center
    [[-7.5, -1., 2.5], [0., 5, 10], [7.5, -1., 2.5]],
    [[-5., -1., 5], [0., -1., 10], [5, -1., 5]],
]
glass_part = np.array([
    [3, 3, 0], [3.5, 4, 0], [3.52, 4.1, 0], [3.54, 4.2, 0],
    [3.54, 6, 0], [3.8, 6.5, 0], [7.2, 10, 0]
])
# Default material colour and specular highlight used by draw_surface().
cubeColor = [0.5, 0.5, 1.0]
cubeSpecular = [1.0, 1.0, 1.0]
def opengl_init():
    """Enable the per-frame fixed-pipeline GL state.

    Turns on depth testing, light 0 and colour-material tracking, and
    registers ``ctrlpoints`` as a Bezier surface evaluator (consumed by
    draw_top_part) with a 20x20 evaluation grid.
    """
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_LIGHT0)
    glEnable(GL_COLOR_MATERIAL)
    # Bezier surface over the control-point grid; u and v both span [0, 1].
    glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, ctrlpoints)
    glEnable(GL_MAP2_VERTEX_3)
    glEnable(GL_AUTO_NORMAL)
    glMapGrid2f(20, 0.0, 1.0, 20, 0.0, 1.0)
def get_dist_xz(p):
    """Return the distance of point *p* from the y-axis in the xz-plane.

    p: an (x, y, z) sequence; only the x and z components are used.
    """
    # math.hypot computes sqrt(x*x + z*z) in one call and is robust
    # against overflow/underflow of the squared intermediate terms.
    return math.hypot(p[0], p[2])
def surface_revolution(curve, n_slices, cubeColor=None):
    """Lathe a 2-D profile *curve* around the y-axis and render it.

    curve: sequence of (x, y, z) profile points.
    n_slices: number of angular subdivisions of the full revolution.
    cubeColor: RGB triple used for both material and vertex colour
               (shadows the module-level ``cubeColor`` constant).
    """
    n_points = len(curve)
    vertices = np.zeros(n_slices * n_points * 3).reshape(n_slices, n_points, 3)
    # Rotate every profile point around the y-axis; its xz-distance from
    # the axis (via get_dist_xz) becomes the radius of its circle.
    for islice in range(n_slices):
        for ipoint in range(n_points):
            r = get_dist_xz(curve[ipoint])
            z = r * math.sin(2 * math.pi * islice / n_slices)
            x = r * math.cos(2 * math.pi * islice / n_slices)
            y = curve[ipoint][1]
            vertices[islice][ipoint][0] = x
            vertices[islice][ipoint][1] = y
            vertices[islice][ipoint][2] = z
    glPushMatrix()
    # Stitch each slice to the next with a quad strip; the last slice
    # wraps back to slice 0 to close the surface.
    for islice in range(n_slices):
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, cubeColor)
        glBegin(GL_QUAD_STRIP)
        glColor3f(*cubeColor)
        if islice == n_slices - 1:
            next_slice = 0
        else:
            next_slice = islice + 1
        for ipoint in range(n_points):
            glVertex3f(vertices[islice][ipoint][0], vertices[islice][ipoint][1],
                       vertices[islice][ipoint][2])
            glVertex3f(vertices[next_slice][ipoint][0], vertices[next_slice][ipoint][1],
                       vertices[next_slice][ipoint][2])
        glEnd()
    glPopMatrix()
def draw_top_part():
    """Render the evaluated Bezier cap, raised to the top of the bulb."""
    glPushMatrix()
    glColor3f(1, 1, 0.878)
    glTranslatef(0, 11, 0)
    # Evaluate the surface registered by opengl_init() over its 20x20 grid.
    glEvalMesh2(GL_FILL, 0, 20, 0, 20)
    glPopMatrix()
def draw_surface(controls: Controls):
    """Draw the complete light bulb: black tip, gray base, glass, and cap.

    The model is translated by the user-controlled vector and uniformly
    scaled down, then each profile curve is lathed by surface_revolution().
    """
    glPushMatrix()
    glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, cubeColor)
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, cubeSpecular)
    glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 10.0)
    glTranslatef(*controls.translate_vector)
    glScale(.7, .7, .7)
    # black base
    surface_revolution(black_down_tip, 100, (0, 0, 0))
    # gray base
    surface_revolution(gray_base, 100, (0.431, 0.431, 0.431))
    # glass part
    surface_revolution(glass_part, 100, (1, 1, 0.878))
    # top part
    draw_top_part()
    glPopMatrix()
def draw_sphere(translate=None):
    """Draw a small black sphere, optionally translated to *translate*."""
    # draw a sphere right on where light is, helps debug :)
    glPushMatrix()
    glColor3f(0, 0, 0)
    if translate:
        glTranslatef(*translate)
    q = gluNewQuadric()
    gluQuadricDrawStyle(q, GLU_FILL)
    gluQuadricNormals(q, GLU_SMOOTH)
    gluSphere(q, .7, 50, 50)
    glPopMatrix()
def draw_sphere_onctrl():
    """Draw a debug sphere at every Bezier control point in ``ctrlpoints``."""
    flattened = (point for row in ctrlpoints for point in row)
    for point in flattened:
        draw_sphere(translate=point)
def event_capture_loop(controls):
    """Drain the pygame event queue: handle quit, keyboard and mouse input."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()
        elif event.type == pygame.KEYDOWN:
            controls.handle_key(event.key)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            controls.orbital_control(event.button)
def main():
    """Open a 1600x900 window and run the render loop (~30 FPS)."""
    # camera vars
    eye = (0, 0, 15)
    target = (0, 0, 0)
    up = (0, 1, 0)
    pygame.init()
    display = (1600, 900)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # base settings
    glClearColor(0.761, 0.773, 0.824, 1.)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    lighting = Lighting()
    lighting.set_lighting()
    # set perspective
    gluPerspective(45, (display[0] / display[1]), .1, 50.0)
    camera = Camera(eye, target, up)
    controls = Controls(camera)
    controls.translate_vector = [0, -2, 0]
    while True:
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
        glPushMatrix()
        camera.set_look_at()
        lighting.set_lighting_position()
        opengl_init()
        draw_surface(controls)
        event_capture_loop(controls)
        glPopMatrix()
        pygame.display.flip()
        # ~33 ms per frame -> roughly 30 FPS
        pygame.time.wait(33)
if __name__ == '__main__':
main()
| [
"pygame.quit",
"math.sqrt",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.zeros",
"pygame.init",
"light_bulb.Camera",
"pygame.display.flip",
"pygame.time.wait",
"math.sin",
"numpy.array",
"light_bulb.Controls",
"math.cos",
"light_bulb.Lighting"
] | [((184, 240), 'numpy.array', 'np.array', (['[[2, 2, 0], [1, 1, 0], [1, -1, 0], [2, -2, 0]]'], {}), '([[2, 2, 0], [1, 1, 0], [1, -1, 0], [2, -2, 0]])\n', (192, 240), True, 'import numpy as np\n'), ((265, 315), 'numpy.array', 'np.array', (['[[1, -1, 0], [1, -0.9, 0], [2, -0.1, 0]]'], {}), '([[1, -1, 0], [1, -0.9, 0], [2, -0.1, 0]])\n', (273, 315), True, 'import numpy as np\n'), ((333, 500), 'numpy.array', 'np.array', (['[[2, -0.1, 0], [3, 1, 0], [2.8, 1.2, 0], [3, 1.4, 0], [2.8, 1.6, 0], [3, \n 1.8, 0], [2.8, 2.0, 0], [3, 2.2, 0], [2.8, 2.4, 0], [3, 2.6, 0], [3, 3, 0]]'], {}), '([[2, -0.1, 0], [3, 1, 0], [2.8, 1.2, 0], [3, 1.4, 0], [2.8, 1.6, 0\n ], [3, 1.8, 0], [2.8, 2.0, 0], [3, 2.2, 0], [2.8, 2.4, 0], [3, 2.6, 0],\n [3, 3, 0]])\n', (341, 500), True, 'import numpy as np\n'), ((847, 960), 'numpy.array', 'np.array', (['[[3, 3, 0], [3.5, 4, 0], [3.52, 4.1, 0], [3.54, 4.2, 0], [3.54, 6, 0], [3.8,\n 6.5, 0], [7.2, 10, 0]]'], {}), '([[3, 3, 0], [3.5, 4, 0], [3.52, 4.1, 0], [3.54, 4.2, 0], [3.54, 6,\n 0], [3.8, 6.5, 0], [7.2, 10, 0]])\n', (855, 960), True, 'import numpy as np\n'), ((1321, 1357), 'math.sqrt', 'math.sqrt', (['(p[0] * p[0] + p[2] * p[2])'], {}), '(p[0] * p[0] + p[2] * p[2])\n', (1330, 1357), False, 'import math\n'), ((3849, 3867), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3865, 3867), False, 'import pygame\n'), ((4240, 4253), 'pygame.init', 'pygame.init', ([], {}), '()\n', (4251, 4253), False, 'import pygame\n'), ((4284, 4336), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display', '(DOUBLEBUF | OPENGL)'], {}), '(display, DOUBLEBUF | OPENGL)\n', (4307, 4336), False, 'import pygame\n'), ((4569, 4579), 'light_bulb.Lighting', 'Lighting', ([], {}), '()\n', (4577, 4579), False, 'from light_bulb import Camera, Controls, Lighting\n'), ((4704, 4727), 'light_bulb.Camera', 'Camera', (['eye', 'target', 'up'], {}), '(eye, target, up)\n', (4710, 4727), False, 'from light_bulb import Camera, Controls, Lighting\n'), ((4743, 4759), 
'light_bulb.Controls', 'Controls', (['camera'], {}), '(camera)\n', (4751, 4759), False, 'from light_bulb import Camera, Controls, Lighting\n'), ((5122, 5143), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5141, 5143), False, 'import pygame\n'), ((5152, 5172), 'pygame.time.wait', 'pygame.time.wait', (['(33)'], {}), '(33)\n', (5168, 5172), False, 'import pygame\n'), ((1472, 1505), 'numpy.zeros', 'np.zeros', (['(n_slices * n_points * 3)'], {}), '(n_slices * n_points * 3)\n', (1480, 1505), True, 'import numpy as np\n'), ((3919, 3932), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3930, 3932), False, 'import pygame\n'), ((1675, 1716), 'math.sin', 'math.sin', (['(2 * math.pi * islice / n_slices)'], {}), '(2 * math.pi * islice / n_slices)\n', (1683, 1716), False, 'import math\n'), ((1737, 1778), 'math.cos', 'math.cos', (['(2 * math.pi * islice / n_slices)'], {}), '(2 * math.pi * islice / n_slices)\n', (1745, 1778), False, 'import math\n')] |
import numpy as np
from sklearn.neural_network import MLPClassifier
from vnpy.app.cta_strategy import (ArrayManager, BarData)
from vnpy.trader.constant import Direction
import joblib
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
class LSTM_Analyze(ArrayManager):
    """Bar array that feeds scaled close prices to a pre-trained Keras LSTM
    and exposes the predicted price trend as a ``Direction``.
    """

    def __init__(self, size=100):
        super().__init__(size)
        # Pre-trained model on disk next to the working directory.
        self.model = load_model('LSTM.h5')
        self.atr_value = 0
        self.rsi_value = 0
        self.cci_value = 0
        self.hist = 0
        self.std_value = 0
        self.percentage_value = 0
        self.trend = 0
        # Scales close prices into [0, 1] before feeding the LSTM.
        self.scaler = MinMaxScaler(feature_range=(0, 1))

    def get_x_y(self, data, N, offset):
        """
        Split data into x (features) and y (target): each x row is the
        window of N samples preceding its y sample, starting at *offset*.
        """
        x, y = [], []
        for i in range(offset, len(data)):
            x.append(data[i - N:i])
            y.append(data[i])
        x = np.array(x)
        y = np.array(y)
        return x, y

    def update_bar(self, bar: BarData):
        """Push a new bar and re-run the prediction once warmed up."""
        super().update_bar(bar)
        if self.inited == True:
            self.predict(60)

    def predict_close(self, n):
        """Predict the next close from the last windows of *n* scaled closes."""
        close_in = self.scaler.fit_transform(self.close_array.reshape(-1, 1))
        close_in, _ = self.get_x_y(close_in, n, n)
        close_out = self.model.predict(close_in)
        # Undo the [0, 1] scaling so the prediction is in price units.
        close_out = self.scaler.inverse_transform(close_out)
        return close_out[-1]

    def predict(self, n):
        """Set ``self.trend`` by comparing the predicted close to the last one."""
        close_out = self.predict_close(n)
        if close_out > self.close_array[-1]:
            self.trend = Direction.LONG
        elif close_out < self.close_array[-1]:
            self.trend = Direction.SHORT
        else:
            self.trend = Direction.NET
class MLP_Analyze(ArrayManager):
    """Bar array that feeds six technical indicators to a pre-trained
    sklearn classifier and exposes the predicted trend as a ``Direction``.
    """

    def __init__(self, size=100):
        super().__init__(size)
        # Pre-trained classifier; expects the 6 features built in predict().
        self.clf = joblib.load('clf_selected.m')
        self.atr_value = 0
        self.rsi_value = 0
        self.cci_value = 0
        self.hist = 0
        self.std_value = 0
        self.percentage_value = 0
        self.trend = 0

    def percentage(self, array=False):
        """Bar-over-bar percentage change of the close price.

        Returns the whole series when array=True, else the latest value.
        """
        v_percent = np.zeros(len(self.close))
        for i in range(1, len(self.close)):
            if self.close[i - 1] == 0.0:
                percentage = 0.0
            else:
                percentage = ((self.close[i] - self.close[i - 1]) /
                              self.close[i - 1]) * 100.0
            v_percent[i] = percentage
        # BUGFIX: the original compared the float array to the strings
        # 'nan'/'inf', which never matches any element and cleaned nothing.
        # Use the numeric predicates to zero out non-finite values.
        v_percent[np.isnan(v_percent)] = 0
        v_percent[np.isinf(v_percent)] = 0
        if array:
            return v_percent
        else:
            return v_percent[-1]

    def update_bar(self, bar: BarData):
        """Push a new bar and re-run the prediction once warmed up."""
        super().update_bar(bar)
        if self.inited == True:
            self.predict()

    def predict(self, n=60):
        """Build the last *n* feature rows and classify the current trend."""
        macd, signal, self.hist = self.macd(12, 26, 9, array=True)
        self.atr_value = self.atr(25, array=True)
        self.rsi_value = self.rsi(35, array=True)
        self.cci_value = self.cci(30, array=True)
        self.std_value = self.std(30, array=True)
        self.percentage_value = self.percentage(array=True)
        # One row per bar, one column per indicator (hence the transpose).
        x = np.array([
            self.hist[-n:], self.atr_value[-n:], self.rsi_value[-n:],
            self.cci_value[-n:], self.std_value[-n:],
            self.percentage_value[-n:]
        ])
        x = x.T
        y = self.clf.predict(x)
        print(y)
        # Classifier labels: 1 -> long, -1 -> short, 0 -> flat.
        if y[-1] == 1:
            self.trend = Direction.LONG
        elif y[-1] == -1:
            self.trend = Direction.SHORT
        elif y[-1] == 0:
            self.trend = Direction.NET
| [
"keras.models.load_model",
"joblib.load",
"sklearn.preprocessing.MinMaxScaler",
"numpy.array"
] | [((389, 410), 'keras.models.load_model', 'load_model', (['"""LSTM.h5"""'], {}), "('LSTM.h5')\n", (399, 410), False, 'from keras.models import load_model\n'), ((620, 654), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (632, 654), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((915, 926), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (923, 926), True, 'import numpy as np\n'), ((939, 950), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (947, 950), True, 'import numpy as np\n'), ((1822, 1851), 'joblib.load', 'joblib.load', (['"""clf_selected.m"""'], {}), "('clf_selected.m')\n", (1833, 1851), False, 'import joblib\n'), ((3103, 3246), 'numpy.array', 'np.array', (['[self.hist[-n:], self.atr_value[-n:], self.rsi_value[-n:], self.cci_value[-\n n:], self.std_value[-n:], self.percentage_value[-n:]]'], {}), '([self.hist[-n:], self.atr_value[-n:], self.rsi_value[-n:], self.\n cci_value[-n:], self.std_value[-n:], self.percentage_value[-n:]])\n', (3111, 3246), True, 'import numpy as np\n')] |
#!/usr/bin/env python
##############################################
# Author: <NAME>
# Check all features permutations
##############################################
import itertools
import numpy as np
import pandas as pd
import os
from Models.NeuralNetwork import NeuralNetwork
# Resolve the features CSV relative to this script's location.
path = os.path.dirname(os.path.abspath(__file__))
features_file_name = "../Datasets/features_extractions/median_9_2_(75-25)_vt_include.csv"
features_file = os.path.join(path, features_file_name)
df_features = pd.read_csv(features_file)
# Feature name -> column index in the CSV; commented-out entries are
# excluded from the search space.
features = {
    "ld" : 1, # Length of domain
    "ncc" : 2, # Number of consecutive characters
    "ed" : 3, # Entropy of domain
    "nia" : 4, # Number of IP addresses
    "dgia" : 5, # Distinct geolocations of the IP addresses
    "atv" : 6, # Average TTL value
    "sdt" : 7, # Standard deviation of TTL
    "ltd" : 8, # Life time of domain
    "atd" : 9, # Active time of domain
    "car" : 10, # Communication ASNs Rank
    "ccr" : 11, # Communication Countries Rank
    # "ndr" : 12, # Number of DNS Records
    "ndc" : 13, # Number of DNS changes by passive DNS
    # "nsd" : 14, # Number of Subdomains
    "etsc" : 15, # Expiration Time of SSL Certificate
    # "scv" : 16 # SSL Certificate is Valid
}
# Column holding the label, appended to every feature subset below.
y_column_idx = 17
layers = [(80,"relu"),(80,"relu"),(80,"leakyrelu"),(1,'sigmoid')]
outputs = {}
# Exhaustively train/evaluate a network on every feature subset of
# size 2 .. len(features).
for i in range(2,(len(features)+1)):
    combinations = list(itertools.combinations(list(features.keys()),i))
    print("Combinations:")
    print(combinations)
    for combination in combinations:
        print("Current combination:")
        print(combination)
        use_columns = [features[feature_column] for feature_column in combination]
        use_columns.append(y_column_idx)
        new_df = np.array(df_features[df_features.columns[use_columns]].values)
        nn = NeuralNetwork(dataset=new_df, learning_rate = 0.001, threshold=0.5, training_epochs = 20000, degree=1)
        nn.build()
        nn.train(layers=layers)
        scores = nn.predict()
        # Drop the verbose free-text report and flatten the confusion
        # matrix so each result fits a single CSV row.
        del scores["classification_report"]
        scores["confusion_matrix"] = str(scores["confusion_matrix"]).strip()
        outputs[str(combination)] = scores
out = pd.DataFrame.from_dict(outputs,orient='index')
out.to_csv("features_permutation_output.csv", encoding="utf-8", sep=";")
| [
"os.path.abspath",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"Models.NeuralNetwork.NeuralNetwork",
"numpy.array",
"os.path.join"
] | [((459, 497), 'os.path.join', 'os.path.join', (['path', 'features_file_name'], {}), '(path, features_file_name)\n', (471, 497), False, 'import os\n'), ((513, 539), 'pandas.read_csv', 'pd.read_csv', (['features_file'], {}), '(features_file)\n', (524, 539), True, 'import pandas as pd\n'), ((2210, 2257), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['outputs'], {'orient': '"""index"""'}), "(outputs, orient='index')\n", (2232, 2257), True, 'import pandas as pd\n'), ((321, 346), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (336, 346), False, 'import os\n'), ((1774, 1836), 'numpy.array', 'np.array', (['df_features[df_features.columns[use_columns]].values'], {}), '(df_features[df_features.columns[use_columns]].values)\n', (1782, 1836), True, 'import numpy as np\n'), ((1854, 1956), 'Models.NeuralNetwork.NeuralNetwork', 'NeuralNetwork', ([], {'dataset': 'new_df', 'learning_rate': '(0.001)', 'threshold': '(0.5)', 'training_epochs': '(20000)', 'degree': '(1)'}), '(dataset=new_df, learning_rate=0.001, threshold=0.5,\n training_epochs=20000, degree=1)\n', (1867, 1956), False, 'from Models.NeuralNetwork import NeuralNetwork\n')] |
# Copyright <NAME> 2018.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
class RPSTrainer:
def __init__(self):
self.NUM_ACTIONS = 3
self.actionUtility = np.array([
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0]
])
self.regretSum = np.zeros(self.NUM_ACTIONS)
self.strategySum = np.zeros(self.NUM_ACTIONS)
self.oppStrategy = [.4, .3, .3]
def normalize(self, strategy):
normalizingSum = np.sum(strategy)
if normalizingSum > 0:
strategy /= normalizingSum
else:
strategy = np.repeat(1 / self.NUM_ACTIONS, self.NUM_ACTIONS)
return strategy
def getStrategy(self):
return self.normalize(self.regretSum.clip(min=0))
def getAverageStrategy(self):
return self.normalize(np.copy(self.strategySum))
def bestResponse(self, utility):
return np.eye(self.NUM_ACTIONS)[np.argmax(utility)]
def exploitability(self, strategy):
utility = np.dot(self.actionUtility, self.oppStrategy)
return np.dot(self.bestResponse(utility) - strategy, utility)
def train(self, iterations, df=None, sample=.001):
for i in range(iterations):
strategy = self.getStrategy()
self.strategySum += strategy
Q_values = np.dot(self.actionUtility, self.oppStrategy)
value = np.dot(strategy, Q_values)
self.regretSum += Q_values - value
if df is None or np.random.random() > sample:
continue
target_policy = self.getAverageStrategy()
df = df.append(
pd.DataFrame(
np.append(
np.array([i, self.exploitability(target_policy)]),
target_policy
).reshape(-1, 2 + self.NUM_ACTIONS),
columns=list(df)
),
ignore_index=True
)
return df
def main():
    """Train the regret matcher, plot convergence, and fit a power law.

    Plots each action probability and the exploitability against iteration
    count on log-log axes, then regresses log(exploitability) on
    log(iterations) to estimate the convergence rate.
    """
    columns = ['iterations', 'exploitability', 'rock', 'paper', 'scissors']
    df = pd.DataFrame(columns=columns)
    trainer = RPSTrainer()
    df = trainer.train(1000000, df)
    target_policy = trainer.getAverageStrategy()
    print('Target policy: %s' % (target_policy))
    for c in columns[2:]:
        plt.loglog(df[columns[0]], df[c], label=c)
    plt.xlabel(columns[0])
    plt.ylabel('target policy')
    plt.legend()
    plt.show()
    print('Exploitability: %s' % (trainer.exploitability(target_policy)))
    plt.loglog(df[columns[0]], df[columns[1]])
    plt.xlabel(columns[0])
    plt.ylabel(columns[1])
    plt.legend()
    plt.show()
    # Linear fit in log-log space: the slope estimates the convergence order.
    model = sm.ols(formula="np.log(exploitability) ~ np.log(iterations)", data=df).fit()
    print(model.params)
if __name__ == "__main__":
main()
| [
"pandas.DataFrame",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.copy",
"numpy.eye",
"numpy.argmax",
"matplotlib.pyplot.legend",
"numpy.zeros",
"statsmodels.formula.api.ols",
"numpy.array",
"numpy.random.random",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matp... | [((2435, 2464), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (2447, 2464), True, 'import pandas as pd\n'), ((2725, 2747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['columns[0]'], {}), '(columns[0])\n', (2735, 2747), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2779), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""target policy"""'], {}), "('target policy')\n", (2762, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2796), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2794, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2801, 2811), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2809, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2937), 'matplotlib.pyplot.loglog', 'plt.loglog', (['df[columns[0]]', 'df[columns[1]]'], {}), '(df[columns[0]], df[columns[1]])\n', (2905, 2937), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2964), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['columns[0]'], {}), '(columns[0])\n', (2952, 2964), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['columns[1]'], {}), '(columns[1])\n', (2979, 2991), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3008), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3006, 3008), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3021, 3023), True, 'import matplotlib.pyplot as plt\n'), ((409, 455), 'numpy.array', 'np.array', (['[[0, -1, 1], [1, 0, -1], [-1, 1, 0]]'], {}), '([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])\n', (417, 455), True, 'import numpy as np\n'), ((535, 561), 'numpy.zeros', 'np.zeros', (['self.NUM_ACTIONS'], {}), '(self.NUM_ACTIONS)\n', (543, 561), True, 'import numpy as np\n'), ((589, 615), 'numpy.zeros', 'np.zeros', (['self.NUM_ACTIONS'], {}), '(self.NUM_ACTIONS)\n', (597, 615), True, 'import numpy as np\n'), ((721, 737), 'numpy.sum', 'np.sum', 
(['strategy'], {}), '(strategy)\n', (727, 737), True, 'import numpy as np\n'), ((1286, 1330), 'numpy.dot', 'np.dot', (['self.actionUtility', 'self.oppStrategy'], {}), '(self.actionUtility, self.oppStrategy)\n', (1292, 1330), True, 'import numpy as np\n'), ((2678, 2720), 'matplotlib.pyplot.loglog', 'plt.loglog', (['df[columns[0]]', 'df[c]'], {'label': 'c'}), '(df[columns[0]], df[c], label=c)\n', (2688, 2720), True, 'import matplotlib.pyplot as plt\n'), ((845, 894), 'numpy.repeat', 'np.repeat', (['(1 / self.NUM_ACTIONS)', 'self.NUM_ACTIONS'], {}), '(1 / self.NUM_ACTIONS, self.NUM_ACTIONS)\n', (854, 894), True, 'import numpy as np\n'), ((1085, 1110), 'numpy.copy', 'np.copy', (['self.strategySum'], {}), '(self.strategySum)\n', (1092, 1110), True, 'import numpy as np\n'), ((1174, 1198), 'numpy.eye', 'np.eye', (['self.NUM_ACTIONS'], {}), '(self.NUM_ACTIONS)\n', (1180, 1198), True, 'import numpy as np\n'), ((1199, 1217), 'numpy.argmax', 'np.argmax', (['utility'], {}), '(utility)\n', (1208, 1217), True, 'import numpy as np\n'), ((1624, 1668), 'numpy.dot', 'np.dot', (['self.actionUtility', 'self.oppStrategy'], {}), '(self.actionUtility, self.oppStrategy)\n', (1630, 1668), True, 'import numpy as np\n'), ((1689, 1715), 'numpy.dot', 'np.dot', (['strategy', 'Q_values'], {}), '(strategy, Q_values)\n', (1695, 1715), True, 'import numpy as np\n'), ((3041, 3111), 'statsmodels.formula.api.ols', 'sm.ols', ([], {'formula': '"""np.log(exploitability) ~ np.log(iterations)"""', 'data': 'df'}), "(formula='np.log(exploitability) ~ np.log(iterations)', data=df)\n", (3047, 3111), True, 'import statsmodels.formula.api as sm\n'), ((1809, 1827), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1825, 1827), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# NOTE: hard-coded local working directory; the .xls must live there.
os.chdir('C:/Users/DELL/Desktop/Quant_macro/Pset2')
df = pd.read_excel('download_all.xls')

# Row 4 holds the quarterly date labels.
# FIX: DataFrame.ix was removed in pandas 1.0 -- use .loc instead.
date = df.loc[4, 'Unnamed: 1':]
# FIX: np.float was removed in NumPy 1.24 -- use the builtin float.
dates = [float(da) for da in date[1:]]

# Rebuild a clean quarterly axis: a fresh value is taken once every four
# columns; the other quarters are the previous value plus 0.25.
dat = []
dat.append(dates[0])
for idx, dats in enumerate(dates):
    mod = (idx + 1) % 4
    if mod == 1:
        data = dats
    else:
        data = dat[-1] + 0.25
    dat.append(data)
del dat[0]

# Keep only the data rows and index them by the series-name column.
df = df.loc[6:, 'Unnamed: 1':]
df = df.set_index('Unnamed: 1')
variables = ['Compensation of employees', ' National income', "Proprietors' income with IVA and CCAdj",
             'Rental income of persons with CCAdj', 'Corporate profits with IVA and CCAdj',
             'Net interest and miscellaneous payments', 'Taxes on production and imports',
             'Less: Subsidies2']
df = df.loc[variables, :]
df = df.T
df_np = np.array(df)  # NOTE: unused below; kept for parity with the original
New_names = ['CE', 'Y', 'PI', 'RI', 'CP', 'NI', 'T', 'S']
data_l = {}
for idx, var in enumerate(variables):
    col = np.array(list(df[var]))
    data_l[New_names[idx]] = col

# Split proprietors' income (PI) into capital (PI_k) and labour (PI_h)
# parts using the capital share theta implied by the other components.
theta = (data_l['RI'] + data_l['CP'] + data_l['NI'] + data_l['T'] - data_l['S']) / (data_l['Y'] - data_l['PI'])
data_l['PI_k'] = theta * data_l['PI']
data_l['PI_h'] = (1 - theta) * data_l['PI']
del data_l['PI']
# Labour share = (employee compensation + labour part of PI) / national income.
data_l['LS'] = (data_l['CE'] + data_l['PI_h']) / data_l['Y']
data_l['date'] = dat

f, ax = plt.subplots(1, 1)
f.set_figheight(5)
f.set_figwidth(10)
ax.plot(data_l['date'], data_l['LS'])
ax.set_xlabel('date')
ax.set_ylabel('labor share')
ax.set_title('Labor share in the US from 1948-2017')
| [
"numpy.float",
"pandas.read_excel",
"numpy.array",
"matplotlib.pyplot.subplots",
"os.chdir"
] | [((85, 136), 'os.chdir', 'os.chdir', (['"""C:/Users/DELL/Desktop/Quant_macro/Pset2"""'], {}), "('C:/Users/DELL/Desktop/Quant_macro/Pset2')\n", (93, 136), False, 'import os\n'), ((143, 176), 'pandas.read_excel', 'pd.read_excel', (['"""download_all.xls"""'], {}), "('download_all.xls')\n", (156, 176), True, 'import pandas as pd\n'), ((901, 913), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (909, 913), True, 'import numpy as np\n'), ((1375, 1393), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1387, 1393), True, 'import matplotlib.pyplot as plt\n'), ((218, 230), 'numpy.float', 'np.float', (['da'], {}), '(da)\n', (226, 230), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: j-lazo
"""
import cv2
import os
import random
import numpy as np
from scipy.ndimage import zoom
def clipped_zoom(img, zoom_factor, **kwargs):
    """Zoom *img* about its centre while keeping the output the same size.

    :param img: 2-D (or 2-D + channels) array.
    :param zoom_factor: < 1 zooms out (zero-padded), > 1 zooms in
                        (central crop enlarged), == 1 returns *img*.
    :param kwargs: forwarded to ``scipy.ndimage.zoom``.
    :return: the zoomed array.
    """
    h, w = img.shape[:2]
    # Zoom only the two spatial axes; any trailing (e.g. colour) axes get
    # a factor of 1.
    zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)

    if zoom_factor < 1:
        # Zooming out: shrink the image, then paste it centred onto a
        # zero background of the original size.
        zh, zw = int(np.round(h * zoom_factor)), int(np.round(w * zoom_factor))
        top, left = (h - zh) // 2, (w - zw) // 2
        out = np.zeros_like(img)
        out[top:top + zh, left:left + zw] = zoom(img, zoom_tuple, **kwargs)
    elif zoom_factor > 1:
        # Zooming in: enlarge only the central crop of the input.
        zh, zw = int(np.round(h / zoom_factor)), int(np.round(w / zoom_factor))
        top, left = (h - zh) // 2, (w - zw) // 2
        out = zoom(img[top:top + zh, left:left + zw], zoom_tuple, **kwargs)
        # NOTE: `out` may be a pixel or two larger than `img` due to
        # rounding; the original left the trimming step disabled.
    else:
        # zoom_factor == 1: nothing to do.
        out = img
    return out
def adjust_brightness(image, gamma=1.0):
    """Apply gamma correction to *image* via a 256-entry lookup table.

    :param image: uint8 image.
    :param gamma: > 1 brightens, < 1 darkens, 1.0 is the identity.
    :return: the gamma-corrected image.
    """
    inv_gamma = 1.0 / gamma
    lut = np.array(
        [((level / 255.0) ** inv_gamma) * 255 for level in np.arange(0, 256)]
    ).astype("uint8")
    return cv2.LUT(image, lut)
def augment_data(files_path):
    """Augment an image/mask dataset in place on disk.

    For each image in ``files_path + 'image/'`` (paired by filename with a
    mask in ``files_path + 'label/'``) this writes rotated (90/180/270),
    flipped, brightness-shifted and zoomed variants; geometric transforms
    are applied identically to image and mask so the pairs stay aligned.
    """
    files = os.listdir(files_path + 'image/')
    masks = os.listdir(files_path + 'label/')
    for i, element in enumerate(files[:]):
        if element not in masks:
            print(element, 'has no pair')
        print(1.0*i/len(files), element)
        img = cv2.imread("".join([files_path, 'image/', element]))
        mask = cv2.imread("".join([files_path, 'label/', element]))
        rows, cols, channels = img.shape
        # define the rotation matrixes
        rot1 = cv2.getRotationMatrix2D((cols/2, rows/2), 90, 1)
        rot2 = cv2.getRotationMatrix2D((cols/2, rows/2), 180, 1)
        rot3 = cv2.getRotationMatrix2D((cols/2, rows/2), 270, 1)
        # rotate the images
        im_rot1 = cv2.warpAffine(img, rot1, (cols, rows))
        im_rot2 = cv2.warpAffine(img, rot2, (cols, rows))
        im_rot3 = cv2.warpAffine(img, rot3, (cols, rows))
        #rotate the masks
        mask_rot1 = cv2.warpAffine(mask, rot1, (cols, rows))
        mask_rot2 = cv2.warpAffine(mask, rot2, (cols, rows))
        mask_rot3 = cv2.warpAffine(mask, rot3, (cols, rows))
        # flip images
        horizontal_img = cv2.flip(img, 0)
        vertical_img = cv2.flip(img, 1)
        #flip masks
        horizontal_mask = cv2.flip(mask, 0)
        vertical_mask = cv2.flip(mask, 1)
        # save the images (suffixes _1.._3: rotations, _4/_5: flips)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_1', '.png']), im_rot1)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_2', '.png']), im_rot2)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_3', '.png']), im_rot3)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_4', '.png']), horizontal_img)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_5', '.png']), vertical_img)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_1', '.png']), mask_rot1)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_2', '.png']), mask_rot2)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_3', '.png']), mask_rot3)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_4', '.png']), horizontal_mask)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_5', '.png']), vertical_mask)
        # change brightness
        list_of_images = [img, im_rot1, im_rot2, im_rot3, horizontal_img, vertical_img]
        list_of_masks = [mask, mask_rot1, mask_rot2, mask_rot3, horizontal_mask, vertical_mask]
        gammas = [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5]
        # _6.._9: brightness-shift four random variants; the mask is copied
        # unchanged since brightness does not affect geometry.
        # NOTE: this loop variable shadows the outer enumerate index `i`.
        for i in range(4):
            index = random.randint(0,len(list_of_images)-1)
            img_choice = list_of_images[index]
            mask_choice = list_of_masks[index]
            image_brg = adjust_brightness(img_choice, random.choice(gammas))
            cv2.imwrite("".join([files_path, 'image/', element[:-4], '_', str(i+6), '.png']), image_brg)
            cv2.imwrite("".join([files_path, 'label/', element[:-4], '_', str(i+6), '.png']), mask_choice)
        # _10: zoom a random variant in; _11: zoom one out (image and mask
        # receive the same zoom so they stay registered).
        index_2 = random.randint(0,len(list_of_images)-1)
        img_choice_2 = list_of_images[index_2]
        mask_choice_2 = list_of_masks[index_2]
        zoom_in_img = clipped_zoom(img_choice_2, 1.2)
        zoom_in_mask = clipped_zoom(mask_choice_2, 1.2)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_10_.png']), zoom_in_img)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_10_.png']), zoom_in_mask)
        index_3 = random.randint(0,len(list_of_images)-1)
        img_choice_3 = list_of_images[index_3]
        mask_choice_3 = list_of_masks[index_3]
        zoom_out_img = clipped_zoom(img_choice_3, 0.8)
        zoom_out_mask = clipped_zoom(mask_choice_3, 0.8)
        cv2.imwrite("".join([files_path, 'image/', element[:-4], '_11_.png']), zoom_out_img)
        cv2.imwrite("".join([files_path, 'label/', element[:-4], '_11_.png']), zoom_out_mask)
def main():
    """Run augmentation on the hard-coded phantom dataset directory."""
    path_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
                     'lumen_data/test/phantom_001_pt2/augmented_data/'
    augment_data(path_directory)
if __name__ == "__main__":
main()
| [
"os.listdir",
"numpy.zeros_like",
"random.choice",
"scipy.ndimage.zoom",
"cv2.warpAffine",
"cv2.LUT",
"numpy.arange",
"cv2.flip",
"numpy.round",
"cv2.getRotationMatrix2D"
] | [((1979, 2000), 'cv2.LUT', 'cv2.LUT', (['image', 'table'], {}), '(image, table)\n', (1986, 2000), False, 'import cv2\n'), ((2050, 2083), 'os.listdir', 'os.listdir', (["(files_path + 'image/')"], {}), "(files_path + 'image/')\n", (2060, 2083), False, 'import os\n'), ((2096, 2129), 'os.listdir', 'os.listdir', (["(files_path + 'label/')"], {}), "(files_path + 'label/')\n", (2106, 2129), False, 'import os\n'), ((917, 935), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (930, 935), True, 'import numpy as np\n'), ((976, 1007), 'scipy.ndimage.zoom', 'zoom', (['img', 'zoom_tuple'], {}), '(img, zoom_tuple, **kwargs)\n', (980, 1007), False, 'from scipy.ndimage import zoom\n'), ((2525, 2577), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(90)', '(1)'], {}), '((cols / 2, rows / 2), 90, 1)\n', (2548, 2577), False, 'import cv2\n'), ((2589, 2642), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(180)', '(1)'], {}), '((cols / 2, rows / 2), 180, 1)\n', (2612, 2642), False, 'import cv2\n'), ((2654, 2707), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(270)', '(1)'], {}), '((cols / 2, rows / 2), 270, 1)\n', (2677, 2707), False, 'import cv2\n'), ((2750, 2789), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rot1', '(cols, rows)'], {}), '(img, rot1, (cols, rows))\n', (2764, 2789), False, 'import cv2\n'), ((2808, 2847), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rot2', '(cols, rows)'], {}), '(img, rot2, (cols, rows))\n', (2822, 2847), False, 'import cv2\n'), ((2866, 2905), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rot3', '(cols, rows)'], {}), '(img, rot3, (cols, rows))\n', (2880, 2905), False, 'import cv2\n'), ((2953, 2993), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'rot1', '(cols, rows)'], {}), '(mask, rot1, (cols, rows))\n', (2967, 2993), False, 'import cv2\n'), ((3014, 3054), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'rot2', '(cols, rows)'], {}), 
'(mask, rot2, (cols, rows))\n', (3028, 3054), False, 'import cv2\n'), ((3075, 3115), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'rot3', '(cols, rows)'], {}), '(mask, rot3, (cols, rows))\n', (3089, 3115), False, 'import cv2\n'), ((3173, 3189), 'cv2.flip', 'cv2.flip', (['img', '(0)'], {}), '(img, 0)\n', (3181, 3189), False, 'import cv2\n'), ((3213, 3229), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (3221, 3229), False, 'import cv2\n'), ((3276, 3293), 'cv2.flip', 'cv2.flip', (['mask', '(0)'], {}), '(mask, 0)\n', (3284, 3293), False, 'import cv2\n'), ((3318, 3335), 'cv2.flip', 'cv2.flip', (['mask', '(1)'], {}), '(mask, 1)\n', (3326, 3335), False, 'import cv2\n'), ((751, 776), 'numpy.round', 'np.round', (['(h * zoom_factor)'], {}), '(h * zoom_factor)\n', (759, 776), True, 'import numpy as np\n'), ((795, 820), 'numpy.round', 'np.round', (['(w * zoom_factor)'], {}), '(w * zoom_factor)\n', (803, 820), True, 'import numpy as np\n'), ((1283, 1344), 'scipy.ndimage.zoom', 'zoom', (['img[top:top + zh, left:left + zw]', 'zoom_tuple'], {}), '(img[top:top + zh, left:left + zw], zoom_tuple, **kwargs)\n', (1287, 1344), False, 'from scipy.ndimage import zoom\n'), ((1140, 1165), 'numpy.round', 'np.round', (['(h / zoom_factor)'], {}), '(h / zoom_factor)\n', (1148, 1165), True, 'import numpy as np\n'), ((1184, 1209), 'numpy.round', 'np.round', (['(w / zoom_factor)'], {}), '(w / zoom_factor)\n', (1192, 1209), True, 'import numpy as np\n'), ((4828, 4849), 'random.choice', 'random.choice', (['gammas'], {}), '(gammas)\n', (4841, 4849), False, 'import random\n'), ((1927, 1944), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1936, 1944), True, 'import numpy as np\n')] |
import numpy as np
from utils import visualise
from read_mnist import load_data
import random
# Load the dataset splits from the local read_mnist helper; note the order:
# labels first, then the feature matrices, for both train and test.
y_train,x_train,y_test,x_test=load_data()
print("Train data label dim: {}".format(y_train.shape))
print("Train data features dim: {}".format(x_train.shape))
print("Test data label dim: {}".format(y_test.shape))
print("Test data features dim:{}".format(x_test.shape))
# uncomment to visualise dataset
# visualise(x_train)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied elementwise to ``x``."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def sigmoid_grad(x):
    """Gradient term used by NN.backward().

    NOTE(review): the textbook sigmoid derivative is the elementwise
    s * (1 - s); this transpose-matmul form is what backward()'s np.dot
    chain is shaped around — confirm before "fixing" it in isolation.
    """
    s = sigmoid(x)
    return s.T @ (1 - s)
def softmax(x):
    """Row-wise softmax, written back into ``x`` in place (and returned).

    Each row is shifted by its maximum before exponentiation for
    numerical stability.
    """
    for i in range(x.shape[0]):
        shifted = x[i] - np.max(x[i])
        exps = np.exp(shifted)
        x[i, :] = exps / np.sum(exps)
    return x
def cross_entropy(X, y):
    """Mean cross-entropy loss.

    X is the output from the fully connected layer (num_examples x num_classes).
    y holds integer class labels (not one-hot); use y.argmax(axis=1) first if
    you start from one-hot vectors.
    Side effect: softmax() normalises X in place.
    """
    num_examples = y.shape[0]
    probs = softmax(X)
    picked = probs[np.arange(num_examples), y]
    return np.sum(-np.log(picked)) / num_examples
# https://deepnotes.io/softmax-crossentropy
def delta_cross_entropy(X, y):
    """Gradient of the mean cross-entropy loss w.r.t. the raw scores.

    X is the output from the fully connected layer (num_examples x num_classes).
    y holds integer class labels (not one-hot).
    Side effect: softmax() normalises X in place, and the label positions of
    that array are decremented by 1 before averaging.
    """
    num_examples = y.shape[0]
    grad = softmax(X)
    grad[np.arange(num_examples), y] -= 1
    return grad / num_examples
class NN(object):
    """Two-layer fully connected net: sigmoid hidden layer, softmax output.

    NOTE(review): ``hidden_layers``, ``hidden_activation`` and
    ``output_activation`` are stored but never read — the architecture is
    hard-wired to one sigmoid hidden layer and a 10-way softmax output.
    """
    def __init__(self, hidden_layers, hidden_neurons, hidden_activation, output_activation):
        self.hidden_layers = hidden_layers
        self.hidden_neurons = hidden_neurons
        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        # SGD learning rate.
        self.step_size=0.05
        # Small random init. NOTE(review): the input width comes from the
        # module-level x_train, so the class only works after load_data().
        self.W1 = 0.01* np.random.randn(x_train.shape[1],self.hidden_neurons)
        self.b1 = np.zeros((1,self.hidden_neurons))
        self.W2 = 0.01* np.random.randn(self.hidden_neurons,10)
        self.b2 = np.zeros((1,10))
    def forward(self,x_train):
        """Run a forward pass and return (loss, score2, y, z, score1).

        NOTE(review): the loss is computed against the module-level
        y_train, not labels passed in; also cross_entropy() applies
        softmax to score2 in place, so the returned score2 already holds
        probabilities, not raw scores.
        """
        # print(self.W.shape,x_train.shape)
        score1=np.dot(x_train, self.W1) + self.b1
        # print("score1 dims: ", score1.shape)
        y = (sigmoid(score1))
        # print("y dims: ",y.shape)
        score2 = np.dot(y, self.W2) + self.b2
        # print("score2 dims: ", score2.shape)
        z = softmax(score2)
        # print("z (softmax) dims: ",z.shape)
        loss=cross_entropy(score2,y_train)
        # print("Loss",loss)
        return(loss,score2,y,z,score1)
    def backward(self):
        "J: cross entropy loss"
        # NOTE(review): this method reads the module-level score2, y,
        # score1 and y_train produced by the training loop below — the
        # activations are not cached on self.
        djdscore2=delta_cross_entropy(score2,y_train)
        # updating w2,b2
        dW2 = np.dot(y.T, djdscore2)
        db2 = np.sum(djdscore2, axis=0)
        self.W2 += -self.step_size * dW2
        self.b2 += -self.step_size * db2
        # print("djdscore2 dims: ",djdscore2.shape)
        # print("w2 dims: ",self.W2.shape)
        # print("dw2 dims: ",dW2.shape)
        # print("db2 dims: ",db2.shape)
        # updating w1,b1
        # NOTE(review): dW1 uses the already-updated self.W2 (W2 was
        # modified a few lines above) — confirm this ordering is intended.
        dW1 = np.dot(x_train.T, np.dot(np.dot(djdscore2,self.W2.T),sigmoid_grad(score1)))
        db1 = np.sum(np.dot(np.dot(djdscore2,self.W2.T),sigmoid_grad(score1)),axis=0)
        # NOTE(review): ndarray.reshape returns a new array; this result is
        # discarded, so the line has no effect (likely meant
        # db1 = db1.reshape(1, 256)).
        db1.reshape(1,256)
        self.W1 += -self.step_size * dW1
        self.b1 += -self.step_size * db1
        # print("dW1:", dW1, "db1", db1, "dW2:", dW2, "db2", db2)
        # print("sigmoid_grad score1 dims: ",sigmoid_grad(score1).shape)
        # print("w1 dims: ",self.W1.shape)
        # print("b1 dims: ",self.b1.shape)
        # print("dw1 dims: ",dW1.shape)
        # print("db1 dims: ",db1.shape)
# def preprocess(X):
#     # zero center the data
#     X -= np.mean(X, axis = 0)
#     return X
#
# # preprocessing the image
# x_train=preprocess(x_train)
# x_test=preprocess(x_test)
# Build the network and run full-batch gradient descent for a fixed number
# of epochs. NOTE(review): forward() and backward() communicate through the
# module-level names unpacked here (score2, y, score1 are read inside
# model.backward()).
model=NN(5,256,"sigmoid","softmax")
epochs=10
for epoch in range(epochs):
    loss,score2,y,z,score1 = model.forward(x_train)
    print("Loss: {} in {}/{}".format(loss,epoch,epochs))
    model.backward()
print(z.shape)
# Predicted class per example: argmax over the last epoch's softmax outputs.
preds= np.argmax(z, axis=1)
print(preds.shape)
# print('training accuracy: %.2f' % (np.mean(preds == y_train)))
print(preds)
| [
"numpy.sum",
"numpy.argmax",
"numpy.random.randn",
"numpy.zeros",
"numpy.max",
"read_mnist.load_data",
"numpy.exp",
"numpy.dot"
] | [((125, 136), 'read_mnist.load_data', 'load_data', ([], {}), '()\n', (134, 136), False, 'from read_mnist import load_data\n'), ((4163, 4183), 'numpy.argmax', 'np.argmax', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (4172, 4183), True, 'import numpy as np\n'), ((587, 596), 'numpy.max', 'np.max', (['f'], {}), '(f)\n', (593, 596), True, 'import numpy as np\n'), ((1084, 1106), 'numpy.sum', 'np.sum', (['log_likelihood'], {}), '(log_likelihood)\n', (1090, 1106), True, 'import numpy as np\n'), ((2001, 2035), 'numpy.zeros', 'np.zeros', (['(1, self.hidden_neurons)'], {}), '((1, self.hidden_neurons))\n', (2009, 2035), True, 'import numpy as np\n'), ((2117, 2134), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (2125, 2134), True, 'import numpy as np\n'), ((2809, 2831), 'numpy.dot', 'np.dot', (['y.T', 'djdscore2'], {}), '(y.T, djdscore2)\n', (2815, 2831), True, 'import numpy as np\n'), ((2846, 2871), 'numpy.sum', 'np.sum', (['djdscore2'], {'axis': '(0)'}), '(djdscore2, axis=0)\n', (2852, 2871), True, 'import numpy as np\n'), ((451, 461), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (457, 461), True, 'import numpy as np\n'), ((635, 644), 'numpy.exp', 'np.exp', (['f'], {}), '(f)\n', (641, 644), True, 'import numpy as np\n'), ((1929, 1983), 'numpy.random.randn', 'np.random.randn', (['x_train.shape[1]', 'self.hidden_neurons'], {}), '(x_train.shape[1], self.hidden_neurons)\n', (1944, 1983), True, 'import numpy as np\n'), ((2059, 2099), 'numpy.random.randn', 'np.random.randn', (['self.hidden_neurons', '(10)'], {}), '(self.hidden_neurons, 10)\n', (2074, 2099), True, 'import numpy as np\n'), ((2226, 2250), 'numpy.dot', 'np.dot', (['x_train', 'self.W1'], {}), '(x_train, self.W1)\n', (2232, 2250), True, 'import numpy as np\n'), ((2393, 2411), 'numpy.dot', 'np.dot', (['y', 'self.W2'], {}), '(y, self.W2)\n', (2399, 2411), True, 'import numpy as np\n'), ((654, 663), 'numpy.exp', 'np.exp', (['f'], {}), '(f)\n', (660, 663), True, 'import numpy as np\n'), ((3196, 3224), 
'numpy.dot', 'np.dot', (['djdscore2', 'self.W2.T'], {}), '(djdscore2, self.W2.T)\n', (3202, 3224), True, 'import numpy as np\n'), ((3275, 3303), 'numpy.dot', 'np.dot', (['djdscore2', 'self.W2.T'], {}), '(djdscore2, self.W2.T)\n', (3281, 3303), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from sed.binning import _hist_from_bin_range
from sed.binning import _hist_from_bins
# Random samples of 100 points in 1, 2 and 3 dimensions.
sample1d = np.random.randn(int(1e2), 1)
sample2d = np.random.randn(int(1e2), 2)
sample3d = np.random.randn(int(1e2), 3)
# Per-axis bin counts for each dimensionality.
bins1d = (95,)
bins2d = (95, 34)
bins3d = (95, 34, 27)
# Per-axis [low, high] binning ranges.
ranges1d = np.array([[1, 2]])
ranges2d = np.array([[1, 2], [1, 2]])
ranges3d = np.array([[1, 2], [1, 2], [1, 2]])
# Explicit bin-edge arrays equivalent to the (bins, ranges) specs above.
arrays1d = np.linspace(*ranges1d[0], bins1d[0])
arrays2d = [np.linspace(*ranges2d[i], bins2d[i]) for i in range(2)]
arrays3d = [np.linspace(*ranges3d[i], bins3d[i]) for i in range(3)]
@pytest.mark.parametrize(
    "_samples",
    [sample1d, sample2d, sample3d],
    ids=lambda x: f"samples:{x.shape}",
)
@pytest.mark.parametrize(
    "_bins",
    [bins1d, bins2d, bins3d],
    ids=lambda x: f"bins:{len(x)}",
)
def test_hist_Nd_error_is_raised(_samples, _bins):
    """A sample/bin dimensionality mismatch must raise ValueError."""
    # Matching dimensionalities are covered elsewhere; skip them here.
    if _samples.shape[1] == len(_bins):
        pytest.skip("Not of interest")
    with pytest.raises(ValueError):
        _hist_from_bin_range(_samples, _bins, ranges1d)
def test_hist_Nd_proper_results():
    """The range-based histogram must match numpy.histogramdd exactly."""
    ours = _hist_from_bin_range(sample3d, bins3d, ranges3d)
    reference, _ = np.histogramdd(sample3d, bins3d, ranges3d)
    np.testing.assert_allclose(ours, reference)
def test_from_bins_equals_from_bin_range():
    """Explicit bin arrays and (nbins, range) specs give the same histogram."""
    from_range = _hist_from_bin_range(sample3d, bins3d, ranges3d)
    bin_sizes = tuple(b.size for b in arrays3d)
    from_bins = _hist_from_bins(sample3d, arrays3d, bin_sizes)
    np.testing.assert_allclose(from_range, from_bins)
| [
"numpy.testing.assert_allclose",
"numpy.histogramdd",
"pytest.skip",
"pytest.raises",
"numpy.array",
"numpy.linspace",
"pytest.mark.parametrize",
"sed.binning._hist_from_bin_range"
] | [((306, 324), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (314, 324), True, 'import numpy as np\n'), ((336, 362), 'numpy.array', 'np.array', (['[[1, 2], [1, 2]]'], {}), '([[1, 2], [1, 2]])\n', (344, 362), True, 'import numpy as np\n'), ((374, 408), 'numpy.array', 'np.array', (['[[1, 2], [1, 2], [1, 2]]'], {}), '([[1, 2], [1, 2], [1, 2]])\n', (382, 408), True, 'import numpy as np\n'), ((420, 456), 'numpy.linspace', 'np.linspace', (['*ranges1d[0]', 'bins1d[0]'], {}), '(*ranges1d[0], bins1d[0])\n', (431, 456), True, 'import numpy as np\n'), ((596, 703), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""_samples"""', '[sample1d, sample2d, sample3d]'], {'ids': "(lambda x: f'samples:{x.shape}')"}), "('_samples', [sample1d, sample2d, sample3d], ids=lambda\n x: f'samples:{x.shape}')\n", (619, 703), False, 'import pytest\n'), ((469, 505), 'numpy.linspace', 'np.linspace', (['*ranges2d[i]', 'bins2d[i]'], {}), '(*ranges2d[i], bins2d[i])\n', (480, 505), True, 'import numpy as np\n'), ((537, 573), 'numpy.linspace', 'np.linspace', (['*ranges3d[i]', 'bins3d[i]'], {}), '(*ranges3d[i], bins3d[i])\n', (548, 573), True, 'import numpy as np\n'), ((1098, 1146), 'sed.binning._hist_from_bin_range', '_hist_from_bin_range', (['sample3d', 'bins3d', 'ranges3d'], {}), '(sample3d, bins3d, ranges3d)\n', (1118, 1146), False, 'from sed.binning import _hist_from_bin_range\n'), ((1159, 1201), 'numpy.histogramdd', 'np.histogramdd', (['sample3d', 'bins3d', 'ranges3d'], {}), '(sample3d, bins3d, ranges3d)\n', (1173, 1201), True, 'import numpy as np\n'), ((1206, 1240), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['H1', 'H2'], {}), '(H1, H2)\n', (1232, 1240), True, 'import numpy as np\n'), ((1296, 1344), 'sed.binning._hist_from_bin_range', '_hist_from_bin_range', (['sample3d', 'bins3d', 'ranges3d'], {}), '(sample3d, bins3d, ranges3d)\n', (1316, 1344), False, 'from sed.binning import _hist_from_bin_range\n'), ((1427, 1461), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['H1', 'H2'], {}), '(H1, H2)\n', (1453, 1461), True, 'import numpy as np\n'), ((882, 907), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (895, 907), False, 'import pytest\n'), ((1004, 1051), 'sed.binning._hist_from_bin_range', '_hist_from_bin_range', (['_samples', '_bins', 'ranges1d'], {}), '(_samples, _bins, ranges1d)\n', (1024, 1051), False, 'from sed.binning import _hist_from_bin_range\n'), ((965, 995), 'pytest.skip', 'pytest.skip', (['"""Not of interest"""'], {}), "('Not of interest')\n", (976, 995), False, 'import pytest\n')] |
import numpy as np
import struct
from paddle import fluid
def get_data(data_path, place):
    """Parse a packed binary feature/label file into PaddleTensor lists.

    Record layout (native-endian): a 4-byte int whose low 16 bits hold the
    label length and whose high 16 bits hold the sequence length, followed
    by ``label_len`` int32 labels and ``seq_len * 8`` float32 features.

    Args:
        data_path: path to the packed binary file.
        place: Paddle place used to build the LoD tensor (e.g. CPUPlace).

    Returns:
        (inputs, labels): parallel lists of fluid.core.PaddleTensor, one
        entry per record in the file.
    """
    inputs = []
    labels = []
    with open(data_path, 'rb') as in_f:
        while True:
            # 4-byte header; a short read means EOF. (read() returns b'' at
            # end of file, never None — the None check is purely defensive.)
            plen = in_f.read(4)
            if plen is None or len(plen) != 4:
                break
            alllen = struct.unpack('i', plen)[0]
            # Two lengths packed into one int: low 16 bits / high 16 bits.
            label_len = alllen & 0xFFFF
            seq_len = (alllen >> 16) & 0xFFFF
            # Labels: label_len int32 values.
            label = in_f.read(4 * label_len)
            label = np.frombuffer(
                label, dtype=np.int32).reshape([len(label) // 4])
            # Features: seq_len frames of 8 float32 values each.
            feat = in_f.read(4 * seq_len * 8)
            feat = np.frombuffer(
                feat, dtype=np.float32).reshape([len(feat) // 4 // 8, 8])
            # Single LoD level covering the whole sequence.
            lod_feat = [feat.shape[0]]
            minputs = fluid.create_lod_tensor(feat, [lod_feat], place)
            # Wrap features as a FLOAT32 PaddleTensor carrying the LoD info.
            infer_data = fluid.core.PaddleTensor()
            infer_data.lod = minputs.lod()
            infer_data.data = fluid.core.PaddleBuf(np.array(minputs))
            infer_data.shape = minputs.shape()
            infer_data.dtype = fluid.core.PaddleDType.FLOAT32
            # Wrap labels as an INT32 PaddleTensor.
            infer_label = fluid.core.PaddleTensor()
            infer_label.data = fluid.core.PaddleBuf(np.array(label))
            infer_label.shape = label.shape
            infer_label.dtype = fluid.core.PaddleDType.INT32
            inputs.append(infer_data)
            labels.append(infer_label)
    return inputs, labels
def get_data_with_ptq_warmup(data_path, place, warmup_batch_size=1):
    """Split the parsed dataset into PTQ warm-up samples and the remainder.

    Returns (warmup_inputs, inputs, labels): the first ``warmup_batch_size``
    inputs are reserved for post-training-quantization warm-up; the rest of
    the inputs are returned with their matching labels.
    """
    every_input, every_label = get_data(data_path, place)
    return (every_input[:warmup_batch_size],
            every_input[warmup_batch_size:],
            every_label[warmup_batch_size:])
| [
"numpy.frombuffer",
"struct.unpack",
"paddle.fluid.core.PaddleTensor",
"numpy.array",
"paddle.fluid.create_lod_tensor"
] | [((781, 829), 'paddle.fluid.create_lod_tensor', 'fluid.create_lod_tensor', (['feat', '[lod_feat]', 'place'], {}), '(feat, [lod_feat], place)\n', (804, 829), False, 'from paddle import fluid\n'), ((855, 880), 'paddle.fluid.core.PaddleTensor', 'fluid.core.PaddleTensor', ([], {}), '()\n', (878, 880), False, 'from paddle import fluid\n'), ((1129, 1154), 'paddle.fluid.core.PaddleTensor', 'fluid.core.PaddleTensor', ([], {}), '()\n', (1152, 1154), False, 'from paddle import fluid\n'), ((306, 330), 'struct.unpack', 'struct.unpack', (['"""i"""', 'plen'], {}), "('i', plen)\n", (319, 330), False, 'import struct\n'), ((975, 992), 'numpy.array', 'np.array', (['minputs'], {}), '(minputs)\n', (983, 992), True, 'import numpy as np\n'), ((1207, 1222), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1215, 1222), True, 'import numpy as np\n'), ((485, 521), 'numpy.frombuffer', 'np.frombuffer', (['label'], {'dtype': 'np.int32'}), '(label, dtype=np.int32)\n', (498, 521), True, 'import numpy as np\n'), ((631, 668), 'numpy.frombuffer', 'np.frombuffer', (['feat'], {'dtype': 'np.float32'}), '(feat, dtype=np.float32)\n', (644, 668), True, 'import numpy as np\n')] |
import unittest
import logging
from cnns.nnlib.utils.log_utils import get_logger
from cnns.nnlib.utils.log_utils import set_up_logging
import numpy as np
import torch
from cnns.nnlib.pytorch_layers.conv1D_cuda.conv import Conv1dfftCuda
import conv1D_cuda
class TestPyTorchConv1d(unittest.TestCase):
    """Tests for the Conv1dfftCuda layer and the conv1D_cuda extension."""
    def setUp(self):
        # Route test logging to a dedicated debug-level log file.
        log_file = "pytorch_conv1D_cuda_reuse_map_fft.log"
        is_debug = True
        set_up_logging(log_file=log_file, is_debug=is_debug)
        self.logger = get_logger(name=__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.info("Set up test")
    def test_FunctionForwardNoCompression(self):
        """Forward pass (no compression) should match numpy's 'valid' cross-correlation."""
        x = np.array([[[1., 2., 3.]]])
        y = np.array([[[2., 1.]]])
        b = np.array([0.0])
        # get the expected results from numpy correlate
        expected_result = np.correlate(x[0, 0, :], y[0, 0, :], mode="valid")
        if torch.cuda.is_available() is False:
            self.fail("This test can be executed only on GPU!")
        # NOTE(review): "lltm_cuda" is not a standard torch device string
        # ("cuda"/"cpu"); confirm this is intentional for the custom extension.
        device = torch.device("lltm_cuda")
        print("Conv Cuda")
        conv = Conv1dfftCuda(filter_value=torch.tensor(y, device=device),
                             bias_value=torch.tensor(b, device=device))
        result = conv.forward(input=torch.from_numpy(x))
        np.testing.assert_array_almost_equal(
            result, np.array([[expected_result]]))
    def test_plus_reduce(self):
        """plus_reduce of [1, 2, 3, 4] should equal 10."""
        x = torch.tensor([1, 2, 3, 4])
        result = conv1D_cuda.plus_reduce(x)
        np.testing.assert_almost_equal(result, torch.tensor(10))
| [
"conv1D_cuda.plus_reduce",
"numpy.array",
"torch.cuda.is_available",
"numpy.correlate",
"torch.device",
"cnns.nnlib.utils.log_utils.get_logger",
"cnns.nnlib.utils.log_utils.set_up_logging",
"torch.tensor",
"torch.from_numpy"
] | [((415, 467), 'cnns.nnlib.utils.log_utils.set_up_logging', 'set_up_logging', ([], {'log_file': 'log_file', 'is_debug': 'is_debug'}), '(log_file=log_file, is_debug=is_debug)\n', (429, 467), False, 'from cnns.nnlib.utils.log_utils import set_up_logging\n'), ((490, 515), 'cnns.nnlib.utils.log_utils.get_logger', 'get_logger', ([], {'name': '__name__'}), '(name=__name__)\n', (500, 515), False, 'from cnns.nnlib.utils.log_utils import get_logger\n'), ((662, 691), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0]]]'], {}), '([[[1.0, 2.0, 3.0]]])\n', (670, 691), True, 'import numpy as np\n'), ((701, 725), 'numpy.array', 'np.array', (['[[[2.0, 1.0]]]'], {}), '([[[2.0, 1.0]]])\n', (709, 725), True, 'import numpy as np\n'), ((736, 751), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (744, 751), True, 'import numpy as np\n'), ((834, 884), 'numpy.correlate', 'np.correlate', (['x[0, 0, :]', 'y[0, 0, :]'], {'mode': '"""valid"""'}), "(x[0, 0, :], y[0, 0, :], mode='valid')\n", (846, 884), True, 'import numpy as np\n'), ((1013, 1038), 'torch.device', 'torch.device', (['"""lltm_cuda"""'], {}), "('lltm_cuda')\n", (1025, 1038), False, 'import torch\n'), ((1411, 1437), 'torch.tensor', 'torch.tensor', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1423, 1437), False, 'import torch\n'), ((1455, 1481), 'conv1D_cuda.plus_reduce', 'conv1D_cuda.plus_reduce', (['x'], {}), '(x)\n', (1478, 1481), False, 'import conv1D_cuda\n'), ((896, 921), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (919, 921), False, 'import torch\n'), ((1335, 1364), 'numpy.array', 'np.array', (['[[expected_result]]'], {}), '([[expected_result]])\n', (1343, 1364), True, 'import numpy as np\n'), ((1529, 1545), 'torch.tensor', 'torch.tensor', (['(10)'], {}), '(10)\n', (1541, 1545), False, 'import torch\n'), ((1108, 1138), 'torch.tensor', 'torch.tensor', (['y'], {'device': 'device'}), '(y, device=device)\n', (1120, 1138), False, 'import torch\n'), ((1180, 1210), 'torch.tensor', 'torch.tensor', 
(['b'], {'device': 'device'}), '(b, device=device)\n', (1192, 1210), False, 'import torch\n'), ((1248, 1267), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1264, 1267), False, 'import torch\n')] |
import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_class_weight
import imgaug
# Local imports
from data.dataset import Dataset
# Correctly handle RNG to ensure different training distribution across epochs
# https://github.com/aleju/imgaug/issues/406
# https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562
def worker_init_fn(worker_id):
    """Seed imgaug per DataLoader worker so augmentation varies across epochs.

    See https://github.com/aleju/imgaug/issues/406 and
    https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562.
    """
    # torch gives each worker initial_seed = base_seed + worker_id; back out
    # the base seed so the full entropy is available.
    base = torch.initial_seed() - worker_id
    entropy = np.random.SeedSequence([worker_id, base])
    # Four 32-bit words = 128 bits, which is plenty for imgaug.
    imgaug.seed(entropy.generate_state(4))
# Format input data dict from csv file
def format_data_from_csv(data_dict):
    """Load image lists and phase labels from the csv paths in ``data_dict``.

    Expects keys 'train_imgs'/'val_imgs'/'test_imgs' (csv files with a
    'Files' column), 'train_phases'/'val_phases'/'test_phases' (csv files
    with 'Files' and 'Phase' columns) and the bool 'use_weights'.

    Returns a dict with:
        'partition_train'/'partition_val'/'partition_test': lists of file ids,
        'labels_train'/'labels_val'/'labels_test': {file id -> int phase label},
        'class_weight': 1-D torch tensor of per-class weights.
    """
    formatted_data_dict = {}

    # Image file lists, one 'Files' column per split.
    train_imgs = pd.read_csv(data_dict['train_imgs'], usecols=['Files'], dtype=str)
    val_imgs = pd.read_csv(data_dict['val_imgs'], usecols=['Files'], dtype=str)
    test_imgs = pd.read_csv(data_dict['test_imgs'], usecols=['Files'], dtype=str)

    # Phase labels: 'Files' -> 'Phase' string per split.
    train_phases = pd.read_csv(data_dict['train_phases'], usecols=['Files', 'Phase'], dtype=str)
    val_phases = pd.read_csv(data_dict['val_phases'], usecols=['Files', 'Phase'], dtype=str)
    test_phases = pd.read_csv(data_dict['test_phases'], usecols=['Files', 'Phase'], dtype=str)
    print('train_phases:', train_phases)

    # Encode phase strings to integer class ids. The encoder is fit on the
    # training split only so val/test share the same mapping.
    le = LabelEncoder()
    le.fit(train_phases['Phase'])
    print('le.classes_:', le.classes_)
    train_phases['Phase'] = le.transform(train_phases['Phase'])
    val_phases['Phase'] = le.transform(val_phases['Phase'])
    test_phases['Phase'] = le.transform(test_phases['Phase'])

    # Class weights: balanced reweighting when requested, uniform otherwise.
    if data_dict['use_weights']:
        class_weights = compute_class_weight(
            class_weight='balanced',
            classes=np.unique(train_phases['Phase'].values),
            y=train_phases['Phase'].values)
        class_weights_tensor = torch.from_numpy(class_weights)
    else:
        n_classes = len(np.unique(train_phases['Phase'].values))
        class_weights_tensor = torch.from_numpy(np.ones(n_classes))
    formatted_data_dict['class_weight'] = class_weights_tensor
    print('class_weights_tensor:', class_weights_tensor)

    formatted_data_dict['partition_train'] = train_imgs['Files'].values.tolist()
    formatted_data_dict['partition_val'] = val_imgs['Files'].values.tolist()
    formatted_data_dict['partition_test'] = test_imgs['Files'].values.tolist()
    # Fix: the original set_index('Files', 'Phase') passed 'Phase'
    # positionally into the (now keyword-only) ``drop`` parameter — a
    # TypeError on pandas >= 2.0. Select the 'Phase' column explicitly.
    formatted_data_dict['labels_train'] = train_phases.set_index('Files')['Phase'].to_dict()
    formatted_data_dict['labels_val'] = val_phases.set_index('Files')['Phase'].to_dict()
    formatted_data_dict['labels_test'] = test_phases.set_index('Files')['Phase'].to_dict()
    return formatted_data_dict
# def create_data_generators(data_dict, params, train_transforms, val_transforms, test_transforms):
def create_data_generators(data_dict, params):
    """Build the train/val/test torch DataLoaders from the formatted data.

    Mutates ``params``: caches 'class_weight' from ``data_dict`` and forces
    'shuffle' and 'use_transform' off before the val/test datasets are built.
    Returns (training_generator, validation_generator, test_generator).
    """
    # Propagate the class weighting (possibly None) into the shared params.
    params['class_weight'] = data_dict['class_weight']

    # Training loader: workers get fresh augmentation seeds per epoch.
    train_set = Dataset(data_dict['partition_train'], data_dict['labels_train'], params)
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=params['batch_size'],
        sampler=None,
        shuffle=False,
        num_workers=params['num_workers'],
        pin_memory=True,
        worker_init_fn=worker_init_fn)

    # Validation and test never shuffle or augment.
    params['shuffle'] = False
    params['use_transform'] = False

    val_set = Dataset(data_dict['partition_val'], data_dict['labels_val'], params)
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=params['batch_size'],
        shuffle=False,
        num_workers=params['num_workers'],
        pin_memory=True)

    test_set = Dataset(data_dict['partition_test'], data_dict['labels_test'], params)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=params['batch_size'],
        shuffle=False,
        num_workers=params['num_workers'],
        pin_memory=True)

    return train_loader, val_loader, test_loader
| [
"torch.from_numpy",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"numpy.random.SeedSequence",
"numpy.ones",
"sklearn.preprocessing.LabelEncoder",
"torch.initial_seed",
"numpy.unique",
"data.dataset.Dataset"
] | [((471, 491), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (489, 491), False, 'import torch\n'), ((599, 645), 'numpy.random.SeedSequence', 'np.random.SeedSequence', (['[worker_id, base_seed]'], {}), '([worker_id, base_seed])\n', (621, 645), True, 'import numpy as np\n'), ((978, 1044), 'pandas.read_csv', 'pd.read_csv', (["data_dict['train_imgs']"], {'usecols': "['Files']", 'dtype': 'str'}), "(data_dict['train_imgs'], usecols=['Files'], dtype=str)\n", (989, 1044), True, 'import pandas as pd\n'), ((1060, 1124), 'pandas.read_csv', 'pd.read_csv', (["data_dict['val_imgs']"], {'usecols': "['Files']", 'dtype': 'str'}), "(data_dict['val_imgs'], usecols=['Files'], dtype=str)\n", (1071, 1124), True, 'import pandas as pd\n'), ((1141, 1206), 'pandas.read_csv', 'pd.read_csv', (["data_dict['test_imgs']"], {'usecols': "['Files']", 'dtype': 'str'}), "(data_dict['test_imgs'], usecols=['Files'], dtype=str)\n", (1152, 1206), True, 'import pandas as pd\n'), ((1294, 1371), 'pandas.read_csv', 'pd.read_csv', (["data_dict['train_phases']"], {'usecols': "['Files', 'Phase']", 'dtype': 'str'}), "(data_dict['train_phases'], usecols=['Files', 'Phase'], dtype=str)\n", (1305, 1371), True, 'import pandas as pd\n'), ((1387, 1462), 'pandas.read_csv', 'pd.read_csv', (["data_dict['val_phases']"], {'usecols': "['Files', 'Phase']", 'dtype': 'str'}), "(data_dict['val_phases'], usecols=['Files', 'Phase'], dtype=str)\n", (1398, 1462), True, 'import pandas as pd\n'), ((1479, 1555), 'pandas.read_csv', 'pd.read_csv', (["data_dict['test_phases']"], {'usecols': "['Files', 'Phase']", 'dtype': 'str'}), "(data_dict['test_phases'], usecols=['Files', 'Phase'], dtype=str)\n", (1490, 1555), True, 'import pandas as pd\n'), ((1655, 1669), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1667, 1669), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4479, 4551), 'data.dataset.Dataset', 'Dataset', (["data_dict['partition_train']", "data_dict['labels_train']", 
'params'], {}), "(data_dict['partition_train'], data_dict['labels_train'], params)\n", (4486, 4551), False, 'from data.dataset import Dataset\n'), ((4577, 4771), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['training_set'], {'batch_size': "params['batch_size']", 'sampler': 'None', 'shuffle': '(False)', 'num_workers': "params['num_workers']", 'pin_memory': '(True)', 'worker_init_fn': 'worker_init_fn'}), "(training_set, batch_size=params['batch_size'],\n sampler=None, shuffle=False, num_workers=params['num_workers'],\n pin_memory=True, worker_init_fn=worker_init_fn)\n", (4604, 4771), False, 'import torch\n'), ((5008, 5076), 'data.dataset.Dataset', 'Dataset', (["data_dict['partition_val']", "data_dict['labels_val']", 'params'], {}), "(data_dict['partition_val'], data_dict['labels_val'], params)\n", (5015, 5076), False, 'from data.dataset import Dataset\n'), ((5104, 5251), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validation_set'], {'batch_size': "params['batch_size']", 'shuffle': '(False)', 'num_workers': "params['num_workers']", 'pin_memory': '(True)'}), "(validation_set, batch_size=params['batch_size'],\n shuffle=False, num_workers=params['num_workers'], pin_memory=True)\n", (5131, 5251), False, 'import torch\n'), ((5326, 5396), 'data.dataset.Dataset', 'Dataset', (["data_dict['partition_test']", "data_dict['labels_test']", 'params'], {}), "(data_dict['partition_test'], data_dict['labels_test'], params)\n", (5333, 5396), False, 'from data.dataset import Dataset\n'), ((5418, 5559), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_set'], {'batch_size': "params['batch_size']", 'shuffle': '(False)', 'num_workers': "params['num_workers']", 'pin_memory': '(True)'}), "(test_set, batch_size=params['batch_size'],\n shuffle=False, num_workers=params['num_workers'], pin_memory=True)\n", (5445, 5559), False, 'import torch\n'), ((2819, 2850), 'torch.from_numpy', 'torch.from_numpy', (['class_weights'], {}), 
'(class_weights)\n', (2835, 2850), False, 'import torch\n'), ((3013, 3052), 'numpy.unique', 'np.unique', (["train_phases['Phase'].values"], {}), "(train_phases['Phase'].values)\n", (3022, 3052), True, 'import numpy as np\n'), ((3102, 3120), 'numpy.ones', 'np.ones', (['n_classes'], {}), '(n_classes)\n', (3109, 3120), True, 'import numpy as np\n'), ((2703, 2742), 'numpy.unique', 'np.unique', (["train_phases['Phase'].values"], {}), "(train_phases['Phase'].values)\n", (2712, 2742), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

n_samples = 1500
random_state = 170


def compare_clusterings(data, stem, sklearn_subplot, cpp_subplot):
    """Cluster ``data`` with sklearn KMeans (k=3), dump it for the external
    C++ implementation, and plot both labelings side by side.

    ``stem`` names the dumped data file (``<stem>.txt``) and the label file
    produced by the C++ run that is read back (``<stem>_labels.txt``).
    """
    sk_labels = KMeans(n_clusters=3, random_state=random_state).fit_predict(data)
    np.savetxt(stem + ".txt", data, fmt='%.10f', delimiter=' ')
    plt.subplot(sklearn_subplot)
    plt.scatter(data[:, 0], data[:, 1], c=sk_labels)
    plt.title("Sklearn KMeans")
    cpp_labels = np.loadtxt(stem + "_labels.txt")
    plt.subplot(cpp_subplot)
    plt.scatter(data[:, 0], data[:, 1], c=cpp_labels)
    plt.title("C++11 KMeans")


plt.figure(0)
plt.figure(figsize=(15, 20))

# Isotropic blobs ("incorrect number of clusters" case).
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
compare_clusterings(X, "X", 321, 322)

# Anisotropicly distributed data.
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
compare_clusterings(X_aniso, "X_aniso", 323, 324)

# Blobs with different variances.
X_varied, y_varied = make_blobs(n_samples=n_samples,
                              cluster_std=[1.0, 2.5, 0.5],
                              random_state=random_state)
compare_clusterings(X_varied, "X_varied", 325, 326)

plt.savefig("Results of the comparison", dpi=300)
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"numpy.savetxt",
"sklearn.datasets.make_blobs",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.dot",
"matplotlib.pyplot.savefig"
] | [((172, 230), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'random_state': 'random_state'}), '(n_samples=n_samples, random_state=random_state)\n', (182, 230), False, 'from sklearn.datasets import make_blobs\n'), ((336, 386), 'numpy.savetxt', 'np.savetxt', (['"""X.txt"""', 'X'], {'fmt': '"""%.10f"""', 'delimiter': '""" """'}), "('X.txt', X, fmt='%.10f', delimiter=' ')\n", (346, 386), True, 'import numpy as np\n'), ((388, 401), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (398, 401), True, 'import matplotlib.pyplot as plt\n'), ((402, 430), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 20)'}), '(figsize=(15, 20))\n', (412, 430), True, 'import matplotlib.pyplot as plt\n'), ((432, 448), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (443, 448), True, 'import matplotlib.pyplot as plt\n'), ((449, 488), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y_pred'}), '(X[:, 0], X[:, 1], c=y_pred)\n', (460, 488), True, 'import matplotlib.pyplot as plt\n'), ((489, 516), 'matplotlib.pyplot.title', 'plt.title', (['"""Sklearn KMeans"""'], {}), "('Sklearn KMeans')\n", (498, 516), True, 'import matplotlib.pyplot as plt\n'), ((529, 555), 'numpy.loadtxt', 'np.loadtxt', (['"""X_labels.txt"""'], {}), "('X_labels.txt')\n", (539, 555), True, 'import numpy as np\n'), ((557, 573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (568, 573), True, 'import matplotlib.pyplot as plt\n'), ((574, 615), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y_pred_1'}), '(X[:, 0], X[:, 1], c=y_pred_1)\n', (585, 615), True, 'import matplotlib.pyplot as plt\n'), ((616, 641), 'matplotlib.pyplot.title', 'plt.title', (['"""C++11 KMeans"""'], {}), "('C++11 KMeans')\n", (625, 641), True, 'import matplotlib.pyplot as plt\n'), ((758, 783), 'numpy.dot', 'np.dot', (['X', 'transformation'], {}), '(X, transformation)\n', (764, 783), True, 
'import numpy as np\n'), ((863, 925), 'numpy.savetxt', 'np.savetxt', (['"""X_aniso.txt"""', 'X_aniso'], {'fmt': '"""%.10f"""', 'delimiter': '""" """'}), "('X_aniso.txt', X_aniso, fmt='%.10f', delimiter=' ')\n", (873, 925), True, 'import numpy as np\n'), ((927, 943), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (938, 943), True, 'import matplotlib.pyplot as plt\n'), ((944, 995), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_aniso[:, 0]', 'X_aniso[:, 1]'], {'c': 'y_pred'}), '(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)\n', (955, 995), True, 'import matplotlib.pyplot as plt\n'), ((996, 1023), 'matplotlib.pyplot.title', 'plt.title', (['"""Sklearn KMeans"""'], {}), "('Sklearn KMeans')\n", (1005, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1036, 1068), 'numpy.loadtxt', 'np.loadtxt', (['"""X_aniso_labels.txt"""'], {}), "('X_aniso_labels.txt')\n", (1046, 1068), True, 'import numpy as np\n'), ((1070, 1086), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (1081, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1140), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_aniso[:, 0]', 'X_aniso[:, 1]'], {'c': 'y_pred_1'}), '(X_aniso[:, 0], X_aniso[:, 1], c=y_pred_1)\n', (1098, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1166), 'matplotlib.pyplot.title', 'plt.title', (['"""C++11 KMeans"""'], {}), "('C++11 KMeans')\n", (1150, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1302), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'cluster_std': '[1.0, 2.5, 0.5]', 'random_state': 'random_state'}), '(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=\n random_state)\n', (1220, 1302), False, 'from sklearn.datasets import make_blobs\n'), ((1442, 1506), 'numpy.savetxt', 'np.savetxt', (['"""X_varied.txt"""', 'X_varied'], {'fmt': '"""%.10f"""', 'delimiter': '""" """'}), "('X_varied.txt', X_varied, fmt='%.10f', delimiter=' ')\n", (1452, 1506), True, 'import 
numpy as np\n'), ((1508, 1524), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (1519, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1578), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_varied[:, 0]', 'X_varied[:, 1]'], {'c': 'y_pred'}), '(X_varied[:, 0], X_varied[:, 1], c=y_pred)\n', (1536, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1606), 'matplotlib.pyplot.title', 'plt.title', (['"""Sklearn KMeans"""'], {}), "('Sklearn KMeans')\n", (1588, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1652), 'numpy.loadtxt', 'np.loadtxt', (['"""X_varied_labels.txt"""'], {}), "('X_varied_labels.txt')\n", (1629, 1652), True, 'import numpy as np\n'), ((1654, 1670), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (1665, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1726), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_varied[:, 0]', 'X_varied[:, 1]'], {'c': 'y_pred_1'}), '(X_varied[:, 0], X_varied[:, 1], c=y_pred_1)\n', (1682, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1752), 'matplotlib.pyplot.title', 'plt.title', (['"""C++11 KMeans"""'], {}), "('C++11 KMeans')\n", (1736, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1802), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Results of the comparison"""'], {'dpi': '(300)'}), "('Results of the comparison', dpi=300)\n", (1764, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1813), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1811, 1813), True, 'import matplotlib.pyplot as plt\n'), ((272, 319), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)', 'random_state': 'random_state'}), '(n_clusters=3, random_state=random_state)\n', (278, 319), False, 'from sklearn.cluster import KMeans\n'), ((793, 840), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)', 'random_state': 'random_state'}), '(n_clusters=3, random_state=random_state)\n', (799, 840), False, 'from 
sklearn.cluster import KMeans\n'), ((1371, 1418), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)', 'random_state': 'random_state'}), '(n_clusters=3, random_state=random_state)\n', (1377, 1418), False, 'from sklearn.cluster import KMeans\n')] |
import numpy as np
import os
import pytest
import doctest
from decitala import trees
from decitala.trees import FragmentTree
from decitala.search import get_by_ql_array
from decitala.fragment import GeneralFragment, GreekFoot
from music21 import converter
from music21 import note
# Absolute directory of this test module; corpus paths are resolved relative
# to its parent directory.
here = os.path.abspath(os.path.dirname(__file__))
# Directory of Decitala (Carnatic rhythmic fragment) corpus files.
decitala_path = os.path.dirname(here) + "/corpora/Decitalas"
# Directory of Greek metric-foot corpus files.
greek_path = os.path.dirname(here) + "/corpora/Greek_Metrics/"
# A sample transcription score used by tests (MusicXML).
transcription_example = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_1.xml"
def test_doctests():
    """Run every doctest embedded in the `trees` module."""
    # raise_on_error makes a failing doctest raise immediately; the assert on
    # the returned TestResults is a secondary guard.
    results = doctest.testmod(trees, raise_on_error=True)
    assert results
@pytest.fixture
def tala_ratio_tree():
    """Ratio-representation fragment tree over the Decitala corpus."""
    return FragmentTree.from_frag_type(frag_type="decitala", rep_type="ratio")
@pytest.fixture
def tala_difference_tree():
    """Difference-representation fragment tree over the Decitala corpus."""
    return FragmentTree.from_frag_type(frag_type="decitala", rep_type="difference")
@pytest.fixture
def greek_ratio_tree():
    """Ratio-representation fragment tree over the Greek metric corpus."""
    return FragmentTree.from_frag_type(frag_type="greek_foot", rep_type="ratio")
@pytest.fixture
def greek_difference_tree():
    """Difference-representation fragment tree over the Greek metric corpus."""
    return FragmentTree.from_frag_type(frag_type="greek_foot", rep_type="difference")
@pytest.fixture
def small_fragment_tree():
    """A tiny ratio-representation tree built from three hand-made fragments."""
    fragments = [
        GeneralFragment(data=[0.25, 0.25, 0.25]),
        GeneralFragment(data=[0.5, 1.0, 0.5]),
        GeneralFragment(data=[1.0, 1.0, 2.0, 3.0]),
    ]
    return FragmentTree(data=fragments, rep_type="ratio")
@pytest.fixture
def fake_fragment_dataset():
    """Ten ad-hoc GeneralFragments used as a stand-in corpus."""
    ql_arrays = [
        [3.0, 1.5, 1.5, 0.75, 0.75],
        [1.5, 1.0],
        [0.75, 0.5, 0.75],
        [0.25, 0.25, 0.5],
        [0.75, 0.5],
        [0.5, 1.0, 2.0, 4.0],
        [1.5, 1.0, 1.5],
        [1.0, 1.0, 2.0],
        [1.0, 0.5, 0.5, 0.25, 0.25],
        [0.25, 0.5, 1.0, 2.0],
    ]
    return [GeneralFragment(ql_array) for ql_array in ql_arrays]
@pytest.fixture
def grand_corbeau_examples():
    """
    These are classic examples of contiguous summation.

    Returns:
        A 3-tuple of phrases, each a list of (chord, (onset, offset)) pairs.
    """
    from music21 import chord
    phrase_1 = [
        (chord.Chord(["F#2", "F3"]), (0.0, 0.125)),
        (chord.Chord(["F#2", "F3"]), (0.125, 0.25)),
        (chord.Chord(["E-3", "D4"]), (0.25, 0.375)),
        # Fixed: a missing comma made the original *call* the Chord with the
        # offset tuple (a runtime TypeError), and "A-3>" carried a stray '>'
        # (compare the same chord in phrase_3 below).
        (chord.Chord(["A2", "A-3"]), (0.375, 0.625)),
    ]
    phrase_2 = [
        (chord.Chord(["F#2", "F3"]), (1.625, 1.75)),
        (chord.Chord(["F#2", "F3"]), (1.75, 1.875)),
        (chord.Chord(["F#2", "F3"]), (1.875, 2.0)),
        (chord.Chord(["F#2", "F3"]), (2.0, 2.25)),
    ]
    phrase_3 = [
        (chord.Chord(["F#2", "F3"]), (2.75, 2.875)),
        (chord.Chord(["F#2", "F3"]), (2.875, 3.0)),
        (chord.Chord(["F#2", "F3"]), (3.0, 3.125)),
        (chord.Chord(["E-3", "D4"]), (3.125, 3.25)),
        (chord.Chord(["A2", "A-3"]), (3.25, 3.5)),
    ]
    # Fixed: the fixture previously built the phrases and implicitly returned
    # None, making it unusable from tests.
    return phrase_1, phrase_2, phrase_3
def test_decitala_tree_instantiation(tala_ratio_tree, tala_difference_tree):
    """Check that every file appears in the decitala ratio/difference trees."""
    found_rtree = []
    found_dtree = []
    for this_file in os.listdir(decitala_path):
        parsed = converter.parse(decitala_path + "/" + this_file)
        ql_array = np.array([this_note.quarterLength for this_note in parsed.flat.getElementsByClass(note.Note)])
        # Single-note fragments cannot be meaningfully searched; skip them.
        if len(ql_array) < 2:
            continue
        rsearch = get_by_ql_array(ql_array, ratio_tree=tala_ratio_tree, allowed_modifications=["r"])
        found_rtree.append(rsearch)
        dsearch = get_by_ql_array(ql_array, difference_tree=tala_difference_tree, allowed_modifications=["d"])
        found_dtree.append(dsearch)
    # Fixed: use identity checks (`is not None`) instead of `== None`, and
    # express "no misses" positively with all().
    assert all(x is not None for x in found_rtree)
    assert all(x is not None for x in found_dtree)
def test_greek_metric_tree_instantiation(greek_ratio_tree, greek_difference_tree):
    """Check that every file appears in the greek metric ratio/difference trees."""
    found_rtree = []
    found_dtree = []
    for this_file in os.listdir(greek_path):
        parsed = converter.parse(greek_path + "/" + this_file)
        ql_array = np.array([this_note.quarterLength for this_note in parsed.flat.getElementsByClass(note.Note)])
        # Single-note fragments cannot be meaningfully searched; skip them.
        if len(ql_array) < 2:
            continue
        rsearch = get_by_ql_array(ql_array, ratio_tree=greek_ratio_tree, allowed_modifications=["r"])
        found_rtree.append(rsearch)
        dsearch = get_by_ql_array(ql_array, difference_tree=greek_difference_tree, allowed_modifications=["d"])
        found_dtree.append(dsearch)
    # Fixed: use identity checks (`is not None`) instead of `== None`, and
    # express "no misses" positively with all().
    assert all(x is not None for x in found_rtree)
    assert all(x is not None for x in found_dtree)
def test_fragment_tree_size(small_fragment_tree):
    """The tree built from the three fixture fragments should have size 7."""
    expected_size = 7
    assert small_fragment_tree.size() == expected_size
def test_livre_dorgue_talas(tala_ratio_tree, tala_difference_tree):
    """Three talas should be retrievable from the combined ratio/difference search."""
    # Quarter-length arrays: laya and bhagna are ratio matches, niccanka a
    # difference match.
    laya = [1.0, 0.5, 1.5, 1.5, 1.5, 1.0, 1.5, 0.25, 0.25, 0.25]
    bhagna = [0.125, 0.125, 0.125, 0.125, 0.25, 0.25, 0.375]
    niccanka = [0.75, 1.25, 1.25, 1.75, 1.25, 1.25, 1.25, 0.75]
    searches = {
        key: get_by_ql_array(ql, ratio_tree=tala_ratio_tree, difference_tree=tala_difference_tree)
        for key, ql in (("laya", laya), ("bhagna", bhagna), ("niccanka", niccanka))
    }
    assert searches["laya"][0].name == "106_Laya"
    assert searches["bhagna"][0].name == "116_Bhagna"
    assert searches["niccanka"][1][1] == 0.25
def test_varied_ragavardhana(tala_ratio_tree):
    """A modified Ragavardhana should be found via the 'rsr' modification."""
    varied_ragavardhana = np.array([1.0, 1.0, 1.0, 0.5, 0.75, 0.5])
    searched = get_by_ql_array(varied_ragavardhana, ratio_tree=tala_ratio_tree, allowed_modifications=["r", "sr", "rsr"])
    # Fixed: the original `assert searched[0].name, searched[1] == (...)`
    # only asserted `searched[0].name` (truthy for any non-empty string) and
    # used the comparison as the assert *message*, so the intended check never
    # ran. Split into two real assertions.
    assert searched[0].name == "93_Ragavardhana"
    assert searched[1] == ("rsr", 2.0)
def test_dseg_fragment_tree():
    """A dseg-representation tree search should retrieve the Iamb from its contour path."""
    greek_dseg_tree = FragmentTree.from_frag_type(frag_type="greek_foot", rep_type="dseg")
    iamb_dseg = [0.0, 1.0]  # duration-segment contour of an Iamb (presumably short-long -- confirm)
    path = [0.0] + iamb_dseg  # NOTE(review): search paths appear to be rooted at 0.0 -- verify against FragmentTree
    check = greek_dseg_tree.search_for_path(path)
assert check.name == GreekFoot("Iamb") | [
"decitala.fragment.GeneralFragment",
"decitala.trees.FragmentTree.from_frag_type",
"os.path.dirname",
"decitala.search.get_by_ql_array",
"numpy.array",
"decitala.fragment.GreekFoot",
"music21.chord.Chord",
"decitala.trees.FragmentTree",
"music21.converter.parse",
"os.listdir",
"doctest.testmod"
... | [((307, 332), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (322, 332), False, 'import os\n'), ((350, 371), 'os.path.dirname', 'os.path.dirname', (['here'], {}), '(here)\n', (365, 371), False, 'import os\n'), ((408, 429), 'os.path.dirname', 'os.path.dirname', (['here'], {}), '(here)\n', (423, 429), False, 'import os\n'), ((482, 503), 'os.path.dirname', 'os.path.dirname', (['here'], {}), '(here)\n', (497, 503), False, 'import os\n'), ((581, 624), 'doctest.testmod', 'doctest.testmod', (['trees'], {'raise_on_error': '(True)'}), '(trees, raise_on_error=True)\n', (596, 624), False, 'import doctest\n'), ((679, 746), 'decitala.trees.FragmentTree.from_frag_type', 'FragmentTree.from_frag_type', ([], {'frag_type': '"""decitala"""', 'rep_type': '"""ratio"""'}), "(frag_type='decitala', rep_type='ratio')\n", (706, 746), False, 'from decitala.trees import FragmentTree\n'), ((830, 902), 'decitala.trees.FragmentTree.from_frag_type', 'FragmentTree.from_frag_type', ([], {'frag_type': '"""decitala"""', 'rep_type': '"""difference"""'}), "(frag_type='decitala', rep_type='difference')\n", (857, 902), False, 'from decitala.trees import FragmentTree\n'), ((982, 1051), 'decitala.trees.FragmentTree.from_frag_type', 'FragmentTree.from_frag_type', ([], {'frag_type': '"""greek_foot"""', 'rep_type': '"""ratio"""'}), "(frag_type='greek_foot', rep_type='ratio')\n", (1009, 1051), False, 'from decitala.trees import FragmentTree\n'), ((1136, 1210), 'decitala.trees.FragmentTree.from_frag_type', 'FragmentTree.from_frag_type', ([], {'frag_type': '"""greek_foot"""', 'rep_type': '"""difference"""'}), "(frag_type='greek_foot', rep_type='difference')\n", (1163, 1210), False, 'from decitala.trees import FragmentTree\n'), ((1289, 1329), 'decitala.fragment.GeneralFragment', 'GeneralFragment', ([], {'data': '[0.25, 0.25, 0.25]'}), '(data=[0.25, 0.25, 0.25])\n', (1304, 1329), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1340, 1377), 
'decitala.fragment.GeneralFragment', 'GeneralFragment', ([], {'data': '[0.5, 1.0, 0.5]'}), '(data=[0.5, 1.0, 0.5])\n', (1355, 1377), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1388, 1430), 'decitala.fragment.GeneralFragment', 'GeneralFragment', ([], {'data': '[1.0, 1.0, 2.0, 3.0]'}), '(data=[1.0, 1.0, 2.0, 3.0])\n', (1403, 1430), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1478, 1524), 'decitala.trees.FragmentTree', 'FragmentTree', ([], {'data': 'fragments', 'rep_type': '"""ratio"""'}), "(data=fragments, rep_type='ratio')\n", (1490, 1524), False, 'from decitala.trees import FragmentTree\n'), ((1577, 1621), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[3.0, 1.5, 1.5, 0.75, 0.75]'], {}), '([3.0, 1.5, 1.5, 0.75, 0.75])\n', (1592, 1621), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1628, 1655), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[1.5, 1.0]'], {}), '([1.5, 1.0])\n', (1643, 1655), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1662, 1696), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[0.75, 0.5, 0.75]'], {}), '([0.75, 0.5, 0.75])\n', (1677, 1696), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1703, 1737), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[0.25, 0.25, 0.5]'], {}), '([0.25, 0.25, 0.5])\n', (1718, 1737), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1744, 1772), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[0.75, 0.5]'], {}), '([0.75, 0.5])\n', (1759, 1772), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1779, 1816), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[0.5, 1.0, 2.0, 4.0]'], {}), '([0.5, 1.0, 2.0, 4.0])\n', (1794, 1816), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1823, 1855), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[1.5, 1.0, 
1.5]'], {}), '([1.5, 1.0, 1.5])\n', (1838, 1855), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1862, 1894), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (1877, 1894), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1901, 1945), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[1.0, 0.5, 0.5, 0.25, 0.25]'], {}), '([1.0, 0.5, 0.5, 0.25, 0.25])\n', (1916, 1945), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((1953, 1991), 'decitala.fragment.GeneralFragment', 'GeneralFragment', (['[0.25, 0.5, 1.0, 2.0]'], {}), '([0.25, 0.5, 1.0, 2.0])\n', (1968, 1991), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((3007, 3032), 'os.listdir', 'os.listdir', (['decitala_path'], {}), '(decitala_path)\n', (3017, 3032), False, 'import os\n'), ((3824, 3846), 'os.listdir', 'os.listdir', (['greek_path'], {}), '(greek_path)\n', (3834, 3846), False, 'import os\n'), ((4802, 4894), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['laya'], {'ratio_tree': 'tala_ratio_tree', 'difference_tree': 'tala_difference_tree'}), '(laya, ratio_tree=tala_ratio_tree, difference_tree=\n tala_difference_tree)\n', (4817, 4894), False, 'from decitala.search import get_by_ql_array\n'), ((4907, 5001), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['bhagna'], {'ratio_tree': 'tala_ratio_tree', 'difference_tree': 'tala_difference_tree'}), '(bhagna, ratio_tree=tala_ratio_tree, difference_tree=\n tala_difference_tree)\n', (4922, 5001), False, 'from decitala.search import get_by_ql_array\n'), ((5016, 5112), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['niccanka'], {'ratio_tree': 'tala_ratio_tree', 'difference_tree': 'tala_difference_tree'}), '(niccanka, ratio_tree=tala_ratio_tree, difference_tree=\n tala_difference_tree)\n', (5031, 5112), False, 'from decitala.search import get_by_ql_array\n'), ((5306, 5347), 'numpy.array', 
'np.array', (['[1.0, 1.0, 1.0, 0.5, 0.75, 0.5]'], {}), '([1.0, 1.0, 1.0, 0.5, 0.75, 0.5])\n', (5314, 5347), True, 'import numpy as np\n'), ((5360, 5470), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['varied_ragavardhana'], {'ratio_tree': 'tala_ratio_tree', 'allowed_modifications': "['r', 'sr', 'rsr']"}), "(varied_ragavardhana, ratio_tree=tala_ratio_tree,\n allowed_modifications=['r', 'sr', 'rsr'])\n", (5375, 5470), False, 'from decitala.search import get_by_ql_array\n'), ((5594, 5662), 'decitala.trees.FragmentTree.from_frag_type', 'FragmentTree.from_frag_type', ([], {'frag_type': '"""greek_foot"""', 'rep_type': '"""dseg"""'}), "(frag_type='greek_foot', rep_type='dseg')\n", (5621, 5662), False, 'from decitala.trees import FragmentTree\n'), ((3045, 3093), 'music21.converter.parse', 'converter.parse', (["(decitala_path + '/' + this_file)"], {}), "(decitala_path + '/' + this_file)\n", (3060, 3093), False, 'from music21 import converter\n'), ((3859, 3904), 'music21.converter.parse', 'converter.parse', (["(greek_path + '/' + this_file)"], {}), "(greek_path + '/' + this_file)\n", (3874, 3904), False, 'from music21 import converter\n'), ((5782, 5799), 'decitala.fragment.GreekFoot', 'GreekFoot', (['"""Iamb"""'], {}), "('Iamb')\n", (5791, 5799), False, 'from decitala.fragment import GeneralFragment, GreekFoot\n'), ((2195, 2221), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2206, 2221), False, 'from music21 import chord\n'), ((2239, 2265), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2250, 2265), False, 'from music21 import chord\n'), ((2284, 2310), 'music21.chord.Chord', 'chord.Chord', (["['E-3', 'D4']"], {}), "(['E-3', 'D4'])\n", (2295, 2310), False, 'from music21 import chord\n'), ((2329, 2356), 'music21.chord.Chord', 'chord.Chord', (["['A2', 'A-3>']"], {}), "(['A2', 'A-3>'])\n", (2340, 2356), False, 'from music21 import chord\n'), ((2388, 2414), 'music21.chord.Chord', 'chord.Chord', 
(["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2399, 2414), False, 'from music21 import chord\n'), ((2433, 2459), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2444, 2459), False, 'from music21 import chord\n'), ((2478, 2504), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2489, 2504), False, 'from music21 import chord\n'), ((2522, 2548), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2533, 2548), False, 'from music21 import chord\n'), ((2578, 2604), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2589, 2604), False, 'from music21 import chord\n'), ((2623, 2649), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2634, 2649), False, 'from music21 import chord\n'), ((2667, 2693), 'music21.chord.Chord', 'chord.Chord', (["['F#2', 'F3']"], {}), "(['F#2', 'F3'])\n", (2678, 2693), False, 'from music21 import chord\n'), ((2711, 2737), 'music21.chord.Chord', 'chord.Chord', (["['E-3', 'D4']"], {}), "(['E-3', 'D4'])\n", (2722, 2737), False, 'from music21 import chord\n'), ((2756, 2782), 'music21.chord.Chord', 'chord.Chord', (["['A2', 'A-3']"], {}), "(['A2', 'A-3'])\n", (2767, 2782), False, 'from music21 import chord\n'), ((3255, 3342), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['ql_array'], {'ratio_tree': 'tala_ratio_tree', 'allowed_modifications': "['r']"}), "(ql_array, ratio_tree=tala_ratio_tree, allowed_modifications\n =['r'])\n", (3270, 3342), False, 'from decitala.search import get_by_ql_array\n'), ((3382, 3478), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['ql_array'], {'difference_tree': 'tala_difference_tree', 'allowed_modifications': "['d']"}), "(ql_array, difference_tree=tala_difference_tree,\n allowed_modifications=['d'])\n", (3397, 3478), False, 'from decitala.search import get_by_ql_array\n'), ((4066, 4153), 'decitala.search.get_by_ql_array', 
'get_by_ql_array', (['ql_array'], {'ratio_tree': 'greek_ratio_tree', 'allowed_modifications': "['r']"}), "(ql_array, ratio_tree=greek_ratio_tree,\n allowed_modifications=['r'])\n", (4081, 4153), False, 'from decitala.search import get_by_ql_array\n'), ((4194, 4291), 'decitala.search.get_by_ql_array', 'get_by_ql_array', (['ql_array'], {'difference_tree': 'greek_difference_tree', 'allowed_modifications': "['d']"}), "(ql_array, difference_tree=greek_difference_tree,\n allowed_modifications=['d'])\n", (4209, 4291), False, 'from decitala.search import get_by_ql_array\n')] |
import numpy as np
from perform.constants import REAL_TYPE
from perform.flux.invisc_flux.invisc_flux import InviscFlux
class RoeInviscFlux(InviscFlux):
    """Class implementing flux methods for Roe's flux difference scheme.

    Inherits from InviscFlux. Provides member functions for computing the numerical flux at each face and
    its Jacobian with respect to the primitive and conservative state.

    Please refer to Roe (1981) for details on the Roe scheme.

    Args:
        sol_domain: SolutionDomain with which this Flux is associated.
    """

    def __init__(self, sol_domain):
        # sol_domain is accepted for interface consistency with other Flux
        # classes but is not used here; the base class takes no arguments.
        super().__init__()

    def calc_avg_state(self, sol_left, sol_right, sol_ave):
        """Computes average state at cell faces.

        Computes the special Roe average, first by computing the Roe average density and stagnation enthalpy, then
        adjusting the primitive state iteratively to be consistent.

        Args:
            sol_left: SolutionPhys representing the solution on the left side of cell faces.
            sol_right: SolutionPhys representing the solution on the right side of cell faces.
            sol_ave: SolutionPhys where the face average state will be stored.
        """

        # Useful factors: sqrt-density weights of the Roe average
        sqrhol = np.sqrt(sol_left.sol_cons[0, :])
        sqrhor = np.sqrt(sol_right.sol_cons[0, :])
        fac = sqrhol / (sqrhol + sqrhor)
        fac1 = 1.0 - fac

        # Roe average stagnation enthalpy and density
        sol_ave.h0 = fac * sol_left.h0 + fac1 * sol_right.h0
        sol_ave.sol_cons[0, :] = sqrhol * sqrhor

        # First guess at Roe average primitive state
        sol_ave.sol_prim = fac[None, :] * sol_left.sol_prim + fac1[None, :] * sol_right.sol_prim
        sol_ave.mass_fracs_full = sol_ave.gas_model.calc_all_mass_fracs(sol_ave.sol_prim[3:, :], threshold=True)

        # Adjust primitive state iteratively to conform to Roe average density and enthalpy, update state
        sol_ave.calc_state_from_rho_h0()

    def calc_flux(self, sol_domain):
        """Compute numerical inviscid flux vector.

        Args:
            sol_domain: SolutionDomain with which this Flux is associated.

        Returns:
            NumPy array of numerical inviscid flux for every governing equation at each finite volume face.
        """

        # TODO: entropy fix

        sol_left = sol_domain.sol_left
        sol_right = sol_domain.sol_right

        # Compute inviscid flux vectors of left and right state
        flux_left = self.calc_inv_flux(sol_left.sol_cons, sol_left.sol_prim, sol_left.h0)
        flux_right = self.calc_inv_flux(sol_right.sol_cons, sol_right.sol_prim, sol_right.h0)

        # Dissipation term: the Roe dissipation matrix applied to the jump in
        # the primitive state. roe_diss is cached on sol_domain so that
        # calc_jacob() can reuse it without recomputation.
        d_sol_prim = sol_left.sol_prim - sol_right.sol_prim
        sol_domain.roe_diss = self.calc_roe_diss(sol_domain.sol_ave)
        diss_term = 0.5 * (sol_domain.roe_diss * np.expand_dims(d_sol_prim, 0)).sum(-2)

        # Complete Roe flux: central average plus dissipation
        flux = 0.5 * (flux_left + flux_right) + diss_term

        return flux

    def calc_roe_diss(self, sol_ave):
        """Compute dissipation term of Roe flux.

        The derivation of this term is provided in the solver theory documentation.

        Args:
            sol_ave: SolutionPhys of the Roe average state at each finite volume face.

        Returns:
            3D NumPy array of the Roe dissipation matrix.
        """

        gas = sol_ave.gas_model

        diss_matrix = np.zeros((gas.num_eqs, gas.num_eqs, sol_ave.num_cells), dtype=REAL_TYPE)

        # For clarity
        rho = sol_ave.sol_cons[0, :]
        vel = sol_ave.sol_prim[1, :]
        mass_fracs = sol_ave.sol_prim[3:, :]

        h0 = sol_ave.h0
        c = sol_ave.c

        # Derivatives of density and enthalpy
        sol_ave.update_density_enthalpy_derivs()
        d_rho_d_press = sol_ave.d_rho_d_press
        d_rho_d_temp = sol_ave.d_rho_d_temp
        d_rho_d_mass_frac = sol_ave.d_rho_d_mass_frac
        d_enth_d_press = sol_ave.d_enth_d_press
        d_enth_d_temp = sol_ave.d_enth_d_temp
        d_enth_d_mass_frac = sol_ave.d_enth_d_mass_frac

        # Gamma terms for energy equation
        g_press = rho * d_enth_d_press + d_rho_d_press * h0 - 1.0
        g_temp = rho * d_enth_d_temp + d_rho_d_temp * h0
        g_mass_frac = rho[None, :] * d_enth_d_mass_frac + h0[None, :] * d_rho_d_mass_frac

        # Characteristic speeds (acoustic eigenvalues u+c and u-c)
        lambda1 = vel + c
        lambda2 = vel - c
        lambda1_abs = np.absolute(lambda1)
        lambda2_abs = np.absolute(lambda2)

        r_roe = (lambda2_abs - lambda1_abs) / (lambda2 - lambda1)
        alpha = c * (lambda1_abs + lambda2_abs) / (lambda1 - lambda2)
        beta = np.power(c, 2.0) * (lambda1_abs - lambda2_abs) / (lambda1 - lambda2)
        # NOTE(review): phi is the identical expression to alpha above --
        # possibly an intended hook for a future entropy fix; confirm.
        phi = c * (lambda1_abs + lambda2_abs) / (lambda1 - lambda2)

        eta = (1.0 - rho * d_enth_d_press) / d_enth_d_temp
        psi = eta * d_rho_d_temp + rho * d_rho_d_press

        vel_abs = np.absolute(vel)

        beta_star = beta * psi
        beta_e = beta * (rho * g_press + g_temp * eta)
        phi_star = d_rho_d_press * phi + d_rho_d_temp * eta * (phi - vel_abs) / rho
        phi_e = g_press * phi + g_temp * eta * (phi - vel_abs) / rho
        m = rho * alpha
        e = rho * vel * alpha

        # Continuity equation row
        diss_matrix[0, 0, :] = phi_star
        diss_matrix[0, 1, :] = beta_star
        diss_matrix[0, 2, :] = vel_abs * d_rho_d_temp
        diss_matrix[0, 3:, :] = vel_abs[None, :] * d_rho_d_mass_frac

        # Momentum equation row
        diss_matrix[1, 0, :] = vel * phi_star + r_roe
        diss_matrix[1, 1, :] = vel * beta_star + m
        diss_matrix[1, 2, :] = vel * vel_abs * d_rho_d_temp
        diss_matrix[1, 3:, :] = (vel * vel_abs)[None, :] * d_rho_d_mass_frac

        # Energy equation row
        diss_matrix[2, 0, :] = phi_e + r_roe * vel
        diss_matrix[2, 1, :] = beta_e + e
        diss_matrix[2, 2, :] = g_temp * vel_abs
        diss_matrix[2, 3:, :] = g_mass_frac * vel_abs[None, :]

        # Species transport row
        diss_matrix[3:, 0, :] = mass_fracs * phi_star[None, :]
        diss_matrix[3:, 1, :] = mass_fracs * beta_star[None, :]
        diss_matrix[3:, 2, :] = mass_fracs * (vel_abs * d_rho_d_temp)[None, :]

        # TODO: vectorize
        # Diagonal species entries carry an extra rho * |u| term relative to
        # the off-diagonal entries.
        for mf_idx_out in range(3, gas.num_eqs):
            for mf_idx_in in range(3, gas.num_eqs):
                # TODO: check this again against GEMS,
                # something weird going on
                if mf_idx_out == mf_idx_in:
                    diss_matrix[mf_idx_out, mf_idx_in, :] = vel_abs * (
                        rho + mass_fracs[mf_idx_out - 3, :] * d_rho_d_mass_frac[mf_idx_in - 3, :]
                    )
                else:
                    diss_matrix[mf_idx_out, mf_idx_in, :] = (
                        vel_abs * mass_fracs[mf_idx_out - 3, :] * d_rho_d_mass_frac[mf_idx_in - 3, :]
                    )

        return diss_matrix

    def calc_jacob(self, sol_domain, wrt_prim):
        """Compute numerical inviscid flux Jacobian.

        Calculates flux Jacobian at each face and assembles Jacobian with respect to each
        finite volume cell's state. Note that the gradient with respect to boundary ghost cell states are
        excluded, as the Newton iteration linear solve does not need this.

        Args:
            sol_domain: SolutionDomain with which this Flux is associated.
            wrt_prim:
                Boolean flag. If True, calculate Jacobian w/r/t the primitive variables.
                If False, calculate w/r/t conservative variables.

        Returns:
            jacob_center_cell: center block diagonal of flux Jacobian, representing the gradient of a given cell's
            viscous flux contribution with respect to its own state.
            jacob_left_cell: lower block diagonal of flux Jacobian, representing the gradient of a given cell's
            viscous flux contribution with respect to its left neighbor's state.
            jacob_left_cell: upper block diagonal of flux Jacobian, representing the gradient of a given cell's
            viscous flux contribution with respect to its right neighbor's state.
        """

        # roe_diss is assumed to have been cached by a prior calc_flux() call.
        roe_diss = sol_domain.roe_diss

        center_samp = sol_domain.flux_rhs_idxs
        left_samp = sol_domain.jacob_left_samp
        right_samp = sol_domain.jacob_right_samp

        # Jacobian of inviscid flux vector at left and right face reconstruction
        jacob_face_left = self.calc_d_inv_flux_d_sol_prim(sol_domain.sol_left)
        jacob_face_right = self.calc_d_inv_flux_d_sol_prim(sol_domain.sol_right)

        # NOTE(review): wrt_prim is currently ignored; only the primitive-state
        # Jacobian path is implemented (see commented block below).
        # TODO: when conservative Jacobian is implemented, uncomment this
        # if wrt_prim:
        #     jacob_face_left = self.calc_d_inv_flux_d_sol_prim(sol_domain.sol_left)
        #     jacob_face_right = self.calc_d_inv_flux_d_sol_prim(sol_domain.sol_right)
        # else:
        #     raise ValueError("Roe disspation issue not addressed yet")
        #     jacob_face_left = self.calc_d_inv_flux_d_sol_cons(sol_domain.sol_left)
        #     jacob_face_right = self.calc_d_inv_flux_d_sol_cons(sol_domain.sol_right)

        # center, lower, and upper block diagonal of full numerical flux Jacobian
        jacob_center_cell = (jacob_face_left[:, :, center_samp + 1] + roe_diss[:, :, center_samp + 1]) + (
            -jacob_face_right[:, :, center_samp] + roe_diss[:, :, center_samp]
        )
        jacob_left_cell = -jacob_face_left[:, :, left_samp] - roe_diss[:, :, left_samp]
        jacob_right_cell = jacob_face_right[:, :, right_samp] - roe_diss[:, :, right_samp]

        # The 0.5 factor mirrors the averaging in calc_flux()
        jacob_center_cell *= 0.5
        jacob_left_cell *= 0.5
        jacob_right_cell *= 0.5

        return jacob_center_cell, jacob_left_cell, jacob_right_cell
| [
"numpy.absolute",
"numpy.power",
"numpy.zeros",
"numpy.expand_dims",
"numpy.sqrt"
] | [((1255, 1287), 'numpy.sqrt', 'np.sqrt', (['sol_left.sol_cons[0, :]'], {}), '(sol_left.sol_cons[0, :])\n', (1262, 1287), True, 'import numpy as np\n'), ((1305, 1338), 'numpy.sqrt', 'np.sqrt', (['sol_right.sol_cons[0, :]'], {}), '(sol_right.sol_cons[0, :])\n', (1312, 1338), True, 'import numpy as np\n'), ((3430, 3502), 'numpy.zeros', 'np.zeros', (['(gas.num_eqs, gas.num_eqs, sol_ave.num_cells)'], {'dtype': 'REAL_TYPE'}), '((gas.num_eqs, gas.num_eqs, sol_ave.num_cells), dtype=REAL_TYPE)\n', (3438, 3502), True, 'import numpy as np\n'), ((4444, 4464), 'numpy.absolute', 'np.absolute', (['lambda1'], {}), '(lambda1)\n', (4455, 4464), True, 'import numpy as np\n'), ((4487, 4507), 'numpy.absolute', 'np.absolute', (['lambda2'], {}), '(lambda2)\n', (4498, 4507), True, 'import numpy as np\n'), ((4931, 4947), 'numpy.absolute', 'np.absolute', (['vel'], {}), '(vel)\n', (4942, 4947), True, 'import numpy as np\n'), ((4660, 4676), 'numpy.power', 'np.power', (['c', '(2.0)'], {}), '(c, 2.0)\n', (4668, 4676), True, 'import numpy as np\n'), ((2864, 2893), 'numpy.expand_dims', 'np.expand_dims', (['d_sol_prim', '(0)'], {}), '(d_sol_prim, 0)\n', (2878, 2893), True, 'import numpy as np\n')] |
import numpy as np
data_path = "data/problem_13.txt"
# data_path = "data/problem_13_test.txt"

# Dots are stored as (row, col) == (y, x) pairs; folds as (axis, crease)
# where axis 1 means a vertical fold ("x=...") and axis 0 a horizontal
# fold ("y=..."), matching the column order used for the dots.
initial_state = []
folds = []
with open(data_path, "r") as f:
    for line in f:
        line = line.rstrip()
        if line:
            if line[0] == "f":
                # Fold instruction, e.g. "fold along x=655"
                direction, value = line.rsplit(" ", 1)[-1].split("=")
                folds.append((int(direction == "x"), int(value)))
            else:
                # Dot coordinate, e.g. "6,10" (x,y); stored as (y, x)
                dot_x, dot_y = line.split(",")
                initial_state.append((int(dot_y), int(dot_x)))

initial_state = np.array(initial_state)
# print(initial_state)
# print(folds)
def print_grid(state):
    """Render the (row, col) dot coordinates as a '#'/' ' grid on stdout."""
    height = state[:, 0].max() + 1
    width = state[:, 1].max() + 1
    dot_grid = np.zeros((height, width), dtype=int)
    dot_grid[state[:, 0], state[:, 1]] = 1
    for row in dot_grid:
        print("".join("#" if cell == 1 else " " for cell in row))
def compute_fold(state, axis, crease_value):
    """Fold along `crease_value` on `axis`, reflecting dots past the crease.

    Returns the folded coordinates with duplicates removed.
    """
    folded = state.copy()
    reflected = folded[:, axis] > crease_value
    # Mirror: coordinate c maps to crease - (c - crease) = 2*crease - c.
    folded[reflected, axis] = 2 * crease_value - state[reflected, axis]
    return np.unique(folded, axis=0)
# Part 1: number of distinct dots after performing only the first fold.
current_state = compute_fold(initial_state, *folds[0])
print(f"Part 1 solution: {len(current_state)}")
# Part 2: apply every fold in order, then render the surviving dots
# (the printed grid is the answer, read visually).
current_state = initial_state
for fold in folds:
    current_state = compute_fold(current_state, *fold)
print("Part 2 solution:")
print_grid(current_state)
| [
"numpy.array",
"numpy.unique"
] | [((536, 559), 'numpy.array', 'np.array', (['initial_state'], {}), '(initial_state)\n', (544, 559), True, 'import numpy as np\n'), ((1055, 1083), 'numpy.unique', 'np.unique', (['new_state'], {'axis': '(0)'}), '(new_state, axis=0)\n', (1064, 1083), True, 'import numpy as np\n')] |
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Basic/mesh_subdivision.py
import numpy as np
import open3d as o3d
import meshes
def mesh_generator():
    """Yield a series of demo meshes, constructed lazily one at a time."""
    factories = (
        meshes.triangle,
        meshes.plane,
        o3d.geometry.TriangleMesh.create_tetrahedron,
        o3d.geometry.TriangleMesh.create_box,
        o3d.geometry.TriangleMesh.create_octahedron,
        o3d.geometry.TriangleMesh.create_icosahedron,
        o3d.geometry.TriangleMesh.create_sphere,
        o3d.geometry.TriangleMesh.create_cone,
        o3d.geometry.TriangleMesh.create_cylinder,
        meshes.knot,
        meshes.bathtub,
    )
    # Calling each factory inside the loop keeps construction lazy, exactly
    # as the original chain of `yield` statements did.
    for factory in factories:
        yield factory()
if __name__ == "__main__":
    np.random.seed(42)
    number_of_iterations = 3
    for mesh in mesh_generator():
        mesh.compute_vertex_normals()
        # Random per-vertex colors make the subdivision pattern visible.
        n_verts = np.asarray(mesh.vertices).shape[0]
        colors = np.random.uniform(0, 1, size=(n_verts, 3))
        mesh.vertex_colors = o3d.utility.Vector3dVector(colors)
        n_tris = np.asarray(mesh.triangles).shape[0]
        print("original mesh has %d triangles and %d vertices" % (n_tris, n_verts))
        o3d.visualization.draw_geometries([mesh])

        mesh_up = mesh.subdivide_midpoint(number_of_iterations=number_of_iterations)
        up_tris = np.asarray(mesh_up.triangles).shape[0]
        up_verts = np.asarray(mesh_up.vertices).shape[0]
        print("midpoint upsampled mesh has %d triangles and %d vertices" % (up_tris, up_verts))
        o3d.visualization.draw_geometries([mesh_up])

        mesh_up = mesh.subdivide_loop(number_of_iterations=number_of_iterations)
        up_tris = np.asarray(mesh_up.triangles).shape[0]
        up_verts = np.asarray(mesh_up.vertices).shape[0]
        print("loop upsampled mesh has %d triangles and %d vertices" % (up_tris, up_verts))
        o3d.visualization.draw_geometries([mesh_up])
| [
"open3d.geometry.TriangleMesh.create_sphere",
"numpy.random.uniform",
"open3d.geometry.TriangleMesh.create_box",
"numpy.random.seed",
"meshes.plane",
"numpy.asarray",
"open3d.geometry.TriangleMesh.create_octahedron",
"open3d.geometry.TriangleMesh.create_icosahedron",
"meshes.triangle",
"open3d.geo... | [((741, 759), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (755, 759), True, 'import numpy as np\n'), ((239, 256), 'meshes.triangle', 'meshes.triangle', ([], {}), '()\n', (254, 256), False, 'import meshes\n'), ((267, 281), 'meshes.plane', 'meshes.plane', ([], {}), '()\n', (279, 281), False, 'import meshes\n'), ((292, 338), 'open3d.geometry.TriangleMesh.create_tetrahedron', 'o3d.geometry.TriangleMesh.create_tetrahedron', ([], {}), '()\n', (336, 338), True, 'import open3d as o3d\n'), ((349, 387), 'open3d.geometry.TriangleMesh.create_box', 'o3d.geometry.TriangleMesh.create_box', ([], {}), '()\n', (385, 387), True, 'import open3d as o3d\n'), ((398, 443), 'open3d.geometry.TriangleMesh.create_octahedron', 'o3d.geometry.TriangleMesh.create_octahedron', ([], {}), '()\n', (441, 443), True, 'import open3d as o3d\n'), ((454, 500), 'open3d.geometry.TriangleMesh.create_icosahedron', 'o3d.geometry.TriangleMesh.create_icosahedron', ([], {}), '()\n', (498, 500), True, 'import open3d as o3d\n'), ((511, 552), 'open3d.geometry.TriangleMesh.create_sphere', 'o3d.geometry.TriangleMesh.create_sphere', ([], {}), '()\n', (550, 552), True, 'import open3d as o3d\n'), ((563, 602), 'open3d.geometry.TriangleMesh.create_cone', 'o3d.geometry.TriangleMesh.create_cone', ([], {}), '()\n', (600, 602), True, 'import open3d as o3d\n'), ((613, 656), 'open3d.geometry.TriangleMesh.create_cylinder', 'o3d.geometry.TriangleMesh.create_cylinder', ([], {}), '()\n', (654, 656), True, 'import open3d as o3d\n'), ((667, 680), 'meshes.knot', 'meshes.knot', ([], {}), '()\n', (678, 680), False, 'import meshes\n'), ((691, 707), 'meshes.bathtub', 'meshes.bathtub', ([], {}), '()\n', (705, 707), False, 'import meshes\n'), ((933, 975), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(n_verts, 3)'}), '(0, 1, size=(n_verts, 3))\n', (950, 975), True, 'import numpy as np\n'), ((1005, 1039), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], 
{}), '(colors)\n', (1031, 1039), True, 'import open3d as o3d\n'), ((1202, 1243), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh]'], {}), '([mesh])\n', (1235, 1243), True, 'import open3d as o3d\n'), ((1536, 1580), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh_up]'], {}), '([mesh_up])\n', (1569, 1580), True, 'import open3d as o3d\n'), ((1852, 1896), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh_up]'], {}), '([mesh_up])\n', (1885, 1896), True, 'import open3d as o3d\n'), ((881, 906), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (891, 906), True, 'import numpy as np\n'), ((1107, 1133), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (1117, 1133), True, 'import numpy as np\n'), ((1157, 1182), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (1167, 1182), True, 'import numpy as np\n'), ((1433, 1462), 'numpy.asarray', 'np.asarray', (['mesh_up.triangles'], {}), '(mesh_up.triangles)\n', (1443, 1462), True, 'import numpy as np\n'), ((1488, 1516), 'numpy.asarray', 'np.asarray', (['mesh_up.vertices'], {}), '(mesh_up.vertices)\n', (1498, 1516), True, 'import numpy as np\n'), ((1749, 1778), 'numpy.asarray', 'np.asarray', (['mesh_up.triangles'], {}), '(mesh_up.triangles)\n', (1759, 1778), True, 'import numpy as np\n'), ((1804, 1832), 'numpy.asarray', 'np.asarray', (['mesh_up.vertices'], {}), '(mesh_up.vertices)\n', (1814, 1832), True, 'import numpy as np\n')] |
# nuScenes dev-kit. Version 0.1
# Code written by <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
from __future__ import annotations
from typing import Tuple
import math
from enum import IntEnum
import numpy as np
from pyquaternion import Quaternion
class BoxVisibility(IntEnum):
""" Enumerates the various level of box visibility in an image """
ALL = 0 # Requires all corners are inside the image.
ANY = 1 # Requires at least one corner visible in the image.
NONE = 2 # Requires no corners to be inside, i.e. box can be fully outside the image.
def quaternion_slerp(q0: np.ndarray, q1: np.ndarray, fraction: float) -> np.ndarray:
"""
Does interpolation between two quaternions. This code is modified from
https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
:param q0: <np.array: 4>. First quaternion.
:param q1: <np.array: 4>. Second quaternion.
:param fraction: Interpolation fraction between 0 and 1.
:return: <np.array: 4>. Interpolated quaternion.
"""
eps = np.finfo(float).eps * 4.0
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < eps:
return q0
if d < 0.0:
# invert rotation
d = -d
np.negative(q1, q1)
angle = math.acos(d)
if abs(angle) < eps:
return q0
is_in = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * is_in
q1 *= math.sin(fraction * angle) * is_in
q0 += q1
return q0
def view_points(points: np.ndarray, view: np.ndarray, normalize: bool) -> np.ndarray:
"""
This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a perspective projection the view should be a 3x3 camera matrix, and normalize=True
For an orthographic projection with translation the view is a 3x4 matrix and normalize=False
For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns
all zeros) and normalize=False
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogenous coordinates
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points
def box_in_image(box, intrinsic: np.ndarray, imsize: Tuple[int], vis_level: int=BoxVisibility.ANY) -> bool:
"""
Check if a box is visible inside an image without accounting for occlusions.
:param box: The box to be checked.
:param intrinsic: <float: 3, 3>. Intrinsic camera matrix.
:param imsize: (width <int>, height <int>).
:param vis_level: One of the enumerations of <BoxVisibility>.
:return True if visibility condition is satisfied.
"""
corners_3d = box.corners()
corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :]
visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0])
visible = np.logical_and(visible, corners_img[1, :] < imsize[1])
visible = np.logical_and(visible, corners_img[1, :] > 0)
visible = np.logical_and(visible, corners_3d[2, :] > 1)
in_front = corners_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera.
if vis_level == BoxVisibility.ALL:
return all(visible) and all(in_front)
elif vis_level == BoxVisibility.ANY:
return any(visible) and all(in_front)
elif vis_level == BoxVisibility.NONE:
return True
else:
raise ValueError("vis_level: {} not valid".format(vis_level))
def transform_matrix(translation: np.ndarray=np.array([0, 0, 0]), rotation: Quaternion=Quaternion([1, 0, 0, 0]),
inverse: bool=False) -> np.ndarray:
"""
Convert pose to transformation matrix.
:param translation: <np.float32: 3>. Translation in x, y, z.
:param rotation: Rotation in quaternions (w ri rj rk).
:param inverse: Whether to compute inverse transform matrix.
:return: <np.float32: 4, 4>. Transformation matrix.
"""
tm = np.eye(4)
if inverse:
rot_inv = rotation.rotation_matrix.T
trans = np.transpose(-np.array(translation))
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(trans)
else:
tm[:3, :3] = rotation.rotation_matrix
tm[:3, 3] = np.transpose(np.array(translation))
return tm | [
"numpy.eye",
"numpy.logical_and",
"numpy.negative",
"math.sin",
"numpy.ones",
"math.acos",
"numpy.finfo",
"pyquaternion.Quaternion",
"numpy.array",
"numpy.dot"
] | [((1177, 1191), 'numpy.dot', 'np.dot', (['q0', 'q1'], {}), '(q0, q1)\n', (1183, 1191), True, 'import numpy as np\n'), ((1339, 1351), 'math.acos', 'math.acos', (['d'], {}), '(d)\n', (1348, 1351), False, 'import math\n'), ((2952, 2961), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2958, 2961), True, 'import numpy as np\n'), ((3170, 3193), 'numpy.dot', 'np.dot', (['viewpad', 'points'], {}), '(viewpad, points)\n', (3176, 3193), True, 'import numpy as np\n'), ((3936, 4004), 'numpy.logical_and', 'np.logical_and', (['(corners_img[0, :] > 0)', '(corners_img[0, :] < imsize[0])'], {}), '(corners_img[0, :] > 0, corners_img[0, :] < imsize[0])\n', (3950, 4004), True, 'import numpy as np\n'), ((4019, 4073), 'numpy.logical_and', 'np.logical_and', (['visible', '(corners_img[1, :] < imsize[1])'], {}), '(visible, corners_img[1, :] < imsize[1])\n', (4033, 4073), True, 'import numpy as np\n'), ((4088, 4134), 'numpy.logical_and', 'np.logical_and', (['visible', '(corners_img[1, :] > 0)'], {}), '(visible, corners_img[1, :] > 0)\n', (4102, 4134), True, 'import numpy as np\n'), ((4149, 4194), 'numpy.logical_and', 'np.logical_and', (['visible', '(corners_3d[2, :] > 1)'], {}), '(visible, corners_3d[2, :] > 1)\n', (4163, 4194), True, 'import numpy as np\n'), ((4662, 4681), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4670, 4681), True, 'import numpy as np\n'), ((4704, 4728), 'pyquaternion.Quaternion', 'Quaternion', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (4714, 4728), False, 'from pyquaternion import Quaternion\n'), ((5100, 5109), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5106, 5109), True, 'import numpy as np\n'), ((1307, 1326), 'numpy.negative', 'np.negative', (['q1', 'q1'], {}), '(q1, q1)\n', (1318, 1326), True, 'import numpy as np\n'), ((1413, 1428), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1421, 1428), False, 'import math\n'), ((1439, 1473), 'math.sin', 'math.sin', (['((1.0 - fraction) * angle)'], {}), '((1.0 - fraction) * angle)\n', 
(1447, 1473), False, 'import math\n'), ((1492, 1518), 'math.sin', 'math.sin', (['(fraction * angle)'], {}), '(fraction * angle)\n', (1500, 1518), False, 'import math\n'), ((1057, 1072), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1065, 1072), True, 'import numpy as np\n'), ((3130, 3154), 'numpy.ones', 'np.ones', (['(1, nbr_points)'], {}), '((1, nbr_points))\n', (3137, 3154), True, 'import numpy as np\n'), ((5382, 5403), 'numpy.array', 'np.array', (['translation'], {}), '(translation)\n', (5390, 5403), True, 'import numpy as np\n'), ((5202, 5223), 'numpy.array', 'np.array', (['translation'], {}), '(translation)\n', (5210, 5223), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
import os
from shutil import copy
from pathlib import Path
#reading renewable timeseries
Offshore_ISO_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Offshore_ISO_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Offshore_ISO_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
Offshore_NREL_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Offshore_NREL_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Offshore_NREL_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
Onshore_ISO_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Onshore_ISO_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Onshore_ISO_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
Onshore_NREL_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Onshore_NREL_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Onshore_NREL_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
Solar_ISO_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Solar_ISO_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Solar_ISO_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
Solar_NREL_2012 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2018')
Solar_NREL_2030 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2030')
Solar_NREL_2040 = pd.read_excel('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',header=0,sheet_name='All Zones Time Series - 2040')
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('ISONE_data_file/paths.csv',header=0)
#list zones
zones = ['CT', 'ME', 'NH', 'NEMA', 'RI', 'SEMA', 'VT', 'WCMA']
#time series of load for each zone
df_load_all = pd.read_excel('Demand/Hourly_demand.xlsx',header=0)
#daily hydropower availability
df_hydro = pd.read_csv('Hydropower/ISONE_dispatchable_hydro.csv',header=0)
#natural gas prices
df_ng_all = pd.read_csv('Fuel_prices/NG_prices.csv', header=0)
#oil prices
df_oil_all = pd.read_csv('Fuel_prices/Oil_prices.csv', header=0)
#daily time series of dispatchable imports by path
df_imports = pd.read_csv('Interchange/ISONE_dispatchable_imports.csv',header=0)
#hourly time series of exports by zone
df_exports = pd.read_csv('Interchange/ISONE_exports.csv',header=0)
def setup(selected_scenario):
#reading generator file
df_gen = pd.read_excel('ISONE_data_file/Scenarios/Generator_files/generators-{}.xlsx'.format(selected_scenario),header=0,sheet_name='Generators (dispatch)')
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('ISONE_data_file/Scenarios/Generator_files/generators-{}.xlsx'.format(selected_scenario),header=0,sheet_name='Generators (must run)')
mustrun_each_zone = np.zeros((1,len(zones)))
for zone in zones:
zonal_mustrun = df_must.loc[df_must['State']==zone]['Product'].sum()
mustrun_each_zone[0,zones.index(zone)] = zonal_mustrun
hourly_mustrun = np.repeat(mustrun_each_zone, 384, axis=0)
df_total_must_run = pd.DataFrame(hourly_mustrun,columns=zones)
#saving relevant renewable time series
if selected_scenario == '2012-base':
df_solar_data = Solar_NREL_2012.copy()
df_onshore_data = Onshore_NREL_2012.copy()
df_offshore_data = Offshore_NREL_2012.copy()
elif selected_scenario == '2030-Offshore-2x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
df_offshore_data = df_offshore_data*2
elif selected_scenario == '2030-Offshore-3x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
df_offshore_data = df_offshore_data*3
elif selected_scenario == '2030-Offshore-4x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
df_offshore_data = df_offshore_data*4
elif selected_scenario == '2030-Offshore-ReplaceISO':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_ISO_2030.copy()
elif selected_scenario == '2030-Onshore-0.5x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_onshore_data = df_onshore_data*0.5
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-Onshore-2x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_onshore_data = df_onshore_data*2
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-Onshore-3x':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_onshore_data = df_onshore_data*3
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-Onshore-ReplaceISO':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_ISO_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-2x':
df_solar_data = Solar_NREL_2030.copy()
df_solar_data = df_solar_data*2
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-3x':
df_solar_data = Solar_NREL_2030.copy()
df_solar_data = df_solar_data*3
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-4x':
df_solar_data = Solar_NREL_2030.copy()
df_solar_data = df_solar_data*4
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-5x':
df_solar_data = Solar_NREL_2030.copy()
df_solar_data = df_solar_data*5
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-6x':
df_solar_data = Solar_NREL_2030.copy()
df_solar_data = df_solar_data*6
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-PV-ReplaceISO':
df_solar_data = Solar_ISO_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-NREL':
df_solar_data = Solar_NREL_2030.copy()
df_onshore_data = Onshore_NREL_2030.copy()
df_offshore_data = Offshore_NREL_2030.copy()
elif selected_scenario == '2030-ISONE':
df_solar_data = Solar_ISO_2030.copy()
df_onshore_data = Onshore_ISO_2030.copy()
df_offshore_data = Offshore_ISO_2030.copy()
elif selected_scenario == '2040-Offshore-2x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*2
elif selected_scenario == '2040-Offshore-3x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*3
elif selected_scenario == '2040-Offshore-4x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*4
elif selected_scenario == '2040-Offshore-5x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*5
elif selected_scenario == '2040-Offshore-6x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*6
elif selected_scenario == '2040-Offshore-7x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*7
elif selected_scenario == '2040-Offshore-8x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*8
elif selected_scenario == '2040-Offshore-9x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*9
elif selected_scenario == '2040-Offshore-10x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
df_offshore_data = df_offshore_data*10
elif selected_scenario == '2040-Offshore-ReplaceISO':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_ISO_2040.copy()
elif selected_scenario == '2040-Onshore-0.5x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_onshore_data = df_onshore_data*0.5
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-Onshore-2x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_onshore_data = df_onshore_data*2
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-Onshore-3x':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_onshore_data = df_onshore_data*3
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-Onshore-ReplaceISO':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_ISO_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-2x':
df_solar_data = Solar_NREL_2040.copy()
df_solar_data = df_solar_data*2
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-3x':
df_solar_data = Solar_NREL_2040.copy()
df_solar_data = df_solar_data*3
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-4x':
df_solar_data = Solar_NREL_2040.copy()
df_solar_data = df_solar_data*4
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-5x':
df_solar_data = Solar_NREL_2040.copy()
df_solar_data = df_solar_data*5
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-6x':
df_solar_data = Solar_NREL_2040.copy()
df_solar_data = df_solar_data*6
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-PV-ReplaceISO':
df_solar_data = Solar_ISO_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-NREL':
df_solar_data = Solar_NREL_2040.copy()
df_onshore_data = Onshore_NREL_2040.copy()
df_offshore_data = Offshore_NREL_2040.copy()
elif selected_scenario == '2040-ISONE':
df_solar_data = Solar_ISO_2040.copy()
df_onshore_data = Onshore_ISO_2040.copy()
df_offshore_data = Offshore_ISO_2040.copy()
#time series of natural gas prices for each zone
df_ng = df_ng_all.copy()
#time series of oil prices for each zone
df_oil = df_oil_all.copy()
#time series of load for each zone
df_load = df_load_all.copy()
#time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
############
# sets #
############
#write data.dat file
path = str(Path.cwd().parent) + str(Path('/UCED/LR/'+selected_scenario))
os.makedirs(path,exist_ok=True)
generators_file='ISONE_data_file/Scenarios/Generator_files/generators-{}.xlsx'.format(selected_scenario)
dispatch_file='../UCED/ISONE_dispatch.py'
dispatchLP_file='../UCED/ISONE_dispatchLP.py'
wrapper_file='../UCED/ISONE_wrapper.py'
simulation_file='../UCED/ISONE_simulation.py'
copy(dispatch_file,path)
copy(wrapper_file,path)
copy(simulation_file,path)
copy(dispatchLP_file,path)
copy(generators_file,path)
os.rename('{}/generators-{}.xlsx'.format(path,selected_scenario), '{}/generators.xlsx'.format(path))
filename = path + '/data.dat'
#write data.dat file
with open(filename, 'w') as f:
# generator sets by zone
for z in zones:
# zone string
z_int = zones.index(z)
f.write('set Zone%dGenerators :=\n' % (z_int+1))
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z:
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_CT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYCT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_WCMA :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYWCMA_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# HQ imports
f.write('set HQ_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'HQVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NB imports
f.write('set NB_Imports_ME :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NBME_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# generator sets by type
# coal
f.write('set Coal :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'coal':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Slack
f.write('set Slack :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'slack':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Hydro
f.write('set Hydro :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Ramping
f.write('set Ramping :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro' or df_gen.loc[gen,'typ'] == 'imports':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# gas generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# Natural Gas
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dGas :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# oil generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dOil :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# zones
f.write('set zones :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sources
f.write('set sources :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sinks
f.write('set sinks :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
################
# parameters #
################
# simulation details
SimHours = 384
f.write('param SimHours := %d;' % SimHours)
f.write('\n')
f.write('param SimDays:= %d;' % int(SimHours/24))
f.write('\n\n')
HorizonHours = 48
f.write('param HorizonHours := %d;' % HorizonHours)
f.write('\n\n')
HorizonDays = int(HorizonHours/24)
f.write('param HorizonDays := %d;' % HorizonDays)
f.write('\n\n')
# create parameter matrix for transmission paths (source and sink connections)
f.write('param:' + '\t' + 'limit' + '\t' +'hurdle :=' + '\n')
for z in zones:
for x in zones:
f.write(z + '\t' + x + '\t')
match = 0
for p in range(0,len(df_paths)):
source = df_paths.loc[p,'start_zone']
sink = df_paths.loc[p,'end_zone']
if source == z and sink == x:
match = 1
p_match = p
if match > 0:
f.write(str(round(df_paths.loc[p_match,'limit'],3)) + '\t' + str(round(df_paths.loc[p_match,'hurdle'],3)) + '\n')
else:
f.write('0' + '\t' + '0' + '\n')
f.write(';\n\n')
# create parameter matrix for generators
f.write('param:' + '\t')
for c in df_gen.columns:
if c != 'name':
f.write(c + '\t')
f.write(':=\n\n')
for i in range(0,len(df_gen)):
for c in df_gen.columns:
if c == 'name':
unit_name = df_gen.loc[i,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + '\t')
elif c == 'typ' or c == 'zone':
f.write(str(df_gen.loc[i,c]) + '\t')
else:
f.write(str(round(df_gen.loc[i,c],3)) + '\t')
f.write('\n')
f.write(';\n\n')
# times series data
# zonal (hourly)
f.write('param:' + '\t' + 'SimDemand' + '\t' + 'SimOffshoreWind' \
+ '\t' + 'SimSolar' + '\t' + 'SimOnshoreWind' + '\t' + 'SimMustRun:=' + '\n')
for z in zones:
for h in range(0,len(df_load)):
f.write(z + '\t' + str(h+1) + '\t' + str(round(df_load.loc[h,z],3))\
+ '\t' + str(round(df_offshore_data.loc[h,z],3))\
+ '\t' + str(round(df_solar_data.loc[h,z],3))\
+ '\t' + str(round(df_onshore_data.loc[h,z],3))\
+ '\t' + str(round(df_total_must_run.loc[h,z],3)) + '\n')
f.write(';\n\n')
# zonal (daily)
f.write('param:' + '\t' + 'SimGasPrice' + '\t' + 'SimOilPrice:=' + '\n')
for z in zones:
for d in range(0,int(SimHours/24)):
f.write(z + '\t' + str(d+1) + '\t' + str(round(df_ng.loc[d,z], 3)) + '\t' + str(round(df_oil.loc[d,z], 3)) + '\n')
f.write(';\n\n')
#system wide (daily)
f.write('param:' + '\t' + 'SimNY_imports_CT' + '\t' + 'SimNY_imports_VT' + '\t' + 'SimNY_imports_WCMA' + '\t' + 'SimNB_imports_ME' + '\t' + 'SimHQ_imports_VT' + '\t' + 'SimCT_hydro' + '\t' + 'SimME_hydro' + '\t' + 'SimNH_hydro' + '\t' + 'SimNEMA_hydro' + '\t' + 'SimRI_hydro' + '\t' + 'SimVT_hydro' + '\t' + 'SimWCMA_hydro:=' + '\n')
for d in range(0,len(df_imports)):
f.write(str(d+1) + '\t' + str(round(df_imports.loc[d,'NY_imports_CT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_VT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_WCMA'],3)) + '\t' + str(round(df_imports.loc[d,'NB_imports_ME'],3)) + '\t' + str(round(df_imports.loc[d,'HQ_imports_VT'],3)) + '\t' + str(round(df_hydro.loc[d,'CT'],3)) + '\t' + str(round(df_hydro.loc[d,'ME'],3)) + '\t' + str(round(df_hydro.loc[d,'NH'],3)) + '\t' + str(round(df_hydro.loc[d,'NEMA'],3)) + '\t' + str(round(df_hydro.loc[d,'RI'],3)) + '\t' + str(round(df_hydro.loc[d,'VT'],3)) + '\t' + str(round(df_hydro.loc[d,'WCMA'],3)) + '\n')
f.write(';\n\n')
#system wide (hourly)
f.write('param:' + '\t' + 'SimCT_exports_NY' + '\t' + 'SimWCMA_exports_NY' + '\t' + 'SimVT_exports_NY' + '\t' + 'SimVT_exports_HQ' + '\t' + 'SimME_exports_NB' + '\t' + 'SimReserves:=' + '\n')
for h in range(0,len(df_load)):
f.write(str(h+1) + '\t' + str(round(df_exports.loc[h,'CT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'WCMA_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_HQ'],3)) + '\t' + str(round(df_exports.loc[h,'ME_exports_NB'],3)) + '\t' + str(round(df_reserves.loc[h,'reserves'],3)) + '\n')
f.write(';\n\n')
return None
| [
"pandas.DataFrame",
"numpy.sum",
"os.makedirs",
"pandas.read_csv",
"pandas.read_excel",
"pathlib.Path",
"pathlib.Path.cwd",
"shutil.copy",
"numpy.repeat"
] | [((235, 384), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2018"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (248, 384), True, 'import pandas as pd\n'), ((394, 543), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (407, 543), True, 'import pandas as pd\n'), ((553, 702), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (566, 702), True, 'import pandas as pd\n'), ((713, 863), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2018"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (726, 863), True, 'import pandas as pd\n'), ((874, 1024), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (887, 1024), True, 'import pandas as pd\n'), ((1035, 1185), 'pandas.read_excel', 
'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/Off_Shore_wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (1048, 1185), True, 'import pandas as pd\n'), ((1195, 1343), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2018"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (1208, 1343), True, 'import pandas as pd\n'), ((1352, 1500), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (1365, 1500), True, 'import pandas as pd\n'), ((1509, 1657), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (1522, 1657), True, 'import pandas as pd\n'), ((1667, 1816), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2018"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (1680, 1816), True, 'import pandas as pd\n'), ((1826, 1975), 'pandas.read_excel', 'pd.read_excel', 
(['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (1839, 1975), True, 'import pandas as pd\n'), ((1985, 2134), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "(\n 'ISONE_data_file/Scenarios/Renewable_timeseries/On_Shore_Wind_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (1998, 2134), True, 'import pandas as pd\n'), ((2142, 2277), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2018"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (2155, 2277), True, 'import pandas as pd\n'), ((2289, 2424), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (2302, 2424), True, 'import pandas as pd\n'), ((2436, 2571), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (2449, 2571), True, 'import pandas as pd\n'), ((2584, 2720), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx"""'], {'header': '(0)', 
'sheet_name': '"""All Zones Time Series - 2018"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2018')\n", (2597, 2720), True, 'import pandas as pd\n'), ((2733, 2869), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2030"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2030')\n", (2746, 2869), True, 'import pandas as pd\n'), ((2882, 3018), 'pandas.read_excel', 'pd.read_excel', (['"""ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx"""'], {'header': '(0)', 'sheet_name': '"""All Zones Time Series - 2040"""'}), "('ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL.xlsx',\n header=0, sheet_name='All Zones Time Series - 2040')\n", (2895, 3018), True, 'import pandas as pd\n'), ((3075, 3125), 'pandas.read_csv', 'pd.read_csv', (['"""ISONE_data_file/paths.csv"""'], {'header': '(0)'}), "('ISONE_data_file/paths.csv', header=0)\n", (3086, 3125), True, 'import pandas as pd\n'), ((3251, 3303), 'pandas.read_excel', 'pd.read_excel', (['"""Demand/Hourly_demand.xlsx"""'], {'header': '(0)'}), "('Demand/Hourly_demand.xlsx', header=0)\n", (3264, 3303), True, 'import pandas as pd\n'), ((3347, 3411), 'pandas.read_csv', 'pd.read_csv', (['"""Hydropower/ISONE_dispatchable_hydro.csv"""'], {'header': '(0)'}), "('Hydropower/ISONE_dispatchable_hydro.csv', header=0)\n", (3358, 3411), True, 'import pandas as pd\n'), ((3444, 3494), 'pandas.read_csv', 'pd.read_csv', (['"""Fuel_prices/NG_prices.csv"""'], {'header': '(0)'}), "('Fuel_prices/NG_prices.csv', header=0)\n", (3455, 3494), True, 'import pandas as pd\n'), ((3521, 3572), 'pandas.read_csv', 'pd.read_csv', (['"""Fuel_prices/Oil_prices.csv"""'], {'header': '(0)'}), "('Fuel_prices/Oil_prices.csv', header=0)\n", (3532, 3572), True, 'import pandas as pd\n'), 
((3638, 3705), 'pandas.read_csv', 'pd.read_csv', (['"""Interchange/ISONE_dispatchable_imports.csv"""'], {'header': '(0)'}), "('Interchange/ISONE_dispatchable_imports.csv', header=0)\n", (3649, 3705), True, 'import pandas as pd\n'), ((3758, 3812), 'pandas.read_csv', 'pd.read_csv', (['"""Interchange/ISONE_exports.csv"""'], {'header': '(0)'}), "('Interchange/ISONE_exports.csv', header=0)\n", (3769, 3812), True, 'import pandas as pd\n'), ((4503, 4544), 'numpy.repeat', 'np.repeat', (['mustrun_each_zone', '(384)'], {'axis': '(0)'}), '(mustrun_each_zone, 384, axis=0)\n', (4512, 4544), True, 'import numpy as np\n'), ((4569, 4612), 'pandas.DataFrame', 'pd.DataFrame', (['hourly_mustrun'], {'columns': 'zones'}), '(hourly_mustrun, columns=zones)\n', (4581, 4612), True, 'import pandas as pd\n'), ((14550, 14572), 'pandas.DataFrame', 'pd.DataFrame', (['reserves'], {}), '(reserves)\n', (14562, 14572), True, 'import pandas as pd\n'), ((14779, 14811), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (14790, 14811), False, 'import os\n'), ((15124, 15149), 'shutil.copy', 'copy', (['dispatch_file', 'path'], {}), '(dispatch_file, path)\n', (15128, 15149), False, 'from shutil import copy\n'), ((15153, 15177), 'shutil.copy', 'copy', (['wrapper_file', 'path'], {}), '(wrapper_file, path)\n', (15157, 15177), False, 'from shutil import copy\n'), ((15181, 15208), 'shutil.copy', 'copy', (['simulation_file', 'path'], {}), '(simulation_file, path)\n', (15185, 15208), False, 'from shutil import copy\n'), ((15212, 15239), 'shutil.copy', 'copy', (['dispatchLP_file', 'path'], {}), '(dispatchLP_file, path)\n', (15216, 15239), False, 'from shutil import copy\n'), ((15243, 15270), 'shutil.copy', 'copy', (['generators_file', 'path'], {}), '(generators_file, path)\n', (15247, 15270), False, 'from shutil import copy\n'), ((14512, 14528), 'numpy.sum', 'np.sum', (['rv[i, :]'], {}), '(rv[i, :])\n', (14518, 14528), True, 'import numpy as np\n'), ((14738, 14775), 
'pathlib.Path', 'Path', (["('/UCED/LR/' + selected_scenario)"], {}), "('/UCED/LR/' + selected_scenario)\n", (14742, 14775), False, 'from pathlib import Path\n'), ((14713, 14723), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (14721, 14723), False, 'from pathlib import Path\n')] |
import numpy as np
import math as m
import copy
from array import array
import matplotlib.pyplot as plot
from sklearn.linear_model import Ridge
#################################################################################################################################
# Implementatio of DMP #
############################################
class DMP(object):
    """Dynamic Movement Primitive (DMP) trained on a tan(t) demonstration.

    A demonstration trajectory y = tan(t) is sampled in ``__init__``.
    ``duplicate_for_tan`` fits weights with ridge regression so that the
    forcing term reproduces the demonstration, and ``reproduction_for_tan``
    integrates the learned system forward.

    Parameters
    ----------
    w : array-like
        Initial weight matrix (replaced by ``duplicate_for_tan``).
    pastor_mod : bool
        Flag for the Pastor et al. modification; stored but never read here.
    """
    def __init__(self, w, pastor_mod = False):
        ####################################################
        #                 Data Collection                  #
        ####################################################
        self.pastor_mod = pastor_mod
        # Demonstration parameters.
        self.x0 = 0      # initial position
        self.goal = 20   # goal position
        self.step = 0.1  # sampling / integration step
        ####################################################
        #          Tan function implementation             #
        ####################################################
        # Time stamps of each sample and the tan demonstration itself.
        self.x_asis_of_tan = np.arange(self.x0, self.goal, self.step)
        self.y_asis_of_tan = np.tan(self.x_asis_of_tan)  # position samples
        # Spring/damper gains; d = 2*sqrt(k) is the critically damped choice.
        self.k = 100
        self.d = 2.0 * np.sqrt(self.k)
        self.w = w
        # Phase decay rate: the canonical system converges toward (but never
        # exactly reaches) zero.
        self.start = self.d / 3
        # Obstacle-avoidance gains (used only when avoidance is enabled).
        self.l = 1000.0
        self.b = 20.0 / np.pi

    def spring_damping_for_tan(self):
        """Spring-damper (transformation-system) term over the demonstration.

        Requires ``self.s`` (the current phase value) to be set first, e.g.
        by ``reproduction_for_tan`` or ``duplicate_for_tan``.
        """
        return self.k * (self.goal - self.y_asis_of_tan) - self.k * (self.goal - self.x0) * self.s + self.k

    def converges_for_tan(self):
        """Canonical system: exponentially decaying phase, one value per sample."""
        phases = np.exp(-self.start * np.linspace(0, 1, len(self.x_asis_of_tan)))
        return phases

    def duplicate_for_tan(self):
        """Fit the weight matrix by ridge regression against the demonstration.

        Builds finite-difference velocity/acceleration estimates of the
        demonstration, evaluates the basis features at every phase value and
        solves for ``self.w`` (also returned).
        """
        # Finite-difference velocity (row 0 padded with zeros).
        original_matrix_1 = np.vstack((np.zeros([1, (self.goal*10)], dtype = int), (self.y_asis_of_tan / self.step)))
        # NOTE(review): original_matrix_2 is computed but never used.
        original_matrix_2 = np.vstack((np.zeros([1, self.goal*10], dtype = int), original_matrix_1 / self.step))
        F = self.step * self.step * original_matrix_1 - self.d * (self.k * (original_matrix_1 ) - self.step * original_matrix_1)
        temp = np.zeros([200, (self.goal*10)], dtype = int)
        temp[:F.shape[0],:F.shape[1]] = F
        # One feature row per phase value; _features_for_tan reads self.s,
        # which the comprehension target deliberately assigns.
        design = np.array([self._features_for_tan() for self.s in self.converges_for_tan()])
        lr = Ridge(alpha=1.0, fit_intercept=False)
        lr.fit(design, temp)
        self.w = lr.coef_
        return self.w

    def shape_path_for_tan(self, scale=False):
        """Forcing term: learned weights applied to the current basis features."""
        f = np.dot(self.w, self._features_for_tan())
        return f

    def reproduction_for_tan(self, o = None, shape = True, avoidance=False, verbose=0):
        """Integrate the learned system forward.

        Returns a tuple ``(demonstration, state, stacked_history)`` as plain
        numpy arrays.  *o* is an optional obstacle position, only used when
        *avoidance* is True.
        """
        # Work on copies so the stored demonstration is never mutated.
        x = copy.copy(self.y_asis_of_tan)
        temp_matrix_of_x1 = copy.copy(x)
        temp_matrix_of_x2 = copy.copy(x)
        original_matrix_1 = [copy.copy(temp_matrix_of_x1)]
        original_matrix_2 = [copy.copy(temp_matrix_of_x2)]
        t = 0.1 * self.step
        ti = 0
        S = self.converges_for_tan()
        while t < self.step:
            t += self.step
            ti += 1
            self.s = S[ti]
            # Euler integration of position and velocity.
            x += self.step * temp_matrix_of_x1
            temp_matrix_of_x1 += self.step * temp_matrix_of_x2
            sd = self.spring_damping_for_tan()
            # Weighted shape term based on the learned movement.
            f = self.shape_path_for_tan() if shape else 0.
            # BUG FIX: was `self.step.obstacle_for_tan(o, x, temp_matrix_of_x1)`
            # -- self.step is a float (AttributeError) and obstacle_for_tan
            # takes only (o, velocity).
            C = self.obstacle_for_tan(o, temp_matrix_of_x1) if avoidance else 0.0
            # Record the running state into the history buffers.
            if ti % self.step > 0:
                temp_matrix_of_x1 = np.append(copy.copy(x),copy.copy(self.y_asis_of_tan))
                original_matrix_1 = np.append(copy.copy(self.y_asis_of_tan),copy.copy(temp_matrix_of_x1))
                original_matrix_2 = np.append(copy.copy(self.y_asis_of_tan),copy.copy(temp_matrix_of_x2))
        # Return plain arrays regardless of the internal container types.
        return np.array(self.y_asis_of_tan), np.array(x), np.array(original_matrix_1)

    def obstacle_for_tan(self, o, original_matrix_1):
        """Steering term pushing the trajectory away from obstacle *o*.

        NOTE(review): R is a fixed 2x2 rotation, so ``R.dot`` expects 2-D
        velocity vectors; with this class's 1-D 200-sample state the dot
        product will not broadcast -- confirm the intended inputs before
        enabling avoidance.
        """
        if self.y_asis_of_tan.ndim == 1:
            self.y_asis_of_tan = self.y_asis_of_tan[np.newaxis, np.newaxis, :]
        if original_matrix_1.ndim == 1:
            original_matrix_1 = original_matrix_1[np.newaxis, np.newaxis, :]
        C = np.zeros_like(self.y_asis_of_tan)
        R = np.array([[np.cos(np.pi / 2.0), -np.tan(np.pi / 2.0)],
                      [np.tan(np.pi / 2.0), np.cos(np.pi / 2.0)]])
        # BUG FIX: xrange is Python 2 only; use range on Python 3.
        for i in range(self.y_asis_of_tan.shape[0]):
            for j in range(self.y_asis_of_tan.shape[1]):
                obstacle_diff = o - self.y_asis_of_tan[i, j]
                # 1e-10 guards against division by zero for tiny norms.
                theta = (np.arccos(obstacle_diff.dot(original_matrix_1[i, j]) / (np.linalg.norm(obstacle_diff) * np.linalg.norm(original_matrix_1[i, j]) + 1e-10)))
                C[i, j] = (self.l * R.dot(original_matrix_1[i, j]) * theta * np.exp(-self.b * theta))
        return np.squeeze(C)

    def _features_for_tan(self):
        """Normalized Gaussian-like basis activations at phase ``self.s``."""
        c = self.converges_for_tan()
        # Basis widths from the discrete phase differences (last one repeated).
        h = np.diff(c)
        h = np.hstack((h, [h[-1]]))
        phi = np.exp(-h * (self.s - c) ** 2)
        return self.s * phi / phi.sum()
def main():
    """Learn a DMP from the built-in tan(t) demonstration and plot it."""
    #########################################################################
    # Figure decoration: title, axis labels and grid.
    plot.title('Demonstration')
    plot.xlabel('Time represented as t')
    plot.ylabel('Amplitude - tan(time)')
    plot.grid(True, which='both')
    #########################################################################
    # Train the DMP weights on its internal demonstration.
    initial_weights = [None]
    dmp = DMP(initial_weights, True)
    dmp.w = dmp.duplicate_for_tan()
    demo, reproduced, history = dmp.reproduction_for_tan(dmp)
    # Demonstration curve (green baseline).
    _ = np.tan(demo)
    plot.plot(dmp.x_asis_of_tan, demo)
    plot.axhline(y=0, color='green')
    # Reproduced curve (red baseline).
    _ = np.tan(reproduced)
    plot.plot(dmp.x_asis_of_tan, reproduced)
    plot.axhline(y=0, color='red')
    plot.axhline(y=0, color='purple')
    plot.show()
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.exp",
"numpy.linalg.norm",
"numpy.zeros_like",
"numpy.tan",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"numpy.hstack",
"numpy.cos",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gr... | [((6894, 6921), 'matplotlib.pyplot.title', 'plot.title', (['"""Demonstration"""'], {}), "('Demonstration')\n", (6904, 6921), True, 'import matplotlib.pyplot as plot\n'), ((6968, 7004), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Time represented as t"""'], {}), "('Time represented as t')\n", (6979, 7004), True, 'import matplotlib.pyplot as plot\n'), ((7056, 7092), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Amplitude - tan(time)"""'], {}), "('Amplitude - tan(time)')\n", (7067, 7092), True, 'import matplotlib.pyplot as plot\n'), ((7098, 7127), 'matplotlib.pyplot.grid', 'plot.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (7107, 7127), True, 'import matplotlib.pyplot as plot\n'), ((7378, 7392), 'numpy.tan', 'np.tan', (['array1'], {}), '(array1)\n', (7384, 7392), True, 'import numpy as np\n'), ((7397, 7433), 'matplotlib.pyplot.plot', 'plot.plot', (['dmp.x_asis_of_tan', 'array1'], {}), '(dmp.x_asis_of_tan, array1)\n', (7406, 7433), True, 'import matplotlib.pyplot as plot\n'), ((7437, 7469), 'matplotlib.pyplot.axhline', 'plot.axhline', ([], {'y': '(0)', 'color': '"""green"""'}), "(y=0, color='green')\n", (7449, 7469), True, 'import matplotlib.pyplot as plot\n'), ((7486, 7500), 'numpy.tan', 'np.tan', (['array2'], {}), '(array2)\n', (7492, 7500), True, 'import numpy as np\n'), ((7505, 7541), 'matplotlib.pyplot.plot', 'plot.plot', (['dmp.x_asis_of_tan', 'array2'], {}), '(dmp.x_asis_of_tan, array2)\n', (7514, 7541), True, 'import matplotlib.pyplot as plot\n'), ((7545, 7575), 'matplotlib.pyplot.axhline', 'plot.axhline', ([], {'y': '(0)', 'color': '"""red"""'}), "(y=0, color='red')\n", (7557, 7575), True, 'import matplotlib.pyplot as plot\n'), ((7644, 7677), 'matplotlib.pyplot.axhline', 'plot.axhline', ([], {'y': '(0)', 'color': '"""purple"""'}), "(y=0, color='purple')\n", (7656, 7677), True, 'import matplotlib.pyplot as plot\n'), ((7682, 7693), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (7691, 7693), 
True, 'import matplotlib.pyplot as plot\n'), ((1179, 1219), 'numpy.arange', 'np.arange', (['self.x0', 'self.goal', 'self.step'], {}), '(self.x0, self.goal, self.step)\n', (1188, 1219), True, 'import numpy as np\n'), ((1443, 1469), 'numpy.tan', 'np.tan', (['self.x_asis_of_tan'], {}), '(self.x_asis_of_tan)\n', (1449, 1469), True, 'import numpy as np\n'), ((3035, 3077), 'numpy.zeros', 'np.zeros', (['[200, self.goal * 10]'], {'dtype': 'int'}), '([200, self.goal * 10], dtype=int)\n', (3043, 3077), True, 'import numpy as np\n'), ((3268, 3305), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(1.0)', 'fit_intercept': '(False)'}), '(alpha=1.0, fit_intercept=False)\n', (3273, 3305), False, 'from sklearn.linear_model import Ridge\n'), ((3962, 3991), 'copy.copy', 'copy.copy', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (3971, 3991), False, 'import copy\n'), ((4020, 4032), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (4029, 4032), False, 'import copy\n'), ((4061, 4073), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (4070, 4073), False, 'import copy\n'), ((5740, 5773), 'numpy.zeros_like', 'np.zeros_like', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (5753, 5773), True, 'import numpy as np\n'), ((6367, 6380), 'numpy.squeeze', 'np.squeeze', (['C'], {}), '(C)\n', (6377, 6380), True, 'import numpy as np\n'), ((6624, 6634), 'numpy.diff', 'np.diff', (['c'], {}), '(c)\n', (6631, 6634), True, 'import numpy as np\n'), ((6647, 6670), 'numpy.hstack', 'np.hstack', (['(h, [h[-1]])'], {}), '((h, [h[-1]]))\n', (6656, 6670), True, 'import numpy as np\n'), ((6685, 6715), 'numpy.exp', 'np.exp', (['(-h * (self.s - c) ** 2)'], {}), '(-h * (self.s - c) ** 2)\n', (6691, 6715), True, 'import numpy as np\n'), ((1554, 1569), 'numpy.sqrt', 'np.sqrt', (['self.k'], {}), '(self.k)\n', (1561, 1569), True, 'import numpy as np\n'), ((4104, 4132), 'copy.copy', 'copy.copy', (['temp_matrix_of_x1'], {}), '(temp_matrix_of_x1)\n', (4113, 4132), False, 'import copy\n'), ((4163, 
4191), 'copy.copy', 'copy.copy', (['temp_matrix_of_x2'], {}), '(temp_matrix_of_x2)\n', (4172, 4191), False, 'import copy\n'), ((2686, 2726), 'numpy.zeros', 'np.zeros', (['[1, self.goal * 10]'], {'dtype': 'int'}), '([1, self.goal * 10], dtype=int)\n', (2694, 2726), True, 'import numpy as np\n'), ((2808, 2848), 'numpy.zeros', 'np.zeros', (['[1, self.goal * 10]'], {'dtype': 'int'}), '([1, self.goal * 10], dtype=int)\n', (2816, 2848), True, 'import numpy as np\n'), ((5362, 5390), 'numpy.array', 'np.array', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (5370, 5390), True, 'import numpy as np\n'), ((5392, 5403), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5400, 5403), True, 'import numpy as np\n'), ((5405, 5432), 'numpy.array', 'np.array', (['original_matrix_1'], {}), '(original_matrix_1)\n', (5413, 5432), True, 'import numpy as np\n'), ((5031, 5043), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (5040, 5043), False, 'import copy\n'), ((5044, 5073), 'copy.copy', 'copy.copy', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (5053, 5073), False, 'import copy\n'), ((5121, 5150), 'copy.copy', 'copy.copy', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (5130, 5150), False, 'import copy\n'), ((5151, 5179), 'copy.copy', 'copy.copy', (['temp_matrix_of_x1'], {}), '(temp_matrix_of_x1)\n', (5160, 5179), False, 'import copy\n'), ((5227, 5256), 'copy.copy', 'copy.copy', (['self.y_asis_of_tan'], {}), '(self.y_asis_of_tan)\n', (5236, 5256), False, 'import copy\n'), ((5257, 5285), 'copy.copy', 'copy.copy', (['temp_matrix_of_x2'], {}), '(temp_matrix_of_x2)\n', (5266, 5285), False, 'import copy\n'), ((5797, 5816), 'numpy.cos', 'np.cos', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (5803, 5816), True, 'import numpy as np\n'), ((5866, 5885), 'numpy.tan', 'np.tan', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (5872, 5885), True, 'import numpy as np\n'), ((5888, 5907), 'numpy.cos', 'np.cos', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (5894, 5907), True, 
'import numpy as np\n'), ((6326, 6349), 'numpy.exp', 'np.exp', (['(-self.b * theta)'], {}), '(-self.b * theta)\n', (6332, 6349), True, 'import numpy as np\n'), ((5819, 5838), 'numpy.tan', 'np.tan', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (5825, 5838), True, 'import numpy as np\n'), ((6166, 6195), 'numpy.linalg.norm', 'np.linalg.norm', (['obstacle_diff'], {}), '(obstacle_diff)\n', (6180, 6195), True, 'import numpy as np\n'), ((6198, 6237), 'numpy.linalg.norm', 'np.linalg.norm', (['original_matrix_1[i, j]'], {}), '(original_matrix_1[i, j])\n', (6212, 6237), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from config import Config
import gdown
# Reproducibility: seed NumPy with the project-wide seed.
np.random.seed(Config.random_seed)
# Make sure both target directories exist before any file is written.
Config.original_dataset_file_path.parent.mkdir(parents=True, exist_ok=True)
Config.dataset_path.mkdir(parents=True, exist_ok=True)
# Fetch the raw dataset from Google Drive into the configured location.
gdown.download(
    'https://drive.google.com/uc?id=1q7c0UqDGMzYI67kzbpq9xAfBXHMkR_O_',
    str(Config.original_dataset_file_path)
)
# Load the raw CSV, split 80/20 with the fixed seed, persist both partitions.
raw_df = pd.read_csv(str(Config.original_dataset_file_path), encoding='latin1')
train_part, test_part = train_test_split(raw_df, test_size=0.2, random_state=Config.random_seed)
train_part.to_csv(str(Config.dataset_path / 'train.csv'), index=None)
test_part.to_csv(str(Config.dataset_path / 'test.csv'), index=None)
"config.Config.original_dataset_file_path.parent.mkdir",
"sklearn.model_selection.train_test_split",
"numpy.random.seed",
"config.Config.dataset_path.mkdir"
] | [((133, 167), 'numpy.random.seed', 'np.random.seed', (['Config.random_seed'], {}), '(Config.random_seed)\n', (147, 167), True, 'import numpy as np\n'), ((169, 244), 'config.Config.original_dataset_file_path.parent.mkdir', 'Config.original_dataset_file_path.parent.mkdir', ([], {'parents': '(True)', 'exist_ok': '(True)'}), '(parents=True, exist_ok=True)\n', (215, 244), False, 'from config import Config\n'), ((245, 299), 'config.Config.dataset_path.mkdir', 'Config.dataset_path.mkdir', ([], {'parents': '(True)', 'exist_ok': '(True)'}), '(parents=True, exist_ok=True)\n', (270, 299), False, 'from config import Config\n'), ((534, 602), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)', 'random_state': 'Config.random_seed'}), '(df, test_size=0.2, random_state=Config.random_seed)\n', (550, 602), False, 'from sklearn.model_selection import train_test_split\n')] |
# - *- coding: utf- 8 - *-
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import xlwings as xw
import sys
import numpy as np
from listaColumnas import ObtenerColumnas
from obtenerMes import ObtenerMesesQuiebre, ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor
from listaColumnas import ObtenerColumnasCuanto
from listaColumnas import listaColumnasCalculoCosto
from obtenerCuanto import ObtenerCuantoProducir,ObtenerCantidadLotes, CuandoProducir
from funcionCosto import calcularCosto, calcularCostoLotes, ObtenerGrupo, ObtenerLotePertenencia
from obtenerColumnasAjuste import ObtenerAjuste
from obtenerColumnasAjuste import ObtenerColumnasAjuste
from cantidadLotesDemandaAnual import CalcularCantidadLotesDemandaAnual
#CARGO VARIABLES DE EJEC.
def explode(df, lst_cols, fill_value='', preserve_index=False):
    """Explode list-valued columns of *df* into one row per list element.

    Parameters
    ----------
    df : pd.DataFrame
        Input frame; the columns named in *lst_cols* must hold list-like
        values of equal length within each row.
    lst_cols : str or sequence of str
        Column(s) to explode.  A single column name is accepted and wrapped.
    fill_value : scalar
        Value placed in the exploded columns for rows whose lists are empty.
    preserve_index : bool
        Keep the (repeated) original index instead of resetting it.

    Returns
    -------
    pd.DataFrame
        Frame where each list element occupies its own row, with the
        non-list columns repeated accordingly.
    """
    # make sure `lst_cols` is list-alike
    if (lst_cols is not None
        and len(lst_cols) > 0
        and not isinstance(lst_cols, (list, tuple, np.ndarray, pd.Series))):
        lst_cols = [lst_cols]
    # all columns except `lst_cols`
    idx_cols = df.columns.difference(lst_cols)
    # per-row list lengths (assumed identical across all lst_cols)
    lens = df[lst_cols[0]].str.len()
    # repeat each original index value once per list element
    idx = np.repeat(df.index.values, lens)
    # repeat scalar columns and concatenate the list columns element-wise
    res = (pd.DataFrame({
                col:np.repeat(df[col].values, lens)
                for col in idx_cols},
                index=idx)
             .assign(**{col:np.concatenate(df.loc[lens>0, col].values)
                        for col in lst_cols}))
    # rows whose lists were empty vanished above; re-append them
    if (lens == 0).any():
        # BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is
        # the supported equivalent (keeps `res`'s column order, sort=False
        # by default).
        res = pd.concat([res, df.loc[lens == 0, idx_cols]]).fillna(fill_value)
    # revert the original row order
    res = res.sort_index()
    # reset index if requested
    if not preserve_index:
        res = res.reset_index(drop=True)
    return res
def listaMesesProducir(mesCuando, cuanto):
    """Resolve the month columns to plan for.

    Non-numeric *cuanto* values (strings) are treated as zero before
    delegating to ObtenerColumnasCuanto.
    """
    cantidad = 0 if isinstance(cuanto, str) else int(cuanto)
    return ObtenerColumnasCuanto(mesCuando, cantidad)
def listaColumnasCalculoCostoFn():
    """Thin wrapper exposing the cost-calculation column list from listaColumnas."""
    return listaColumnasCalculoCosto()
dataframeRoot = ""
#EJECUTO LA FUNCION DE CARGA DE EXCEL Y LO ASIGNO A LA VARIABLE dataFrame - PANDAS DATAFRAME
# dataFrame = cargarExcelDataframe(archivoExcel)
def mainProcesoReProcess(dataFrame):
    """Enrich *dataFrame* with planning columns and return the filtered frame.

    Pipeline (all helpers are project-local functions):
      1. Derive break-month ("Quiebre") columns per row.
      2. Drop rows without a break month.
      3. Compute how much / when to produce plus the associated cost columns.
      4. Exclude production lines 40 and 60 and rows with nothing to plan.

    Returns the filtered, *ungrouped* frame (``dataFrameGroupFilter``); the
    grouped frame is computed but currently discarded.
    """
    dataframeRoot = dataFrame.copy()
    listaMeses =ObtenerColumnas()# function that manages the names of the columns to use
    listaAjustes = ObtenerColumnasAjuste()
    # NOTE(review): several lambdas below ignore the row `x` and pass whole
    # columns/the whole frame, so every row receives the same value --
    # confirm this is intended.
    dataFrame.loc[:, 'CantidadLotesDemandaAnual']= dataFrame.apply(lambda x: CalcularCantidadLotesDemandaAnual(dataFrame), axis=1)
    dataFrame.loc[:,'Quiebre'] = dataFrame.apply(lambda x: ObtenerMesesQuiebre((x[listaMeses]),x["A16"],x["12"],x["14"],x["15"],x["19"],x["13"],x["21"]), axis =1)
    dataFrame.loc[:,'QuiebreInt'] = dataFrame.apply(lambda x: ObtenerMesesQuiebreInt(x['Quiebre']), axis =1)
    dataFrame.loc[:,'QuiebreMenor'] = dataFrame.apply(lambda x: ObtenerMesesQuiebreMenor(dataFrame['QuiebreInt']), axis =1)
    dataFrame.loc[:,'CuandoProducir'] = dataFrame.apply(lambda x: ObtenerCuandoProducirMenor(dataFrame['Quiebre']), axis =1)
    # Rows without a break month cannot be planned -- drop them.
    dataFrame= dataFrame.dropna(subset=['QuiebreInt'])
    dataFrame.loc[:,'AjustePlan'] = dataFrame.apply(lambda x: ObtenerAjuste(x['Quiebre'],(x[listaAjustes])),axis=1)
    dataFrame.loc[:,'MesesFuturos'] = dataFrame.apply(lambda x: ObtenerMesesFuturos((x[listaMeses]),x["A16"],x["12"],x["14"],x["15"],x["19"],x["13"],x["21"],x["QuiebreMenor"]), axis =1)
    # Compute "how much" to send to production.
    dataFrame.loc[:,'CuantoPlanificar'] = dataFrame.apply(lambda x: ObtenerCuantoProducir((x[listaMesesProducir(x['QuiebreMenor'],x["MesesFuturos"])]),x["MesesFuturos"], x['AjustePlan'], x['A4'], x['A0']), axis =1)
    dataFrame.loc[:,'FormulaCosto'] = dataFrame.apply(lambda x: calcularCosto(x[listaColumnasCalculoCostoFn()]), axis=1)
    dataFrame.loc[:,'FormulaCostoOptimoEnteroLote'] = dataFrame.apply(lambda x: calcularCostoLotes(x[listaColumnasCalculoCostoFn()],1), axis=1)
    # Grouping by production line and planning moment.
    # dataFrame.to_excel("excelversion1Previa.xlsx", sheet_name="Prueba")
    # Exclude production lines 40 and 60, then keep rows with work to plan.
    dataFrameFiltradoLinea = dataFrame[dataFrame['A16']!=40.0]
    dataFrameFiltradoLinea2 = dataFrameFiltradoLinea[dataFrameFiltradoLinea['A16']!=60.0 ]
    dataFrameGroupFilter = dataFrameFiltradoLinea2.loc[dataFrameFiltradoLinea2['CuantoPlanificar']!=0]
    # dataFrameGroup = dataFrameGroupFilter.groupby(['QuiebreInt','A3','A5'])['CuantoPlanificar'].agg('sum')
    # dataFrameGroupDF = dataFrameGroup.to_frame()
    dataFrameGroupCopy = dataFrameGroupFilter#dataFrameGroupDF
    dataFrameGroupCopy.loc[:,'CantidadLotes'] = dataFrameGroupCopy.apply(lambda x: ObtenerCantidadLotes(x['CuantoPlanificar'],x['A5']), axis=1)
    # dataFrameGroupCopy.loc[:,'CuandoProducir'] = dataFrameGroupCopy.apply(lambda x: CuandoProducir(x['Quiebre']), axis=1)
    dataFrameGroupCopyGrouped = dataFrameGroupCopy.groupby(['QuiebreInt','CuandoProducir','QuiebreMenor', 'AjustePlan','A4','A3','A0','A5','12','FormulaCosto','FormulaCostoOptimoEnteroLote','20','CantidadLotesDemandaAnual']).agg({'CantidadLotes':'sum','CuantoPlanificar':'sum'}).reset_index()
    # NOTE(review): dataFrameIterar / dataFrameGroupCopyGrouped are built but
    # not returned; they feed the commented-out lot-assignment stage below.
    dataFrameIterar = dataFrameGroupCopyGrouped.copy()
    return dataFrameGroupFilter
# grupoLote = validarGrupo(dataFrameIterar, dataframeRoot)
# dataFrameLista = pd.DataFrame(grupoLote)
# dataFrameLista.reset_index()
# columnas = ['Tamanolote','IdLote','MesesIncluidos','CodigoProducto']
# dataFrameLista.columns=columnas
# dfUsar = dataFrameLista[['Tamanolote','IdLote','MesesIncluidos','CodigoProducto']]
# # dfUsar.explode('MesesIncluidos')
# # dfUsar.loc[:,'Explotado'] = dfUsar.apply(lambda x: explotarMeses(x['MesesIncluidos']), axis=1)
# # dfExplodedUsar = explode(dfUsar,'CodigoProducto','-',preserve_index=True)
# dfNuevoExplotado = dfUsar.assign(CodigoProductoExploded=dfUsar.CodigoProducto.str.split(',')).explode('CodigoProductoExploded').reset_index(drop=False)
# dfNuevoExplotado.loc[:,'CuantoPlanificarNuevo'] = dfNuevoExplotado.apply(lambda x: splitColumn(x['CodigoProductoExploded'],1) , axis=1)
# dfNuevoExplotado.loc[:,'CodigoProductoExp'] = dfNuevoExplotado.apply(lambda x: splitColumn(x['CodigoProductoExploded'],0) , axis=1)
# dfNuevoExplotado.loc[:,'QuiebreIntExp'] = dfNuevoExplotado.apply(lambda x: splitColumn(x['CodigoProductoExploded'],2) , axis=1)
# dfNuevoExplotado.loc[:,'MinimoExp'] = dfNuevoExplotado.apply(lambda x: splitColumn(x['CodigoProductoExploded'],3) , axis=1)
# dfNuevoExplotado = dfNuevoExplotado[['Tamanolote','IdLote','CodigoProductoExp','CuantoPlanificarNuevo','QuiebreIntExp','MinimoExp']]
# dfNuevoExplotado.set_index(["CodigoProductoExp"],inplace = True, append = False, drop = True)
# dfIterarCosto = CalcularMenorCosto(dataFrameGroupFilter, dfNuevoExplotado)
# # dfNuevoExplotado.reset_index()
# # print(dfNuevoExplotado)
# # dfNuevo = pd.DataFrame(dataFrameLista['Tamanolote','IdLote',dataFrameLista.MesesIncluidos.str.split(',').tolist(),'CodigoProducto']).stack()
# # dataFrameLista.reset_index()
# # dataFrameLista.columns=columnas
# # dfNuevoExplotado.to_excel("ListaLotes.xlsx",sheet_name="Prueba")
# # dataFrameGroupCopyGrouped.to_excel("AgrupadosPrueba.xlsx",sheet_name="Prueba")
# # # dataFrameIterar['GrupoLote'] = dataFrameIterar.apply(lambda x: validarGrupo(dataFrameIterar), axis=1)
# # bins = pd.cut(dataFrameGroupCopyGrouped['QuiebreInt'], [0, 100, 250, 1500])
# # dataFrameGroupCopyGrouped.rename(columns={'QuiebreInt':'Quiebre','A3':'LineaProducto','A5':'TLote','CuantoPlanificar':'CuantoPlanificar','CantidadLotes':'CantidadLotes'},inplace=True)
# # dataFrameGroupCopyGrouped.loc[:,'LoteDePertenencia'] = dataFrameGroupCopyGrouped.apply(lambda x: ObtenerLotePertenencia(dataFrameGroupCopyGrouped) , axis=1)
# # dataFrameGroupCopyGrouped.to_excel("excelFiltradoGroup.xlsx",sheet_name="PruebaFiltro")
# #EJECUTO LA FUNCION DE CALCULO DE "COSTO"
# dfIterarCosto.to_excel("Fase1Lotes.xlsx",sheet_name="Prueba")
# #EXCEL DE VALIDACION DF
# return dfIterarCosto#dataFrameIterar | [
"cantidadLotesDemandaAnual.CalcularCantidadLotesDemandaAnual",
"warnings.simplefilter",
"obtenerMes.ObtenerMesesQuiebre",
"listaColumnas.ObtenerColumnasCuanto",
"obtenerMes.ObtenerMesesQuiebreMenor",
"obtenerColumnasAjuste.ObtenerColumnasAjuste",
"obtenerColumnasAjuste.ObtenerAjuste",
"obtenerMes.Obte... | [((63, 125), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (84, 125), False, 'import warnings\n'), ((1323, 1355), 'numpy.repeat', 'np.repeat', (['df.index.values', 'lens'], {}), '(df.index.values, lens)\n', (1332, 1355), True, 'import numpy as np\n'), ((2204, 2244), 'listaColumnas.ObtenerColumnasCuanto', 'ObtenerColumnasCuanto', (['mesCuando', 'cuanto'], {}), '(mesCuando, cuanto)\n', (2225, 2244), False, 'from listaColumnas import ObtenerColumnasCuanto\n'), ((2292, 2319), 'listaColumnas.listaColumnasCalculoCosto', 'listaColumnasCalculoCosto', ([], {}), '()\n', (2317, 2319), False, 'from listaColumnas import listaColumnasCalculoCosto\n'), ((2584, 2601), 'listaColumnas.ObtenerColumnas', 'ObtenerColumnas', ([], {}), '()\n', (2599, 2601), False, 'from listaColumnas import ObtenerColumnas\n'), ((2683, 2706), 'obtenerColumnasAjuste.ObtenerColumnasAjuste', 'ObtenerColumnasAjuste', ([], {}), '()\n', (2704, 2706), False, 'from obtenerColumnasAjuste import ObtenerColumnasAjuste\n'), ((2784, 2828), 'cantidadLotesDemandaAnual.CalcularCantidadLotesDemandaAnual', 'CalcularCantidadLotesDemandaAnual', (['dataFrame'], {}), '(dataFrame)\n', (2817, 2828), False, 'from cantidadLotesDemandaAnual import CalcularCantidadLotesDemandaAnual\n'), ((2897, 3000), 'obtenerMes.ObtenerMesesQuiebre', 'ObtenerMesesQuiebre', (['x[listaMeses]', "x['A16']", "x['12']", "x['14']", "x['15']", "x['19']", "x['13']", "x['21']"], {}), "(x[listaMeses], x['A16'], x['12'], x['14'], x['15'], x[\n '19'], x['13'], x['21'])\n", (2916, 3000), False, 'from obtenerMes import ObtenerMesesQuiebre, ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor\n'), ((3064, 3100), 'obtenerMes.ObtenerMesesQuiebreInt', 'ObtenerMesesQuiebreInt', (["x['Quiebre']"], {}), "(x['Quiebre'])\n", (3086, 3100), False, 'from obtenerMes import ObtenerMesesQuiebre, 
ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor\n'), ((3180, 3229), 'obtenerMes.ObtenerMesesQuiebreMenor', 'ObtenerMesesQuiebreMenor', (["dataFrame['QuiebreInt']"], {}), "(dataFrame['QuiebreInt'])\n", (3204, 3229), False, 'from obtenerMes import ObtenerMesesQuiebre, ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor\n'), ((3306, 3354), 'obtenerMes.ObtenerCuandoProducirMenor', 'ObtenerCuandoProducirMenor', (["dataFrame['Quiebre']"], {}), "(dataFrame['Quiebre'])\n", (3332, 3354), False, 'from obtenerMes import ObtenerMesesQuiebre, ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor\n'), ((3483, 3527), 'obtenerColumnasAjuste.ObtenerAjuste', 'ObtenerAjuste', (["x['Quiebre']", 'x[listaAjustes]'], {}), "(x['Quiebre'], x[listaAjustes])\n", (3496, 3527), False, 'from obtenerColumnasAjuste import ObtenerAjuste\n'), ((3601, 3723), 'obtenerMes.ObtenerMesesFuturos', 'ObtenerMesesFuturos', (['x[listaMeses]', "x['A16']", "x['12']", "x['14']", "x['15']", "x['19']", "x['13']", "x['21']", "x['QuiebreMenor']"], {}), "(x[listaMeses], x['A16'], x['12'], x['14'], x['15'], x[\n '19'], x['13'], x['21'], x['QuiebreMenor'])\n", (3620, 3723), False, 'from obtenerMes import ObtenerMesesQuiebre, ObtenerMesesFuturos, ObtenerMesesQuiebreInt, ObtenerMesesQuiebreMenor, ObtenerCuandoProducirMenor\n'), ((4992, 5044), 'obtenerCuanto.ObtenerCantidadLotes', 'ObtenerCantidadLotes', (["x['CuantoPlanificar']", "x['A5']"], {}), "(x['CuantoPlanificar'], x['A5'])\n", (5012, 5044), False, 'from obtenerCuanto import ObtenerCuantoProducir, ObtenerCantidadLotes, CuandoProducir\n'), ((1554, 1598), 'numpy.concatenate', 'np.concatenate', (['df.loc[lens > 0, col].values'], {}), '(df.loc[lens > 0, col].values)\n', (1568, 1598), True, 'import numpy as np\n'), ((1429, 1460), 'numpy.repeat', 'np.repeat', (['df[col].values', 'lens'], {}), '(df[col].values, lens)\n', (1438, 1460), 
True, 'import numpy as np\n')] |
from .OBJET import OBJET
import numpy as np
class Objet(object):
    """Pythonic facade over the native OBJET renderer.

    Holds a reference to the underlying ``OBJET`` instance and exposes its
    camera/object/render operations under snake_case names.  Image buffers
    returned by the renderer are converted to NumPy arrays, reshaped to the
    configured width/height, and flipped along axis 0.
    """

    def __init__(self, path_to_meta_json, width=500, height=500):
        self._OBJET = OBJET(path_to_meta_json, width, height)
        self.width = width
        self.height = height

    def draw(self):
        """Render the current scene."""
        self._OBJET.Draw()

    def get_image(self):
        """Return the rendered image as a (height, width, channels) array."""
        flat = np.array(self._OBJET.GetImage())
        shaped = flat.reshape([self.height, self.width, -1])
        return np.flip(shaped, axis=0)

    def get_depth_map(self):
        """Return the rendered depth map as a (height, width) array."""
        flat = np.array(self._OBJET.GetDepthMap())
        shaped = flat.reshape([self.height, self.width])
        return np.flip(shaped, axis=0)

    def to_image(self, path_to_image):
        """Write the rendered image to ``path_to_image``."""
        self._OBJET.ToImage(path_to_image)

    def set_camera(self, position, target):
        """Place the camera at ``position`` looking at ``target``."""
        self._OBJET.SetCamera(position, target)

    def set_object_position(self, object_name, position):
        """Move the named object to ``position``."""
        self._OBJET.SetObjectPosition(object_name, position)

    def set_object_y_rotation(self, object_name, y_rotation):
        """Set the named object's rotation about the Y axis."""
        self._OBJET.SetObjectYRotation(object_name, y_rotation)

    def set_object_scale(self, object_name, scale):
        """Set the named object's scale."""
        self._OBJET.SetObjectScale(object_name, scale)
| [
"numpy.flip"
] | [((466, 486), 'numpy.flip', 'np.flip', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (473, 486), True, 'import numpy as np\n'), ((637, 657), 'numpy.flip', 'np.flip', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (644, 657), True, 'import numpy as np\n')] |
"""
Potentials
==========
Potential energy functions.
.. autosummary::
:nosignatures:
LennardJones
.. autoclass:: LennardJones
:members:
"""
import numpy as np
class LennardJones:
    r"""Lennard-Jones pair potential.

    The prototypical pair potential consisting of a steep repulsive core
    and an attractive tail describing dispersion forces. The functional form
    of the potential is:

    .. math::

        u(r) = \begin{cases}
                4 \varepsilon\left[\left(\dfrac{\sigma}{r}\right)^{12}
                - \left(\dfrac{\sigma}{r}\right)^6 \right], & r \le r_{\rm cut} \\
                0, & r > r_{\rm cut}
               \end{cases}

    where :math:`r` is the distance between the centers of two particles,
    :math:`\varepsilon` sets the strength of the attraction, and
    :math:`\sigma` sets the length scale of the interaction. (Typically,
    :math:`\sigma` can be regarded as a particle diameter.) The potential
    is truncated to zero at :math:`r_{\rm cut}`, and good accuracy for
    thermodynamic properties is usually achieved when :math:`r_{\rm cut} \ge 3\sigma`.

    In molecular dynamics (MD) simulations, the forces on the particles are what
    is actually required. Forces are computed from the derivative of :math:`u(r)`
    with the truncation scheme:

    .. math::

        \mathbf{F}(\mathbf{r}) = \begin{cases}
            f(r) \mathbf{r}/r, & r \le r_{\rm cut} \\
            0, & r > r_{\rm cut}
            \end{cases}

    where :math:`f(r) = -\partial u/\partial r`. The force is a vector with
    direction. If :math:`\mathbf{r}` is the vector pointing from a particle *i*
    to a particle *j*, then the force on *j* is :math:`\mathbf{F}` and the force
    on *i* is :math:`-\mathbf{F}`.

    This force truncation implies that the energy should be shifted to zero at
    :math:`r_{\rm cut}` by subtracting :math:`u(r_{\rm cut})`. However, this
    distinction is often not made in MD simulations unless thermodynamic properties
    based on the energy are being computed. Caution must be taken if MD results
    with this scheme are compared to Monte Carlo (MC) results, which are sensitive
    to whether :math:`u` is shifted or not.

    If the Lennard-Jones potential is truncated and shifted at its minimum
    :math:`r_{\rm cut} = 2^{1/6}\sigma`, the interactions are purely repulsive.
    (The forces are always positive, :math:`|\mathbf{F}| \ge 0`.)
    This special case is often used as an approximation of nearly hard spheres,
    where it is referred to as the Weeks--Chandler--Andersen potential based
    on its role in their perturbation theory of the liquid state.

    Parameters
    ----------
    epsilon : float
        Interaction energy.
    sigma : float
        Interaction length.
    rcut : float
        Truncation distance.
    shift : bool
        If ``True``, shift the potential to zero at ``rcut``.
    """

    def __init__(self, epsilon: float, sigma: float, rcut: float, shift: bool = False) -> None:
        self.epsilon = epsilon  # interaction energy scale
        self.sigma = sigma  # interaction length scale
        self.rcut = rcut  # truncation distance
        self.shift = shift  # if True, u(r) is shifted to zero at rcut

    def compute(self, state):
        r"""Compute energy and forces on particles.

        The pair potential is evaluated using a direct calculation between
        all :math:`N^2` pairs in the ``state``. Half of the potential energy is
        assigned to each particle in the pair.

        Hint: the pair calculation can be efficiently implemented using NumPy arrays::

            for i in range(state.N-1):
                # get dr with all j particles ahead of myself (count each pair once)
                drij = state.positions[i+1:]-state.positions[i]
                # do the calculations to get uij and fij from drij
                # ...
                # accumulate the result
                u[i] += np.sum(uij)
                u[i+1:] += uij
                f[i] -= np.sum(fij,axis=0)
                f[i+1:] += fij

        Parameters
        ----------
        state : :class:`~learnmolsim.state.State`
            Simulation state.

        Returns
        -------
        :class:`numpy.ndarray`
            Potential energy assigned to each particle.
        :class:`numpy.ndarray`
            Force on each particle.
        """
        # Intentionally unimplemented: the docstring above specifies the contract.
        raise NotImplementedError()

    def energy_force(self, rsq):
        r"""Evaluate potential energy and force magnitude.

        Efficiently implements the functional form of the potential. Accepting
        :math:`r^2` rather than *r* means no square root needs to be evaluated.
        The potential energy :math:`u(r)` and the force divided by *r*,
        i.e., :math:`f(r)/r`, are evaluated directly using :math:`r^2`. Factoring
        the :math:`1/r` here means that the force vector can be applied without
        normalization:

        .. math::

            \mathbf{F}(\mathbf{r}) = \frac{f(r)}{r} \mathbf{r}

        If any ``rsq`` is 0, the energy and force is :py:obj:`numpy.inf`.
        The return type will be a scalar or array depending on the type of ``rsq``.

        Parameters
        ----------
        rsq : float or array_like
            Squared pair distance.

        Returns
        -------
        float or :class:`numpy.ndarray`
            Energy at the pair distances.
        float or :class:`numpy.ndarray`
            Force divided by pair distance, :math:`f(r)/r`, at the pair distances.
        """
        # Intentionally unimplemented: the docstring above specifies the contract.
        raise NotImplementedError()

    @classmethod
    def _zeros(cls, x):
        """Ensure a 1d NumPy array of zeros to match coordinates.

        This function can be used to ensure coordinates are consistently treated
        as a 1d array. If the coordinate ``x`` is a scalar (a float), it is
        promoted to a one-element NumPy array. If ``x`` is a 1d array, nothing is
        done. Higher dimensional arrays are rejected.

        The shape of the returned array matches the shape of ``x`` as an array.
        A flag is returned to indicate if ``x`` was originally a scalar. This can
        be used to downconvert to the same input type::

            x,f,s = self._zeros(x)
            # do something
            if s:
                f = f.item()
            return f

        Parameters
        ----------
        x : float or array_like
            Coordinates to make an array of zeros for.

        Returns
        -------
        :class:`numpy.ndarray`
            Coordinates promoted to a NumPy array.
        :class:`numpy.ndarray`
            Empty array matching the returned coordinates.
        bool
            True if coordinates were originally a scalar quantity.

        Raises
        ------
        TypeError
            If the coordinates are not castable to a 1d array.
        """
        # Remember whether the caller passed a scalar so results can later be
        # downconverted to match the input type.
        s = np.isscalar(x)
        # ndmin=1 promotes a scalar to a one-element array; dtype is fixed to
        # float64 so the zeros array below has a predictable type.
        x = np.array(x, dtype=np.float64, ndmin=1)
        if len(x.shape) != 1:
            raise TypeError('Coordinate must be scalar or 1D array.')
        return x,np.zeros_like(x),s
| [
"numpy.isscalar",
"numpy.zeros_like",
"numpy.array"
] | [((6726, 6740), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (6737, 6740), True, 'import numpy as np\n'), ((6753, 6791), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64', 'ndmin': '(1)'}), '(x, dtype=np.float64, ndmin=1)\n', (6761, 6791), True, 'import numpy as np\n'), ((6909, 6925), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6922, 6925), True, 'import numpy as np\n')] |
import logging
from bitstring import BitArray, ConstBitStream
from numpy import flip
from bitglitter.read.coloranalysis import colorSnap, returnDistance
from bitglitter.read.decoderassets import scanBlock
from bitglitter.palettes.paletteutilities import paletteGrabber, ColorsToValue
def frameLockOn(image, blockHeightOverride, blockWidthOverride, frameWidth, frameHeight):
    '''Locks onto the frame geometry (block grid and pixel size).

    If override values are present, those are verified directly against the
    calibrators.  Otherwise, the block dimensions are extracted from the X and
    Y calibrators on the initial frame via pixelCreep().

    Returns (blockHeight, blockWidth, pixelWidth) on success, or
    (False, False, False) if any verification step fails.
    '''
    logging.debug('Locking onto frame...')
    initializerPaletteA = paletteGrabber('1')
    initializerPaletteB = paletteGrabber('11')
    initializerPaletteADict = ColorsToValue(initializerPaletteA)
    initializerPaletteBDict = ColorsToValue(initializerPaletteB)
    combinedColors = initializerPaletteA.colorSet + initializerPaletteB.colorSet

    if blockHeightOverride and blockWidthOverride:  # Jump straight to verification
        logging.info("blockHeightOverride and blockWidthOverride parameters detected. Attempting to lock with these"
                     " values...")
        # Estimate pixel width as the mean of the two per-axis estimates.
        pixelWidth = ((frameWidth / blockWidthOverride) + (frameHeight / blockHeightOverride)) / 2
        if not verifyBlocksX(image, pixelWidth, blockWidthOverride, combinedColors, initializerPaletteADict,
                             initializerPaletteBDict, override=True):
            return False, False, False
        if not verifyBlocksY(image, pixelWidth, blockHeightOverride, combinedColors, initializerPaletteADict,
                             initializerPaletteBDict, override=True):
            return False, False, False
        blockWidth, blockHeight = blockWidthOverride, blockHeightOverride
    else:
        # First checkpoint: pixel 0,0 must be within color distance 100 of black (0,0,0).
        if returnDistance(image[0, 0], (0, 0, 0)) > 100:
            logging.warning('Frame lock fail! Initial pixel value exceeds maximum color distance allowed for a '
                            'reliable lock.')
            return False, False, False
        # Read the horizontal calibrator, then confirm it decodes consistently.
        pixelWidth, blockDimensionGuess = pixelCreep(image, initializerPaletteA, initializerPaletteB, combinedColors,
                                                      initializerPaletteADict, initializerPaletteBDict, frameWidth,
                                                      frameHeight, width=True)
        if not verifyBlocksX(image, pixelWidth, blockDimensionGuess, combinedColors, initializerPaletteADict,
                             initializerPaletteBDict):
            return False, False, False
        blockWidth = blockDimensionGuess
        # Same procedure for the vertical calibrator.
        pixelWidth, blockDimensionGuess = pixelCreep(image, initializerPaletteA, initializerPaletteB, combinedColors,
                                                      initializerPaletteADict, initializerPaletteBDict, frameWidth,
                                                      frameHeight, width=False)
        if not verifyBlocksY(image, pixelWidth, blockDimensionGuess, combinedColors, initializerPaletteADict,
                             initializerPaletteBDict):
            return False, False, False
        blockHeight = blockDimensionGuess

    logging.debug(f'Lockon successful.\npixelWidth: {pixelWidth}\nblockHeight: {blockHeight}\nblockWidth: {blockWidth}')
    return blockHeight, blockWidth, pixelWidth
def verifyBlocksX(image, pixelWidth, blockWidthEstimate, combinedColors, initializerPaletteADict,
                  initializerPaletteBDict, override=False):
    '''Used within frameLockOn().  Re-reads the X-axis calibrator and checks
    that it decodes to ``blockWidthEstimate``.  Returns True on success,
    False on any mismatch.'''
    calibratorBitsX = BitArray()
    # Scan the 17 calibrator blocks along the top row; they alternate between
    # the two initializer palettes.
    for xBlock in range(17):
        snappedValue = colorSnap(scanBlock(image, pixelWidth, xBlock, 0), combinedColors)
        if xBlock % 2 == 0:
            calibratorBitsX.append(initializerPaletteADict.getValue(snappedValue))
        else:
            calibratorBitsX.append(initializerPaletteBDict.getValue(snappedValue))
    calibratorBitsX.reverse()
    readCalibratorX = ConstBitStream(calibratorBitsX)
    if readCalibratorX.read('uint:16') != blockWidthEstimate:
        if override:
            logging.warning('blockWidthOverride is not equal to what was read on calibrator. Aborting...')
        else:
            logging.warning('blockWidth verification does not match initial read. This could be the result of \n'
                            'sufficiently distorted frames. Aborting...')
        return False
    if readCalibratorX.read('bool'):  # The 0,0 corner bit must be 0.
        logging.warning('0,0 block unexpected value. Aborting...')
        return False
    if override:
        logging.info('blockWidthOverride successfully verified.')
    else:
        logging.debug('blockWidth successfully verified.')
    return True
def verifyBlocksY(image, pixelWidth, blockHeightEstimate, combinedColors, initializerPaletteADict,
                  initializerPaletteBDict, override=False):
    '''Used within frameLockOn().  Re-reads the Y-axis calibrator and checks
    that it decodes to ``blockHeightEstimate``.  Returns True on success,
    False on any mismatch.'''
    calibratorBitsY = BitArray()
    # Scan the 17 calibrator blocks down the left column; they alternate
    # between the two initializer palettes.
    for yBlock in range(17):
        snappedValue = colorSnap(scanBlock(image, pixelWidth, 0, yBlock), combinedColors)
        if yBlock % 2 == 0:
            calibratorBitsY.append(initializerPaletteADict.getValue(snappedValue))
        else:
            calibratorBitsY.append(initializerPaletteBDict.getValue(snappedValue))
    calibratorBitsY.reverse()
    readCalibratorY = ConstBitStream(calibratorBitsY)
    if readCalibratorY.read('uint:16') != blockHeightEstimate:
        if override:
            logging.warning('blockHeightOverride is not equal to what was read on calibrator. Aborting...')
        else:
            logging.warning('blockHeight verification does not match initial read. This could be the result of \n'
                            'sufficiently distorted frames. Aborting...')
        return False
    if readCalibratorY.read('bool'):  # The 0,0 corner bit must be 0.
        logging.warning('0,0 block unexpected value. Aborting...')
        return False
    if override:
        logging.info('blockHeightOverride successfully verified.')
    else:
        logging.debug('blockHeight successfully verified.')
    return True
def pixelCreep(image, initializerPaletteA, initializerPaletteB, combinedColors, initializerPaletteADict,
               initializerPaletteBDict, imageWidth, imageHeight, width):
    '''This function moves across the calibrator on the top and left of the frame one pixel at a time, and after
    'snapping' the colors, decodes an unsigned integer from each axis, which if read correctly, is the block width and
    block height of the frame.

    ``width=True`` scans the horizontal calibrator, ``width=False`` the vertical
    one.  Returns (pixelWidth, blockDimensionGuess) for the scanned axis.
    '''
    calibratorBits = BitArray()
    snappedValues = []
    activeColor = (0, 0, 0)  # initial reference color: black
    activeDistance = 0
    pixelOnDimension = 1
    paletteAIsActive = False
    # NOTE(review): these two assignments are overwritten at the top of the scan
    # loop below -- and with the axis order swapped relative to the loop's
    # version.  They appear to be dead code; confirm before removing.
    if width == True:
        axisOnImage = pixelOnDimension, 0
        axisAnalyzed = imageWidth
    else:
        axisOnImage = 0, pixelOnDimension
        axisAnalyzed = imageHeight
    # Collect 16 color transitions; together they decode to a uint16.
    for value in range(16):
        # Creep pixel-by-pixel until the color changes to a new palette entry.
        while True:
            if width == True:
                axisOnImage = 0, pixelOnDimension
                axisAnalyzed = imageWidth
            else:
                axisOnImage = pixelOnDimension, 0
                axisAnalyzed = imageHeight
            newPaletteLocked = False
            # flip() reverses the sampled pixel's channel order (presumably
            # BGR <-> RGB) -- TODO confirm against how 'image' is decoded.
            activeScan = flip(image[axisOnImage])
            activeDistance = returnDistance(activeScan, activeColor)
            pixelOnDimension += 1
            if activeDistance < 100: # Iterating over same colored blocks, until distance exceeds 100.
                continue
            else: # We are determining if we are within < 100 dist of a new color, or are in fuzzy space.
                # Calibrator colors alternate between the two initializer
                # palettes, so the next color must come from the other palette.
                if paletteAIsActive == False:
                    activePalette = initializerPaletteB.colorSet
                else:
                    activePalette = initializerPaletteA.colorSet
                for color in activePalette:
                    activeDistance = returnDistance(activeScan, color)
                    if activeDistance < 100:
                        paletteAIsActive = not paletteAIsActive
                        newPaletteLocked = True
                        break
                    else:
                        continue
            if newPaletteLocked == True:
                break
        activeColor = colorSnap(activeScan, combinedColors)
        snappedValues.append(activeColor)
        # NOTE(review): palette A decodes the odd positions here, while
        # verifyBlocksX/Y use palette A for even block indices -- confirm the
        # asymmetry is intended (the black corner block may account for the offset).
        if value % 2 != 0:
            calibratorBits.append(initializerPaletteADict.getValue(activeColor))
        else:
            calibratorBits.append(initializerPaletteBDict.getValue(activeColor))
        activeDistance = 0
    # Reverse to match the writer's bit order (mirrors verifyBlocksX/Y).
    calibratorBits.reverse()
    readCalibratorBits = ConstBitStream(calibratorBits)
    blockDimensionGuess = readCalibratorBits.read('uint:16')
    pixelWidth = axisAnalyzed / blockDimensionGuess
return pixelWidth, blockDimensionGuess | [
"bitglitter.read.decoderassets.scanBlock",
"logging.debug",
"bitstring.ConstBitStream",
"numpy.flip",
"logging.warning",
"bitstring.BitArray",
"bitglitter.palettes.paletteutilities.paletteGrabber",
"bitglitter.palettes.paletteutilities.ColorsToValue",
"logging.info",
"bitglitter.read.coloranalysis... | [((604, 642), 'logging.debug', 'logging.debug', (['"""Locking onto frame..."""'], {}), "('Locking onto frame...')\n", (617, 642), False, 'import logging\n'), ((669, 688), 'bitglitter.palettes.paletteutilities.paletteGrabber', 'paletteGrabber', (['"""1"""'], {}), "('1')\n", (683, 688), False, 'from bitglitter.palettes.paletteutilities import paletteGrabber, ColorsToValue\n'), ((715, 735), 'bitglitter.palettes.paletteutilities.paletteGrabber', 'paletteGrabber', (['"""11"""'], {}), "('11')\n", (729, 735), False, 'from bitglitter.palettes.paletteutilities import paletteGrabber, ColorsToValue\n'), ((766, 800), 'bitglitter.palettes.paletteutilities.ColorsToValue', 'ColorsToValue', (['initializerPaletteA'], {}), '(initializerPaletteA)\n', (779, 800), False, 'from bitglitter.palettes.paletteutilities import paletteGrabber, ColorsToValue\n'), ((831, 865), 'bitglitter.palettes.paletteutilities.ColorsToValue', 'ColorsToValue', (['initializerPaletteB'], {}), '(initializerPaletteB)\n', (844, 865), False, 'from bitglitter.palettes.paletteutilities import paletteGrabber, ColorsToValue\n'), ((3465, 3592), 'logging.debug', 'logging.debug', (['f"""Lockon successful.\npixelWidth: {pixelWidth}\nblockHeight: {blockHeight}\nblockWidth: {blockWidth}"""'], {}), '(\n f"""Lockon successful.\npixelWidth: {pixelWidth}\nblockHeight: {blockHeight}\nblockWidth: {blockWidth}"""\n )\n', (3478, 3592), False, 'import logging\n'), ((3918, 3928), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (3926, 3928), False, 'from bitstring import BitArray, ConstBitStream\n'), ((4311, 4342), 'bitstring.ConstBitStream', 'ConstBitStream', (['calibratorBitsX'], {}), '(calibratorBitsX)\n', (4325, 4342), False, 'from bitstring import BitArray, ConstBitStream\n'), ((5375, 5385), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (5383, 5385), False, 'from bitstring import BitArray, ConstBitStream\n'), ((5768, 5799), 'bitstring.ConstBitStream', 'ConstBitStream', 
(['calibratorBitsY'], {}), '(calibratorBitsY)\n', (5782, 5799), False, 'from bitstring import BitArray, ConstBitStream\n'), ((7020, 7030), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (7028, 7030), False, 'from bitstring import BitArray, ConstBitStream\n'), ((9076, 9106), 'bitstring.ConstBitStream', 'ConstBitStream', (['calibratorBits'], {}), '(calibratorBits)\n', (9090, 9106), False, 'from bitstring import BitArray, ConstBitStream\n'), ((1039, 1169), 'logging.info', 'logging.info', (['"""blockHeightOverride and blockWidthOverride parameters detected. Attempting to lock with these values..."""'], {}), "(\n 'blockHeightOverride and blockWidthOverride parameters detected. Attempting to lock with these values...'\n )\n", (1051, 1169), False, 'import logging\n'), ((4825, 4884), 'logging.warning', 'logging.warning', (['"""0,0 block unexpected value. Aborting..."""'], {}), "('0,0 block unexpected value. Aborting...')\n", (4840, 4884), False, 'import logging\n'), ((4940, 4997), 'logging.info', 'logging.info', (['"""blockWidthOverride successfully verified."""'], {}), "('blockWidthOverride successfully verified.')\n", (4952, 4997), False, 'import logging\n'), ((5017, 5067), 'logging.debug', 'logging.debug', (['"""blockWidth successfully verified."""'], {}), "('blockWidth successfully verified.')\n", (5030, 5067), False, 'import logging\n'), ((6285, 6344), 'logging.warning', 'logging.warning', (['"""0,0 block unexpected value. Aborting..."""'], {}), "('0,0 block unexpected value. 
Aborting...')\n", (6300, 6344), False, 'import logging\n'), ((6400, 6458), 'logging.info', 'logging.info', (['"""blockHeightOverride successfully verified."""'], {}), "('blockHeightOverride successfully verified.')\n", (6412, 6458), False, 'import logging\n'), ((6478, 6529), 'logging.debug', 'logging.debug', (['"""blockHeight successfully verified."""'], {}), "('blockHeight successfully verified.')\n", (6491, 6529), False, 'import logging\n'), ((8708, 8745), 'bitglitter.read.coloranalysis.colorSnap', 'colorSnap', (['activeScan', 'combinedColors'], {}), '(activeScan, combinedColors)\n', (8717, 8745), False, 'from bitglitter.read.coloranalysis import colorSnap, returnDistance\n'), ((2014, 2052), 'bitglitter.read.coloranalysis.returnDistance', 'returnDistance', (['image[0, 0]', '(0, 0, 0)'], {}), '(image[0, 0], (0, 0, 0))\n', (2028, 2052), False, 'from bitglitter.read.coloranalysis import colorSnap, returnDistance\n'), ((2070, 2196), 'logging.warning', 'logging.warning', (['"""Frame lock fail! Initial pixel value exceeds maximum color distance allowed for a reliable lock."""'], {}), "(\n 'Frame lock fail! Initial pixel value exceeds maximum color distance allowed for a reliable lock.'\n )\n", (2085, 2196), False, 'import logging\n'), ((3991, 4030), 'bitglitter.read.decoderassets.scanBlock', 'scanBlock', (['image', 'pixelWidth', 'xBlock', '(0)'], {}), '(image, pixelWidth, xBlock, 0)\n', (4000, 4030), False, 'from bitglitter.read.decoderassets import scanBlock\n'), ((4447, 4552), 'logging.warning', 'logging.warning', (['"""blockWidthOverride is not equal to what was read on calibrator. Aborting..."""'], {}), "(\n 'blockWidthOverride is not equal to what was read on calibrator. Aborting...'\n )\n", (4462, 4552), False, 'import logging\n'), ((4570, 4729), 'logging.warning', 'logging.warning', (['"""blockWidth verification does not match initial read. This could be the result of \nsufficiently distorted frames. 
Aborting..."""'], {}), '(\n """blockWidth verification does not match initial read. This could be the result of \nsufficiently distorted frames. Aborting..."""\n )\n', (4585, 4729), False, 'import logging\n'), ((5448, 5487), 'bitglitter.read.decoderassets.scanBlock', 'scanBlock', (['image', 'pixelWidth', '(0)', 'yBlock'], {}), '(image, pixelWidth, 0, yBlock)\n', (5457, 5487), False, 'from bitglitter.read.decoderassets import scanBlock\n'), ((5905, 6011), 'logging.warning', 'logging.warning', (['"""blockHeightOverride is not equal to what was read on calibrator. Aborting..."""'], {}), "(\n 'blockHeightOverride is not equal to what was read on calibrator. Aborting...'\n )\n", (5920, 6011), False, 'import logging\n'), ((6029, 6189), 'logging.warning', 'logging.warning', (['"""blockHeight verification does not match initial read. This could be the result of \nsufficiently distorted frames. Aborting..."""'], {}), '(\n """blockHeight verification does not match initial read. This could be the result of \nsufficiently distorted frames. Aborting..."""\n )\n', (6044, 6189), False, 'import logging\n'), ((7692, 7716), 'numpy.flip', 'flip', (['image[axisOnImage]'], {}), '(image[axisOnImage])\n', (7696, 7716), False, 'from numpy import flip\n'), ((7746, 7785), 'bitglitter.read.coloranalysis.returnDistance', 'returnDistance', (['activeScan', 'activeColor'], {}), '(activeScan, activeColor)\n', (7760, 7785), False, 'from bitglitter.read.coloranalysis import colorSnap, returnDistance\n'), ((8339, 8372), 'bitglitter.read.coloranalysis.returnDistance', 'returnDistance', (['activeScan', 'color'], {}), '(activeScan, color)\n', (8353, 8372), False, 'from bitglitter.read.coloranalysis import colorSnap, returnDistance\n')] |
"""
Author: <NAME>
Copyright:
Secure Systems Group, Aalto University
https://ssg.aalto.fi/
This code is released under Apache 2.0 license
http://www.apache.org/licenses/LICENSE-2.0
"""
"""
This file takes a csv file (e.g. written by numpy) and puts the values
as an array into a TensorProto that is stored as a file.
Such a TensorProto is required to run MiniONN as a client.
Obviously, there are many ways to give such a TensorProto as input but
this example file should be enough to get the idea of how to work with TensorProto.
"""
import onnx
import struct
import numpy as np
filename = "array.txt"
delimiter = ","
tensor_name = "1"

# Load values from the csv file and flatten to a list of floats.
values = np.loadtxt(filename, delimiter=delimiter)
values_list = values.flatten().tolist()

# Pack the input into raw bytes ('f' = 32-bit float, native byte order).
values_raw = struct.pack('%sf' % len(values_list), *values_list)

# Export the raw data to a tensor proto.
# We use FLOAT type here but pack it in bytes (raw=True in make_tensor).
t_type = onnx.TensorProto.FLOAT
tensor = onnx.helper.make_tensor(tensor_name, t_type, list(values.shape), values_raw, True)

# Write to file; the context manager closes the handle even if the write fails.
with open(filename + '.tensor', 'wb') as f:
    f.write(tensor.SerializeToString())
"numpy.loadtxt"
] | [((690, 731), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': 'delimiter'}), '(filename, delimiter=delimiter)\n', (700, 731), True, 'import numpy as np\n')] |
#%%
from typing import *
import numpy as np
from numpy.fft import fft, fftfreq, rfft, rfftfreq
from scipy.stats import kurtosis, skew
class SpectrumValue:
    """A single point of a spectrum: a frequency and its associated value."""
    frequency: Any
    value: Any

    def __init__(self, frequency, value):
        self.frequency, self.value = frequency, value
def get_amplitude_spectrum(data, framerate, max_framerate: Union[int, None]):
    """Compute the one-sided amplitude spectrum of a signal.

    Two-channel input is mixed down to one channel by averaging.  Spectrum
    points above ``max_framerate`` (when given) are dropped.  Returns a list
    of SpectrumValue(frequency, magnitude) entries.
    """
    if len(data.shape) == 2:
        # Average the two channels into a mono signal.
        data = data.sum(axis=1) / 2
    magnitudes = abs(rfft(data))
    frequencies = rfftfreq(len(data), 1. / framerate)
    return [
        SpectrumValue(freq, magnitude)
        for freq, magnitude in zip(frequencies, magnitudes)
        if max_framerate is None or freq <= max_framerate
    ]
def get_features1d(feature2d):
    """Reduce each row of a 2-D feature array to seven summary statistics.

    Returns a list of per-row statistics, in order: mean, min, max, median,
    variance, skewness, kurtosis.
    """
    reducers = (np.mean, np.min, np.max, np.median, np.var, skew, kurtosis)
    return [reduce_fn(feature2d, axis=1) for reduce_fn in reducers]
| [
"numpy.fft.rfft",
"numpy.median",
"scipy.stats.skew",
"numpy.min",
"numpy.mean",
"numpy.max",
"scipy.stats.kurtosis",
"numpy.var"
] | [((472, 482), 'numpy.fft.rfft', 'rfft', (['data'], {}), '(data)\n', (476, 482), False, 'from numpy.fft import fft, fftfreq, rfft, rfftfreq\n'), ((798, 824), 'numpy.mean', 'np.mean', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (805, 824), True, 'import numpy as np\n'), ((834, 859), 'numpy.min', 'np.min', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (840, 859), True, 'import numpy as np\n'), ((869, 894), 'numpy.max', 'np.max', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (875, 894), True, 'import numpy as np\n'), ((904, 932), 'numpy.median', 'np.median', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (913, 932), True, 'import numpy as np\n'), ((942, 967), 'numpy.var', 'np.var', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (948, 967), True, 'import numpy as np\n'), ((977, 1000), 'scipy.stats.skew', 'skew', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (981, 1000), False, 'from scipy.stats import kurtosis, skew\n'), ((1010, 1037), 'scipy.stats.kurtosis', 'kurtosis', (['feature2d'], {'axis': '(1)'}), '(feature2d, axis=1)\n', (1018, 1037), False, 'from scipy.stats import kurtosis, skew\n')] |
"""
ALS method for CP decomposition
"""
import numpy as np
import tensorly as tl
import time
import copy
from src._base import svd_init_fac,err
def err_fast(norm_tensor,A,V,W):
    """
    Fast data-fitting error calculation for ALS.

    Uses the identity ||T - [A_1,...,A_N]||^2 = ||T||^2 + <A^T A, V> - 2 <W, A>
    so the error can be updated without reconstructing the tensor.

    Parameters
    ----------
    norm_tensor : float
        norm of the tensor
    A : matrix
        factor matrix
    V : matrix
        matrix V defined as in als
    W : matrix
        matrix W defined as in als

    Returns
    -------
    float
        data fitting error
    """
    # Vectorized Frobenius inner products (np.sum instead of nested builtin
    # sum(), which iterates row-by-row in Python).
    res = np.sum(V * (np.transpose(A).dot(A))) - 2.0 * np.sum(W * A)
    # Floating-point rounding can push the squared residual slightly below
    # zero; clamp so np.sqrt never returns NaN for a mathematically-zero error.
    squared = max(norm_tensor ** 2 + res, 0.0)
    return np.sqrt(squared)
def als(tensor, rank, factors=None, it_max=100, tol=1e-7, list_factors=False, error_fast=True, time_rec=False):
    """
    ALS method of CP decomposition.

    Parameters
    ----------
    tensor : tensor
    rank : int
    factors : list of matrices, optional
        initial factor matrices. The default is None (SVD-based initialization).
    it_max : int, optional
        maximal number of iterations. The default is 100.
    tol : float, optional
        error tolerance. The default is 1e-7.
    list_factors : boolean, optional
        If true, also return the factor matrices of each iteration. The default is False.
    error_fast : boolean, optional
        If true, use err_fast to compute the data fitting error, otherwise use err.
        The default is True.
    time_rec : boolean, optional
        If true, also return the computation time of each iteration. The default is False.

    Returns
    -------
    the CP decomposition (weights, factors), the number of iterations and the
    relative error history. list_fac and list_time are appended when requested.
    """
    N = tl.ndim(tensor)  # order of the tensor
    norm_tensor = tl.norm(tensor)  # norm of the tensor
    if time_rec:
        list_time = []
    if list_factors:
        list_fac = []  # factor matrices of each iteration
    if factors is None:  # 'is None' avoids ambiguous '==' on array-likes
        factors = svd_init_fac(tensor, rank)
    weights = None
    it = 0
    if list_factors:
        list_fac.append(copy.deepcopy(factors))
    error = [err(tensor, weights, factors) / norm_tensor]
    while error[-1] > tol and it < it_max:
        if time_rec:
            tic = time.time()
        for n in range(N):
            # V = Hadamard product of the Gram matrices of all other factors.
            V = np.ones((rank, rank))
            for i in range(len(factors)):
                if i != n:
                    V = V * tl.dot(tl.transpose(factors[i]), factors[i])
            # W = unfolded tensor times Khatri-Rao product of the other factors.
            W = tl.cp_tensor.unfolding_dot_khatri_rao(tensor, (None, factors), n)
            factors[n] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(W)))
        if list_factors:
            list_fac.append(copy.deepcopy(factors))
        it = it + 1
        if not error_fast:
            error.append(err(tensor, weights, factors) / norm_tensor)
        else:
            # V and W from the last mode of the loop above are reused here.
            error.append(err_fast(norm_tensor, factors[N - 1], V, W) / norm_tensor)
        if time_rec:
            toc = time.time()
            list_time.append(toc - tic)
    # weights,factors=tl.cp_tensor.cp_normalize((None,factors))
    if list_factors and time_rec:
        return weights, factors, it, error, list_fac, list_time
    if time_rec:
        return weights, factors, it, error, list_time
    if list_factors:
        return weights, factors, it, error, list_fac
return(weights,factors,it,error) | [
"tensorly.transpose",
"src._base.svd_init_fac",
"copy.deepcopy",
"src._base.err",
"tensorly.cp_tensor.unfolding_dot_khatri_rao",
"tensorly.ndim",
"numpy.transpose",
"numpy.ones",
"tensorly.norm",
"time.time",
"numpy.sqrt"
] | [((594, 625), 'numpy.sqrt', 'np.sqrt', (['(norm_tensor ** 2 + res)'], {}), '(norm_tensor ** 2 + res)\n', (601, 625), True, 'import numpy as np\n'), ((1617, 1632), 'tensorly.ndim', 'tl.ndim', (['tensor'], {}), '(tensor)\n', (1624, 1632), True, 'import tensorly as tl\n'), ((1665, 1680), 'tensorly.norm', 'tl.norm', (['tensor'], {}), '(tensor)\n', (1672, 1680), True, 'import tensorly as tl\n'), ((1830, 1856), 'src._base.svd_init_fac', 'svd_init_fac', (['tensor', 'rank'], {}), '(tensor, rank)\n', (1842, 1856), False, 'from src._base import svd_init_fac, err\n'), ((1921, 1943), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (1934, 1943), False, 'import copy\n'), ((1954, 1983), 'src._base.err', 'err', (['tensor', 'weights', 'factors'], {}), '(tensor, weights, factors)\n', (1957, 1983), False, 'from src._base import svd_init_fac, err\n'), ((2074, 2085), 'time.time', 'time.time', ([], {}), '()\n', (2083, 2085), False, 'import time\n'), ((2118, 2139), 'numpy.ones', 'np.ones', (['(rank, rank)'], {}), '((rank, rank))\n', (2125, 2139), True, 'import numpy as np\n'), ((2251, 2316), 'tensorly.cp_tensor.unfolding_dot_khatri_rao', 'tl.cp_tensor.unfolding_dot_khatri_rao', (['tensor', '(None, factors)', 'n'], {}), '(tensor, (None, factors), n)\n', (2288, 2316), True, 'import tensorly as tl\n'), ((2667, 2678), 'time.time', 'time.time', ([], {}), '()\n', (2676, 2678), False, 'import time\n'), ((2435, 2457), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (2448, 2457), False, 'import copy\n'), ((2357, 2372), 'tensorly.transpose', 'tl.transpose', (['V'], {}), '(V)\n', (2369, 2372), True, 'import tensorly as tl\n'), ((2373, 2388), 'tensorly.transpose', 'tl.transpose', (['W'], {}), '(W)\n', (2385, 2388), True, 'import tensorly as tl\n'), ((2513, 2542), 'src._base.err', 'err', (['tensor', 'weights', 'factors'], {}), '(tensor, weights, factors)\n', (2516, 2542), False, 'from src._base import svd_init_fac, err\n'), ((533, 548), 'numpy.transpose', 
'np.transpose', (['A'], {}), '(A)\n', (545, 548), True, 'import numpy as np\n'), ((2206, 2230), 'tensorly.transpose', 'tl.transpose', (['factors[i]'], {}), '(factors[i])\n', (2218, 2230), True, 'import tensorly as tl\n')] |
# Copyright (c) 2019-2021 by University of Kassel, <NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import pytest
from numpy import array
import pandapower as pp
import pandapower.networks as pn
import simbench as sb
__author__ = "smeinecke"
def test_convert_voltlvl_names():
    mixed_input = [1, 2, "hv", 4, 5, "ehv", 7]
    # Converting to str yields the canonical voltage-level names.
    assert sb.convert_voltlvl_names(mixed_input, str) == [
        "EHV", "EHV-HV", "HV", "HV-MV", "MV", "EHV", "LV"]
    # Converting to int yields the numeric voltage-level codes.
    assert sb.convert_voltlvl_names(mixed_input, int) == [1, 2, 3, 4, 5, 1, 7]
def test_voltlvl_idx():
    """Exercise sb.voltlvl_idx() for buses, loads, trafos, trafo3ws and measurements."""
    net = pn.example_multivoltage()

    # add measurements at several voltage levels
    pp.create_measurement(net, "v", "bus", 1.03, 0.3, net.bus.index[7])  # 380 kV
    pp.create_measurement(net, "v", "bus", 1.03, 0.3, net.bus.index[40])  # 10 kV
    pp.create_measurement(net, "i", "trafo", 0.23, 0.03, net.trafo.index[-1], "hv")  # 10 kV
    pp.create_measurement(net, "p", "trafo", 0.33, 0.03, net.trafo.index[0], "lv")  # 110 kV
    pp.create_measurement(net, "i", "line", 0.23, 0.03, net.line.index[-1], "to")  # 0.4 kV
    pp.create_measurement(net, "q", "line", 0.33, 0.03, net.line.index[0], "from")  # 110 kV

    # buses: several equivalent ways of requesting the HV and MV levels
    expected_hv_mv_buses = list(range(16, 45))
    for voltlvl in (4, ["HV-MV"], [3, 5], ["HV", "MV"]):
        assert expected_hv_mv_buses == sb.voltlvl_idx(net, "bus", voltlvl)

    assert list(range(5, 13)) == sb.voltlvl_idx(net, "load", "MV")

    # three-winding trafo connects HV (hv_bus) with MV (mv_bus and lv_bus)
    for lvl, branch in (("HV", "hv_bus"), ("MV", "mv_bus"), ("MV", "lv_bus")):
        assert [0] == sb.voltlvl_idx(net, "trafo3w", lvl, branch_bus=branch)

    # two-winding trafos
    assert [0] == sb.voltlvl_idx(net, "trafo", 1, branch_bus="hv_bus")
    assert [0] == sb.voltlvl_idx(net, "trafo", 3, branch_bus="lv_bus")
    assert [0] == sb.voltlvl_idx(net, "trafo", 2, branch_bus="hv_bus")
    assert [0] == sb.voltlvl_idx(net, "trafo", 4, branch_bus="lv_bus")
    assert [] == sb.voltlvl_idx(net, "trafo", 5, branch_bus="lv_bus")
    assert [1] == sb.voltlvl_idx(net, "trafo", 5, branch_bus="hv_bus")

    assert list(range(13, 25)) == sb.voltlvl_idx(net, "load", 7)

    # measurements: the four levels together must partition all measurements
    m1 = sb.voltlvl_idx(net, "measurement", [1])
    m3 = sb.voltlvl_idx(net, "measurement", 3)
    m5 = sb.voltlvl_idx(net, "measurement", 5)
    m7 = sb.voltlvl_idx(net, "measurement", 7)
    assert m1 == [0]
    assert m3 == [3, 5]
    assert m5 == [1, 2]
    assert m7 == [4]
    assert len(net.measurement.index) == len(m1 + m3 + m5 + m7)
    assert set(net.measurement.index) == set(m1) | set(m3) | set(m5) | set(m7)
def test_all_voltlvl_idx():
    """Check that all_voltlvl_idx() partitions every non-empty element table."""
    net = pn.example_simple()
    lvl_dicts = sb.all_voltlvl_idx(net)

    nonempty_elms = set()
    for elm in pp.pp_elements():
        if net[elm].shape[0]:
            nonempty_elms.add(elm)
            # the union of all per-level index sets must cover the table exactly
            collected = set()
            for idx_set in lvl_dicts[elm].values():
                collected |= idx_set
            assert set(net[elm].index) == collected
    assert nonempty_elms == set(lvl_dicts.keys())

    # restricting the output to a subset of element tables
    only_bus = ["bus"]
    assert list(sb.all_voltlvl_idx(net, elms=only_bus).keys()) == only_bus

    # empty tables are only included on explicit request
    lvl_dicts = sb.all_voltlvl_idx(net, elms=["bus", "trafo3w"], include_empty_elms_dicts=True)
    assert not bool(net.trafo3w.shape[0])
    assert "trafo3w" in lvl_dicts.keys()
def test_get_voltlvl():
    """Voltage values (kV) must be mapped onto the discrete level codes."""
    voltages = [146, 145, 144, 61, 60, 59, 2, 1, 0.8]
    expected = array([1, 3, 3, 3, 5, 5, 5, 7, 7])
    assert all(sb.get_voltlvl(voltages) == expected)
    assert sb.get_voltlvl(0.4) == 7
if __name__ == "__main__":
    # Flip to a truthy value to run the whole module through pytest instead
    # of calling individual tests directly (useful while debugging).
    run_pytest = 0
    if run_pytest:
        pytest.main(["test_voltLvl.py", "-xs"])
    else:
        # test_convert_voltlvl_names()
        # test_voltlvl_idx()
        # test_get_voltlvl()
        test_all_voltlvl_idx()
        pass
| [
"simbench.all_voltlvl_idx",
"pandapower.networks.example_multivoltage",
"simbench.voltlvl_idx",
"pandapower.pp_elements",
"pytest.main",
"simbench.convert_voltlvl_names",
"pandapower.networks.example_simple",
"numpy.array",
"simbench.get_voltlvl",
"pandapower.create_measurement"
] | [((456, 515), 'simbench.convert_voltlvl_names', 'sb.convert_voltlvl_names', (["[1, 2, 'hv', 4, 5, 'ehv', 7]", 'str'], {}), "([1, 2, 'hv', 4, 5, 'ehv', 7], str)\n", (480, 515), True, 'import simbench as sb\n'), ((616, 675), 'simbench.convert_voltlvl_names', 'sb.convert_voltlvl_names', (["[1, 2, 'hv', 4, 5, 'ehv', 7]", 'int'], {}), "([1, 2, 'hv', 4, 5, 'ehv', 7], int)\n", (640, 675), True, 'import simbench as sb\n'), ((762, 787), 'pandapower.networks.example_multivoltage', 'pn.example_multivoltage', ([], {}), '()\n', (785, 787), True, 'import pandapower.networks as pn\n'), ((815, 882), 'pandapower.create_measurement', 'pp.create_measurement', (['net', '"""v"""', '"""bus"""', '(1.03)', '(0.3)', 'net.bus.index[7]'], {}), "(net, 'v', 'bus', 1.03, 0.3, net.bus.index[7])\n", (836, 882), True, 'import pandapower as pp\n'), ((897, 965), 'pandapower.create_measurement', 'pp.create_measurement', (['net', '"""v"""', '"""bus"""', '(1.03)', '(0.3)', 'net.bus.index[40]'], {}), "(net, 'v', 'bus', 1.03, 0.3, net.bus.index[40])\n", (918, 965), True, 'import pandapower as pp\n'), ((979, 1058), 'pandapower.create_measurement', 'pp.create_measurement', (['net', '"""i"""', '"""trafo"""', '(0.23)', '(0.03)', 'net.trafo.index[-1]', '"""hv"""'], {}), "(net, 'i', 'trafo', 0.23, 0.03, net.trafo.index[-1], 'hv')\n", (1000, 1058), True, 'import pandapower as pp\n'), ((1072, 1150), 'pandapower.create_measurement', 'pp.create_measurement', (['net', '"""p"""', '"""trafo"""', '(0.33)', '(0.03)', 'net.trafo.index[0]', '"""lv"""'], {}), "(net, 'p', 'trafo', 0.33, 0.03, net.trafo.index[0], 'lv')\n", (1093, 1150), True, 'import pandapower as pp\n'), ((1165, 1242), 'pandapower.create_measurement', 'pp.create_measurement', (['net', '"""i"""', '"""line"""', '(0.23)', '(0.03)', 'net.line.index[-1]', '"""to"""'], {}), "(net, 'i', 'line', 0.23, 0.03, net.line.index[-1], 'to')\n", (1186, 1242), True, 'import pandapower as pp\n'), ((1257, 1335), 'pandapower.create_measurement', 'pp.create_measurement', 
(['net', '"""q"""', '"""line"""', '(0.33)', '(0.03)', 'net.line.index[0]', '"""from"""'], {}), "(net, 'q', 'line', 0.33, 0.03, net.line.index[0], 'from')\n", (1278, 1335), True, 'import pandapower as pp\n'), ((2747, 2786), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""measurement"""', '[1]'], {}), "(net, 'measurement', [1])\n", (2761, 2786), True, 'import simbench as sb\n'), ((2796, 2833), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""measurement"""', '(3)'], {}), "(net, 'measurement', 3)\n", (2810, 2833), True, 'import simbench as sb\n'), ((2843, 2880), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""measurement"""', '(5)'], {}), "(net, 'measurement', 5)\n", (2857, 2880), True, 'import simbench as sb\n'), ((2890, 2927), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""measurement"""', '(7)'], {}), "(net, 'measurement', 7)\n", (2904, 2927), True, 'import simbench as sb\n'), ((3195, 3214), 'pandapower.networks.example_simple', 'pn.example_simple', ([], {}), '()\n', (3212, 3214), True, 'import pandapower.networks as pn\n'), ((3232, 3255), 'simbench.all_voltlvl_idx', 'sb.all_voltlvl_idx', (['net'], {}), '(net)\n', (3250, 3255), True, 'import simbench as sb\n'), ((3289, 3305), 'pandapower.pp_elements', 'pp.pp_elements', ([], {}), '()\n', (3303, 3305), True, 'import pandapower as pp\n'), ((3590, 3624), 'simbench.all_voltlvl_idx', 'sb.all_voltlvl_idx', (['net'], {'elms': 'elms'}), '(net, elms=elms)\n', (3608, 3624), True, 'import simbench as sb\n'), ((3684, 3763), 'simbench.all_voltlvl_idx', 'sb.all_voltlvl_idx', (['net'], {'elms': "['bus', 'trafo3w']", 'include_empty_elms_dicts': '(True)'}), "(net, elms=['bus', 'trafo3w'], include_empty_elms_dicts=True)\n", (3702, 3763), True, 'import simbench as sb\n'), ((1448, 1477), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""bus"""', '(4)'], {}), "(net, 'bus', 4)\n", (1462, 1477), True, 'import simbench as sb\n'), ((1508, 1545), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""bus"""', 
"['HV-MV']"], {}), "(net, 'bus', ['HV-MV'])\n", (1522, 1545), True, 'import simbench as sb\n'), ((1576, 1610), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""bus"""', '[3, 5]'], {}), "(net, 'bus', [3, 5])\n", (1590, 1610), True, 'import simbench as sb\n'), ((1641, 1681), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""bus"""', "['HV', 'MV']"], {}), "(net, 'bus', ['HV', 'MV'])\n", (1655, 1681), True, 'import simbench as sb\n'), ((1739, 1772), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""load"""', '"""MV"""'], {}), "(net, 'load', 'MV')\n", (1753, 1772), True, 'import simbench as sb\n'), ((1825, 1882), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo3w"""', '"""HV"""'], {'branch_bus': '"""hv_bus"""'}), "(net, 'trafo3w', 'HV', branch_bus='hv_bus')\n", (1839, 1882), True, 'import simbench as sb\n'), ((1911, 1968), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo3w"""', '"""MV"""'], {'branch_bus': '"""mv_bus"""'}), "(net, 'trafo3w', 'MV', branch_bus='mv_bus')\n", (1925, 1968), True, 'import simbench as sb\n'), ((1997, 2054), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo3w"""', '"""MV"""'], {'branch_bus': '"""lv_bus"""'}), "(net, 'trafo3w', 'MV', branch_bus='lv_bus')\n", (2011, 2054), True, 'import simbench as sb\n'), ((2105, 2157), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo"""', '(1)'], {'branch_bus': '"""hv_bus"""'}), "(net, 'trafo', 1, branch_bus='hv_bus')\n", (2119, 2157), True, 'import simbench as sb\n'), ((2185, 2237), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo"""', '(3)'], {'branch_bus': '"""lv_bus"""'}), "(net, 'trafo', 3, branch_bus='lv_bus')\n", (2199, 2237), True, 'import simbench as sb\n'), ((2306, 2358), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo"""', '(2)'], {'branch_bus': '"""hv_bus"""'}), "(net, 'trafo', 2, branch_bus='hv_bus')\n", (2320, 2358), True, 'import simbench as sb\n'), ((2395, 2447), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', 
(['net', '"""trafo"""', '(4)'], {'branch_bus': '"""lv_bus"""'}), "(net, 'trafo', 4, branch_bus='lv_bus')\n", (2409, 2447), True, 'import simbench as sb\n'), ((2495, 2547), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo"""', '(5)'], {'branch_bus': '"""lv_bus"""'}), "(net, 'trafo', 5, branch_bus='lv_bus')\n", (2509, 2547), True, 'import simbench as sb\n'), ((2596, 2648), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""trafo"""', '(5)'], {'branch_bus': '"""hv_bus"""'}), "(net, 'trafo', 5, branch_bus='hv_bus')\n", (2610, 2648), True, 'import simbench as sb\n'), ((2707, 2737), 'simbench.voltlvl_idx', 'sb.voltlvl_idx', (['net', '"""load"""', '(7)'], {}), "(net, 'load', 7)\n", (2721, 2737), True, 'import simbench as sb\n'), ((4030, 4052), 'simbench.get_voltlvl', 'sb.get_voltlvl', (['input2'], {}), '(input2)\n', (4044, 4052), True, 'import simbench as sb\n'), ((4105, 4144), 'pytest.main', 'pytest.main', (["['test_voltLvl.py', '-xs']"], {}), "(['test_voltLvl.py', '-xs'])\n", (4116, 4144), False, 'import pytest\n'), ((3957, 3979), 'simbench.get_voltlvl', 'sb.get_voltlvl', (['input1'], {}), '(input1)\n', (3971, 3979), True, 'import simbench as sb\n'), ((3983, 4017), 'numpy.array', 'array', (['[1, 3, 3, 3, 5, 5, 5, 7, 7]'], {}), '([1, 3, 3, 3, 5, 5, 5, 7, 7])\n', (3988, 4017), False, 'from numpy import array\n')] |
import os
import json
import torch
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
from build_utils import img_utils
from build_utils import torch_utils
from build_utils import utils
from models import Darknet
from draw_box_utils import draw_box
import models
import grad_CAM
from skimage import io
from skimage import img_as_ubyte
def main():
    """Run Grad-CAM visualisation for a YOLOv3-SPP detection on one image.

    Loads the network and weights, hooks the last convolutional layer,
    computes the CAM mask for the predicted box and saves the heatmap
    images via models.save_image.
    """
    img_size = 512  # network input size; an integer multiple of 32 [416, 512, 608]
    cfg = "cfg/my_yolov3.cfg"  # use generated cfg file
    # weight file -- NOTE(review): .format(img_size) is a no-op here, the
    # string contains no placeholder; confirm whether a templated name was intended
    weights = "weights/yolov3spp-29.pt".format(img_size)
    json_path = "./data/pascal_voc_classes.json"  # json label file
    img_path = "./img/1.png"
    assert os.path.exists(cfg), "cfg file {} dose not exist.".format(cfg)
    assert os.path.exists(weights), "weights file {} dose not exist.".format(weights)
    assert os.path.exists(json_path), "json file {} dose not exist.".format(json_path)
    assert os.path.exists(img_path), "image file {} dose not exist.".format(img_path)

    # class-id -> class-name mapping for labelling
    # NOTE(review): the file handle is never closed; a `with` block would be safer
    json_file = open(json_path, 'r')
    class_dict = json.load(json_file)
    category_index = {v: k for k, v in class_dict.items()}

    input_size = (img_size, img_size)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Darknet(cfg, img_size)
    model.load_state_dict(torch.load(weights, map_location=device)["model"])
    model.to(device)
    # name of the last conv layer -- the Grad-CAM hook target
    layer_name = models.get_last_conv_name(model)
    # print(layer_name)
    img_o = cv2.imread(img_path)  # BGR
    assert img_o is not None, "Image Not Found " + img_path
    # letterbox-resize to the network input size, preserving aspect ratio
    img = img_utils.letterbox(img_o, new_shape=input_size, auto=True, color=(0, 0, 0))[0]
    height, width = img.shape[:2]
    # Convert
    img_rgb = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
    img_con = np.ascontiguousarray(img_rgb)
    # img_final = torch.as_tensor(img_con[:,:,::-1].astype("float32").transpose(2, 0, 1)).requires_grad_(True)
    # gradients w.r.t. the input are required for Grad-CAM
    img_final = (torch.from_numpy(img_con).to(device).float()).requires_grad_(True)
    img_final = img_final / 255.0  # scale (0, 255) to (0, 1)
    img_final = img_final.unsqueeze(0)  # add batch dimension
    ori_shape = img_o.shape
    final_shape = img_final.shape[2:]
    inputs = {"image": img_final, "height": height, "width": width, "ori_shape": ori_shape, "final_shape": final_shape}
    t1 = torch_utils.time_synchronized()
    grad_cam = grad_CAM.GradCAM(model, layer_name, ori_shape, final_shape)
    mask, box, class_id = grad_cam(inputs)  # cam mask
    image_dict = {}
    img = img_o[..., ::-1]  # back to RGB for saving
    x1, y1, x2, y2 = box
    image_dict['predict_box'] = img[y1:y2, x1:x2]
    image_cam, image_dict['heatmap'] = models.gen_cam(img[y1:y2, x1:x2], mask)
    models.save_image(image_dict, "gradCAM")
# Script entry point: run the Grad-CAM demo when executed directly.
if __name__ == "__main__":
    main()
| [
"models.save_image",
"grad_CAM.GradCAM",
"json.load",
"build_utils.img_utils.letterbox",
"models.get_last_conv_name",
"torch.load",
"os.path.exists",
"models.gen_cam",
"build_utils.torch_utils.time_synchronized",
"models.Darknet",
"cv2.imread",
"torch.cuda.is_available",
"numpy.ascontiguousa... | [((676, 695), 'os.path.exists', 'os.path.exists', (['cfg'], {}), '(cfg)\n', (690, 695), False, 'import os\n'), ((750, 773), 'os.path.exists', 'os.path.exists', (['weights'], {}), '(weights)\n', (764, 773), False, 'import os\n'), ((836, 861), 'os.path.exists', 'os.path.exists', (['json_path'], {}), '(json_path)\n', (850, 861), False, 'import os\n'), ((923, 947), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (937, 947), False, 'import os\n'), ((1053, 1073), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1062, 1073), False, 'import json\n'), ((1262, 1284), 'models.Darknet', 'Darknet', (['cfg', 'img_size'], {}), '(cfg, img_size)\n', (1269, 1284), False, 'from models import Darknet\n'), ((1407, 1439), 'models.get_last_conv_name', 'models.get_last_conv_name', (['model'], {}), '(model)\n', (1432, 1439), False, 'import models\n'), ((1475, 1495), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1485, 1495), False, 'import cv2\n'), ((1793, 1822), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img_rgb'], {}), '(img_rgb)\n', (1813, 1822), True, 'import numpy as np\n'), ((2342, 2373), 'build_utils.torch_utils.time_synchronized', 'torch_utils.time_synchronized', ([], {}), '()\n', (2371, 2373), False, 'from build_utils import torch_utils\n'), ((2391, 2450), 'grad_CAM.GradCAM', 'grad_CAM.GradCAM', (['model', 'layer_name', 'ori_shape', 'final_shape'], {}), '(model, layer_name, ori_shape, final_shape)\n', (2407, 2450), False, 'import grad_CAM\n'), ((2674, 2713), 'models.gen_cam', 'models.gen_cam', (['img[y1:y2, x1:x2]', 'mask'], {}), '(img[y1:y2, x1:x2], mask)\n', (2688, 2713), False, 'import models\n'), ((2719, 2759), 'models.save_image', 'models.save_image', (['image_dict', '"""gradCAM"""'], {}), "(image_dict, 'gradCAM')\n", (2736, 2759), False, 'import models\n'), ((1574, 1650), 'build_utils.img_utils.letterbox', 'img_utils.letterbox', (['img_o'], {'new_shape': 'input_size', 'auto': 
'(True)', 'color': '(0, 0, 0)'}), '(img_o, new_shape=input_size, auto=True, color=(0, 0, 0))\n', (1593, 1650), False, 'from build_utils import img_utils\n'), ((1211, 1236), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1234, 1236), False, 'import torch\n'), ((1311, 1351), 'torch.load', 'torch.load', (['weights'], {'map_location': 'device'}), '(weights, map_location=device)\n', (1321, 1351), False, 'import torch\n'), ((1951, 1976), 'torch.from_numpy', 'torch.from_numpy', (['img_con'], {}), '(img_con)\n', (1967, 1976), False, 'import torch\n')] |
from __future__ import print_function
import os
try:
import cPickle # python 2
except:
import pickle as cPickle # python 3 (cPickle not existing any more)
import numpy as np
from scipy.spatial import cKDTree
from geoval.core import GeoData
from geoval.core.mapping import SingleMap
"""
Minimum trend analysis
"""
class MintrendPlot(object):
    """
    Plot minimum-trend analysis results.

    Note that the minimum trend analysis look-up table (LUT) needs to be
    preprocessed by a separate program, and that the STDV and MEAN fields
    (timstd, timmean) are required for plotting the final data.
    """

    def __init__(self, lutname, backend='imshow', proj_prop=None):
        """
        Parameters
        ----------
        lutname : str
            name of LUT file derived from mintrend analysis
        backend : str
            backend used for plotting ['imshow','basemap']
        proj_prop : dict, optional
            projection properties forwarded to the map plotting routine
        """
        self._lutname = lutname
        self.backend = backend
        self.proj_prop = proj_prop
        self._read_lut()

    def _read_lut(self):
        """Load the pickled LUT and flatten all grid points with valid results."""
        assert os.path.exists(self._lutname)
        # BUGFIX: pickle data must be read in binary mode ('rb'); text mode
        # fails under Python 3. The file handle is now also closed reliably.
        with open(self._lutname, 'rb') as fh:
            d = cPickle.load(fh)
        # build the full (phi, mean, cv) grid and keep only non-NaN LUT entries
        MEAN, PHIS, CVS = np.meshgrid(d['means'], d['phis'], d['cvs'])
        msk = ~np.isnan(d['res'])
        self.cvs = CVS[msk].flatten()
        self.means = MEAN[msk].flatten()
        self.phis = PHIS[msk].flatten()
        self.lut = d['res'][msk].flatten()

    def _interpolate_fast(self, tcvs, tmeans, tphis):
        """Interpolate the LUT at target (phi, mean, cv) points via a k-d tree.

        Uses inverse-distance weighting between the two closest LUT nodes.
        http://stackoverflow.com/questions/29974122/interpolating-data-from-a-look-up-table

        Parameters
        ----------
        tcvs, tmeans, tphis : array_like
            target coefficient of variation, mean and AR(1) coefficient values
        """
        # these are the target coordinates
        tcvs = np.asarray(tcvs)
        tmeans = np.asarray(tmeans)
        tphis = np.asarray(tphis)

        coords = np.vstack((tphis, tmeans, tcvs)).T
        xyz = np.vstack((self.phis, self.means, self.cvs)).T
        val = self.lut
        tree = cKDTree(xyz)
        dist, ind = tree.query(coords, k=2)  # take 2 closest LUT points
        # an invalid index (== len(val)) can be returned for boundary points;
        # clamping it to the last valid entry is a workaround for this
        # cKDTree behaviour
        ind[ind == len(val)] = len(val) - 1
        print('ncoords: ', coords.shape)
        print('indx: ', ind.min(), ind.max(), ind.shape)
        print('val: ', val.min(), val.max(), val.shape)
        d1, d2 = dist.T
        v1, v2 = val[ind].T
        # linear interpolation between the two neighbours, weighted by distance
        v = (d1) / (d1 + d2) * (v2 - v1) + v1
        return v

    def _calc_cv(self, PHI, SLOPE, SIG_R, ME, var_t):
        """
        Calculate the coefficient of variation of the deseasonalized series
        and store the result as self.CV.

        CV = sig_y / mu_y  with  sig_y = sqrt(b**2 * var(t) + sig_r**2 / (1 - phi**2))

        Note that the slope and var_t units need to be consistent
        (e.g. both need to be valid per YEAR).
        """
        TMP1 = PHI.mul(PHI).mulc(-1.).addc(1.)       # 1 - phi**2
        RIGHT = SIG_R.mul(SIG_R).div(TMP1)           # sig_r**2 / (1 - phi**2)
        LEFT = SLOPE.mul(SLOPE).mulc(var_t)          # b**2 * var(t)
        CV = LEFT.add(RIGHT)
        CV.data = np.sqrt(CV.data)
        CV = CV.div(ME)
        self.CV = CV

    @staticmethod
    def _squeeze_time(X):
        """Collapse a singleton time dimension in place; fail otherwise."""
        if X.ndim == 3:
            if X.nt == 1:
                X.data = X.data[0, :, :]
            else:
                assert False
        assert X.data.ndim == 2

    def map_trends(self, SIG_R, ME, PHI, SLOPE, var_t, force=False, time_unit=None):
        """
        Interpolate the mintrend LUT onto the map defined by the input fields.

        Parameters
        ----------
        SIG_R, ME, PHI, SLOPE : GeoData
            residual noise std, mean, AR(1) coefficient and trend slope
        var_t : float
            variance of the time axis (units consistent with SLOPE)
        force : bool
            recalculate even if a previous result exists
        time_unit : str
            time unit of the trend (e.g. 'year'); required
        """
        assert time_unit is not None, 'Time unit needs to be provided'
        for F in (SIG_R, ME, PHI, SLOPE):
            self._squeeze_time(F)

        # coefficient of variation
        self._calc_cv(PHI, SLOPE, SIG_R, ME, var_t)
        print(self.CV.min, self.CV.max)

        # mask for valid pixels
        msk = ~self.CV.data.mask

        # vectors which correspond to data that should be interpolated to
        cvs = self.CV.data[msk].flatten()
        means = ME.data[msk].flatten()
        phis = PHI.data[msk].flatten()
        print('NPixels: ', len(means))

        # reuse an existing result unless forced to recompute
        do_calc = force or not hasattr(self, 'X')

        if do_calc:
            # interpolation
            z = self._interpolate_fast(cvs, means, phis)

            # map back to original geometry
            tmp = np.ones(ME.nx * ME.ny) * np.nan
            tmp[msk.flatten()] = z
            tmp = tmp.reshape((ME.ny, ME.nx))

            X = GeoData(None, None)
            X._init_sample_object(nt=None, ny=ME.ny, nx=ME.nx, gaps=False)
            X.data = np.ma.array(tmp, mask=tmp != tmp)
            self.X = X

        # geometry and unit metadata are refreshed even without recomputation
        self.X.lon = ME.lon * 1.
        self.X.lat = ME.lat * 1.
        self.X.unit = 'trend / ' + time_unit
        self.X._trend_unit = time_unit

    def _get_temporal_scaling_factor(self):
        """Return the factor converting a trend per time_unit into per decade."""
        if self.X._trend_unit == 'year':
            scal = 10.
        else:
            assert False, 'Unknown temporal unit. Automatic rescaling not possible'
        return scal

    def draw_trend_map(self, decade=True, ax=None, **kwargs):
        """Plot the minimum detectable trend map, optionally rescaled per decade."""
        if decade:  # show trends per decade
            scal = self._get_temporal_scaling_factor()
            X = self.X.mulc(scal)
            X.unit = 'trend / decade'
        else:
            X = self.X
        self.M = SingleMap(X, ax=ax, backend=self.backend)
        self.M.plot(proj_prop=self.proj_prop, **kwargs)

    def draw_cv_map(self, ax=None, **kwargs):
        """
        Show map of CV, which needs to have been calculated before
        (via map_trends / _calc_cv).
        """
        self.Mcv = SingleMap(self.CV, ax=ax, backend=self.backend)
        self.Mcv.plot(proj_prop=self.proj_prop, **kwargs)

    def draw_relative_trend(self, M, decade=True, ax=None, **kwargs):
        """
        Generate a relative trend map (% per decade).
        The trend mapping needs to have been performed already.

        Parameters
        ==========
        M : GeoData
            mean value
        """
        assert ax is not None
        assert hasattr(self, 'X'), 'mintrend has not been preprocessed'
        scal = self._get_temporal_scaling_factor()
        X = self.X.mulc(scal).div(M).mulc(100.)
        X.unit = '% / decade'
        # NOTE(review): unlike the other draw_* methods no backend is passed
        # here, so SingleMap falls back to its default -- confirm intended.
        self.Mr = SingleMap(X, ax=ax)
        self.Mr.plot(proj_prop=self.proj_prop, **kwargs)
| [
"numpy.meshgrid",
"numpy.asarray",
"os.path.exists",
"numpy.ones",
"numpy.isnan",
"geoval.core.GeoData",
"numpy.ma.array",
"scipy.spatial.cKDTree",
"geoval.core.mapping.SingleMap",
"numpy.vstack",
"numpy.sqrt"
] | [((1086, 1115), 'os.path.exists', 'os.path.exists', (['self._lutname'], {}), '(self._lutname)\n', (1100, 1115), False, 'import os\n'), ((1276, 1320), 'numpy.meshgrid', 'np.meshgrid', (["d['means']", "d['phis']", "d['cvs']"], {}), "(d['means'], d['phis'], d['cvs'])\n", (1287, 1320), True, 'import numpy as np\n'), ((1724, 1740), 'numpy.asarray', 'np.asarray', (['tcvs'], {}), '(tcvs)\n', (1734, 1740), True, 'import numpy as np\n'), ((1758, 1776), 'numpy.asarray', 'np.asarray', (['tmeans'], {}), '(tmeans)\n', (1768, 1776), True, 'import numpy as np\n'), ((1793, 1810), 'numpy.asarray', 'np.asarray', (['tphis'], {}), '(tphis)\n', (1803, 1810), True, 'import numpy as np\n'), ((1965, 1977), 'scipy.spatial.cKDTree', 'cKDTree', (['xyz'], {}), '(xyz)\n', (1972, 1977), False, 'from scipy.spatial import cKDTree\n'), ((4098, 4114), 'numpy.sqrt', 'np.sqrt', (['CV.data'], {}), '(CV.data)\n', (4105, 4114), True, 'import numpy as np\n'), ((6868, 6909), 'geoval.core.mapping.SingleMap', 'SingleMap', (['X'], {'ax': 'ax', 'backend': 'self.backend'}), '(X, ax=ax, backend=self.backend)\n', (6877, 6909), False, 'from geoval.core.mapping import SingleMap\n'), ((7116, 7163), 'geoval.core.mapping.SingleMap', 'SingleMap', (['self.CV'], {'ax': 'ax', 'backend': 'self.backend'}), '(self.CV, ax=ax, backend=self.backend)\n', (7125, 7163), False, 'from geoval.core.mapping import SingleMap\n'), ((7750, 7769), 'geoval.core.mapping.SingleMap', 'SingleMap', (['X'], {'ax': 'ax'}), '(X, ax=ax)\n', (7759, 7769), False, 'from geoval.core.mapping import SingleMap\n'), ((1334, 1352), 'numpy.isnan', 'np.isnan', (["d['res']"], {}), "(d['res'])\n", (1342, 1352), True, 'import numpy as np\n'), ((1829, 1861), 'numpy.vstack', 'np.vstack', (['(tphis, tmeans, tcvs)'], {}), '((tphis, tmeans, tcvs))\n', (1838, 1861), True, 'import numpy as np\n'), ((1879, 1923), 'numpy.vstack', 'np.vstack', (['(self.phis, self.means, self.cvs)'], {}), '((self.phis, self.means, self.cvs))\n', (1888, 1923), True, 'import numpy as 
np\n'), ((6015, 6034), 'geoval.core.GeoData', 'GeoData', (['None', 'None'], {}), '(None, None)\n', (6022, 6034), False, 'from geoval.core import GeoData\n'), ((6131, 6164), 'numpy.ma.array', 'np.ma.array', (['tmp'], {'mask': '(tmp != tmp)'}), '(tmp, mask=tmp != tmp)\n', (6142, 6164), True, 'import numpy as np\n'), ((5890, 5912), 'numpy.ones', 'np.ones', (['(ME.nx * ME.ny)'], {}), '(ME.nx * ME.ny)\n', (5897, 5912), True, 'import numpy as np\n')] |
"""Clone optimizer definion and utilities."""
import numpy as np
from abc import ABCMeta, abstractmethod
def get_objective(number_of_mutations, B):
    """Return the rank-1 reconstruction objective ||outer(d, f) - B||.

    The flat argument x of the returned callable is split into the first
    `number_of_mutations` entries (per-mutation factors) and the remaining
    entries (per-biopsy factors).
    """
    def objective(x):
        deltas = x[:number_of_mutations]
        fractions = x[number_of_mutations:]
        return np.linalg.norm(np.outer(deltas, fractions) - B)
    return objective
class CloneOptimizer(object, metaclass=ABCMeta):
    """Clone optimizer class.

    Builds the purity-normalised mutation/biopsy matrix B and the box
    bounds for the optimization variables.
    """

    def __init__(self, clone, estimates, delta_lb=0.0, delta_ub=4.0):
        samples_info = clone.samples_info_df
        # VAF * copy number, normalised by sample purity -> mutations x biopsies
        self.B = (
            clone.mutations_df[samples_info['vaf']].values *
            clone.mutations_df[samples_info['cn']].values /
            samples_info['purity'].values
        )
        self.number_of_mutations = self.B.shape[0]
        self.number_of_biopsies = self.B.shape[1]
        self.mutations = clone.mutations_df.index
        self.samples = estimates.columns
        self._problem_setup(delta_lb, delta_ub)

    def _problem_setup(self, delta_lb, delta_ub):
        """Build box bounds: deltas in [delta_lb, delta_ub], fractions in [0, 1]."""
        delta_bounds = [(delta_lb, delta_ub)] * self.number_of_mutations
        fraction_bounds = [(0, 1)] * self.B.shape[1]
        self.bounds = tuple(delta_bounds + fraction_bounds)

    def optimize(self, **kwargs):
        pass
| [
"numpy.outer"
] | [((233, 291), 'numpy.outer', 'np.outer', (['x[:number_of_mutations]', 'x[number_of_mutations:]'], {}), '(x[:number_of_mutations], x[number_of_mutations:])\n', (241, 291), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import tqdm_notebook as tqdm
import torch
from utils.pytorch.utils.common import get_batch_info
def validate_model(model, criterion, loss_fn, metric_fn, val_dataloader):
    """Evaluate `model` on `val_dataloader` and return (mean loss, mean metric)."""
    _, _, n_batches = get_batch_info(val_dataloader)
    losses = np.zeros(n_batches)
    metrics = np.zeros(n_batches)

    model = model.eval()
    progress = tqdm(enumerate(val_dataloader), total=n_batches)
    # gradients are not needed for validation
    with torch.no_grad():
        for batch_idx, batch in progress:
            losses[batch_idx] = loss_fn(model, criterion, batch)
            metrics[batch_idx] = metric_fn(model, batch)

    return losses.mean(), metrics.mean()
| [
"torch.no_grad",
"numpy.zeros",
"utils.pytorch.utils.common.get_batch_info"
] | [((255, 285), 'utils.pytorch.utils.common.get_batch_info', 'get_batch_info', (['val_dataloader'], {}), '(val_dataloader)\n', (269, 285), False, 'from utils.pytorch.utils.common import get_batch_info\n'), ((303, 332), 'numpy.zeros', 'np.zeros', (['val_batch_per_epoch'], {}), '(val_batch_per_epoch)\n', (311, 332), True, 'import numpy as np\n'), ((352, 381), 'numpy.zeros', 'np.zeros', (['val_batch_per_epoch'], {}), '(val_batch_per_epoch)\n', (360, 381), True, 'import numpy as np\n'), ((483, 498), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (496, 498), False, 'import torch\n')] |
import argparse
import logging
import cv2 as cv
import librosa
import numpy as np
import torch
from config import sample_rate
def clip_gradient(optimizer, grad_clip):
    """
    Clips gradients computed during backpropagation to avoid explosion of gradients.
    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: clip value
    """
    all_params = (p for group in optimizer.param_groups for p in group['params'])
    for p in all_params:
        if p.grad is not None:
            p.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(epoch, epochs_since_improvement, model, metric_fc, optimizer, acc, is_best):
    """Persist training state to 'checkpoint.tar', and to 'BEST_checkpoint.tar' on a new best."""
    state = {
        'epoch': epoch,
        'epochs_since_improvement': epochs_since_improvement,
        'acc': acc,
        'model': model,
        'metric_fc': metric_fc,
        'optimizer': optimizer,
    }
    torch.save(state, 'checkpoint.tar')
    # keep a copy of the best model so a later, worse checkpoint cannot overwrite it
    if is_best:
        torch.save(state, 'BEST_checkpoint.tar')
class AverageMeter(object):
    """
    Keeps track of most recent, average, sum, and count of a metric.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, shrink_factor):
    """
    Shrinks learning rate by a specified factor.
    :param optimizer: optimizer whose learning rate must be shrunk.
    :param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
    """
    print("\nDECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * shrink_factor
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def accuracy(scores, targets, k=1):
    """Top-k accuracy (in percent) of `scores` against integer `targets`."""
    batch_size = targets.size(0)
    _, top_idx = scores.topk(k, 1, True, True)
    hits = top_idx.eq(targets.view(-1, 1).expand_as(top_idx))
    n_correct = hits.view(-1).float().sum()  # 0D tensor
    return n_correct.item() * (100.0 / batch_size)
def parse_args():
    """Parse command-line options for speaker-embedding training.

    Returns:
        argparse.Namespace with training, margin/optimizer and checkpoint settings.
    """
    def str2bool(v):
        # BUGFIX: argparse's `type=bool` treats any non-empty string
        # (including "False") as True; parse textual booleans explicitly.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(description='Speaker Embeddings')
    # Training config
    parser.add_argument('--epochs', default=1000, type=int, help='Number of maximum epochs')
    parser.add_argument('--lr', default=1e-3, type=float, help='Init learning rate')
    parser.add_argument('--l2', default=1e-6, type=float, help='weight decay (L2)')
    parser.add_argument('--batch-size', default=32, type=int, help='Batch size')
    parser.add_argument('--num-workers', default=4, type=int, help='Number of workers to generate minibatch')
    # optimizer / margin settings
    parser.add_argument('--margin-m', type=float, default=0.2, help='angular margin m')
    parser.add_argument('--margin-s', type=float, default=10.0, help='feature scale s')
    parser.add_argument('--emb-size', type=int, default=512, help='embedding length')
    parser.add_argument('--easy-margin', type=str2bool, default=False, help='easy margin')
    parser.add_argument('--weight-decay', type=float, default=0.0, help='weight decay')
    parser.add_argument('--mom', type=float, default=0.9, help='momentum')
    parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')

    args = parser.parse_args()
    return args
def get_logger():
    """Attach a stream handler to the root logger, set it to INFO and return it."""
    logger = logging.getLogger()
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s \t%(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
def ensure_folder(folder):
    """Create `folder` (including missing parents) if it does not exist.

    Uses os.makedirs(..., exist_ok=True), which removes the
    check-then-create race of the previous isdir()/mkdir() pair and
    additionally supports nested paths.
    """
    import os
    os.makedirs(folder, exist_ok=True)
def pad_list(xs, pad_value):
    # From: espnet/src/nets/e2e_asr_th.py: pad_list()
    """Right-pad a list of tensors along dim 0 to a common length.

    Returns a tensor of shape (len(xs), max_len, *trailing_dims) filled
    with `pad_value` beyond each sequence's true length.
    """
    batch = len(xs)
    longest = max(x.size(0) for x in xs)
    padded = xs[0].new(batch, longest, *xs[0].size()[1:]).fill_(pad_value)
    for i, x in enumerate(xs):
        padded[i, :x.size(0)] = x
    return padded
# [-0.5, 0.5]
def normalize(yt):
    """Linearly rescale `yt` so its minimum maps to -0.5 and its maximum to +0.5."""
    lo = np.min(yt)
    hi = np.max(yt)
    scale = 1.0 / (hi - lo)
    shift = -(hi + lo) / (2 * (hi - lo))
    return yt * scale + shift
# Acoustic Feature Extraction
# Parameters
#     - input file  : str, audio file path
#     - feature     : str, fbank or mfcc
#     - dim         : int, dimension of feature
#     - cmvn        : bool, apply CMVN (cepstral mean & variance normalization)
#     - window_size : int, window size for FFT (ms)
#     - stride      : int, window stride for FFT (ms)
#     - save_feature: str, if given, store feature to the path and return len(feature)
# Return
#     acoustic features with shape (time step, dim)
def extract_feature(input_file, feature='fbank', dim=40, cmvn=True, delta=False, delta_delta=False,
                    window_size=25, stride=10, save_feature=None):
    y, sr = librosa.load(input_file, sr=sample_rate)
    # trim leading/trailing silence below 20 dB, then rescale to [-0.5, 0.5]
    yt, _ = librosa.effects.trim(y, top_db=20)
    yt = normalize(yt)
    # window/stride are given in milliseconds; convert to samples
    ws = int(sr * 0.001 * window_size)
    st = int(sr * 0.001 * stride)
    if feature == 'fbank':  # log-scaled mel filterbank
        feat = librosa.feature.melspectrogram(y=yt, sr=sr, n_mels=dim, n_fft=ws, hop_length=st)
        feat = np.log(feat + 1e-6)  # small epsilon avoids log(0)
    elif feature == 'mfcc':
        feat = librosa.feature.mfcc(y=yt, sr=sr, n_mfcc=dim, n_mels=26, n_fft=ws, hop_length=st)
        # replace the 0th coefficient with frame energy
        # NOTE(review): librosa.feature.rmse was renamed to librosa.feature.rms
        # in librosa 0.7 and later removed -- confirm the pinned librosa version.
        feat[0] = librosa.feature.rmse(yt, hop_length=st, frame_length=ws)
    else:
        raise ValueError('Unsupported Acoustic Feature: ' + feature)
    feat = [feat]
    if delta:
        feat.append(librosa.feature.delta(feat[0]))
    if delta_delta:
        feat.append(librosa.feature.delta(feat[0], order=2))
    feat = np.concatenate(feat, axis=0)
    if cmvn:
        # per-coefficient mean/variance normalization over the time axis
        feat = (feat - feat.mean(axis=1)[:, np.newaxis]) / (feat.std(axis=1) + 1e-16)[:, np.newaxis]
    if save_feature is not None:
        # transpose to (time, dim) before saving; return the number of frames
        tmp = np.swapaxes(feat, 0, 1).astype('float32')
        np.save(save_feature, tmp)
        return len(tmp)
    else:
        return np.swapaxes(feat, 0, 1).astype('float32')
def build_LFR_features(inputs, m, n):
    """Apply the low frame rate (LFR) transform: stack m frames, hop by n.

    m == 1, n == 1: identity; m == 1, n > 1: frame skipping;
    m > 1, n == 1: right-context stacking only; m > 1, n > 1: full LFR.

    Args:
        inputs: T x D np.ndarray of frames.
        m: number of consecutive frames stacked per output frame.
        n: hop between successive output frames.
    """
    total = inputs.shape[0]
    out_len = int(np.ceil(total / n))
    stacked = []
    for step in range(out_len):
        start = step * n
        if m <= total - start:
            stacked.append(np.hstack(inputs[start:start + m]))
        else:
            # Last output frame: repeat the final input frame to fill up.
            frame = np.hstack(inputs[start:])
            for _ in range(m - (total - start)):
                frame = np.hstack((frame, inputs[-1]))
            stacked.append(frame)
    return np.vstack(stacked)
def theta_dist():
    """Load the image named by `test.image_name`, convert BGR -> RGB and
    scale pixel values to [0, 1]."""
    from test import image_name
    image = cv.imread(image_name)
    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    return image / 255.
| [
"os.mkdir",
"argparse.ArgumentParser",
"logging.Formatter",
"librosa.feature.melspectrogram",
"librosa.feature.mfcc",
"cv2.cvtColor",
"librosa.feature.rmse",
"numpy.max",
"numpy.swapaxes",
"librosa.effects.trim",
"numpy.save",
"numpy.ceil",
"librosa.feature.delta",
"logging.StreamHandler",... | [((910, 937), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (920, 937), False, 'import torch\n'), ((2354, 2411), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Speaker Embeddings"""'}), "(description='Speaker Embeddings')\n", (2377, 2411), False, 'import argparse\n'), ((3579, 3598), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3596, 3598), False, 'import logging\n'), ((3613, 3636), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3634, 3636), False, 'import logging\n'), ((3653, 3713), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s \t%(message)s"""'], {}), "('%(asctime)s %(levelname)s \\t%(message)s')\n", (3670, 3713), False, 'import logging\n'), ((4288, 4298), 'numpy.max', 'np.max', (['yt'], {}), '(yt)\n', (4294, 4298), True, 'import numpy as np\n'), ((4312, 4322), 'numpy.min', 'np.min', (['yt'], {}), '(yt)\n', (4318, 4322), True, 'import numpy as np\n'), ((5098, 5138), 'librosa.load', 'librosa.load', (['input_file'], {'sr': 'sample_rate'}), '(input_file, sr=sample_rate)\n', (5110, 5138), False, 'import librosa\n'), ((5151, 5185), 'librosa.effects.trim', 'librosa.effects.trim', (['y'], {'top_db': '(20)'}), '(y, top_db=20)\n', (5171, 5185), False, 'import librosa\n'), ((5912, 5940), 'numpy.concatenate', 'np.concatenate', (['feat'], {'axis': '(0)'}), '(feat, axis=0)\n', (5926, 5940), True, 'import numpy as np\n'), ((7281, 7302), 'numpy.vstack', 'np.vstack', (['LFR_inputs'], {}), '(LFR_inputs)\n', (7290, 7302), True, 'import numpy as np\n'), ((7365, 7386), 'cv2.imread', 'cv.imread', (['image_name'], {}), '(image_name)\n', (7374, 7386), True, 'import cv2 as cv\n'), ((7397, 7431), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (7408, 7431), True, 'import cv2 as cv\n'), ((1072, 1112), 'torch.save', 'torch.save', (['state', '"""BEST_checkpoint.tar"""'], {}), "(state, 
'BEST_checkpoint.tar')\n", (1082, 1112), False, 'import torch\n'), ((3887, 3908), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (3900, 3908), False, 'import os\n'), ((3918, 3934), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (3926, 3934), False, 'import os\n'), ((5338, 5423), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'yt', 'sr': 'sr', 'n_mels': 'dim', 'n_fft': 'ws', 'hop_length': 'st'}), '(y=yt, sr=sr, n_mels=dim, n_fft=ws, hop_length=st\n )\n', (5368, 5423), False, 'import librosa\n'), ((5434, 5454), 'numpy.log', 'np.log', (['(feat + 1e-06)'], {}), '(feat + 1e-06)\n', (5440, 5454), True, 'import numpy as np\n'), ((6152, 6178), 'numpy.save', 'np.save', (['save_feature', 'tmp'], {}), '(save_feature, tmp)\n', (6159, 6178), True, 'import numpy as np\n'), ((6873, 6887), 'numpy.ceil', 'np.ceil', (['(T / n)'], {}), '(T / n)\n', (6880, 6887), True, 'import numpy as np\n'), ((5497, 5582), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'yt', 'sr': 'sr', 'n_mfcc': 'dim', 'n_mels': '(26)', 'n_fft': 'ws', 'hop_length': 'st'}), '(y=yt, sr=sr, n_mfcc=dim, n_mels=26, n_fft=ws,\n hop_length=st)\n', (5517, 5582), False, 'import librosa\n'), ((5597, 5653), 'librosa.feature.rmse', 'librosa.feature.rmse', (['yt'], {'hop_length': 'st', 'frame_length': 'ws'}), '(yt, hop_length=st, frame_length=ws)\n', (5617, 5653), False, 'import librosa\n'), ((5787, 5817), 'librosa.feature.delta', 'librosa.feature.delta', (['feat[0]'], {}), '(feat[0])\n', (5808, 5817), False, 'import librosa\n'), ((5860, 5899), 'librosa.feature.delta', 'librosa.feature.delta', (['feat[0]'], {'order': '(2)'}), '(feat[0], order=2)\n', (5881, 5899), False, 'import librosa\n'), ((7111, 7136), 'numpy.hstack', 'np.hstack', (['inputs[i * n:]'], {}), '(inputs[i * n:])\n', (7120, 7136), True, 'import numpy as np\n'), ((6102, 6125), 'numpy.swapaxes', 'np.swapaxes', (['feat', '(0)', '(1)'], {}), '(feat, 0, 1)\n', (6113, 6125), True, 'import numpy as 
np\n'), ((6228, 6251), 'numpy.swapaxes', 'np.swapaxes', (['feat', '(0)', '(1)'], {}), '(feat, 0, 1)\n', (6239, 6251), True, 'import numpy as np\n'), ((6973, 7007), 'numpy.hstack', 'np.hstack', (['inputs[i * n:i * n + m]'], {}), '(inputs[i * n:i * n + m])\n', (6982, 7007), True, 'import numpy as np\n'), ((7202, 7232), 'numpy.hstack', 'np.hstack', (['(frame, inputs[-1])'], {}), '((frame, inputs[-1]))\n', (7211, 7232), True, 'import numpy as np\n')] |
import numpy as np

# Toy single-neuron "prediction": a weighted sum of three inputs passed
# through a sigmoid and scaled to a percentage.
sleep = 12
study = 4
stress = 10

# Column vector of inputs and column vector of weights (sleep, study, stress).
xs = np.array([[sleep], [study], [stress]])
ws = np.array([0.5, 0.3, -0.2]).reshape(3, 1)

p = xs.T.dot(ws)             # weighted sum, shape (1, 1)
s = 1 / (1 + np.exp(-p))     # sigmoid activation
prediction = s * 100         # express as a percentage
print(prediction)
| [
"numpy.array",
"numpy.exp"
] | [((59, 97), 'numpy.array', 'np.array', (['[[sleep], [study], [stress]]'], {}), '([[sleep], [study], [stress]])\n', (67, 97), True, 'import numpy as np\n'), ((156, 182), 'numpy.array', 'np.array', (['[0.5, 0.3, -0.2]'], {}), '([0.5, 0.3, -0.2])\n', (164, 182), True, 'import numpy as np\n'), ((224, 234), 'numpy.exp', 'np.exp', (['(-p)'], {}), '(-p)\n', (230, 234), True, 'import numpy as np\n')] |
import os
from numpy.distutils.core import setup
DESCRIPTION = "Python implementation of Friedman's Supersmoother"
LONG_DESCRIPTION = """
SuperSmoother in Python
=======================
This is an efficient implementation of Friedman's SuperSmoother based in
Python. It makes use of numpy for fast numerical computation.
For more information, see the github project page:
http://github.com/jakevdp/supersmoother
"""
NAME = "supersmoother"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
URL = 'http://github.com/jakevdp/supersmoother'
DOWNLOAD_URL = 'http://github.com/jakevdp/supersmoother'
LICENSE = 'BSD 3-clause'
import supersmoother
VERSION = supersmoother.__version__
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for the supersmoother package."""
    if os.path.exists('MANIFEST'):
        # distutils caches the file list in MANIFEST; remove it so the
        # list is rebuilt for this run.
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)

    # Silence the non-useful "Ignoring attempt to set 'name'" message.
    config.set_options(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True,
    )
    config.add_subpackage('supersmoother')
    return config
# Register the package with distutils; metadata comes from the module-level
# constants above and the sub-package layout from configuration().
setup(configuration=configuration,
      name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=URL,
      download_url=DOWNLOAD_URL,
      license=LICENSE,
      packages=['supersmoother',
                'supersmoother.tests',
               ],
      classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'],
     )
| [
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.Configuration",
"os.path.exists",
"os.remove"
] | [((1304, 2029), 'numpy.distutils.core.setup', 'setup', ([], {'configuration': 'configuration', 'name': 'NAME', 'version': 'VERSION', 'description': 'DESCRIPTION', 'long_description': 'LONG_DESCRIPTION', 'author': 'AUTHOR', 'author_email': 'AUTHOR_EMAIL', 'maintainer': 'MAINTAINER', 'maintainer_email': 'MAINTAINER_EMAIL', 'url': 'URL', 'download_url': 'DOWNLOAD_URL', 'license': 'LICENSE', 'packages': "['supersmoother', 'supersmoother.tests']", 'classifiers': "['Development Status :: 4 - Beta', 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License', 'Natural Language :: English',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4']"}), "(configuration=configuration, name=NAME, version=VERSION, description=\n DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR,\n author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=\n MAINTAINER_EMAIL, url=URL, download_url=DOWNLOAD_URL, license=LICENSE,\n packages=['supersmoother', 'supersmoother.tests'], classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License', 'Natural Language :: English',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4'])\n", (1309, 2029), False, 'from numpy.distutils.core import setup\n'), ((784, 810), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (798, 810), False, 'import os\n'), ((912, 957), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (925, 957), False, 'from numpy.distutils.misc_util import Configuration\n'), ((820, 841), 'os.remove', 'os.remove', 
(['"""MANIFEST"""'], {}), "('MANIFEST')\n", (829, 841), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
def findSymbolOutOfPara(symbol, content):
    """Return the index of the first occurrence of *symbol* in *content*
    that is not enclosed in parentheses, or -1 if there is none.

    Fix: the original tracked parentheses with a single boolean flag, so any
    ')' re-enabled matching even when still inside nested parentheses
    (e.g. 'a(b(c)+d)+e' matched the inner '+'). A depth counter handles
    nesting correctly and is identical to the original for single-level
    inputs.
    """
    depth = 0
    for idx, ch in enumerate(content):
        if ch == '(':
            depth += 1
        elif ch == ')':
            # Guard against unbalanced ')': depth never goes negative.
            if depth > 0:
                depth -= 1
        elif depth == 0 and ch == symbol:
            return idx
    return -1
# --- Parse the user's equation into: yShift + multiplier * f(multiCoefficient*x + addCoefficient) ---
trygEq = input("Give me please, a sin/cos equation to sketch")
# Split off an optional vertical shift at the first '+' outside parentheses.
pointToCut = findSymbolOutOfPara('+',trygEq)
if pointToCut!=-1:
    trygEqExplode = [trygEq[0:pointToCut],trygEq[pointToCut+1:]]
else:
    trygEqExplode = [trygEq]
yShift = 0
mainPart = ""
if len(trygEqExplode) > 1:
    # The half containing s/c/t/l (sin/cos/tan/log) is the function part;
    # the other half is the integer vertical shift.
    if trygEqExplode[0].find('s')!=-1 or trygEqExplode[0].find('c')!=-1 or trygEqExplode[0].find('t')!=-1 or trygEqExplode[0].find('l')!=-1:
        mainPart = trygEqExplode[0]
        yShift = int(trygEqExplode[1])
    else:
        mainPart = trygEqExplode[1]
        yShift = int(trygEqExplode[0])
else:
    mainPart = trygEq
print(mainPart)
# Split off an optional scalar multiplier at the first '*' outside parentheses.
pointToCut = findSymbolOutOfPara('*',mainPart)
if pointToCut!=-1:
    mainProducts = [mainPart[0:pointToCut],mainPart[pointToCut+1:]]
else:
    mainProducts = [mainPart]
multiplier = 1
mainFunction = ""
# NOTE(review): this indexes mainProducts[1] unconditionally — it raises
# IndexError whenever the input contains no '*' (mainProducts then has one
# element). Debug print, candidate for removal.
print(mainProducts[1])
if len(mainProducts) > 1:
    # Same s/c/t/l heuristic: one factor is the function, the other the
    # integer multiplier.
    if mainProducts[0].find('s')!=-1 or mainProducts[0].find('c')!=-1 or mainProducts[0].find('t')!=-1 or mainProducts[0].find('l')!=-1 :
        mainFunction = mainProducts[0]
        multiplier = int(mainProducts[1])
    else:
        mainFunction = mainProducts[1]
        multiplier = int(mainProducts[0])
else:
    mainFunction = mainProducts[0]
print(mainFunction)
# Extract the argument text between the parentheses, e.g. 'sin(2*x+1)' -> '2*x+1'.
insideFunction = mainFunction.split("(")[1]
insideFunction = insideFunction[:-1]
insideFunctionSplit = insideFunction.split('+')
addCoefficient = 0
multiCoefficient = 1
if len(insideFunctionSplit) > 1:
    # Whichever '+'-separated piece contains 'x' carries the coefficient;
    # [:-2] strips the trailing '*x' — presumably the argument is written
    # as 'k*x' (TODO confirm: a bare 'x' piece yields '' and keeps the
    # default coefficient 1).
    if insideFunctionSplit[0].find('x')!=-1:
        addCoefficient = int(insideFunctionSplit[1])
        if insideFunctionSplit[0][:-2] != '':
            multiCoefficient = int(insideFunctionSplit[0][:-2])
    else:
        addCoefficient = int(insideFunctionSplit[0])
        if insideFunctionSplit[1][:-2] != '':
            multiCoefficient = int(insideFunctionSplit[1][:-2])
# --- Evaluate multiplier * f(multiCoefficient*x + addCoefficient) + yShift and plot it ---
xValues = np.linspace(-12 * np.pi, 12 * np.pi, 1000)
# Dispatch on the function's first letter: c=cos, t=tan, l=log2, default sin.
if mainFunction[0] == 'c':
    trigValues = np.cos(xValues * multiCoefficient + addCoefficient)
elif mainFunction[0] == 't':
    trigValues = np.tan(xValues * multiCoefficient + addCoefficient)
elif mainFunction[0] == 'l':
    trigValues = np.log2(xValues * multiCoefficient + addCoefficient)
else:
    trigValues = np.sin(xValues * multiCoefficient + addCoefficient)
trigValues *= multiplier
trigValues += yShift
# Figure 1: the same curve at three line widths in a 2x2 grid.
plt.figure(1)
plt.subplot(2,2,1)
plt.plot(xValues, trigValues, alpha=0.7, color="#A14491", linestyle="--", linewidth=4)
plt.subplot(2,2,2)
plt.plot(xValues, trigValues, alpha=0.7, color="#A14491", linestyle="--", linewidth=3)
plt.subplot(2,2,3)
plt.plot(xValues, trigValues, alpha=0.7, color="#A14491", linestyle="--", linewidth=2)
# Figure 2: the curve clipped to y in [-5, 5], drawn twice at low alpha.
plt.figure(2)
plt.ylim(-5,5)
plt.plot(xValues, trigValues,alpha=0.2, color="#519441", linestyle="-", linewidth=1)
plt.plot(xValues, trigValues, alpha=0.2, color="#519441", linestyle="-", linewidth=1)
plt.show() | [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.log2",
"matplotlib.pyplot.figure",
"numpy.tan",
"numpy.sin",
"numpy.linspace",
"numpy.cos"
] | [((2295, 2337), 'numpy.linspace', 'np.linspace', (['(-12 * np.pi)', '(12 * np.pi)', '(1000)'], {}), '(-12 * np.pi, 12 * np.pi, 1000)\n', (2306, 2337), True, 'import numpy as np\n'), ((2754, 2767), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2764, 2767), True, 'import matplotlib.pyplot as plt\n'), ((2768, 2788), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2779, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2877), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'trigValues'], {'alpha': '(0.7)', 'color': '"""#A14491"""', 'linestyle': '"""--"""', 'linewidth': '(4)'}), "(xValues, trigValues, alpha=0.7, color='#A14491', linestyle='--',\n linewidth=4)\n", (2795, 2877), True, 'import matplotlib.pyplot as plt\n'), ((2874, 2894), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2885, 2894), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2983), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'trigValues'], {'alpha': '(0.7)', 'color': '"""#A14491"""', 'linestyle': '"""--"""', 'linewidth': '(3)'}), "(xValues, trigValues, alpha=0.7, color='#A14491', linestyle='--',\n linewidth=3)\n", (2901, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2980, 3000), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2991, 3000), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3089), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'trigValues'], {'alpha': '(0.7)', 'color': '"""#A14491"""', 'linestyle': '"""--"""', 'linewidth': '(2)'}), "(xValues, trigValues, alpha=0.7, color='#A14491', linestyle='--',\n linewidth=2)\n", (3007, 3089), True, 'import matplotlib.pyplot as plt\n'), ((3086, 3099), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (3096, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3115), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (3108, 
3115), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3204), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'trigValues'], {'alpha': '(0.2)', 'color': '"""#519441"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(xValues, trigValues, alpha=0.2, color='#519441', linestyle='-',\n linewidth=1)\n", (3123, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3201, 3290), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'trigValues'], {'alpha': '(0.2)', 'color': '"""#519441"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(xValues, trigValues, alpha=0.2, color='#519441', linestyle='-',\n linewidth=1)\n", (3209, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3296, 3298), True, 'import matplotlib.pyplot as plt\n'), ((2382, 2433), 'numpy.cos', 'np.cos', (['(xValues * multiCoefficient + addCoefficient)'], {}), '(xValues * multiCoefficient + addCoefficient)\n', (2388, 2433), True, 'import numpy as np\n'), ((2480, 2531), 'numpy.tan', 'np.tan', (['(xValues * multiCoefficient + addCoefficient)'], {}), '(xValues * multiCoefficient + addCoefficient)\n', (2486, 2531), True, 'import numpy as np\n'), ((2578, 2630), 'numpy.log2', 'np.log2', (['(xValues * multiCoefficient + addCoefficient)'], {}), '(xValues * multiCoefficient + addCoefficient)\n', (2585, 2630), True, 'import numpy as np\n'), ((2654, 2705), 'numpy.sin', 'np.sin', (['(xValues * multiCoefficient + addCoefficient)'], {}), '(xValues * multiCoefficient + addCoefficient)\n', (2660, 2705), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .anchor_head_template import AnchorHeadTemplate
from ...ops.iou3d_nms import iou3d_nms_utils
from ..model_utils import meter_utils
class Diversity_Head(AnchorHeadTemplate):
'''
This class will implement the distillation loss.
This class do not support multihead.....
'''
    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        """Anchor head with separate cls/reg conv stems, used for knowledge distillation.

        Args:
            model_cfg: head configuration (NUM_DIR_BINS, LOSS_CONFIG, ...).
            input_channels: channel count of the incoming 2D spatial feature map.
            num_class: number of detection classes.
            class_names: list of class name strings.
            grid_size: voxel grid size forwarded to the template.
            point_cloud_range: point cloud extent forwarded to the template.
            predict_boxes_when_training: decode boxes during training
                (needed by the KD/consistency losses).
        """
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        # Collapse the per-location anchor counts into a single total.
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        # Separate three-layer conv stems for classification and regression so
        # the hint loss can distil each branch's intermediate features independently.
        self.cls_stems = nn.Sequential(
            nn.Conv2d(
                input_channels, 256, 1, 1
            ),
            nn.Conv2d(
                256, 256, 3, 1, 1
            ),
            nn.Conv2d(
                256, 256, 3, 1, 1
            )
        )
        self.reg_stems = nn.Sequential(
            nn.Conv2d(
                input_channels, 256, 1, 1
            ),
            nn.Conv2d(
                256, 256, 3, 1, 1
            ),
            nn.Conv2d(
                256, 256, 3, 1, 1
            )
        )
        # 1x1 prediction heads: per-anchor class scores, box residuals and
        # direction bins.
        self.conv_cls = nn.Conv2d(
            256, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_box = nn.Conv2d(
            256, self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1
        )
        self.conv_dir_cls = nn.Conv2d(
            256,
            self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
            kernel_size=1
        )
        # [x_min, y_min, z_min, x_max, y_max, z_max] used to drop decoded boxes
        # outside this volume in consistency_loss; presumably the KITTI car
        # range — TODO confirm.
        post_center_range = [0, -40.0, -5.0, 70.4, 40.0, 5.0]
        self.post_center_range = torch.tensor(post_center_range, dtype=torch.float).cuda()
        # Holds teacher predictions/targets cached by forward() for the KD losses.
        self.knowledge_forward_rect = {}
        self.init_weights()
        #self.kd_cls_meter = meter_utils.AverageMeter()
        #self.kd_fea_meter = meter_utils.AverageMeter()
        #self.kd_cls_fea_meter = meter_utils.AverageMeter()
        #self.kd_reg_fea_meter = meter_utils.AverageMeter()
        #self.kd_fea_total_meter = meter_utils.AverageMeter()
        #self.kd_con_meter = meter_utils.AverageMeter()
def init_weights(self):
pi = 0.01
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
    def nn_distance(self, student_box, teacher_box, iou_thres=0.7):
        """Match student and teacher boxes by 3D IoU and compute a box
        consistency loss over the surviving pairs.

        Args:
            student_box: (N, 7+) decoded boxes from the student head.
            teacher_box: (M, 7+) decoded boxes from the teacher head.
            iou_thres: minimum best-IoU for a box to participate.

        Returns:
            (loss, idx1, idx2, mask1, mask2): the averaged smooth-L1 box
            loss, the argmax-IoU indices into the *filtered* opposite set,
            and the boolean masks selecting the surviving student/teacher
            boxes — or a list of five Nones when nothing survives.
        """
        iou_ground = iou3d_nms_utils.boxes_iou3d_gpu(student_box, teacher_box)
        # Best teacher per student box (dim=1) and best student per teacher box (dim=0).
        iou1, idx1 = torch.max(iou_ground, dim=1)
        iou2, idx2 = torch.max(iou_ground, dim=0)
        mask1, mask2 = iou1 > iou_thres, iou2 > iou_thres
        # filter box by iou_thresh....
        iou_ground = iou_ground[mask1]
        iou_ground = iou_ground[:, mask2]
        if iou_ground.shape[0] == 0 or iou_ground.shape[1] == 0:  # for unlabeled data (some scenes wo cars)
            return [None] * 5
        # Recompute the argmax indices on the filtered IoU matrix so they
        # index into the filtered box sets below.
        iou1, idx1 = torch.max(iou_ground, dim=1)
        iou2, idx2 = torch.max(iou_ground, dim=0)
        val_box1, val_box2 = student_box[mask1], teacher_box[mask2]
        aligned_box1, aligned_box2 = val_box1[idx2], val_box2[idx1]
        # Encode the heading difference as sin(a - b) before the regression loss.
        box1, box2 = self.add_sin_difference(val_box1, aligned_box2)
        box_cosistency_loss = self.reg_loss_func(box1, box2)
        # Average over the number of matched student boxes.
        box_cosistency_loss = box_cosistency_loss.sum() / box_cosistency_loss.shape[0]
        return box_cosistency_loss, idx1, idx2, mask1, mask2
def get_normized_weight(self):
box_cls_labels = self.forward_ret_dict['box_cls_labels']
positives = box_cls_labels > 0
reg_weights = positives.type(torch.float32)
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
return reg_weights
    def consistency_loss(self):
        """Student/teacher consistency loss over decoded predictions.

        Per sample: keep boxes whose max class score exceeds 0.3 and whose
        centre lies inside self.post_center_range, IoU-match student boxes
        against teacher boxes via nn_distance(), then penalise differences
        in box regression, sigmoid class scores and softmax direction
        probabilities of the matched pairs.

        Returns:
            (consistency_loss, tb_dict): shape-(1,) total loss averaged over
            the batch, and a tensorboard dict of its components.
        """
        # First get the decoded predicts of box and cls.
        cost_function = torch.nn.SmoothL1Loss()
        # NOTE(review): reg_weight is computed but never used below.
        reg_weight = self.get_normized_weight()
        student_cls = self.forward_ret_dict['batch_cls_preds']
        student_box = self.forward_ret_dict['batch_box_preds']
        student_dir = self.forward_ret_dict['batch_dir_cls_preds']
        teacher_cls = self.knowledge_forward_rect['teacher_cls_pred']
        teacher_box = self.knowledge_forward_rect['teacher_box_pred']
        teacher_dir = self.knowledge_forward_rect['teacher_dir_pred']
        # Flatten the (B, H, W, C) direction maps to one row per anchor.
        batch_sz, height, width, _ = student_dir.shape
        student_dir = student_dir.view(batch_sz, height * width * self.num_class * 2, -1)
        teacher_dir = teacher_dir.view(batch_sz, height * width * self.num_class * 2, -1)
        # Second Get the max cls score of each class in student_cls and teacher_cls
        student_score = torch.max(student_cls, dim=-1)[0]
        teacher_score = torch.max(teacher_cls, dim=-1)[0]
        batch_sz, _, _ = student_cls.shape
        batch_box_loss = torch.tensor([0.], dtype=torch.float32).cuda()
        batch_cls_loss = torch.tensor([0.], dtype=torch.float32).cuda()
        batch_dir_loss = torch.tensor([0.], dtype=torch.float32).cuda()
        # Third Get the student_cls mask and teacher_cls mask
        for i in range(batch_sz):
            # Score threshold plus a centre-inside-range check on both sides.
            student_mask = student_score[i] > 0.3
            teacher_mask = teacher_score[i] > 0.3
            mask_stu = (student_box[i][:, :3] >= self.post_center_range[:3]).all(1)
            mask_stu &= (student_box[i][:, :3] <= self.post_center_range[3:]).all(1)
            student_mask &= mask_stu
            mask_tea = (teacher_box[i][:, :3] >= self.post_center_range[:3]).all(1)
            mask_tea &= (teacher_box[i][:, :3] <= self.post_center_range[3:]).all(1)
            teacher_mask &= mask_tea
            if student_mask.sum() > 0 and teacher_mask.sum() > 0:
                student_cls_filter = student_cls[i][student_mask]
                student_box_filter = student_box[i][student_mask]
                student_dir_filter = student_dir[i][student_mask]
                teacher_cls_filter = teacher_cls[i][teacher_mask]
                teacher_box_filter = teacher_box[i][teacher_mask]
                teacher_dir_filter = teacher_dir[i][teacher_mask]
                # See how many box will be remain....
                con_box_loss, idx1, idx2, mask1, mask2 = self.nn_distance(student_box_filter, teacher_box_filter)
                if con_box_loss is None:
                    continue
                # Align teacher entries to their matched student boxes via
                # mask2/idx1 from nn_distance, then compare probabilities.
                student_cls_selected = torch.sigmoid(student_cls_filter[mask1])
                teacher_cls_selected = torch.sigmoid(teacher_cls_filter[mask2][idx1])
                student_dir_selected = F.softmax(student_dir_filter[mask1], dim=-1)
                teacher_dir_selected = F.softmax(teacher_dir_filter[mask2][idx1], dim=-1)
                batch_box_loss += con_box_loss
                batch_cls_loss += cost_function(student_cls_selected, teacher_cls_selected)
                batch_dir_loss += cost_function(student_dir_selected, teacher_dir_selected)
        consistency_loss = (batch_dir_loss + batch_box_loss + batch_cls_loss ) / batch_sz
        tb_dict = {
            'consistency_loss': consistency_loss.item(),
            'consistency_dir_loss': batch_dir_loss.item(),
            'consistency_box_loss': batch_box_loss.item(),
            'consistency_cls_loss': batch_cls_loss.item(),
        }
        return consistency_loss, tb_dict
    def get_kd_reg_loss(self):
        """Hard-label KD regression loss: student box/direction predictions
        against regression targets built from the teacher's boxes (stored in
        self.knowledge_forward_rect by forward()).

        Returns:
            (teach_loss, tb_dict): weighted localisation (+ optional
            direction) loss and its tensorboard entries.
        """
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.knowledge_forward_rect['box_reg_targets']
        box_cls_labels = self.knowledge_forward_rect['box_cls_labels']
        batch_size = int(box_preds.shape[0])
        # Only positive (teacher-assigned) anchors contribute, normalised by
        # the positive count per sample.
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        anchors = torch.cat(self.anchors, dim=-3)
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        box_preds = box_preds.view(batch_size, -1,
                                   box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else
                                   box_preds.shape[-1])
        # sin(a - b) = sinacosb-cosasinb
        box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets)
        loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights)  # [N, M]
        loc_loss = loc_loss_src.sum() / batch_size
        loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
        teach_loss = loc_loss
        tb_dict = {
            'rpn_kd_loss_loc': loc_loss.item()
        }
        if box_dir_cls_preds is not None:
            # Direction-bin classification against teacher-derived targets.
            dir_targets = self.get_direction_target(
                anchors, box_reg_targets,
                dir_offset=self.model_cfg.DIR_OFFSET,
                num_bins=self.model_cfg.NUM_DIR_BINS
            )
            dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            weights = positives.type_as(dir_logits)
            weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
            dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size
            dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
            teach_loss += dir_loss
            tb_dict['rpn_kd_loss_dir'] = dir_loss.item()
        return teach_loss, tb_dict
    def get_kd_cls_loss(self):
        """Hard-label KD classification loss: student class predictions
        against anchor labels assigned from the teacher's boxes.

        Returns:
            (cls_loss, tb_dict): weighted classification loss and its
            tensorboard entry.
        """
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.knowledge_forward_rect['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; -1 marks ignored anchors
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        # NOTE(review): reg_weights is computed but never used in this method.
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        # Normalise both weight sets by the positive count per sample.
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        # NOTE(review): this unsqueeze/squeeze pair is a no-op as written.
        cls_targets = cls_targets.unsqueeze(dim=-1)
        cls_targets = cls_targets.squeeze(dim=-1)
        # One-hot over (background + classes); the background column is
        # dropped below before the loss.
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        cls_loss = cls_loss_src.sum() / batch_size
        cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
        tb_dict = {
            'rpn_kd_loss_cls': cls_loss.item(),
        }
        return cls_loss, tb_dict
    def get_hint_loss(self):
        '''
        Feature (hint) distillation loss, following the NeurIPS 2020
        rich-feature knowledge-distillation idea: weight the feature maps by
        the per-location |teacher - student| classification-score gap ("diversity"
        mask), then compare student and teacher features under an MSE cost.

        Returns:
            (fea_loss, tb_dict): the spatial-feature MSE divided by the batch
            size, and a tensorboard dict that also records the cls/reg stem
            feature losses and the weighted total. Note only fea_loss is
            returned as the scalar; the weighted total appears in tb_dict only.
        '''
        cost_function = nn.MSELoss()
        #cost_function = nn.KLDivLoss(reduction='batchmean')
        student_feature = self.forward_ret_dict['student_feature']
        student_mask = F.sigmoid(self.forward_ret_dict['cls_preds'])
        teacher_feature = self.knowledge_forward_rect['teacher_feature']
        teacher_mask = F.sigmoid(self.knowledge_forward_rect['teacher_cls_pred'])
        # Reshape both score maps onto the 200 x 176 BEV grid (hard-coded
        # feature-map size; presumably KITTI — TODO confirm), then reduce over
        # anchors and classes to one score per location.
        teacher_mask = teacher_mask.view(teacher_mask.shape[0], self.num_anchors_per_location, 200, 176, teacher_mask.shape[2])
        student_mask = student_mask.view(student_mask.shape[0], 200, 176, self.num_anchors_per_location, -1)
        mask_filter_teacher, _ = torch.max(teacher_mask, dim=1, keepdim=True)
        mask_filter_teacher, _ = torch.max(mask_filter_teacher, dim=-1)
        mask_filter_student, _ = torch.max(student_mask, dim=-2, keepdim=True)
        mask_filter_student = mask_filter_student.permute(0, 3, 1, 2, 4)
        mask_filter_student, _ = torch.max(mask_filter_student, dim=-1)
        # "Diversity" mask: large where teacher and student disagree.
        mask_filter = torch.abs(mask_filter_teacher - mask_filter_student)
        teacher_div_feature = mask_filter * teacher_feature
        student_div_feature = mask_filter * student_feature
        # Apply the same mask to the intermediate cls/reg stem features.
        student_cls_temp = self.forward_ret_dict['student_cls_temp'] * mask_filter
        student_reg_temp = self.forward_ret_dict['student_reg_temp'] * mask_filter
        teacher_cls_temp = self.knowledge_forward_rect['teacher_head_cls_temp'] * mask_filter
        teacher_reg_temp = self.knowledge_forward_rect['teacher_head_reg_temp'] * mask_filter
        fea_loss = cost_function(student_div_feature, teacher_div_feature)
        cls_fea_loss = cost_function(student_cls_temp, teacher_cls_temp)
        reg_fea_loss = cost_function(student_reg_temp, teacher_reg_temp)
        # Per-term weights; cls/reg fall back to the generic feature weight
        # when not configured.
        fea_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_fea_weight']
        if self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS.get('kd_cls_weight', None) is not None:
            cls_fea_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_cls_weight']
        else:
            cls_fea_weight = fea_weight
        if self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS.get('kd_reg_weight', None) is not None:
            reg_fea_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_reg_weight']
        else:
            reg_fea_weight = fea_weight
        feat_totall_loss = fea_loss * fea_weight + cls_fea_loss * cls_fea_weight + reg_fea_loss * reg_fea_weight
        fea_loss = fea_loss / student_feature.shape[0]
        tb_dict = {
            'rpn_spatial_feature_loss': fea_loss.item(),
            'rpn_cls_fea_loss': cls_fea_loss.item(),
            'rpn_reg_fea_loss': reg_fea_loss.item(),
            'rpn_feat_totall_loss': feat_totall_loss.item(),
        }
        return fea_loss, tb_dict
def get_loss(self):
cls_loss, tb_dict = self.get_cls_layer_loss()
box_loss, tb_dict_box = self.get_box_reg_layer_loss()
kd_cls_loss, tb_dict_cls_teach = self.get_kd_cls_loss()
kd_reg_loss, tb_dict_reg_teach = self.get_kd_reg_loss()
rpn_loss = cls_loss + box_loss + kd_cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_hard_cls_weight'] \
+ kd_reg_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_hard_reg_weight']
tb_dict.update(tb_dict_box)
tb_dict.update(tb_dict_cls_teach)
if self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS.get('kd_fea_weight', None) is not None:
fea_loss, tb_fea_dict = self.get_hint_loss()
rpn_loss += fea_loss
tb_dict.update(tb_fea_dict)
if self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS.get('kd_con_weight', None) is not None:
con_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['kd_con_weight']
con_loss, tb_con_dict = self.consistency_loss()
rpn_loss += con_loss[0] * con_weight
tb_dict.update(tb_con_dict)
tb_dict['rpn_loss'] = rpn_loss.item()
return rpn_loss, tb_dict
def forward(self, data_dict):
spatial_features_2d = data_dict['spatial_features_2d']
cls_temp = self.cls_stems(spatial_features_2d)
reg_temp = self.reg_stems(spatial_features_2d)
cls_preds = self.conv_cls(cls_temp)
box_preds = self.conv_box(reg_temp)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(reg_temp)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
### In here we should add some code to assign the target for soft label...
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes']
)
self.forward_ret_dict.update(targets_dict)
teacher_dict = self.assign_targets(
gt_boxes=data_dict['teacher_box']
)
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
self.forward_ret_dict['gt_boxes'] = data_dict['gt_boxes']
self.forward_ret_dict['student_feature'] = spatial_features_2d
self.forward_ret_dict['student_cls_temp'] = cls_temp
self.forward_ret_dict['student_reg_temp'] = reg_temp
self.knowledge_forward_rect.update(teacher_dict)
self.knowledge_forward_rect['teacher_feature'] = data_dict['teacher_feature']
self.knowledge_forward_rect['teacher_cls_pred'] = data_dict['teacher_cls_pred']
self.knowledge_forward_rect['teacher_head_cls_temp'] = data_dict['teacher_cls_feature']
self.knowledge_forward_rect['teacher_head_reg_temp'] = data_dict['teacher_reg_feature']
self.knowledge_forward_rect['teacher_dir_pred'] = data_dict['teacher_dir_pred']
self.knowledge_forward_rect['teacher_box_pred'] = data_dict['teacher_box_pred']
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
self.forward_ret_dict['batch_cls_preds'] = batch_cls_preds
self.forward_ret_dict['batch_box_preds'] = batch_box_preds
self.forward_ret_dict['batch_dir_cls_preds'] = dir_cls_preds
data_dict['cls_preds_normalized'] = False
return data_dict
| [
"torch.nn.MSELoss",
"numpy.log",
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.functional.softmax",
"torch.sigmoid",
"torch.clamp",
"torch.max",
"torch.nn.init.normal_",
"torch.nn.functional.sigmoid",
"torch.nn.SmoothL1Loss",
"torch.abs",
"torch.tensor"
] | [((1446, 1523), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(self.num_anchors_per_location * self.num_class)'], {'kernel_size': '(1)'}), '(256, self.num_anchors_per_location * self.num_class, kernel_size=1)\n', (1455, 1523), True, 'import torch.nn as nn\n'), ((1582, 1673), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(self.num_anchors_per_location * self.box_coder.code_size)'], {'kernel_size': '(1)'}), '(256, self.num_anchors_per_location * self.box_coder.code_size,\n kernel_size=1)\n', (1591, 1673), True, 'import torch.nn as nn\n'), ((1733, 1827), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS)'], {'kernel_size': '(1)'}), '(256, self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,\n kernel_size=1)\n', (1742, 1827), True, 'import torch.nn as nn\n'), ((2585, 2641), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv_box.weight'], {'mean': '(0)', 'std': '(0.001)'}), '(self.conv_box.weight, mean=0, std=0.001)\n', (2600, 2641), True, 'import torch.nn as nn\n'), ((2813, 2841), 'torch.max', 'torch.max', (['iou_ground'], {'dim': '(1)'}), '(iou_ground, dim=1)\n', (2822, 2841), False, 'import torch\n'), ((2863, 2891), 'torch.max', 'torch.max', (['iou_ground'], {'dim': '(0)'}), '(iou_ground, dim=0)\n', (2872, 2891), False, 'import torch\n'), ((3231, 3259), 'torch.max', 'torch.max', (['iou_ground'], {'dim': '(1)'}), '(iou_ground, dim=1)\n', (3240, 3259), False, 'import torch\n'), ((3281, 3309), 'torch.max', 'torch.max', (['iou_ground'], {'dim': '(0)'}), '(iou_ground, dim=0)\n', (3290, 3309), False, 'import torch\n'), ((4003, 4039), 'torch.clamp', 'torch.clamp', (['pos_normalizer'], {'min': '(1.0)'}), '(pos_normalizer, min=1.0)\n', (4014, 4039), False, 'import torch\n'), ((4183, 4206), 'torch.nn.SmoothL1Loss', 'torch.nn.SmoothL1Loss', ([], {}), '()\n', (4204, 4206), False, 'import torch\n'), ((8166, 8202), 'torch.clamp', 'torch.clamp', (['pos_normalizer'], {'min': '(1.0)'}), '(pos_normalizer, 
min=1.0)\n', (8177, 8202), False, 'import torch\n'), ((8222, 8253), 'torch.cat', 'torch.cat', (['self.anchors'], {'dim': '(-3)'}), '(self.anchors, dim=-3)\n', (8231, 8253), False, 'import torch\n'), ((10564, 10600), 'torch.clamp', 'torch.clamp', (['pos_normalizer'], {'min': '(1.0)'}), '(pos_normalizer, min=1.0)\n', (10575, 10600), False, 'import torch\n'), ((10624, 10660), 'torch.clamp', 'torch.clamp', (['pos_normalizer'], {'min': '(1.0)'}), '(pos_normalizer, min=1.0)\n', (10635, 10660), False, 'import torch\n'), ((11811, 11823), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (11821, 11823), True, 'import torch.nn as nn\n'), ((11980, 12025), 'torch.nn.functional.sigmoid', 'F.sigmoid', (["self.forward_ret_dict['cls_preds']"], {}), "(self.forward_ret_dict['cls_preds'])\n", (11989, 12025), True, 'import torch.nn.functional as F\n'), ((12125, 12183), 'torch.nn.functional.sigmoid', 'F.sigmoid', (["self.knowledge_forward_rect['teacher_cls_pred']"], {}), "(self.knowledge_forward_rect['teacher_cls_pred'])\n", (12134, 12183), True, 'import torch.nn.functional as F\n'), ((12456, 12500), 'torch.max', 'torch.max', (['teacher_mask'], {'dim': '(1)', 'keepdim': '(True)'}), '(teacher_mask, dim=1, keepdim=True)\n', (12465, 12500), False, 'import torch\n'), ((12534, 12572), 'torch.max', 'torch.max', (['mask_filter_teacher'], {'dim': '(-1)'}), '(mask_filter_teacher, dim=-1)\n', (12543, 12572), False, 'import torch\n'), ((12606, 12651), 'torch.max', 'torch.max', (['student_mask'], {'dim': '(-2)', 'keepdim': '(True)'}), '(student_mask, dim=-2, keepdim=True)\n', (12615, 12651), False, 'import torch\n'), ((12758, 12796), 'torch.max', 'torch.max', (['mask_filter_student'], {'dim': '(-1)'}), '(mask_filter_student, dim=-1)\n', (12767, 12796), False, 'import torch\n'), ((12819, 12871), 'torch.abs', 'torch.abs', (['(mask_filter_teacher - mask_filter_student)'], {}), '(mask_filter_teacher - mask_filter_student)\n', (12828, 12871), False, 'import torch\n'), ((926, 962), 'torch.nn.Conv2d', 
'nn.Conv2d', (['input_channels', '(256)', '(1)', '(1)'], {}), '(input_channels, 256, 1, 1)\n', (935, 962), True, 'import torch.nn as nn\n'), ((1006, 1034), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (1015, 1034), True, 'import torch.nn as nn\n'), ((1078, 1106), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (1087, 1106), True, 'import torch.nn as nn\n'), ((1200, 1236), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(256)', '(1)', '(1)'], {}), '(input_channels, 256, 1, 1)\n', (1209, 1236), True, 'import torch.nn as nn\n'), ((1280, 1308), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (1289, 1308), True, 'import torch.nn as nn\n'), ((1352, 1380), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (1361, 1380), True, 'import torch.nn as nn\n'), ((5003, 5033), 'torch.max', 'torch.max', (['student_cls'], {'dim': '(-1)'}), '(student_cls, dim=-1)\n', (5012, 5033), False, 'import torch\n'), ((5061, 5091), 'torch.max', 'torch.max', (['teacher_cls'], {'dim': '(-1)'}), '(teacher_cls, dim=-1)\n', (5070, 5091), False, 'import torch\n'), ((1965, 2015), 'torch.tensor', 'torch.tensor', (['post_center_range'], {'dtype': 'torch.float'}), '(post_center_range, dtype=torch.float)\n', (1977, 2015), False, 'import torch\n'), ((2554, 2575), 'numpy.log', 'np.log', (['((1 - pi) / pi)'], {}), '((1 - pi) / pi)\n', (2560, 2575), True, 'import numpy as np\n'), ((5163, 5203), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.float32'}), '([0.0], dtype=torch.float32)\n', (5175, 5203), False, 'import torch\n'), ((5235, 5275), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.float32'}), '([0.0], dtype=torch.float32)\n', (5247, 5275), False, 'import torch\n'), ((5307, 5347), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.float32'}), 
'([0.0], dtype=torch.float32)\n', (5319, 5347), False, 'import torch\n'), ((6702, 6742), 'torch.sigmoid', 'torch.sigmoid', (['student_cls_filter[mask1]'], {}), '(student_cls_filter[mask1])\n', (6715, 6742), False, 'import torch\n'), ((6782, 6828), 'torch.sigmoid', 'torch.sigmoid', (['teacher_cls_filter[mask2][idx1]'], {}), '(teacher_cls_filter[mask2][idx1])\n', (6795, 6828), False, 'import torch\n'), ((6868, 6912), 'torch.nn.functional.softmax', 'F.softmax', (['student_dir_filter[mask1]'], {'dim': '(-1)'}), '(student_dir_filter[mask1], dim=-1)\n', (6877, 6912), True, 'import torch.nn.functional as F\n'), ((6952, 7002), 'torch.nn.functional.softmax', 'F.softmax', (['teacher_dir_filter[mask2][idx1]'], {'dim': '(-1)'}), '(teacher_dir_filter[mask2][idx1], dim=-1)\n', (6961, 7002), True, 'import torch.nn.functional as F\n')] |
""" Tests of the command-line interface
:Author: <NAME> <<EMAIL>>
:Date: 2021-08-12
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from biosimulators_masspy import __main__
from biosimulators_masspy import core
from biosimulators_masspy.data_model import KISAO_ALGORITHM_MAP
from biosimulators_utils.combine import data_model as combine_data_model
from biosimulators_utils.combine.exceptions import CombineArchiveExecutionError
from biosimulators_utils.combine.io import CombineArchiveWriter
from biosimulators_utils.config import get_config
from biosimulators_utils.report import data_model as report_data_model
from biosimulators_utils.report.io import ReportReader
from biosimulators_utils.simulator.exec import exec_sedml_docs_in_archive_with_containerized_simulator
from biosimulators_utils.simulator.specs import gen_algorithms_from_specs
from biosimulators_utils.sedml import data_model as sedml_data_model
from biosimulators_utils.sedml.io import SedmlSimulationWriter
from biosimulators_utils.sedml.utils import append_all_nested_children_to_doc
from biosimulators_utils.warnings import BioSimulatorsWarning
from kisao.exceptions import AlgorithmCannotBeSubstitutedException
from unittest import mock
import copy
import datetime
import dateutil.tz
import json
import mass
import numpy
import numpy.testing
import os
import shutil
import tempfile
import unittest
import yaml
class CliTestCase(unittest.TestCase):
    """Integration tests for the biosimulators_masspy CLI and core API."""

    # SBML model fixture used by all simulation tests.
    EXAMPLE_MODEL_FILENAME = os.path.join(os.path.dirname(__file__), 'fixtures', 'textbook.xml')
    # XML namespace prefixes used in SED-ML XPath targets.
    NAMESPACES = {
        'sbml': 'http://www.sbml.org/sbml/level3/version1/core'
    }
    # Simulator capability specifications shipped with the package.
    SPECIFICATIONS_FILENAME = os.path.join(os.path.dirname(__file__), '..', 'biosimulators.json')
    # Docker image exercised by the containerized-execution test.
    DOCKER_IMAGE = 'ghcr.io/biosimulators/biosimulators_masspy/masspy:latest'
    def setUp(self):
        """Create a scratch directory for test outputs."""
        self.dirname = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the scratch directory created by :meth:`setUp`."""
        shutil.rmtree(self.dirname)
    def test_exec_sed_task_successfully(self):
        """Execute a uniform time course with CVODE (KISAO_0000019) and a custom
        relative tolerance, then check the variable results and execution log."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000019',
                    changes=[
                        # KISAO_0000209: relative tolerance (checked below in the log)
                        sedml_data_model.AlgorithmParameterChange(
                            kisao_id='KISAO_0000209',
                            new_value='1e-8',
                        )
                    ]
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
            sedml_data_model.Variable(
                id='f6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_f6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
            sedml_data_model.Variable(
                id='HEX1',
                target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_HEX1']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        variable_results, log = core.exec_sed_task(task, variables)

        # check that the simulation was executed correctly
        self.assertEqual(set(variable_results.keys()), set(['Time', 'g6p', 'f6p', 'HEX1']))
        for variable_result in variable_results.values():
            self.assertFalse(numpy.any(numpy.isnan(variable_result)))
        numpy.testing.assert_allclose(
            variable_results['Time'],
            numpy.linspace(
                task.simulation.output_start_time,
                task.simulation.output_end_time,
                task.simulation.number_of_points + 1,
            ))

        # check that log can be serialized to JSON
        self.assertEqual(log.algorithm, 'KISAO_0000019')
        self.assertEqual(log.simulator_details['integrator'], 'cvode')
        self.assertEqual(log.simulator_details['relative_tolerance'], 1e-8)

        json.dumps(log.to_json())

        # check that the log can be exported to YAML and re-serialized
        log.out_dir = self.dirname
        log.export()
        with open(os.path.join(self.dirname, get_config().LOG_PATH), 'rb') as file:
            log_data = yaml.load(file, Loader=yaml.Loader)
        json.dumps(log_data)
    def test_exec_sed_task_non_zero_initial_time(self):
        """Execute a time course whose initial time (10) and output start time
        (20) are non-zero, and check the reported time axis."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=10.,
                output_start_time=20.,
                output_end_time=30.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000019',
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        variable_results, log = core.exec_sed_task(task, variables)

        # check that the simulation was executed correctly
        self.assertEqual(set(variable_results.keys()), set(['Time', 'g6p']))
        for variable_result in variable_results.values():
            self.assertFalse(numpy.any(numpy.isnan(variable_result)))
        numpy.testing.assert_allclose(
            variable_results['Time'],
            numpy.linspace(
                task.simulation.output_start_time,
                task.simulation.output_end_time,
                task.simulation.number_of_points + 1,
            ))
    def test_exec_sed_task_alt_alg(self):
        """Execute the same time course with an alternative algorithm
        (KISAO_0000086) and check the results."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000086',
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        variable_results, log = core.exec_sed_task(task, variables)

        # check that the simulation was executed correctly
        self.assertEqual(set(variable_results.keys()), set(['Time', 'g6p']))
        for variable_result in variable_results.values():
            self.assertFalse(numpy.any(numpy.isnan(variable_result)), variable_result)
        numpy.testing.assert_allclose(
            variable_results['Time'],
            numpy.linspace(
                task.simulation.output_start_time,
                task.simulation.output_end_time,
                task.simulation.number_of_points + 1,
            ))
    def test_exec_sed_task_alg_substitution(self):
        """Check algorithm-substitution policies: unsupported algorithms and
        parameters raise under policy NONE but are substituted (with warnings)
        under SIMILAR_VARIABLES."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000019',
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        # unsupported algorithm: error under NONE, substituted under SIMILAR_VARIABLES
        task_2 = copy.deepcopy(task)
        task_2.simulation.algorithm.kisao_id = 'KISAO_0000560'
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}):
            with self.assertRaises(AlgorithmCannotBeSubstitutedException):
                core.exec_sed_task(task_2, variables)

        task_2 = copy.deepcopy(task)
        task_2.simulation.algorithm.kisao_id = 'KISAO_0000560'
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}):
            core.exec_sed_task(task_2, variables)

        # unsupported algorithm parameter: error under NONE, warning otherwise
        task_2 = copy.deepcopy(task)
        task_2.simulation.algorithm.changes.append(sedml_data_model.AlgorithmParameterChange(
            kisao_id='KISAO_0000488',
            new_value='1',
        ))
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}):
            with self.assertRaises(NotImplementedError):
                core.exec_sed_task(task_2, variables)
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}):
            with self.assertWarns(BioSimulatorsWarning):
                core.exec_sed_task(task_2, variables)

        # invalid parameter value: error under NONE, warning otherwise
        task_2 = copy.deepcopy(task)
        task_2.simulation.algorithm.changes.append(sedml_data_model.AlgorithmParameterChange(
            kisao_id='KISAO_0000209',
            new_value='abc',
        ))
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}):
            with self.assertRaises(ValueError):
                core.exec_sed_task(task_2, variables)
        with mock.patch.dict('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}):
            with self.assertWarns(BioSimulatorsWarning):
                core.exec_sed_task(task_2, variables)
    def test_exec_sed_task_with_changes(self):
        """Check model attribute changes (species initial conditions and
        parameter values) and re-use of a preprocessed task across executions."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000019',
                ),
            ),
        )
        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
        ]

        # register a (no-value) change and an output variable for every metabolite
        mass_model = mass.io.sbml.read_sbml_model(task.model.source)
        for met in mass_model.metabolites:
            task.model.changes.append(sedml_data_model.ModelAttributeChange(
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_{}']".format(met.id),
                target_namespaces=self.NAMESPACES,
                new_value=None))
            variables.append(sedml_data_model.Variable(
                id=met.id,
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_{}']".format(met.id),
                target_namespaces=self.NAMESPACES,
                task=task))

        # changes to global, custom, boundary, and rate-constant parameters
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='v_R_HEX1']",
            target_namespaces=self.NAMESPACES,
            new_value=10))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_PFK_R01']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='Keq_PFK_A']",
            target_namespaces=self.NAMESPACES,
            new_value=20))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_SK_lac__L_c']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='lac__L_b']",
            target_namespaces=self.NAMESPACES,
            new_value=25))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ADK1']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='kf_R_ADK1']",
            target_namespaces=self.NAMESPACES,
            new_value=30))

        # execute simulation
        preprocessed_task = core.preprocess_sed_task(task, variables)

        # an unsupported change target should be rejected during preprocessing
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model",
            target_namespaces=self.NAMESPACES,
            new_value=None))
        with self.assertRaises(ValueError):
            core.preprocess_sed_task(task, variables)

        task.model.changes = []
        results, _ = core.exec_sed_task(task, variables, preprocessed_task=preprocessed_task)
        # the trajectory should not repeat itself within the simulated window
        for met in mass_model.metabolites:
            with self.assertRaises(AssertionError):
                numpy.testing.assert_allclose(results[met.id][0:task.simulation.number_of_points + 1],
                                              results[met.id][(-task.simulation.number_of_points + 1):])

        # first half of the course matches a half-length re-run
        task.simulation.output_end_time = task.simulation.output_end_time / 2
        task.simulation.number_of_points = int(task.simulation.number_of_points / 2)
        results2, _ = core.exec_sed_task(task, variables, preprocessed_task=preprocessed_task)
        for met in mass_model.metabolites:
            numpy.testing.assert_allclose(results2[met.id], results[met.id][0:task.simulation.number_of_points + 1])
            # restart from the midpoint state via model changes
            task.model.changes.append(sedml_data_model.ModelAttributeChange(
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_{}']".format(met.id),
                target_namespaces=self.NAMESPACES,
                new_value=results2[met.id][-1]))

        # second half of the course matches a continuation from the midpoint
        results3, _ = core.exec_sed_task(task, variables, preprocessed_task=preprocessed_task)
        for met in mass_model.metabolites:
            numpy.testing.assert_allclose(results3[met.id], results[met.id][-(task.simulation.number_of_points + 1):])

        # parameters
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='v_R_HEX1']",
            target_namespaces=self.NAMESPACES,
            new_value=10))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_PFK_R01']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='Keq_PFK_A']",
            target_namespaces=self.NAMESPACES,
            new_value=20))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_SK_lac__L_c']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='lac__L_b']",
            target_namespaces=self.NAMESPACES,
            new_value=25))
        task.model.changes.append(sedml_data_model.ModelAttributeChange(
            target="/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ADK1']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id='kf_R_ADK1']",
            target_namespaces=self.NAMESPACES,
            new_value=30))
        core.exec_sed_task(task, variables, preprocessed_task=preprocessed_task)
        # parameter changes should be reflected in the preprocessed model
        self.assertEqual(preprocessed_task['model']['model'].parameters['v']['v_HEX1'], 10)
        self.assertEqual(preprocessed_task['model']['model'].parameters['Custom']['Keq_PFK_A'], 20)
        self.assertEqual(preprocessed_task['model']['model'].parameters['Boundary']['lac__L_b'], 25)
        self.assertEqual(preprocessed_task['model']['model'].parameters['kf']['kf_ADK1'], 30)
    def test_exec_sed_task_sim_error_handling(self):
        """A simulation that fails numerically (algorithm KISAO_0000030 on this
        model) should raise ValueError with a 'Simulation failed' message."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000030',
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        with self.assertRaisesRegex(ValueError, 'Simulation failed'):
            core.exec_sed_task(task, variables)
    def test_exec_sed_task_error_handling(self):
        """Invalid symbols, invalid targets, and non-integer output start times
        should raise informative errors."""
        # configure simulation
        task = sedml_data_model.Task(
            model=sedml_data_model.Model(
                source=self.EXAMPLE_MODEL_FILENAME,
                language=sedml_data_model.ModelLanguage.SBML.value,
            ),
            simulation=sedml_data_model.UniformTimeCourseSimulation(
                initial_time=0.,
                output_start_time=0.,
                output_end_time=10.,
                number_of_points=10,
                algorithm=sedml_data_model.Algorithm(
                    kisao_id='KISAO_0000019',
                ),
            ),
        )

        variables = [
            sedml_data_model.Variable(
                id='Time',
                symbol=sedml_data_model.Symbol.time,
                task=task),
            sedml_data_model.Variable(
                id='g6p',
                target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                target_namespaces=self.NAMESPACES,
                task=task),
        ]

        # execute simulation
        variables_2 = copy.deepcopy(variables)
        variables_2[0].symbol = 'mass'
        with self.assertRaisesRegex(NotImplementedError, 'The following symbols are not supported'):
            core.exec_sed_task(task, variables_2)

        variables_2 = copy.deepcopy(variables)
        variables_2[1].target = '/sbml:sbml'
        with self.assertRaisesRegex(ValueError, 'The following targets are not supported'):
            core.exec_sed_task(task, variables_2)

        task_2 = copy.deepcopy(task)
        task_2.simulation.output_start_time = 1.5
        with self.assertRaisesRegex(NotImplementedError, 'must be an integer'):
            core.exec_sed_task(task_2, variables)
    def test_exec_sedml_docs_in_combine_archive_successfully(self):
        """End-to-end execution of a COMBINE archive with the default algorithm."""
        doc, archive_filename = self._build_combine_archive()

        out_dir = os.path.join(self.dirname, 'out')

        config = get_config()
        config.REPORT_FORMATS = [report_data_model.ReportFormat.h5]
        config.BUNDLE_OUTPUTS = True
        config.KEEP_INDIVIDUAL_OUTPUTS = True

        _, log = core.exec_sedml_docs_in_combine_archive(archive_filename, out_dir, config=config)
        if log.exception:
            raise log.exception

        self._assert_combine_archive_outputs(doc, out_dir)
def _build_combine_archive(self, algorithm=None):
doc = self._build_sed_doc(algorithm=algorithm)
archive_dirname = os.path.join(self.dirname, 'archive')
if not os.path.isdir(archive_dirname):
os.mkdir(archive_dirname)
model_filename = os.path.join(archive_dirname, 'model.xml')
shutil.copyfile(self.EXAMPLE_MODEL_FILENAME, model_filename)
sim_filename = os.path.join(archive_dirname, 'sim.sedml')
SedmlSimulationWriter().run(doc, sim_filename)
archive = combine_data_model.CombineArchive(
contents=[
combine_data_model.CombineArchiveContent(
'model.xml', combine_data_model.CombineArchiveContentFormat.SBML.value),
combine_data_model.CombineArchiveContent(
'sim.sedml', combine_data_model.CombineArchiveContentFormat.SED_ML.value),
],
)
archive_filename = os.path.join(self.dirname, 'archive.omex')
CombineArchiveWriter().run(archive, archive_dirname, archive_filename)
return (doc, archive_filename)
    def _build_sed_doc(self, algorithm=None):
        """Build a SED-ML document with one model, one uniform time course,
        one task, two data generators (time, g6p), and one report.

        Args:
            algorithm: optional SED-ML algorithm; defaults to KISAO_0000019.

        Returns:
            the SED-ML document.
        """
        if algorithm is None:
            algorithm = sedml_data_model.Algorithm(
                kisao_id='KISAO_0000019',
            )

        doc = sedml_data_model.SedDocument()
        doc.models.append(sedml_data_model.Model(
            id='model',
            source='model.xml',
            language=sedml_data_model.ModelLanguage.SBML.value,
        ))
        doc.simulations.append(sedml_data_model.UniformTimeCourseSimulation(
            id='sim_time_course',
            initial_time=0.,
            output_start_time=0.,
            output_end_time=10.,
            number_of_points=10,
            algorithm=algorithm,
        ))
        doc.tasks.append(sedml_data_model.Task(
            id='task_1',
            model=doc.models[0],
            simulation=doc.simulations[0],
        ))
        doc.data_generators.append(sedml_data_model.DataGenerator(
            id='data_gen_time',
            variables=[
                sedml_data_model.Variable(
                    id='var_time',
                    symbol=sedml_data_model.Symbol.time.value,
                    task=doc.tasks[0],
                ),
            ],
            math='var_time',
        ))
        doc.data_generators.append(sedml_data_model.DataGenerator(
            id='data_gen_g6p',
            variables=[
                sedml_data_model.Variable(
                    id='var_g6p',
                    target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_g6p_c']",
                    target_namespaces=self.NAMESPACES,
                    task=doc.tasks[0],
                ),
            ],
            math='var_g6p',
        ))
        doc.outputs.append(sedml_data_model.Report(
            id='report',
            data_sets=[
                sedml_data_model.DataSet(id='data_set_time', label='Time', data_generator=doc.data_generators[0]),
                sedml_data_model.DataSet(id='data_set_g6p', label='g6p', data_generator=doc.data_generators[1]),
            ],
        ))

        append_all_nested_children_to_doc(doc)

        return doc
    def _assert_combine_archive_outputs(self, doc, out_dir):
        """Assert that executing the archive produced the expected HDF5 report
        with the expected data sets, lengths, and time axis."""
        self.assertEqual(set(['reports.h5']).difference(set(os.listdir(out_dir))), set())

        report = ReportReader().run(doc.outputs[0], out_dir, 'sim.sedml/report', format=report_data_model.ReportFormat.h5)

        self.assertEqual(sorted(report.keys()), sorted([d.id for d in doc.outputs[0].data_sets]))

        sim = doc.tasks[0].simulation
        self.assertEqual(len(report[doc.outputs[0].data_sets[0].id]), sim.number_of_points + 1)

        for data_set_result in report.values():
            self.assertFalse(numpy.any(numpy.isnan(data_set_result)))

        self.assertIn('data_set_time', report)
        numpy.testing.assert_allclose(report[doc.outputs[0].data_sets[0].id],
                                      numpy.linspace(sim.output_start_time, sim.output_end_time, sim.number_of_points + 1))
    def test_exec_sedml_docs_in_combine_archive_with_all_algorithms(self):
        """Execute the archive with every algorithm declared in the simulator's
        specifications; only KISAO_0000030 and KISAO_0000032 may fail to simulate
        this model."""
        failures = []

        for alg in gen_algorithms_from_specs(self.SPECIFICATIONS_FILENAME).values():
            doc, archive_filename = self._build_combine_archive(algorithm=alg)

            out_dir = os.path.join(self.dirname, alg.kisao_id)
            config = get_config()
            config.REPORT_FORMATS = [report_data_model.ReportFormat.h5]
            config.BUNDLE_OUTPUTS = True
            config.KEEP_INDIVIDUAL_OUTPUTS = True

            try:
                _, log = core.exec_sedml_docs_in_combine_archive(archive_filename, out_dir, config=config)
                if log.exception:
                    raise log.exception

                self._assert_combine_archive_outputs(doc, out_dir)
            except CombineArchiveExecutionError as exception:
                # tolerate only numerical simulation failures; re-raise anything else
                if 'Simulation failed' in str(exception):
                    failures.append(alg.kisao_id)
                else:
                    raise

        self.assertEqual(failures, ['KISAO_0000030', 'KISAO_0000032'])  # model can't be executed with these algorithms
    def test_exec_sedml_docs_in_combine_archive_with_cli(self):
        """Execute an archive through the command-line entry point."""
        doc, archive_filename = self._build_combine_archive()
        out_dir = os.path.join(self.dirname, 'out')
        env = self._get_combine_archive_exec_env()

        with mock.patch.dict(os.environ, env):
            with __main__.App(argv=['-i', archive_filename, '-o', out_dir]) as app:
                app.run()

        self._assert_combine_archive_outputs(doc, out_dir)
def _get_combine_archive_exec_env(self):
return {
'REPORT_FORMATS': 'h5'
}
    def test_exec_sedml_docs_in_combine_archive_with_docker_image(self):
        """Execute an archive inside the simulator's Docker image (image must
        already be available locally; it is not pulled)."""
        doc, archive_filename = self._build_combine_archive()

        out_dir = os.path.join(self.dirname, 'out')
        docker_image = self.DOCKER_IMAGE
        env = self._get_combine_archive_exec_env()
        exec_sedml_docs_in_archive_with_containerized_simulator(
            archive_filename, out_dir, docker_image, environment=env, pull_docker_image=False)

        self._assert_combine_archive_outputs(doc, out_dir)
| [
"os.mkdir",
"yaml.load",
"biosimulators_utils.simulator.specs.gen_algorithms_from_specs",
"biosimulators_utils.sedml.data_model.ModelAttributeChange",
"biosimulators_utils.combine.io.CombineArchiveWriter",
"biosimulators_utils.sedml.data_model.Model",
"json.dumps",
"numpy.isnan",
"mass.io.sbml.read_... | [((1500, 1525), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1515, 1525), False, 'import os\n'), ((1687, 1712), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1702, 1712), False, 'import os\n'), ((1865, 1883), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1881, 1883), False, 'import tempfile\n'), ((1917, 1944), 'shutil.rmtree', 'shutil.rmtree', (['self.dirname'], {}), '(self.dirname)\n', (1930, 1944), False, 'import shutil\n'), ((3804, 3839), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {}), '(task, variables)\n', (3822, 3839), False, 'from biosimulators_masspy import core\n'), ((4893, 4913), 'json.dumps', 'json.dumps', (['log_data'], {}), '(log_data)\n', (4903, 4913), False, 'import json\n'), ((6058, 6093), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {}), '(task, variables)\n', (6076, 6093), False, 'from biosimulators_masspy import core\n'), ((7761, 7796), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {}), '(task, variables)\n', (7779, 7796), False, 'from biosimulators_masspy import core\n'), ((9475, 9494), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (9488, 9494), False, 'import copy\n'), ((9792, 9811), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (9805, 9811), False, 'import copy\n'), ((10043, 10062), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (10056, 10062), False, 'import copy\n'), ((10661, 10680), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (10674, 10680), False, 'import copy\n'), ((12107, 12154), 'mass.io.sbml.read_sbml_model', 'mass.io.sbml.read_sbml_model', (['task.model.source'], {}), '(task.model.source)\n', (12135, 12154), False, 'import mass\n'), ((14007, 14048), 'biosimulators_masspy.core.preprocess_sed_task', 'core.preprocess_sed_task', (['task', 
'variables'], {}), '(task, variables)\n', (14031, 14048), False, 'from biosimulators_masspy import core\n'), ((14395, 14467), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {'preprocessed_task': 'preprocessed_task'}), '(task, variables, preprocessed_task=preprocessed_task)\n', (14413, 14467), False, 'from biosimulators_masspy import core\n'), ((14958, 15030), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {'preprocessed_task': 'preprocessed_task'}), '(task, variables, preprocessed_task=preprocessed_task)\n', (14976, 15030), False, 'from biosimulators_masspy import core\n'), ((15499, 15571), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {'preprocessed_task': 'preprocessed_task'}), '(task, variables, preprocessed_task=preprocessed_task)\n', (15517, 15571), False, 'from biosimulators_masspy import core\n'), ((16978, 17050), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {'preprocessed_task': 'preprocessed_task'}), '(task, variables, preprocessed_task=preprocessed_task)\n', (16996, 17050), False, 'from biosimulators_masspy import core\n'), ((19788, 19812), 'copy.deepcopy', 'copy.deepcopy', (['variables'], {}), '(variables)\n', (19801, 19812), False, 'import copy\n'), ((20026, 20050), 'copy.deepcopy', 'copy.deepcopy', (['variables'], {}), '(variables)\n', (20039, 20050), False, 'import copy\n'), ((20256, 20275), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (20269, 20275), False, 'import copy\n'), ((20606, 20639), 'os.path.join', 'os.path.join', (['self.dirname', '"""out"""'], {}), "(self.dirname, 'out')\n", (20618, 20639), False, 'import os\n'), ((20658, 20670), 'biosimulators_utils.config.get_config', 'get_config', ([], {}), '()\n', (20668, 20670), False, 'from biosimulators_utils.config import get_config\n'), ((20840, 20926), 
'biosimulators_masspy.core.exec_sedml_docs_in_combine_archive', 'core.exec_sedml_docs_in_combine_archive', (['archive_filename', 'out_dir'], {'config': 'config'}), '(archive_filename, out_dir, config=\n config)\n', (20879, 20926), False, 'from biosimulators_masspy import core\n'), ((21177, 21214), 'os.path.join', 'os.path.join', (['self.dirname', '"""archive"""'], {}), "(self.dirname, 'archive')\n", (21189, 21214), False, 'import os\n'), ((21326, 21368), 'os.path.join', 'os.path.join', (['archive_dirname', '"""model.xml"""'], {}), "(archive_dirname, 'model.xml')\n", (21338, 21368), False, 'import os\n'), ((21377, 21437), 'shutil.copyfile', 'shutil.copyfile', (['self.EXAMPLE_MODEL_FILENAME', 'model_filename'], {}), '(self.EXAMPLE_MODEL_FILENAME, model_filename)\n', (21392, 21437), False, 'import shutil\n'), ((21462, 21504), 'os.path.join', 'os.path.join', (['archive_dirname', '"""sim.sedml"""'], {}), "(archive_dirname, 'sim.sedml')\n", (21474, 21504), False, 'import os\n'), ((21993, 22035), 'os.path.join', 'os.path.join', (['self.dirname', '"""archive.omex"""'], {}), "(self.dirname, 'archive.omex')\n", (22005, 22035), False, 'import os\n'), ((22355, 22385), 'biosimulators_utils.sedml.data_model.SedDocument', 'sedml_data_model.SedDocument', ([], {}), '()\n', (22383, 22385), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((24220, 24258), 'biosimulators_utils.sedml.utils.append_all_nested_children_to_doc', 'append_all_nested_children_to_doc', (['doc'], {}), '(doc)\n', (24253, 24258), False, 'from biosimulators_utils.sedml.utils import append_all_nested_children_to_doc\n'), ((26431, 26464), 'os.path.join', 'os.path.join', (['self.dirname', '"""out"""'], {}), "(self.dirname, 'out')\n", (26443, 26464), False, 'import os\n'), ((26996, 27029), 'os.path.join', 'os.path.join', (['self.dirname', '"""out"""'], {}), "(self.dirname, 'out')\n", (27008, 27029), False, 'import os\n'), ((27131, 27273), 
'biosimulators_utils.simulator.exec.exec_sedml_docs_in_archive_with_containerized_simulator', 'exec_sedml_docs_in_archive_with_containerized_simulator', (['archive_filename', 'out_dir', 'docker_image'], {'environment': 'env', 'pull_docker_image': '(False)'}), '(archive_filename,\n out_dir, docker_image, environment=env, pull_docker_image=False)\n', (27186, 27273), False, 'from biosimulators_utils.simulator.exec import exec_sedml_docs_in_archive_with_containerized_simulator\n'), ((2877, 2965), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (2902, 2965), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((3024, 3193), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (3049, 3193), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((3263, 3432), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""f6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_f6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'f6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_f6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (3288, 3432), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((3502, 3674), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""HEX1"""', 'target': 
'"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_HEX1\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'HEX1\', target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_HEX1\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (3527, 3674), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((4209, 4334), 'numpy.linspace', 'numpy.linspace', (['task.simulation.output_start_time', 'task.simulation.output_end_time', '(task.simulation.number_of_points + 1)'], {}), '(task.simulation.output_start_time, task.simulation.\n output_end_time, task.simulation.number_of_points + 1)\n', (4223, 4334), False, 'import numpy\n'), ((4849, 4884), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.Loader'}), '(file, Loader=yaml.Loader)\n', (4858, 4884), False, 'import yaml\n'), ((5612, 5700), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (5637, 5700), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((5759, 5928), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (5784, 5928), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((6448, 6573), 'numpy.linspace', 'numpy.linspace', (['task.simulation.output_start_time', 'task.simulation.output_end_time', '(task.simulation.number_of_points + 1)'], {}), '(task.simulation.output_start_time, task.simulation.\n output_end_time, task.simulation.number_of_points + 
1)\n', (6462, 6573), False, 'import numpy\n'), ((7315, 7403), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (7340, 7403), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((7462, 7631), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (7487, 7631), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((8168, 8293), 'numpy.linspace', 'numpy.linspace', (['task.simulation.output_start_time', 'task.simulation.output_end_time', '(task.simulation.number_of_points + 1)'], {}), '(task.simulation.output_start_time, task.simulation.\n output_end_time, task.simulation.number_of_points + 1)\n', (8182, 8293), False, 'import numpy\n'), ((9044, 9132), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (9069, 9132), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((9191, 9360), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (9216, 9360), 
True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((9571, 9643), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'})\n", (9586, 9643), False, 'from unittest import mock\n'), ((9888, 9977), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY':\n 'SIMILAR_VARIABLES'})\n", (9903, 9977), False, 'from unittest import mock\n'), ((9987, 10024), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (10005, 10024), False, 'from biosimulators_masspy import core\n'), ((10114, 10200), 'biosimulators_utils.sedml.data_model.AlgorithmParameterChange', 'sedml_data_model.AlgorithmParameterChange', ([], {'kisao_id': '"""KISAO_0000488"""', 'new_value': '"""1"""'}), "(kisao_id='KISAO_0000488',\n new_value='1')\n", (10155, 10200), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((10246, 10318), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'})\n", (10261, 10318), False, 'from unittest import mock\n'), ((10445, 10534), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY':\n 'SIMILAR_VARIABLES'})\n", (10460, 10534), False, 'from unittest import mock\n'), ((10732, 10820), 'biosimulators_utils.sedml.data_model.AlgorithmParameterChange', 'sedml_data_model.AlgorithmParameterChange', ([], {'kisao_id': '"""KISAO_0000209"""', 'new_value': '"""abc"""'}), "(kisao_id='KISAO_0000209',\n new_value='abc')\n", (10773, 10820), True, 'from biosimulators_utils.sedml 
import data_model as sedml_data_model\n'), ((10866, 10938), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY': 'NONE'})\n", (10881, 10938), False, 'from unittest import mock\n'), ((11056, 11145), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'ALGORITHM_SUBSTITUTION_POLICY': 'SIMILAR_VARIABLES'}"], {}), "('os.environ', {'ALGORITHM_SUBSTITUTION_POLICY':\n 'SIMILAR_VARIABLES'})\n", (11071, 11145), False, 'from unittest import mock\n'), ((11941, 12029), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (11966, 12029), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((12770, 12951), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id=\'v_R_HEX1\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(10)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id=\'v_R_HEX1\']"\n , target_namespaces=self.NAMESPACES, new_value=10)\n', (12807, 12951), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((13014, 13274), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_PFK_R01\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'Keq_PFK_A\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(20)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_PFK_R01\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'Keq_PFK_A\']"\n , 
target_namespaces=self.NAMESPACES, new_value=20)\n', (13051, 13274), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((13337, 13600), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_SK_lac__L_c\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'lac__L_b\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(25)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_SK_lac__L_c\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'lac__L_b\']"\n , target_namespaces=self.NAMESPACES, new_value=25)\n', (13374, 13600), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((13663, 13920), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_ADK1\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'kf_R_ADK1\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(30)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_ADK1\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'kf_R_ADK1\']"\n , target_namespaces=self.NAMESPACES, new_value=30)\n', (13700, 13920), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((14084, 14208), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': 'None'}), "(target='/sbml:sbml/sbml:model',\n target_namespaces=self.NAMESPACES, new_value=None)\n", (14121, 14208), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((14299, 14340), 
'biosimulators_masspy.core.preprocess_sed_task', 'core.preprocess_sed_task', (['task', 'variables'], {}), '(task, variables)\n', (14323, 14340), False, 'from biosimulators_masspy import core\n'), ((15087, 15196), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['results2[met.id]', 'results[met.id][0:task.simulation.number_of_points + 1]'], {}), '(results2[met.id], results[met.id][0:task.\n simulation.number_of_points + 1])\n', (15116, 15196), False, 'import numpy\n'), ((15627, 15738), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['results3[met.id]', 'results[met.id][-(task.simulation.number_of_points + 1):]'], {}), '(results3[met.id], results[met.id][-(task.\n simulation.number_of_points + 1):])\n', (15656, 15738), False, 'import numpy\n'), ((15790, 15971), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id=\'v_R_HEX1\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(10)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id=\'v_R_HEX1\']"\n , target_namespaces=self.NAMESPACES, new_value=10)\n', (15827, 15971), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((16034, 16294), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_PFK_R01\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'Keq_PFK_A\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(20)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_PFK_R01\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'Keq_PFK_A\']"\n , target_namespaces=self.NAMESPACES, new_value=20)\n', (16071, 16294), True, 'from biosimulators_utils.sedml import data_model as 
sedml_data_model\n'), ((16357, 16620), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_SK_lac__L_c\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'lac__L_b\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(25)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_SK_lac__L_c\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'lac__L_b\']"\n , target_namespaces=self.NAMESPACES, new_value=25)\n', (16394, 16620), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((16683, 16940), 'biosimulators_utils.sedml.data_model.ModelAttributeChange', 'sedml_data_model.ModelAttributeChange', ([], {'target': '"""/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_ADK1\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'kf_R_ADK1\']"""', 'target_namespaces': 'self.NAMESPACES', 'new_value': '(30)'}), '(target=\n "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id=\'R_ADK1\']/sbml:kineticLaw/sbml:listOfLocalParameters/sbml:localParameter[@id=\'kf_R_ADK1\']"\n , target_namespaces=self.NAMESPACES, new_value=30)\n', (16720, 16940), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((18131, 18219), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (18156, 18219), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((18278, 18447), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 
'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (18303, 18447), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((18627, 18662), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables'], {}), '(task, variables)\n', (18645, 18662), False, 'from biosimulators_masspy import core\n'), ((19352, 19440), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""Time"""', 'symbol': 'sedml_data_model.Symbol.time', 'task': 'task'}), "(id='Time', symbol=sedml_data_model.Symbol.time,\n task=task)\n", (19377, 19440), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((19499, 19668), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'task'}), '(id=\'g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=task)\n', (19524, 19668), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((19965, 20002), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables_2'], {}), '(task, variables_2)\n', (19983, 20002), False, 'from biosimulators_masspy import core\n'), ((20200, 20237), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task', 'variables_2'], {}), '(task, variables_2)\n', (20218, 20237), False, 'from biosimulators_masspy import core\n'), ((20418, 20455), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (20436, 20455), False, 'from biosimulators_masspy import core\n'), ((21230, 21260), 'os.path.isdir', 'os.path.isdir', 
(['archive_dirname'], {}), '(archive_dirname)\n', (21243, 21260), False, 'import os\n'), ((21274, 21299), 'os.mkdir', 'os.mkdir', (['archive_dirname'], {}), '(archive_dirname)\n', (21282, 21299), False, 'import os\n'), ((22256, 22308), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000019"""'}), "(kisao_id='KISAO_0000019')\n", (22282, 22308), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((22412, 22523), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'id': '"""model"""', 'source': '"""model.xml"""', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), "(id='model', source='model.xml', language=\n sedml_data_model.ModelLanguage.SBML.value)\n", (22434, 22523), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((22598, 22777), 'biosimulators_utils.sedml.data_model.UniformTimeCourseSimulation', 'sedml_data_model.UniformTimeCourseSimulation', ([], {'id': '"""sim_time_course"""', 'initial_time': '(0.0)', 'output_start_time': '(0.0)', 'output_end_time': '(10.0)', 'number_of_points': '(10)', 'algorithm': 'algorithm'}), "(id='sim_time_course',\n initial_time=0.0, output_start_time=0.0, output_end_time=10.0,\n number_of_points=10, algorithm=algorithm)\n", (22642, 22777), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((22877, 22968), 'biosimulators_utils.sedml.data_model.Task', 'sedml_data_model.Task', ([], {'id': '"""task_1"""', 'model': 'doc.models[0]', 'simulation': 'doc.simulations[0]'}), "(id='task_1', model=doc.models[0], simulation=doc.\n simulations[0])\n", (22898, 22968), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((25072, 25161), 'numpy.linspace', 'numpy.linspace', (['sim.output_start_time', 'sim.output_end_time', '(sim.number_of_points + 1)'], {}), '(sim.output_start_time, sim.output_end_time, sim.\n number_of_points + 1)\n', 
(25086, 25161), False, 'import numpy\n'), ((25442, 25482), 'os.path.join', 'os.path.join', (['self.dirname', 'alg.kisao_id'], {}), '(self.dirname, alg.kisao_id)\n', (25454, 25482), False, 'import os\n'), ((25505, 25517), 'biosimulators_utils.config.get_config', 'get_config', ([], {}), '()\n', (25515, 25517), False, 'from biosimulators_utils.config import get_config\n'), ((26530, 26562), 'unittest.mock.patch.dict', 'mock.patch.dict', (['os.environ', 'env'], {}), '(os.environ, env)\n', (26545, 26562), False, 'from unittest import mock\n'), ((2080, 2195), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (2102, 2195), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((5058, 5173), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (5080, 5173), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((6763, 6878), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (6785, 6878), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((8492, 8607), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (8514, 
8607), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((9736, 9773), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (9754, 9773), False, 'from biosimulators_masspy import core\n'), ((10393, 10430), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (10411, 10430), False, 'from biosimulators_masspy import core\n'), ((10605, 10642), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (10623, 10642), False, 'from biosimulators_masspy import core\n'), ((11004, 11041), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (11022, 11041), False, 'from biosimulators_masspy import core\n'), ((11216, 11253), 'biosimulators_masspy.core.exec_sed_task', 'core.exec_sed_task', (['task_2', 'variables'], {}), '(task_2, variables)\n', (11234, 11253), False, 'from biosimulators_masspy import core\n'), ((11389, 11504), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (11411, 11504), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((14580, 14733), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['results[met.id][0:task.simulation.number_of_points + 1]', 'results[met.id][-task.simulation.number_of_points + 1:]'], {}), '(results[met.id][0:task.simulation.\n number_of_points + 1], results[met.id][-task.simulation.\n number_of_points + 1:])\n', (14609, 14733), False, 'import numpy\n'), ((17579, 17694), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 
'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (17601, 17694), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((18800, 18915), 'biosimulators_utils.sedml.data_model.Model', 'sedml_data_model.Model', ([], {'source': 'self.EXAMPLE_MODEL_FILENAME', 'language': 'sedml_data_model.ModelLanguage.SBML.value'}), '(source=self.EXAMPLE_MODEL_FILENAME, language=\n sedml_data_model.ModelLanguage.SBML.value)\n', (18822, 18915), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((21513, 21536), 'biosimulators_utils.sedml.io.SedmlSimulationWriter', 'SedmlSimulationWriter', ([], {}), '()\n', (21534, 21536), False, 'from biosimulators_utils.sedml.io import SedmlSimulationWriter\n'), ((22044, 22066), 'biosimulators_utils.combine.io.CombineArchiveWriter', 'CombineArchiveWriter', ([], {}), '()\n', (22064, 22066), False, 'from biosimulators_utils.combine.io import CombineArchiveWriter\n'), ((24449, 24463), 'biosimulators_utils.report.io.ReportReader', 'ReportReader', ([], {}), '()\n', (24461, 24463), False, 'from biosimulators_utils.report.io import ReportReader\n'), ((25275, 25330), 'biosimulators_utils.simulator.specs.gen_algorithms_from_specs', 'gen_algorithms_from_specs', (['self.SPECIFICATIONS_FILENAME'], {}), '(self.SPECIFICATIONS_FILENAME)\n', (25300, 25330), False, 'from biosimulators_utils.simulator.specs import gen_algorithms_from_specs\n'), ((25724, 25810), 'biosimulators_masspy.core.exec_sedml_docs_in_combine_archive', 'core.exec_sedml_docs_in_combine_archive', (['archive_filename', 'out_dir'], {'config': 'config'}), '(archive_filename, out_dir, config=\n config)\n', (25763, 25810), False, 'from biosimulators_masspy import core\n'), ((26581, 26639), 'biosimulators_masspy.__main__.App', '__main__.App', ([], {'argv': "['-i', archive_filename, '-o', out_dir]"}), "(argv=['-i', 
archive_filename, '-o', out_dir])\n", (26593, 26639), False, 'from biosimulators_masspy import __main__\n'), ((4089, 4117), 'numpy.isnan', 'numpy.isnan', (['variable_result'], {}), '(variable_result)\n', (4100, 4117), False, 'import numpy\n'), ((6328, 6356), 'numpy.isnan', 'numpy.isnan', (['variable_result'], {}), '(variable_result)\n', (6339, 6356), False, 'import numpy\n'), ((8031, 8059), 'numpy.isnan', 'numpy.isnan', (['variable_result'], {}), '(variable_result)\n', (8042, 8059), False, 'import numpy\n'), ((21653, 21770), 'biosimulators_utils.combine.data_model.CombineArchiveContent', 'combine_data_model.CombineArchiveContent', (['"""model.xml"""', 'combine_data_model.CombineArchiveContentFormat.SBML.value'], {}), "('model.xml', combine_data_model.\n CombineArchiveContentFormat.SBML.value)\n", (21693, 21770), True, 'from biosimulators_utils.combine import data_model as combine_data_model\n'), ((21804, 21923), 'biosimulators_utils.combine.data_model.CombineArchiveContent', 'combine_data_model.CombineArchiveContent', (['"""sim.sedml"""', 'combine_data_model.CombineArchiveContentFormat.SED_ML.value'], {}), "('sim.sedml', combine_data_model.\n CombineArchiveContentFormat.SED_ML.value)\n", (21844, 21923), True, 'from biosimulators_utils.combine import data_model as combine_data_model\n'), ((24401, 24420), 'os.listdir', 'os.listdir', (['out_dir'], {}), '(out_dir)\n', (24411, 24420), False, 'import os\n'), ((24877, 24905), 'numpy.isnan', 'numpy.isnan', (['data_set_result'], {}), '(data_set_result)\n', (24888, 24905), False, 'import numpy\n'), ((4787, 4799), 'biosimulators_utils.config.get_config', 'get_config', ([], {}), '()\n', (4797, 4799), False, 'from biosimulators_utils.config import get_config\n'), ((5459, 5511), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000019"""'}), "(kisao_id='KISAO_0000019')\n", (5485, 5511), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), 
((7162, 7214), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000086"""'}), "(kisao_id='KISAO_0000086')\n", (7188, 7214), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((8891, 8943), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000019"""'}), "(kisao_id='KISAO_0000019')\n", (8917, 8943), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((11788, 11840), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000019"""'}), "(kisao_id='KISAO_0000019')\n", (11814, 11840), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((17978, 18030), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000030"""'}), "(kisao_id='KISAO_0000030')\n", (18004, 18030), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((19199, 19251), 'biosimulators_utils.sedml.data_model.Algorithm', 'sedml_data_model.Algorithm', ([], {'kisao_id': '"""KISAO_0000019"""'}), "(kisao_id='KISAO_0000019')\n", (19225, 19251), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((23152, 23259), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""var_time"""', 'symbol': 'sedml_data_model.Symbol.time.value', 'task': 'doc.tasks[0]'}), "(id='var_time', symbol=sedml_data_model.Symbol.\n time.value, task=doc.tasks[0])\n", (23177, 23259), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((23528, 23709), 'biosimulators_utils.sedml.data_model.Variable', 'sedml_data_model.Variable', ([], {'id': '"""var_g6p"""', 'target': '"""/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']"""', 'target_namespaces': 'self.NAMESPACES', 'task': 'doc.tasks[0]'}), 
'(id=\'var_g6p\', target=\n "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=\'M_g6p_c\']",\n target_namespaces=self.NAMESPACES, task=doc.tasks[0])\n', (23553, 23709), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((23973, 24075), 'biosimulators_utils.sedml.data_model.DataSet', 'sedml_data_model.DataSet', ([], {'id': '"""data_set_time"""', 'label': '"""Time"""', 'data_generator': 'doc.data_generators[0]'}), "(id='data_set_time', label='Time', data_generator=\n doc.data_generators[0])\n", (23997, 24075), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((24088, 24188), 'biosimulators_utils.sedml.data_model.DataSet', 'sedml_data_model.DataSet', ([], {'id': '"""data_set_g6p"""', 'label': '"""g6p"""', 'data_generator': 'doc.data_generators[1]'}), "(id='data_set_g6p', label='g6p', data_generator=doc\n .data_generators[1])\n", (24112, 24188), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n'), ((2607, 2696), 'biosimulators_utils.sedml.data_model.AlgorithmParameterChange', 'sedml_data_model.AlgorithmParameterChange', ([], {'kisao_id': '"""KISAO_0000209"""', 'new_value': '"""1e-8"""'}), "(kisao_id='KISAO_0000209',\n new_value='1e-8')\n", (2648, 2696), True, 'from biosimulators_utils.sedml import data_model as sedml_data_model\n')] |
## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Airfoil
# import_airfoil_polars.py
#
# Created: Mar 2019, <NAME>
# Mar 2020, <NAME>
# Sep 2020, <NAME>
# May 2021, <NAME>
# Nov 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data
import numpy as np
## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Airfoil
def import_airfoil_polars(airfoil_polar_files):
    """Import airfoil polars produced by XFOIL (or downloaded from
    airfoiltools.com) and interpolate them onto a common angle-of-attack grid.

    Assumptions:
    Each polar file follows the XFOIL text layout: a header containing
    'Re =' followed by a '---' separator line, then fixed-width columns
    of alpha, CL and CD.

    Source:
    http://airfoiltools.com/

    Inputs:
    airfoil_polar_files   <list of lists of strings>
        One inner list of polar-file paths per airfoil; each airfoil must
        provide three or more polars (required by the downstream surrogate).

    Outputs:
    airfoil_data   Data() container with fields:
        angle_of_attacks    shared AoA grid, 89 points on [-6, 16] deg
        reynolds_number     Re of each polar, shape (num_airfoils, num_polars)
        lift_coefficients   CL on the AoA grid, shape (num_airfoils, num_polars, 89)
        drag_coefficients   CD on the AoA grid, same shape as CL

    Properties Used:
    N/A
    """
    # Number of airfoils and the largest polar count among them; polars
    # missing for a given airfoil stay zero-filled in the output arrays.
    num_airfoils = len(airfoil_polar_files)
    num_polars   = 0
    for i in range(num_airfoils):
        n_p = len(airfoil_polar_files[i])
        if n_p < 3:
            raise AttributeError('Provide three or more airfoil polars to compute surrogate')
        num_polars = max(num_polars, n_p)

    # create empty data structures
    airfoil_data = Data()
    dim_aoa      = 89  # yields an AoA discretization of 0.25 deg over [-6, 16]
    CL           = np.zeros((num_airfoils, num_polars, dim_aoa))
    CD           = np.zeros((num_airfoils, num_polars, dim_aoa))
    Re           = np.zeros((num_airfoils, num_polars))
    AoA_interp   = np.linspace(-6, 16, dim_aoa)

    for i in range(num_airfoils):
        for j in range(len(airfoil_polar_files[i])):
            # Read the polar file; 'with' guarantees the handle is closed
            # even if parsing below raises.
            with open(airfoil_polar_files[i][j]) as f:
                data_block = f.readlines()

            # Scan the header: capture the Reynolds number (fixed columns
            # 25:40 of the 'Re =' line) and drop everything up to and
            # including the '---' separator.
            for header_line, line in enumerate(data_block):
                if 'Re =' in line:
                    Re[i, j] = float(line[25:40].strip().replace(" ", ""))
                if '---' in line:
                    data_block = data_block[header_line + 1:]
                    break

            # Remove any blank lines at the end of the file (guard against
            # an all-blank block, which the old flag loop would IndexError on)
            while data_block and data_block[-1] == '\n':
                data_block = data_block[:-1]

            data_len    = len(data_block)
            airfoil_aoa = np.zeros(data_len)
            airfoil_cl  = np.zeros(data_len)
            airfoil_cd  = np.zeros(data_len)

            # Parse the fixed-width columns: alpha [0:8], CL [10:17], CD [20:27]
            for line_count, line in enumerate(data_block):
                airfoil_aoa[line_count] = float(line[0:8].strip())
                airfoil_cl[line_count]  = float(line[10:17].strip())
                airfoil_cd[line_count]  = float(line[20:27].strip())

            # Interpolate this polar onto the shared AoA grid
            CL[i, j, :] = np.interp(AoA_interp, airfoil_aoa, airfoil_cl)
            CD[i, j, :] = np.interp(AoA_interp, airfoil_aoa, airfoil_cd)

    airfoil_data.angle_of_attacks  = AoA_interp
    airfoil_data.reynolds_number   = Re
    airfoil_data.lift_coefficients = CL
    airfoil_data.drag_coefficients = CD
    return airfoil_data
| [
"numpy.interp",
"numpy.zeros",
"numpy.linspace",
"SUAVE.Core.Data"
] | [((1371, 1377), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1375, 1377), False, 'from SUAVE.Core import Data\n'), ((1471, 1516), 'numpy.zeros', 'np.zeros', (['(num_airfoils, num_polars, dim_aoa)'], {}), '((num_airfoils, num_polars, dim_aoa))\n', (1479, 1516), True, 'import numpy as np\n'), ((1534, 1579), 'numpy.zeros', 'np.zeros', (['(num_airfoils, num_polars, dim_aoa)'], {}), '((num_airfoils, num_polars, dim_aoa))\n', (1542, 1579), True, 'import numpy as np\n'), ((1598, 1634), 'numpy.zeros', 'np.zeros', (['(num_airfoils, num_polars)'], {}), '((num_airfoils, num_polars))\n', (1606, 1634), True, 'import numpy as np\n'), ((1656, 1684), 'numpy.linspace', 'np.linspace', (['(-6)', '(16)', 'dim_aoa'], {}), '(-6, 16, dim_aoa)\n', (1667, 1684), True, 'import numpy as np\n'), ((2700, 2718), 'numpy.zeros', 'np.zeros', (['data_len'], {}), '(data_len)\n', (2708, 2718), True, 'import numpy as np\n'), ((2744, 2762), 'numpy.zeros', 'np.zeros', (['data_len'], {}), '(data_len)\n', (2752, 2762), True, 'import numpy as np\n'), ((2788, 2806), 'numpy.zeros', 'np.zeros', (['data_len'], {}), '(data_len)\n', (2796, 2806), True, 'import numpy as np\n'), ((3239, 3285), 'numpy.interp', 'np.interp', (['AoA_interp', 'airfoil_aoa', 'airfoil_cl'], {}), '(AoA_interp, airfoil_aoa, airfoil_cl)\n', (3248, 3285), True, 'import numpy as np\n'), ((3308, 3354), 'numpy.interp', 'np.interp', (['AoA_interp', 'airfoil_aoa', 'airfoil_cd'], {}), '(AoA_interp, airfoil_aoa, airfoil_cd)\n', (3317, 3354), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
parmap (or parmapper): Tool for easy parallel function mapping
without requiring a pickleable function (e.g. lambdas).
"""
from __future__ import print_function, unicode_literals, division
__version__ = '20190531'
import multiprocessing as mp
import multiprocessing.dummy as mpd
from threading import Thread
import threading
import sys
from collections import defaultdict
import warnings
import math
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
import tqdm
except ImportError:
tqdm = None
from functools import partial
# Python 2/3 compatibility: on Py3, alias the Py2-only names (unicode,
# xrange, imap) to their Py3 equivalents so the rest of the module can
# use a single spelling; on Py2, pull the lazy imap from itertools.
if sys.version_info[0] > 2:
    unicode = str
    xrange = range
    imap = map
else:
    from itertools import imap
# Default number of worker processes (used by parmap when N is None).
CPU_COUNT = mp.cpu_count()
class _Exception(object):
    """Wrapper recording an exception raised while mapping.

    Wrapping the exception lets the consumer distinguish an error slot
    from a regular result via an isinstance check. `infun` records
    whether the exception came from inside the mapped function.
    """
    def __init__(self, E, infun=True):
        # Keep the original exception and its origin flag for later handling.
        self.E, self.infun = E, infun
def parmap(fun,seq,N=None,Nt=1,chunksize=1,ordered=True,\
daemon=False,progress=False,
args=(),kwargs=None,
star=False,kwstar=False,
exception=None):
"""
parmap -- Simple parallel mapper that can split amongst processes (N)
and threads (Nt) (within the processes).
Does *NOT* require functions to be pickleable (unlike
vanilla multiprocess.Pool.map)
Inputs:
-------
fun
Single input function. Use lambdas or functools.partial
to enable/exapnd multi-input. See example
seq
Sequence of inputs to map in parallel
Options:
--------
N [None] (integer or None)
Number of processes to use. If `None`, will use the CPU_COUNT
Nt [1] (integer)
Number of threads to use. See notes below on multi-threaded vs
multi-processes.
chunksize [1] (int)
How to be break up the incoming sequence. Useful if also using threads.
Will be (re)set to max(chunksize,Nt).
Alternativly, if len(seq) exists and chunksize=-1 it will be reset
to ceil(len(seq)/(N*Nt)). If chunksize=-1 and len(sequence) is not
known, a warning will be emitted and chucksize will be reset to
max(chunksize,Nt)
ordered [True] (bool)
Whether or not to order the results. If False, will return in whatever
order they finished.
daemon [False] (bool)
Sets the multiprocessing `daemon` flag. If True, can not spawn child
processes (i.e. cannot nest parmap) but should allow for CTRL+C type
stopping. Supposedly, there may be issues with CTRL+C with it set to
False. Use at your own risk
progress [False] (bool)
Display a progress bar or counter.
Warning: Inconsistant in iPython/Jupyter notebooks and may clear
other printed content. Instead, specify as 'nb' to use a Jupyter
Widget progress bar.
args [tuple()]
Specify additional arguments for the function
kwargs [dict()]
Specify additional keyword arguments
star [False]
If True, the arguments to the function will be "starred" so, for example
if `seq = [ (1,2), (3,4) ]`, the function will be called as
star is False: fun((1,2))
star is True: fun(1,2) <==> fun(*(1,2))
Can also set to None to not send anything
kwstar [False]
Assumes all items are (vals,kwvals) where `vals` RESPECTS `star`
setting and still includes `args` and `kwvals`. See "Additional
Arguments" section below.
exception ['raise' if N>1 else 'proc']
Choose how to handle an exception in a child process
'raise' : [Default] raise the exception (outside of the Process).
Also terminates all existing processes.
'return' : Return the Exception instead of raising it.
'proc' : Raise the exception inside the process. NOT RECOMMENDED
unless used in debugging (and with N=1)
Note: An additional attribute called `seq_index` will also be set
in the exception (whether raised or returned) to aid in debugging.
Additional Arguments
--------------------
As noted above, there are many ways to pass additional arguments to
your function. All of these are not completely needed since parmap
makes using lambdas so easy, but they are there if preffered.
Assume the following function:
def dj(dictA,dictB):
'''Join dictA and dictB where dictB takes precedence'''
dictA = dictA.copy()
dictA.update(dictB) # NOTE: dictB takes precedence
return dictA
Then the behavior is as follows where `args` and `kwargs` come from
the main function call. The `val` (singular), `vals` (sequence/tuple of
values), and `kwvals` are set via the sequence.
| star | kwstar | expected item | function args | function keywords |
|-------|--------|---------------|----------------|---------------------|
| False | False | val | *((val,)+args) | **kwargs |†
| True | False | vals | *(vals+args) | **kwargs |
| None | False | --- | *args | **kwargs |°
| None | True | --- | *args | **dj(kwargs,kwvals) |‡
| False | True | val,kwval | *((val,)+args) | **dj(kwargs,kwvals) |‡
| True | True | vals,kwval | *(vals+args) | **dj(kwargs,kwvals) |‡
† Default
° If kwargs and args are empty, basically calls with nothing
‡ Note the ordering so kwvals takes precedence
Note:
------
Performs SEMI-lazy iteration based on chunksize. It will exhaust the input
iterator but will yield as results are computed (This is similar to the
`multiprocessing.Pool().imap` behavior)
Explicitly wrap the parmap call in a list(...) to force immediate
evaluation
Threads and/or processes:
-------------------------
This tool has the ability to split work amongst python processes
(via multiprocessing) and python threads (via the multiprocessing.dummy
module). Python is not very performant in multi-threaded situations
(due to the GIL) therefore, processes are the usually the best for CPU
bound tasks and threading is good for those that release the GIL (such
as IO-bound tasks).
WARNING: Many NumPy functions *do* release the GIL and can be threaded,
but many NumPy functions are, themselves, multi-threaded.
Alternatives:
-------------
This tool allows more data types, can split with threads, has an optional
progress bar, and has fewer pickling issues, but these come at a small cost.
For simple needs, the following may be better:
>>> import multiprocessing as mp
>>> pool = mp.Pool(N) # Or mp.Pool() for N=None
>>> results = list( pool.imap(fun,seq) ) # or just pool.map
>>> pool.close()
Additional Note
---------------
For the sake of convenience, a `map=imap=__call__` and
`close = lambda *a,**k:None` are also added so a parmap function can mimic
a multiprocessing pool object with duck typing
Version:
-------
__version__
"""
# Build up a dummy function with args,vals,kwargs, and kwvals
if kwargs is None:
kwargs = {}
def _fun(ss):
_args = list(args)
_kw = kwargs.copy()
try:
# Check for None before boolean
if star is None and kwstar: # 4
_kw.update(ss)
elif star is None and not kwstar: # 3
pass
elif not star and not kwstar: # 1
_args = [ss] + _args
elif star and not kwstar: # 2
_args = list(ss) + _args
elif not star and kwstar: # 5
_args = [ss[0]] + _args
_kw.update(ss[1])
elif star and kwstar: # 6
_args = list(ss[0]) + _args
_kw.update(ss[1])
else:
raise TypeError()
except TypeError: # Mostly because bad input types
return _Exception(TypeError('Ensure `args` are tuples and `kwargs` are dicts'),infun=False)
except Exception as E:
return _Exception(E,infun=False)
if exception == 'proc':
return fun(*_args,**_kw) # Outside of a try
try:
return fun(*_args,**_kw)
except Exception as E:
return _Exception(E)
# It would be great to include all of sys.exc_info() but tracebacks
# cannot be pickled.
try:
tot = len(seq)
except TypeError:
tot = None
N = CPU_COUNT if N is None else N
if exception is None:
exception = 'raise' if N>1 else 'proc'
if chunksize == -1:
if tot is None:
warnings.warn('chunksize=-1 does not work when len(seq) is not known')
else:
chunksize = math.ceil(tot/(N*Nt))
chunksize = max(chunksize,Nt) # Reset
# Consider resetting N
if tot is not None:
N = min(N,tot//chunksize)
# Build a counter iterator based on settings and tqdm
if tqdm is None:
if isinstance(progress,(str,unicode))\
and progress.lower() in ['jupyter','notebook','nb']:
counter = partial(_counter_nb,tot=tot)
else:
counter = partial(_counter,tot=tot)
else:
if isinstance(progress,(str,unicode))\
and progress.lower() in ['jupyter','notebook','nb']\
and hasattr(tqdm,'tqdm_notebook'):
counter = partial(tqdm.tqdm_notebook,total=tot)
else:
counter = partial(tqdm.tqdm,total=tot) # Set the total since tqdm won't be able to get it.
# Handle N=1 without any multiprocessing
if N == 1:
if Nt == 1:
out = imap(_fun,seq)
else:
pool = mpd.Pool(Nt) # thread pools don't have the pickle issues
out = pool.imap(_fun,seq)
if progress:
out = counter(out)
for count,item in enumerate(out):
if isinstance(item,_Exception):
item.E.seq_index = count
if not item.infun:
exception = 'raise' # reset
if exception == 'raise':
raise item.E
elif exception == 'return':
item = item.E
elif exception == 'proc':
pass
else:
raise ValueError("Unrecognized `exception` setting '{}'".format(exception))
yield item
if Nt > 1:
pool.close()
return
q_in = mp.JoinableQueue() # Will need to `join` later to make sure is empty
q_out = mp.Queue()
# Start the workers
workers = [mp.Process(target=_worker, args=(_fun, q_in, q_out,Nt)) for _ in range(N)]
for worker in workers:
worker.daemon = daemon
worker.start()
# Create a separate thread to add to the queue in the background
def add_to_queue():
for iixs in _iter_chunks(enumerate(seq),chunksize):
q_in.put(iixs)
# Once (if ever) it is exhausted, send None to close workers
for _ in xrange(N):
q_in.put(None)
add_to_queue_thread = Thread(target=add_to_queue)
add_to_queue_thread.start()
# Define a generator that will pull from the q_out and then run through
# the rest of our generator/iterator chain for progress and ordering
def queue_getter():
finished = 0
count = 0
while finished < N:
out = q_out.get()
if out is None:
finished += 1
continue
yield out
# Chain generators on output
out = queue_getter()
if progress:
out = counter(out)
if ordered:
out = _sort_generator_unique_integers(out,key=lambda a:a[0])
# Return items
for item in out:
count = item[0]
item = item[1]
if isinstance(item,_Exception):
item.E.seq_index = count
if not item.infun:
exception = 'raise' # reset
if exception == 'raise':
for worker in workers:
worker.terminate()
raise item.E
elif exception == 'return':
item = item.E
elif exception == 'proc':
pass
else:
for worker in workers:
worker.terminate()
raise ValueError("Unrecognized `exception` setting '{}'".format(exception))
yield item
# Clean up threads and processes. Make sure the queue is exhausted
add_to_queue_thread.join() # Make sure we've exhausted the input
q_in.join() # Make sure there is nothing left in the queue
for worker in workers:
worker.join() # shut it down
# Add dummy methods
# Give parmap the minimal Pool-like surface (map/imap/close) so callers can
# duck-type it where a multiprocessing.Pool object is expected.
parmap.map = parmap.imap = parmap.__call__
parmap.close = lambda *a,**k:None
# Substitute the real version string for the '__version__' placeholder left
# in parmap's docstring.
parmap.__doc__ = parmap.__doc__.replace('__version__',__version__)
parmapper = parmap # Rename
def _counter(items,tot=None):
for ii,item in enumerate(items):
if tot is not None:
_txtbar(ii,tot,ticks=50,text='')
else:
txt = '{}'.format(ii+1)
print('\r%s' % txt,end='')
sys.stdout.flush()
yield item
def _counter_nb(items,tot=None):
    """Progress counter for Jupyter notebooks.

    Wraps *items* and displays an ipywidgets IntProgress bar (when *tot*
    is known) plus an IntText running counter, yielding each item
    unchanged. Requires ipywidgets and IPython (imported lazily so the
    module works without them).
    """
    from ipywidgets import IntProgress,IntText
    from IPython.display import display
    if tot is not None:
        g = IntText(value=0,description='total = %d' % tot)
        f = IntProgress(min=0,max=tot)
        display(f)
        g.desription='hi'  # NOTE(review): likely a typo for `description`; as written it only sets an unused attribute — confirm intent
    else:
        # Length unknown: show just the counter, no bar.
        g = IntText(value=0)
        f = None
    display(g)
    for ii,item in enumerate(items):
        if f:
            f.value += 1
        g.value+=1
        yield item
def _worker(fun,q_in,q_out,Nt):
""" This actually runs everything including threadpools"""
if Nt > 1:
pool = mpd.Pool(Nt)
_map = pool.map # thread pools don't have the pickle issues
else:
_map = map
while True:
iixs = q_in.get()
if iixs is None:
q_out.put(None)
q_in.task_done()
break
# for ix in iixs:
def _ap(ix):
i,x = ix
q_out.put((i, fun(x)))
list(_map(_ap,iixs)) # list forces the iteration
q_in.task_done()
if Nt >1:
pool.close()
def _iter_chunks(seq,n):
"""
yield a len(n) tuple from seq. If not divisible, the last one would be less
than n
"""
_n = 0;
for item in seq:
if _n == 0:
group = [item]
else:
group.append(item)
_n += 1
if _n == n:
yield tuple(group)
_n = 0
if _n > 0:
yield tuple(group)
def _sort_generator_unique_integers(items,start=0,key=None):
"""
Yield from `items` in order assuming UNIQUE keys w/o any missing!
The items ( or key(item) ) MUST be an integer, without repeats, starting
at `start`
"""
queue = dict()
for item in items:
if key is not None:
ik = key(item)
else:
ik = item
if ik == start:
yield item
start += 1
# Get any stored items
while start in queue:
yield queue.pop(start) # average O(1), worse-case O(N)
start += 1 # but based on ref below, should be O(1)
else: # for integer keys.
queue[ik] = item # Ref: https://wiki.python.org/moin/TimeComplexity
# Exhaust the rest
while start in queue:
yield queue.pop(start)
start += 1
def _txtbar(count,N,ticks=50,text='Progress'):
"""
Print a text-based progress bar.
Usage:
_txtbar(count,N)
Inputs:
count : Iteration count (start at 0)
N : Iteration size
ticks : [50] Number of ticks
text : ['Progress'] Text to display (don't include `:`)
Prints a text-based progress bar to the terminal. Obviosly
printing other things to screen will mess this up:
"""
count = int(count+1)
ticks = min(ticks,N)
isCount = int(1.0*count%round(1.0*N/ticks)) == 0
if not (isCount or count == 1 or count == N):
return
Npound = int(round(1.0 * count/N*ticks));
Nspace = int(1.0*ticks - Npound);
Nprint = int(round(1.0 * count/N*100));
if count == 1:
Nprint = 0
if len(text)>0:
text +=': '
txt = '{:s}{:s}{:s} : {:3d}% '.format(text,'#'*Npound,'-'*Nspace,Nprint)
print('\r%s' % txt,end='')
sys.stdout.flush()
# Developer-facing design notes for the parmap implementation; kept as a
# module-level string so they can be inspected at runtime.
technical_details = """\
This code uses iterators/generators to handle and distribute the workload.
By doing this, it is easy to have all results pass through a common
counting function for display of the progress without the use of
global (multiprocessing manager) variables and locks.
With the exception of when N == 1 (where it falls back to serial methods)
the code works as follows:
- A background thread is started that will iterate over the incoming sequence
and add items to the queue. If the incoming sequence is exhausted, the
worker sends kill signals into the queue. The items are also chunked and
enumerated (used later to sort).
- After the background thread is started a function to pull from the OUTPUT
queue is created. This counts the number of closed processes but otherwise
yields the computed result items
- A pool of workers is created. Each worker will read from the input queue
and distribute the work amongst threads (if using). It will then
return the resuts into a queue
- Now the main work happens. It is done as chain of generators/iterators.
The background worker has already begin adding items to the queue so
now we work through the output queue. Note that this is in serial
since the work was already done in parallel
- Generator to pull from the result queue
- Generator to count and display progress (if progress=True).
- Generator to hold on to and return items in a sorted manner
if sorting is requested. This can cause itermediate results to be
stored until they can be returned in order
- The output generator chain is iterated pulling items through and then
are yielded.
- cleanup and close processes (if/when the input is exhausted)
"""
# Placeholder for the lazily-imported numpy module: ParEval/_chunker import
# numpy on first instantiation and rebind this module-level name via `global`.
np = None # will be imported when a ParEval is instantiated
class ParEval(object):
    """
    Evaluate a *vectorized* fun(X) (where X is a numpy array) in chunks
    using parmap. If fun(X) is not vectorized, use the regular parmap.
    Requires numpy (imported lazily on first instantiation).

    Inputs:
    -------
    fun
        Function to call. Use parmap keywords (e.g. args,kwargs,star) to
        add and/or control the function call.

    Specify one of:
    n_chunks
        How many chunks to split the input into.
    n_eval
        Upper limit of evaluations per chunk (chunk count is derived).
    If neither is specified, n_chunks defaults to CPU_COUNT.

    Options:
    -------
    n_min [0]
        Minimum size for a chunk. Also overrides n_eval if needed
        (since n_eval is converted to n_chunks).

    All additional keyword options are passed through to parmap.
    Splits along the first axis.
    """
    def __init__(self, fun, n_chunks=None, n_eval=None, n_min=0, **kwargs):
        global np
        if np is None:
            import numpy as np  # lazy import: numpy only required for ParEval
        if n_chunks is not None and n_eval is not None:
            raise ValueError('Must specify EITHER n_chunks OR n_eval')
        if n_chunks is None and n_eval is None:
            n_chunks = CPU_COUNT  # default: one chunk per core
        self.fun = fun
        self.n_chunks = n_chunks
        self.n_eval = n_eval
        self.n_min = n_min
        self.kwargs = kwargs
    def __call__(self, X):
        pieces = _chunker(X, n_chunks=self.n_chunks,
                          n_eval=self.n_eval,
                          n_min=self.n_min)
        results = list(parmap(self.fun, pieces, **self.kwargs))
        return np.concatenate(results)
class _chunker(object):
"""Object to actually break into chunks and has a __len__"""
def __init__(self,X,n_chunks=None,n_eval=None,n_min=0):
global np
if np is None:
import numpy as np
self.X = X = np.atleast_1d(X)
n = len(X)
# Get number of chunks
if n_eval is not None:
n_eval = max(n_min,n_eval)
n_chunks = int(np.ceil(n/n_eval))
if n_chunks is not None:
n_chunks = n_chunks
if n // n_chunks < n_min:
n_chunks = n // n_min
stops = np.asarray([n // n_chunks]*n_chunks,dtype=int)
stops[:n % n_chunks] += 1
self.stops = stops = np.cumsum(stops).tolist()
self.len = len(stops)
self.ii = 0
def __next__(self):
ii = self.ii
if ii == self.len:
raise StopIteration()
a = 0 if ii == 0 else self.stops[ii-1]
b = self.stops[ii]
self.ii += 1
return self.X[a:b]
next = __next__
def __iter__(self):
return self
def __len__(self):
return self.len
################################################################################
################################################################################
## Below is a simpler version of parmap. It really only serves the purpose of
## being used to copy/paste when a short-and-sweet parmap is needed in a
## function or method and you do not want to require parmap(per).py
##
## It is basically *just* for reference
################################################################################
################################################################################
# def simple_parmap(fun,seq,N=None,daemon=True):
# """
# Simple, bare-bones parallel map function similar to parmap
# (or parmapper [1]) except much, much simpler. It lacks all
# bells and whistles but *does* perform parallel mapping
#
# Note: This always returns a list and not an iterator!
# And will not return until all computation is complete
#
# Use parmap if it is available.
#
# Inspired by [2]
#
# [1]:https://github.com/Jwink3101/parmapper
# [2]:https://stackoverflow.com/a/16071616/3633154
# """
# import multiprocessing as mp
# if N is None:
# N = mp.cpu_count()
# def _fun(fun, q_in, q_out):
# while True:
# i, x = q_in.get()
# if i is None:
# q_in.task_done()
# break
# q_out.put((i, fun(x)))
# q_in.task_done()
#
# q_in,q_out = mp.JoinableQueue(),mp.Queue()
#
# proc = [mp.Process(target=_fun, args=(fun, q_in, q_out)) for _ in range(N)]
# for p in proc:
# p.daemon=daemon
# p.start()
#
# count = 0
# for ii,x in enumerate(seq):
# q_in.put((ii,x))
# count += 1
#
# for _ in range(N): q_in.put((None,None))
# res = [q_out.get() for _ in range(count)]
#
# q_in.join()
# for p in proc: p.join()
#
# return [x for i, x in sorted(res)]
| [
"sys.stdout.flush",
"multiprocessing.Queue",
"itertools.imap",
"multiprocessing.cpu_count",
"multiprocessing.dummy.Pool",
"IPython.display.display",
"numpy.cumsum",
"multiprocessing.JoinableQueue",
"threading.Thread",
"functools.partial",
"ipywidgets.IntText",
"numpy.ceil",
"math.ceil",
"n... | [((754, 768), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (766, 768), True, 'import multiprocessing as mp\n'), ((11026, 11044), 'multiprocessing.JoinableQueue', 'mp.JoinableQueue', ([], {}), '()\n', (11042, 11044), True, 'import multiprocessing as mp\n'), ((11107, 11117), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (11115, 11117), True, 'import multiprocessing as mp\n'), ((11647, 11674), 'threading.Thread', 'Thread', ([], {'target': 'add_to_queue'}), '(target=add_to_queue)\n', (11653, 11674), False, 'from threading import Thread\n'), ((14118, 14128), 'IPython.display.display', 'display', (['g'], {}), '(g)\n', (14125, 14128), False, 'from IPython.display import display\n'), ((17096, 17114), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17112, 17114), False, 'import sys\n'), ((11158, 11214), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_worker', 'args': '(_fun, q_in, q_out, Nt)'}), '(target=_worker, args=(_fun, q_in, q_out, Nt))\n', (11168, 11214), True, 'import multiprocessing as mp\n'), ((13924, 13972), 'ipywidgets.IntText', 'IntText', ([], {'value': '(0)', 'description': "('total = %d' % tot)"}), "(value=0, description='total = %d' % tot)\n", (13931, 13972), False, 'from ipywidgets import IntProgress, IntText\n'), ((13984, 14011), 'ipywidgets.IntProgress', 'IntProgress', ([], {'min': '(0)', 'max': 'tot'}), '(min=0, max=tot)\n', (13995, 14011), False, 'from ipywidgets import IntProgress, IntText\n'), ((14019, 14029), 'IPython.display.display', 'display', (['f'], {}), '(f)\n', (14026, 14029), False, 'from IPython.display import display\n'), ((14079, 14095), 'ipywidgets.IntText', 'IntText', ([], {'value': '(0)'}), '(value=0)\n', (14086, 14095), False, 'from ipywidgets import IntProgress, IntText\n'), ((14370, 14382), 'multiprocessing.dummy.Pool', 'mpd.Pool', (['Nt'], {}), '(Nt)\n', (14378, 14382), True, 'import multiprocessing.dummy as mpd\n'), ((20601, 20620), 'numpy.concatenate', 'np.concatenate', 
(['res'], {}), '(res)\n', (20615, 20620), True, 'import numpy as np\n'), ((20872, 20888), 'numpy.atleast_1d', 'np.atleast_1d', (['X'], {}), '(X)\n', (20885, 20888), True, 'import numpy as np\n'), ((21221, 21270), 'numpy.asarray', 'np.asarray', (['([n // n_chunks] * n_chunks)'], {'dtype': 'int'}), '([n // n_chunks] * n_chunks, dtype=int)\n', (21231, 21270), True, 'import numpy as np\n'), ((9172, 9242), 'warnings.warn', 'warnings.warn', (['"""chunksize=-1 does not work when len(seq) is not known"""'], {}), "('chunksize=-1 does not work when len(seq) is not known')\n", (9185, 9242), False, 'import warnings\n'), ((9281, 9306), 'math.ceil', 'math.ceil', (['(tot / (N * Nt))'], {}), '(tot / (N * Nt))\n', (9290, 9306), False, 'import math\n'), ((9644, 9673), 'functools.partial', 'partial', (['_counter_nb'], {'tot': 'tot'}), '(_counter_nb, tot=tot)\n', (9651, 9673), False, 'from functools import partial\n'), ((9709, 9735), 'functools.partial', 'partial', (['_counter'], {'tot': 'tot'}), '(_counter, tot=tot)\n', (9716, 9735), False, 'from functools import partial\n'), ((9922, 9960), 'functools.partial', 'partial', (['tqdm.tqdm_notebook'], {'total': 'tot'}), '(tqdm.tqdm_notebook, total=tot)\n', (9929, 9960), False, 'from functools import partial\n'), ((9996, 10025), 'functools.partial', 'partial', (['tqdm.tqdm'], {'total': 'tot'}), '(tqdm.tqdm, total=tot)\n', (10003, 10025), False, 'from functools import partial\n'), ((10180, 10195), 'itertools.imap', 'imap', (['_fun', 'seq'], {}), '(_fun, seq)\n', (10184, 10195), False, 'from itertools import imap\n'), ((10228, 10240), 'multiprocessing.dummy.Pool', 'mpd.Pool', (['Nt'], {}), '(Nt)\n', (10236, 10240), True, 'import multiprocessing.dummy as mpd\n'), ((13724, 13742), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13740, 13742), False, 'import sys\n'), ((21036, 21055), 'numpy.ceil', 'np.ceil', (['(n / n_eval)'], {}), '(n / n_eval)\n', (21043, 21055), True, 'import numpy as np\n'), ((21331, 21347), 'numpy.cumsum', 
'np.cumsum', (['stops'], {}), '(stops)\n', (21340, 21347), True, 'import numpy as np\n')] |
'''
Author: <NAME>
Email: <EMAIL>
Date created: 2020/6/16
Python Version: 3.6
'''
import numpy as np
from numpy.linalg import inv as inv
# functions for train_model.py
def kr_prod(a, b):
    """Khatri-Rao product (column-wise Kronecker) of two matrices.

    For a of shape (I, R) and b of shape (J, R), returns an (I*J, R)
    matrix whose r-th column is kron(a[:, r], b[:, r]).
    """
    pairwise = a[:, None, :] * b[None, :, :]          # shape (I, J, R)
    return pairwise.reshape(a.shape[0] * b.shape[0], -1)
def cp_combine(U, V, X):
    """Reconstruct a third-order tensor from its CP factor matrices.

    U (I,R), V (J,R), X (T,R) -> (I, J, T) tensor with entries
    sum_r U[i,r] * V[j,r] * X[t,r].
    """
    factors = (U, V, X)
    return np.einsum('ir, jr, tr -> ijt', *factors)
def ten2mat(tensor, mode):
    """Unfold (matricize) a tensor along the given mode, Fortran ordering.

    Result has shape (tensor.shape[mode], prod(other dims)).
    """
    rolled = np.moveaxis(tensor, mode, 0)
    return np.reshape(rolled, (tensor.shape[mode], -1), order='F')
# ALS algorithm for CP completion
def CP_ALS(sparse_tensor_input, rank, maxiter, test_info=None):
    """CP tensor completion via Alternating Least Squares.

    Fixes a defect in the original: the training MAPE was computed twice
    with identical code; the duplicate is removed.

    Parameters
    ----------
    sparse_tensor_input : (dim1, dim2, dim3) ndarray
        Observed tensor; zero entries are treated as missing and only the
        nonzero cells are fit.
    rank : int
        CP rank of the decomposition.
    maxiter : int
        Maximum number of ALS sweeps.
    test_info : tuple, optional
        (test_pos_tuple, test_values). When given, test TCS/RMSE are
        reported every 10 iterations and iteration stops early once the
        test TCS has not improved for more than 30 iterations.

    Returns
    -------
    tensor_hat, U, V, X, min_test_cls, min_test_cls_iteration
    """
    sparse_tensor = sparse_tensor_input.copy()
    dim1, dim2, dim3 = sparse_tensor.shape
    dim = np.array([dim1, dim2, dim3])
    # Small random factor initialization.
    U = 0.1 * np.random.rand(dim1, rank)
    V = 0.1 * np.random.rand(dim2, rank)
    X = 0.1 * np.random.rand(dim3, rank)
    pos = np.where(sparse_tensor != 0)
    binary_tensor = np.zeros((dim1, dim2, dim3))
    binary_tensor[pos] = 1
    tensor_hat = np.zeros((dim1, dim2, dim3))
    min_test_cls = 999           # best (smallest) test TCS seen so far
    min_test_cls_iteration = -1  # sweep at which it was achieved
    for iters in range(maxiter):
        # Update each factor matrix in turn, holding the other two fixed.
        for order in range(dim.shape[0]):
            if order == 0:
                var1 = kr_prod(X, V).T
            elif order == 1:
                var1 = kr_prod(X, U).T
            else:
                var1 = kr_prod(V, U).T
            var2 = kr_prod(var1, var1)
            var3 = np.matmul(var2, ten2mat(binary_tensor, order).T).reshape([rank, rank, dim[order]])
            var4 = np.matmul(var1, ten2mat(sparse_tensor, order).T)
            for i in range(dim[order]):
                var_Lambda = var3[:, :, i]
                # Symmetrize and regularize before inverting for numerical safety.
                inv_var_Lambda = inv((var_Lambda + var_Lambda.T) / 2 + 10e-12 * np.eye(rank))
                vec = np.matmul(inv_var_Lambda, var4[:, i])
                if order == 0:
                    U[i, :] = vec.copy()
                elif order == 1:
                    V[i, :] = vec.copy()
                else:
                    X[i, :] = vec.copy()
        tensor_hat = cp_combine(U, V, X)
        if (iters + 1) % 10 == 0:
            # Report training error on the observed entries.
            mape = np.sum(np.abs(sparse_tensor[pos] - tensor_hat[pos]) / np.abs(sparse_tensor[pos])) / \
                   sparse_tensor[pos].shape[0]
            rmse = np.sqrt(np.sum((sparse_tensor[pos] - tensor_hat[pos]) ** 2) / sparse_tensor[pos].shape[0])
            print('Iter: {}'.format(iters + 1))
            print('Training MAPE: {:.6}'.format(mape))
            print('Training RMSE: {:.6}'.format(rmse))
            print()
            if test_info is not None:
                # Report held-out error and apply early stopping.
                test_pos_tuple, test_values = test_info
                norm_tcs = np.linalg.norm(test_values)
                error_tcs = np.linalg.norm(tensor_hat[test_pos_tuple] - test_values)
                test_tcs = error_tcs / norm_tcs
                test_rmse = np.sqrt(np.sum((test_values - tensor_hat[test_pos_tuple]) ** 2)
                                    / test_values.shape[0])
                print('Testing TCS: {:.6}'.format(test_tcs))
                print('Testing RMSE: {:.6}'.format(test_rmse))
                print()
                # Stop if the test TCS has plateaued for > 30 sweeps.
                if test_tcs < min_test_cls:
                    min_test_cls = test_tcs
                    min_test_cls_iteration = iters
                elif (iters - min_test_cls_iteration) > 30:
                    break
    return tensor_hat, U, V, X, min_test_cls, min_test_cls_iteration
def choose_test_index_for_location(location_array, test_ratio=0.2):
    """Pick a reproducible random subset of indices for one location.

    Reseeds numpy's global RNG with a fixed seed before sampling, so the
    same input always produces the same selection (without replacement).
    """
    np.random.seed(2020)
    n_test = int(len(location_array) * test_ratio)
    return np.random.choice(location_array, size=n_test, replace=False)
def choose_test_index(pos, num_of_locations=50):
    """Collect held-out test indices across all locations.

    For each location id in [0, num_of_locations), sample test indices
    from the positions where pos[0] equals that id (via
    choose_test_index_for_location), sort them, and concatenate the
    results into a single flat array.
    """
    test_index = np.array([])
    for loc in range(num_of_locations):
        rows_for_loc = np.where(pos[0] == loc)[0]
        picked = choose_test_index_for_location(rows_for_loc)
        picked.sort()
        test_index = np.concatenate((test_index, picked), axis=0)
    return test_index
| [
"numpy.moveaxis",
"numpy.random.seed",
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"numpy.einsum",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"numpy.matmul",
"numpy.random.rand",
"numpy.eye",
"numpy.concatenate"
] | [((324, 363), 'numpy.einsum', 'np.einsum', (['"""is, js, ts -> ijt"""', 'U', 'V', 'X'], {}), "('is, js, ts -> ijt', U, V, X)\n", (333, 363), True, 'import numpy as np\n'), ((684, 712), 'numpy.array', 'np.array', (['[dim1, dim2, dim3]'], {}), '([dim1, dim2, dim3])\n', (692, 712), True, 'import numpy as np\n'), ((847, 875), 'numpy.where', 'np.where', (['(sparse_tensor != 0)'], {}), '(sparse_tensor != 0)\n', (855, 875), True, 'import numpy as np\n'), ((896, 924), 'numpy.zeros', 'np.zeros', (['(dim1, dim2, dim3)'], {}), '((dim1, dim2, dim3))\n', (904, 924), True, 'import numpy as np\n'), ((969, 997), 'numpy.zeros', 'np.zeros', (['(dim1, dim2, dim3)'], {}), '((dim1, dim2, dim3))\n', (977, 997), True, 'import numpy as np\n'), ((3735, 3755), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (3749, 3755), True, 'import numpy as np\n'), ((3963, 3975), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3971, 3975), True, 'import numpy as np\n'), ((415, 443), 'numpy.moveaxis', 'np.moveaxis', (['tensor', 'mode', '(0)'], {}), '(tensor, mode, 0)\n', (426, 443), True, 'import numpy as np\n'), ((727, 753), 'numpy.random.rand', 'np.random.rand', (['dim1', 'rank'], {}), '(dim1, rank)\n', (741, 753), True, 'import numpy as np\n'), ((768, 794), 'numpy.random.rand', 'np.random.rand', (['dim2', 'rank'], {}), '(dim2, rank)\n', (782, 794), True, 'import numpy as np\n'), ((809, 835), 'numpy.random.rand', 'np.random.rand', (['dim3', 'rank'], {}), '(dim3, rank)\n', (823, 835), True, 'import numpy as np\n'), ((4201, 4254), 'numpy.concatenate', 'np.concatenate', (['(test_index, location_choice)'], {'axis': '(0)'}), '((test_index, location_choice), axis=0)\n', (4215, 4254), True, 'import numpy as np\n'), ((216, 248), 'numpy.einsum', 'np.einsum', (['"""ir, jr -> ijr"""', 'a', 'b'], {}), "('ir, jr -> ijr', a, b)\n", (225, 248), True, 'import numpy as np\n'), ((4045, 4072), 'numpy.where', 'np.where', (['(pos[0] == loc_num)'], {}), '(pos[0] == loc_num)\n', (4053, 4072), 
True, 'import numpy as np\n'), ((1729, 1766), 'numpy.matmul', 'np.matmul', (['inv_var_Lambda', 'var4[:, i]'], {}), '(inv_var_Lambda, var4[:, i])\n', (1738, 1766), True, 'import numpy as np\n'), ((2811, 2838), 'numpy.linalg.norm', 'np.linalg.norm', (['test_values'], {}), '(test_values)\n', (2825, 2838), True, 'import numpy as np\n'), ((2867, 2923), 'numpy.linalg.norm', 'np.linalg.norm', (['(tensor_hat[test_pos_tuple] - test_values)'], {}), '(tensor_hat[test_pos_tuple] - test_values)\n', (2881, 2923), True, 'import numpy as np\n'), ((2384, 2435), 'numpy.sum', 'np.sum', (['((sparse_tensor[pos] - tensor_hat[pos]) ** 2)'], {}), '((sparse_tensor[pos] - tensor_hat[pos]) ** 2)\n', (2390, 2435), True, 'import numpy as np\n'), ((2079, 2123), 'numpy.abs', 'np.abs', (['(sparse_tensor[pos] - tensor_hat[pos])'], {}), '(sparse_tensor[pos] - tensor_hat[pos])\n', (2085, 2123), True, 'import numpy as np\n'), ((2126, 2152), 'numpy.abs', 'np.abs', (['sparse_tensor[pos]'], {}), '(sparse_tensor[pos])\n', (2132, 2152), True, 'import numpy as np\n'), ((2231, 2275), 'numpy.abs', 'np.abs', (['(sparse_tensor[pos] - tensor_hat[pos])'], {}), '(sparse_tensor[pos] - tensor_hat[pos])\n', (2237, 2275), True, 'import numpy as np\n'), ((2278, 2304), 'numpy.abs', 'np.abs', (['sparse_tensor[pos]'], {}), '(sparse_tensor[pos])\n', (2284, 2304), True, 'import numpy as np\n'), ((3009, 3064), 'numpy.sum', 'np.sum', (['((test_values - tensor_hat[test_pos_tuple]) ** 2)'], {}), '((test_values - tensor_hat[test_pos_tuple]) ** 2)\n', (3015, 3064), True, 'import numpy as np\n'), ((1693, 1705), 'numpy.eye', 'np.eye', (['rank'], {}), '(rank)\n', (1699, 1705), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Tree Tracing
Here we trace a tree generated by Bayesian teaching
of a matrix of size (10*10) or (20*20)
"""
import os
import pickle
from datetime import datetime as dt
import numpy as np
import torch
# import torch.multiprocessing as mp
import multiprocessing as mp
CURDIR = os.path.abspath(os.curdir)
# os.chdir("../Stability/")
import sinkhorn_torch as sk
# os.chdir(CURDIR)
FL = torch.float64
LOG_FILES = {}
class TreeTracing:
"""
Tracing rooted tree branched by choosing different data to teach.
The tree is a rooted d+1 valence tree. Level can be infinite, but
we fix a max depth
Strategy is to do a 2-layer calculation, use multiprocessing in the second
Theoretically, all the matrices and priors are instances
of torch.Tensor
"""
@staticmethod
def gen_perturb_default(mat_teach, prior_teach, *args):
"""
Default Perturbations
"""
n_row, n_col = mat_teach.shape
perturb = args[0]
mat_learn = mat_teach.clone()
prior_learn = prior_teach.clone()
index_row, index_col = np.random.choice(n_row), np.random.choice(n_col)
mat_learn[index_row, index_col] += perturb
prior_learn[np.random.choice(n_col)] += perturb
# We need to perturb it in some way
mat_learn = sk.col_normalize(mat_learn, torch.ones(n_col, dtype=FL))
prior_learn /= torch.sum(prior_learn)
return mat_learn, prior_learn
def __init__(self, init_depth=2,
max_depth=5, name="test",
prefix=CURDIR, **args):
"""
Initialization
"""
self.correct_hypo = None
arg_processor = {"device": self.set_device,
"mat_teach": self.set_mat_teach,
"mat_learn": self.set_mat_learn,
"prior_teach": self.set_prior_teach,
"prior_learn": self.set_prior_learn,
"correct_hypo": self.set_correct_hypo,}
self.set_depth(init_depth, max_depth)
self.set_prefix(prefix+"/"+name)
for key, value in args.items():
arg_processor[key](value)
# self.log_files = LOG_FILES
# self.log_files is a collection of file handles, structure:
# log_files = {branch_id, [list of length=self.max_depth+1, all file handles ]}
def set_depth(self, init_depth, max_depth):
"""
Set init_depth and max_depth
"""
self.init_depth, self.max_depth = init_depth, max_depth
def set_prefix(self, prefix):
"""
set prefix and timestamp
"""
self.prefix = prefix
self.timestamp = dt.today().strftime("%Y-%m-%d_%H:%M:%S.%f")
def set_mat_teach(self, mat_teach):
"""
Set teaching matrix (accurate one)
"""
self.mat_teach = mat_teach
self.n_row, self.n_col = mat_teach.shape
def set_mat_learn(self, mat_learn):
"""
Set learning matrix (accurate one)
"""
self.mat_learn = mat_learn
def set_prior_learn(self, prior_learn):
"""
Set learning prior (accurate one)
"""
self.prior_learn = prior_learn
def set_prior_teach(self, prior_teach):
"""
Set teaching prior (accurate one)
"""
self.prior_teach = prior_teach
def set_correct_hypo(self, correct_hypo):
"""
Set the correct hypothesis
"""
self.correct_hypo = correct_hypo
def set_device(self, device):
"""
Set Device
"""
self.device = device
def init_default(self, n_row=10, n_col=10, perturb=0.1,
generate_perturbation=gen_perturb_default.__func__):
"""
Initialization
"""
self.n_row, self.n_col = n_row, n_col
mat_teach = torch.distributions.dirichlet.Dirichlet(torch.ones([n_row, n_col],
dtype=FL)).sample().T
prior_teach = torch.distributions.dirichlet.Dirichlet(torch.ones(n_row, dtype=FL)).sample()
mat_learn, prior_learn = generate_perturbation(mat_teach, prior_teach, perturb)
with open(self.prefix+"_setup.log", "wb") as file_ptr:
pickle.dump({"mat_teach": mat_teach,
"mat_learn": mat_learn,
"prior_teach": prior_teach,
"prior_learn": prior_learn},
file_ptr)
self.set_correct_hypo(0)
self.set_mat_teach(mat_teach)
self.set_mat_learn(mat_learn)
self.set_prior_teach(prior_teach)
self.set_prior_learn(prior_learn)
def init_log(self, branch_id, start_depth, target_depth, write_head=False):
"""
In the first round : write_head=True
others : write_head=False
In fact, each process may have different log_files, so just work with their own, no harm
"""
LOG_FILES[branch_id] = [None, ] * (self.max_depth + 1)
for layer in range(start_depth+(0 if write_head else 1), target_depth+1):
try:
LOG_FILES[branch_id][layer] = \
open(self.prefix+"_branch_"+str(branch_id)+"_layer_"+str(layer)+".log", "wb")
except IOError:
print("Error in open file at branch", branch_id, "layer", layer)
def finish_log(self, branch_id):
"""
Close all files in a branch, when finish a whole branch
"""
for handle in LOG_FILES[branch_id]:
if handle is not None:
handle.close()
# del LOG_FILES[branch_id]
print(list(map(lambda f: (f.closed if f is not None else None), LOG_FILES[branch_id])))
print("Branch", branch_id, "finished.")
def write_log(self, branch_id, layer, data):
"""
Safely write data to correct position
"""
# assert branch_id in self.log_files.keys()
# if we do things correct, no need to check this
# pickle.dump(data, self.log_files[branch_id][layer])
pickle.dump(data, LOG_FILES[branch_id][layer])
# print("Data written to branch:", branch_id, "\t layer:", layer)
# this is also for logs... we may use os.async to guarantee written.
    def single_round(self, root_data):
        """
        Single-round entry point used by the multiprocessing workers.

        Opens the branch's log files, runs the depth-first search for this
        branch, then closes the files again.
        root_data = {"branch_id": id of current branch, 0 is with grand root (layer one),
                     "start_depth": root = 0,
                     "target_depth": init_depth or max_depth,
                     "data": {"prob_bi": ,
                              "prob_scbi": ,
                              "prior_teach": current priors,
                              "prior_learn": ,
                              "prior_bayes": ,},
                     "write_head": whether branch root is written
                     }
        Returns the branch id so pool.map callers can track completion.
        """
        self.init_log(root_data["branch_id"],
                      root_data["start_depth"],
                      root_data["target_depth"],
                      root_data["write_head"])
        self.dfs(root_data["start_depth"],
                 root_data["target_depth"],
                 root_data["data"],
                 root_data["branch_id"],
                 root_data["write_head"])
        self.finish_log(root_data["branch_id"])
        return root_data["branch_id"]
    def dfs(self, current_depth, target_depth, data, branch_id, write=True):
        """
        Depth-first traversal of the teaching tree.

        Logs `data` for the current node first (unless write=False), then
        expands one child per candidate teaching datum until target_depth.
        data = {"prob_scbi": probability of getting here through scbi,
                "prob_bi": probability of getting here through bi,
                "teach": prior_teach,
                "learn": prior_learn,
                "bayes": prior_bayes}
        """
        if write:
            self.write_log(branch_id, current_depth, data)
        if current_depth == target_depth:
            return
        # Re-balance teach/learn matrices against the current priors via Sinkhorn.
        mat_teach = sk.sinkhorn_torch(self.mat_teach, col_sum=data["teach"]*self.n_row)
        mat_learn = sk.sinkhorn_torch(self.mat_learn, col_sum=data["learn"]*self.n_row)
        mat_bayes = sk.row_normalize(data["bayes"]*self.mat_teach.clone(),
                                         torch.ones(self.n_row, dtype=FL))
        for teach_d in range(self.n_row):
            # Accumulate path probability of the correct hypothesis along
            # this child; .item() detaches scalars before recursing.
            prob_bi = data["prob_bi"] * self.mat_teach[teach_d, self.correct_hypo]
            prob_scbi = data["prob_scbi"] * mat_teach[teach_d, self.correct_hypo]
            self.dfs(current_depth+1,
                     target_depth,
                     {"prob_scbi": prob_scbi.item(),
                      "prob_bi": prob_bi.item(),
                      "teach": mat_teach[teach_d],
                      "learn": mat_learn[teach_d],
                      "bayes": mat_bayes[teach_d]},
                     branch_id)
    def main(self, num_branch=40):
        """
        Main entry: run the layer-1 DFS in this process down to init_depth,
        then fan the resulting layer-2 branches out over pools of
        `num_branch` worker processes.
        """
        if self.correct_hypo is None:
            self.init_default()
        # >>> Layer 1 DFS
        root_datum = {"branch_id" : 0,
                      "start_depth" : 0,
                      "target_depth": self.init_depth,
                      "data" : {"prob_scbi": 1.,
                                "prob_bi" : 1.,
                                "teach" : self.prior_teach,
                                "learn" : self.prior_learn,
                                "bayes" : self.prior_teach,},
                      "write_head" : True,}
        self.single_round(root_datum)
        # <<< Layer 1 DFS
        # >>> Layer 2 DFS
        # Re-read the layer written by round 1; each record seeds one branch.
        root_data = []
        with open(self.prefix+"_branch_0_layer_"+str(self.init_depth)+".log",
                  "rb") as file_ptr:
            for branch_id in range(1, int(self.n_row ** self.init_depth) + 1):
                root_data += [{"branch_id" : branch_id,
                               "start_depth" : self.init_depth,
                               "target_depth": self.max_depth,
                               "data" : pickle.load(file_ptr),
                               "write_head" : False}, ]
        # possible error: we trust that file_ptr has just exact data we need!
        # print(root_data)
        # pool.map(self.test, range(10))
        # Process branches in chunks of num_branch; pool.map blocks until
        # the chunk completes, so pools run one after another.
        for i in range(int(len(root_data)/num_branch)+1):
            pool = mp.Pool(num_branch)
            pool.map(self.single_round, root_data[i * num_branch : min((i+1) * num_branch, len(root_data))])
            pool.close()
        # <<< Layer 2 DFS
if __name__ == '__main__':
    # Script entry: build the model (args 2, 5 -- presumably init_depth and
    # max_depth, TODO confirm against the constructor), initialise a 20x20
    # teach/learn setup (third arg looks like a perturbation level) and run
    # with 10 parallel worker branches.
    MODEL = TreeTracing(2, 5, name="20by20")
    MODEL.init_default(20, 20, 0.05)
    MODEL.main(10)
| [
"torch.ones",
"os.path.abspath",
"pickle.dump",
"datetime.datetime.today",
"multiprocessing.Pool",
"pickle.load",
"sinkhorn_torch.sinkhorn_torch",
"numpy.random.choice",
"torch.sum"
] | [((307, 333), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (322, 333), False, 'import os\n'), ((1415, 1437), 'torch.sum', 'torch.sum', (['prior_learn'], {}), '(prior_learn)\n', (1424, 1437), False, 'import torch\n'), ((6158, 6204), 'pickle.dump', 'pickle.dump', (['data', 'LOG_FILES[branch_id][layer]'], {}), '(data, LOG_FILES[branch_id][layer])\n', (6169, 6204), False, 'import pickle\n'), ((8078, 8147), 'sinkhorn_torch.sinkhorn_torch', 'sk.sinkhorn_torch', (['self.mat_teach'], {'col_sum': "(data['teach'] * self.n_row)"}), "(self.mat_teach, col_sum=data['teach'] * self.n_row)\n", (8095, 8147), True, 'import sinkhorn_torch as sk\n'), ((8166, 8235), 'sinkhorn_torch.sinkhorn_torch', 'sk.sinkhorn_torch', (['self.mat_learn'], {'col_sum': "(data['learn'] * self.n_row)"}), "(self.mat_learn, col_sum=data['learn'] * self.n_row)\n", (8183, 8235), True, 'import sinkhorn_torch as sk\n'), ((1114, 1137), 'numpy.random.choice', 'np.random.choice', (['n_row'], {}), '(n_row)\n', (1130, 1137), True, 'import numpy as np\n'), ((1139, 1162), 'numpy.random.choice', 'np.random.choice', (['n_col'], {}), '(n_col)\n', (1155, 1162), True, 'import numpy as np\n'), ((1234, 1257), 'numpy.random.choice', 'np.random.choice', (['n_col'], {}), '(n_col)\n', (1250, 1257), True, 'import numpy as np\n'), ((1363, 1390), 'torch.ones', 'torch.ones', (['n_col'], {'dtype': 'FL'}), '(n_col, dtype=FL)\n', (1373, 1390), False, 'import torch\n'), ((4324, 4455), 'pickle.dump', 'pickle.dump', (["{'mat_teach': mat_teach, 'mat_learn': mat_learn, 'prior_teach': prior_teach,\n 'prior_learn': prior_learn}", 'file_ptr'], {}), "({'mat_teach': mat_teach, 'mat_learn': mat_learn, 'prior_teach':\n prior_teach, 'prior_learn': prior_learn}, file_ptr)\n", (4335, 4455), False, 'import pickle\n'), ((8346, 8378), 'torch.ones', 'torch.ones', (['self.n_row'], {'dtype': 'FL'}), '(self.n_row, dtype=FL)\n', (8356, 8378), False, 'import torch\n'), ((10476, 10495), 'multiprocessing.Pool', 'mp.Pool', 
(['num_branch'], {}), '(num_branch)\n', (10483, 10495), True, 'import multiprocessing as mp\n'), ((2717, 2727), 'datetime.datetime.today', 'dt.today', ([], {}), '()\n', (2725, 2727), True, 'from datetime import datetime as dt\n'), ((4121, 4148), 'torch.ones', 'torch.ones', (['n_row'], {'dtype': 'FL'}), '(n_row, dtype=FL)\n', (4131, 4148), False, 'import torch\n'), ((3939, 3975), 'torch.ones', 'torch.ones', (['[n_row, n_col]'], {'dtype': 'FL'}), '([n_row, n_col], dtype=FL)\n', (3949, 3975), False, 'import torch\n'), ((10171, 10192), 'pickle.load', 'pickle.load', (['file_ptr'], {}), '(file_ptr)\n', (10182, 10192), False, 'import pickle\n')] |
# import os
from observations import observation as obs
from observations import obs_collection as oc
import numpy as np
import pytest
# import sys
# sys.path.insert(1, "..")
# TEST_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJECT_DIR = os.path.abspath(os.path.join(TEST_DIR, os.pardir))
# sys.path.insert(0, PROJECT_DIR)
# os.chdir(TEST_DIR)
# %% DINO
dinozip = r'./tests/data/2019-Dino-test/dino.zip'
def test_observation_gwq():
    """Read one groundwater-quality observation from a DINO txt export."""
    # single observation
    fname = r'./tests/data/2019-Dino-test/Grondwatersamenstellingen_Put/B52C0057.txt'
    ogq = obs.GroundwaterQualityObs.from_dino(fname, verbose=True)
    return ogq
def test_observation_wl():
    """Read one surface-water-level observation from a DINO csv export."""
    fname = r'./tests/data/2019-Dino-test/Peilschaal/P58A0001.csv'
    wl = obs.WaterlvlObs.from_dino(fname, verbose=True)
    return wl
def test_observation_gw():
    """Read one groundwater-head observation from a DINO csv export."""
    fname = r'./tests/data/2019-Dino-test/Grondwaterstanden_Put/B33F0080001_1.csv'
    gw = obs.GroundwaterObs.from_dino(fname=fname, verbose=True)
    return gw
def test_observation_dino_download():
    """Download a single groundwater observation from the DINO web service."""
    # download dino
    location = "B57F0077"
    filternr = 4.
    gw2 = obs.GroundwaterObs.from_dino(location=location,
                                       filternr=filternr,
                                       tmin="2000-01-01",
                                       tmax="2010-01-01", unit="NAP")
    return gw2
def test_observation_dino_download2():
    """Download a groundwater observation, passing location/filternr inline."""
    # download dino
    gw2 = obs.GroundwaterObs.from_dino(location="B57B0069", filternr=1.,
                                       tmin="2000-01-01",
                                       tmax="2010-01-01", unit="NAP")
    return gw2
def test_observation_dino_download3():
    """Download an observation whose piezometer has no extra DINO metadata."""
    # download dino data from pb without extra metadata. For this pb
    # io_dino.get_dino_piezometer_metadata() returns an empty list
    location = "B45G1147"
    filternr = 1.
    gw3 = obs.GroundwaterObs.from_dino(location=location,
                                       filternr=filternr,
                                       tmin="1900-01-01",
                                       tmax="2020-01-01", unit="NAP")
    return gw3
def test_obscollection_fieldlogger():
    """Read an observation collection from a FieldLogger locations csv."""
    # collection of observations
    fl = oc.ObsCollection.from_fieldlogger(
        r'./tests/data/2019-Dino-test/fieldlogger/locations.csv')
    return fl
def test_obscollection_from_list():
    """Rebuild an ObsCollection from a plain list of its observations."""
    dino_gw = oc.ObsCollection.from_dino(
        dirname=dinozip,
        ObsClass=obs.GroundwaterObs,
        subdir='Grondwaterstanden_Put',
        suffix='1.csv',
        keep_all_obs=True,
        verbose=False)
    # list(...) instead of a pass-through comprehension (same result, clearer).
    obs_list = list(dino_gw.obs.values)
    oc_list = oc.ObsCollection.from_list(obs_list)
    return oc_list
# read dino directories
def test_obscollection_dinozip_gw():
    """Read groundwater observations from a zipped DINO directory."""
    # groundwater quantity
    dino_gw = oc.ObsCollection.from_dino(
        dirname=dinozip,
        ObsClass=obs.GroundwaterObs,
        subdir='Grondwaterstanden_Put',
        suffix='1.csv',
        keep_all_obs=False,
        verbose=False)
    return dino_gw
def test_obscollection_dinozip_gw_keep_all_obs():
    """Read groundwater observations, also keeping ones with empty dataframes."""
    # do not delete empty dataframes
    dino_gw = oc.ObsCollection.from_dino(
        dirname=dinozip,
        ObsClass=obs.GroundwaterObs,
        subdir='Grondwaterstanden_Put',
        suffix='1.csv',
        keep_all_obs=True,
        verbose=False)
    return dino_gw
def test_obscollection_dinozip_wl():
    """Read surface-water-level observations from a zipped DINO directory."""
    # surface water
    dino_ps = oc.ObsCollection.from_dino(
        dirname=dinozip,
        ObsClass=obs.WaterlvlObs,
        subdir='Peilschaal',
        suffix='.csv',
        verbose=True)
    return dino_ps
def test_obscollection_dinozip_gwq():
    """Read groundwater-quality observations from a zipped DINO directory."""
    # groundwater quality
    dino_gwq = oc.ObsCollection.from_dino(
        dirname=dinozip,
        ObsClass=obs.GroundwaterQualityObs,
        subdir='Grondwatersamenstellingen_Put',
        suffix='.txt',
        verbose=True)
    return dino_gwq
def test_obscollection_dino_download_extent():
    """Download a DINO groundwater collection for a spatial extent."""
    # download DINO from extent
    extent = [117850, 117980, 439550, 439700]  # Schoonhoven zoomed
    dino_gw_extent = oc.ObsCollection.from_dino(
        extent=extent, ObsClass=obs.GroundwaterObs, verbose=True)
    return dino_gw_extent
def test_obscollection_dino_download_bbox():
    """Download a DINO groundwater collection for a bounding box.

    The bbox is passed as a numpy array to exercise that input type.
    (A dead list assignment that was immediately overwritten was removed.)
    """
    # download DINO from bbox
    bbox = np.array([191608.334, 409880.402, 193072.317, 411477.894])
    dino_gw_bbox = oc.ObsCollection.from_dino(
        bbox=bbox, ObsClass=obs.GroundwaterObs, verbose=True)
    return dino_gw_bbox
def test_obscollection_dino_download_bbox_only_metadata():
    """Check the keep_all_obs argument: with keep_all_obs=False this bbox
    yields an empty collection."""
    # check if the keep_all_obs argument works
    bbox = [120110.8948323, 389471.92587313, 121213.23597266, 390551.29918915]
    dino_gw_bbox = oc.ObsCollection.from_dino(bbox=bbox, verbose=True)
    dino_gw_bbox_empty = oc.ObsCollection.from_dino(bbox=bbox,
                                               keep_all_obs=False,
                                               verbose=True)
    assert dino_gw_bbox_empty.empty
    return dino_gw_bbox
def test_obscollection_dino_download_bbox_empty():
    """Download from a bounding box that contains no observations."""
    # download DINO from bbox
    bbox = [88596.63500000164, 407224.8449999988,
            89623.4149999991, 407804.27800000086]
    dino_gw_bbox = oc.ObsCollection.from_dino(
        bbox=bbox, ObsClass=obs.GroundwaterObs, verbose=True)
    return dino_gw_bbox
def test_obscollection_dino_download_bbox_do_not_keep_all_obs():
    """Download from a bounding box with the default keep_all_obs behaviour."""
    bbox = [120110.8948323, 389471.92587313, 121213.23597266, 390551.29918915]
    dino_gw_bbox = oc.ObsCollection.from_dino(bbox=bbox, verbose=True)
    return dino_gw_bbox
# collection methods
def test_obscollection_to_fieldlogger():
    """Export a DINO collection to a FieldLogger locations csv."""
    dino_gw = test_obscollection_dinozip_gw()
    fdf = dino_gw.to_fieldlogger(
        r'./tests/data/2019-Dino-test/fieldlogger/locations.csv', verbose=True)
    return fdf
# %% FEWS
def test_obscollection_fews_highmemory():
    """Read a FEWS export zip with the high-memory parser (low_memory=False)."""
    fews_gw_prod = oc.ObsCollection.from_fews(
        r'./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',
        translate_dic={'locationId': 'locatie'},
        verbose=True,
        to_mnap=False,
        remove_nan=False,
        low_memory=False)
    return fews_gw_prod
def test_obscollection_fews_lowmemory():
    """Read a FEWS export zip with the low-memory parser."""
    fews_gw_prod = oc.ObsCollection.from_fews(
        r'./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',
        verbose=True,
        locations=None,
        low_memory=True)
    return fews_gw_prod
def test_obscollection_fews_selection():
    """Read only a selected location from a FEWS export zip."""
    fews_gw_prod = oc.ObsCollection.from_fews(
        r'./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',
        verbose=True,
        locations=("MPN-N-2",)
    )
    return fews_gw_prod
# %% WISKI
def test_observation_wiskicsv_gw():
    """Read a single groundwater observation from a WISKI csv export."""
    wiski_gw = obs.GroundwaterObs.from_wiski(
        r"./tests/data/2019-WISKI-test/1016_PBF.csv",
        sep=r'\s+',
        header_sep=':',
        header_identifier=':',
        parse_dates={"datetime": [0, 1]},
        index_col=["datetime"],
        translate_dic={
            'name': 'Station Number',
            'x': 'GlobalX',
            'y': 'GlobalY'},
        verbose=True)
    return wiski_gw
def test_obscollection_wiskizip_gw():
    """Read a collection of WISKI csv exports from a zip file."""
    wiski_col = oc.ObsCollection.from_wiski(
        r"./tests/data/2019-WISKI-test/1016_PBF.zip",
        translate_dic={
            'name': 'Station Number',
            'x': 'GlobalX',
            'y': 'GlobalY'},
        sep=r'\s+',
        header_sep=':',
        dayfirst=True,
        header_identifier=':',
        parse_dates={"datetime": [0, 1]},
        index_col=["datetime"],
        verbose=True)
    return wiski_col
# %% PASTAS PROJECTS AND PASTASTORE
@pytest.mark.skip(reason="needs installation pastastore")
def test_to_pastas_project():
    """Convert a DINO collection to a pastas project (requires pastastore)."""
    dino_gw = test_obscollection_dinozip_gw()
    pr = dino_gw.to_pastas_project(verbose=True)
    return pr
@pytest.mark.skip(reason="needs installation pastastore")
def test_to_pastastore():
    """Convert a DINO collection to a pastastore (requires pastastore)."""
    dino_gw = test_obscollection_dinozip_gw()
    pstore = dino_gw.to_pastastore(verbose=True)
    return pstore
@pytest.mark.skip(reason="needs installation pastastore")
def test_from_pastas_project():
    """Round-trip: rebuild an ObsCollection from a pastas project."""
    pr = test_to_pastas_project()
    pr_oc = oc.ObsCollection.from_pastas_project(pr)
    return pr_oc
# %% PYSTORE
def test_obscollection_to_pystore():
    """Write an ObsCollection to a pystore, grouped by location."""
    obsc = test_obscollection_fews_lowmemory()
    obsc.to_pystore("test_pystore", "./tests/data/2019-Pystore-test",
                    groupby="locatie", overwrite=True)
def test_obscollection_from_pystore():
    """Read a full ObsCollection back from a pystore."""
    obsc = oc.ObsCollection.from_pystore(
        "test_pystore", "./tests/data/2019-Pystore-test")
    return obsc
def test_obscollection_pystore_only_metadata():
    """Read only metadata (no time series) from a pystore."""
    obsc = oc.ObsCollection.from_pystore("test_pystore",
                                      "./tests/data/2019-Pystore-test",
                                      read_series=False)
    return obsc
def test_obscollection_pystore_extent():
    """Read a pystore filtered by a spatial extent."""
    obsc = oc.ObsCollection.from_pystore("test_pystore",
                                      "./tests/data/2019-Pystore-test",
                                      extent=[115534, 115539, 0, 10000000]
                                      )
    return obsc
def test_obscollection_pystore_item_names():
    """Read only the named items from a pystore."""
    obsc = oc.ObsCollection.from_pystore("test_pystore",
                                      "./tests/data/2019-Pystore-test",
                                      item_names=['MPN-N-2']
                                      )
    return obsc
def test_obs_from_pystore_item():
    """Build a GroundwaterObs directly from a single pystore item."""
    import pystore
    pystore.set_path("./tests/data/2019-Pystore-test")
    store = pystore.store("test_pystore")
    coll = store.collection(store.collections[0])
    item = coll.item(list(coll.list_items())[0])
    o = obs.GroundwaterObs.from_pystore_item(item)
    return o
# %% KNMI
def test_knmi_obs_from_stn():
    """Download KNMI variable 'RD' for station 829."""
    return obs.KnmiObs.from_knmi(829, "RD")
def test_knmi_obs_from_xy():
    """Download KNMI data from the station nearest to an x, y location."""
    return obs.KnmiObs.from_nearest_xy(100000, 350000, "RD")
def test_knmi_obs_from_obs():
    """Download KNMI variable 'EV24' near an existing groundwater observation."""
    pb = test_observation_gw()
    return obs.KnmiObs.from_obs(pb, "EV24", fill_missing_obs=False)
# %% WATERINFO
def test_waterinfo_from_dir():
    """Read waterinfo exports from a directory."""
    path = "./tests/data/waterinfo-test"
    wi = oc.ObsCollection.from_waterinfo(path)
    return wi
# %% MENYANTHES (still need a small menyanthes file to do the test)
# def test_obscollection_menyanthes():
#
# fname = r'export_from_ADI.men'
# obsc = oc.ObsCollection.from_menyanthes(fname, verbose=True)
#
# return obsc
| [
"observations.observation.GroundwaterQualityObs.from_dino",
"pystore.set_path",
"observations.observation.KnmiObs.from_nearest_xy",
"observations.observation.GroundwaterObs.from_wiski",
"pytest.mark.skip",
"observations.obs_collection.ObsCollection.from_fews",
"observations.obs_collection.ObsCollection.... | [((7670, 7726), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""needs installation pastastore"""'}), "(reason='needs installation pastastore')\n", (7686, 7726), False, 'import pytest\n'), ((7870, 7926), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""needs installation pastastore"""'}), "(reason='needs installation pastastore')\n", (7886, 7926), False, 'import pytest\n'), ((8070, 8126), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""needs installation pastastore"""'}), "(reason='needs installation pastastore')\n", (8086, 8126), False, 'import pytest\n'), ((568, 624), 'observations.observation.GroundwaterQualityObs.from_dino', 'obs.GroundwaterQualityObs.from_dino', (['fname'], {'verbose': '(True)'}), '(fname, verbose=True)\n', (603, 624), True, 'from observations import observation as obs\n'), ((745, 791), 'observations.observation.WaterlvlObs.from_dino', 'obs.WaterlvlObs.from_dino', (['fname'], {'verbose': '(True)'}), '(fname, verbose=True)\n', (770, 791), True, 'from observations import observation as obs\n'), ((927, 982), 'observations.observation.GroundwaterObs.from_dino', 'obs.GroundwaterObs.from_dino', ([], {'fname': 'fname', 'verbose': '(True)'}), '(fname=fname, verbose=True)\n', (955, 982), True, 'from observations import observation as obs\n'), ((1111, 1232), 'observations.observation.GroundwaterObs.from_dino', 'obs.GroundwaterObs.from_dino', ([], {'location': 'location', 'filternr': 'filternr', 'tmin': '"""2000-01-01"""', 'tmax': '"""2010-01-01"""', 'unit': '"""NAP"""'}), "(location=location, filternr=filternr, tmin=\n '2000-01-01', tmax='2010-01-01', unit='NAP')\n", (1139, 1232), True, 'from observations import observation as obs\n'), ((1431, 1549), 'observations.observation.GroundwaterObs.from_dino', 'obs.GroundwaterObs.from_dino', ([], {'location': '"""B57B0069"""', 'filternr': '(1.0)', 'tmin': '"""2000-01-01"""', 'tmax': '"""2010-01-01"""', 'unit': '"""NAP"""'}), 
"(location='B57B0069', filternr=1.0, tmin=\n '2000-01-01', tmax='2010-01-01', unit='NAP')\n", (1459, 1549), True, 'from observations import observation as obs\n'), ((1869, 1990), 'observations.observation.GroundwaterObs.from_dino', 'obs.GroundwaterObs.from_dino', ([], {'location': 'location', 'filternr': 'filternr', 'tmin': '"""1900-01-01"""', 'tmax': '"""2020-01-01"""', 'unit': '"""NAP"""'}), "(location=location, filternr=filternr, tmin=\n '1900-01-01', tmax='2020-01-01', unit='NAP')\n", (1897, 1990), True, 'from observations import observation as obs\n'), ((2200, 2295), 'observations.obs_collection.ObsCollection.from_fieldlogger', 'oc.ObsCollection.from_fieldlogger', (['"""./tests/data/2019-Dino-test/fieldlogger/locations.csv"""'], {}), "(\n './tests/data/2019-Dino-test/fieldlogger/locations.csv')\n", (2233, 2295), True, 'from observations import obs_collection as oc\n'), ((2367, 2529), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'dirname': 'dinozip', 'ObsClass': 'obs.GroundwaterObs', 'subdir': '"""Grondwaterstanden_Put"""', 'suffix': '"""1.csv"""', 'keep_all_obs': '(True)', 'verbose': '(False)'}), "(dirname=dinozip, ObsClass=obs.GroundwaterObs,\n subdir='Grondwaterstanden_Put', suffix='1.csv', keep_all_obs=True,\n verbose=False)\n", (2393, 2529), True, 'from observations import obs_collection as oc\n'), ((2632, 2668), 'observations.obs_collection.ObsCollection.from_list', 'oc.ObsCollection.from_list', (['obs_list'], {}), '(obs_list)\n', (2658, 2668), True, 'from observations import obs_collection as oc\n'), ((2792, 2955), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'dirname': 'dinozip', 'ObsClass': 'obs.GroundwaterObs', 'subdir': '"""Grondwaterstanden_Put"""', 'suffix': '"""1.csv"""', 'keep_all_obs': '(False)', 'verbose': '(False)'}), "(dirname=dinozip, ObsClass=obs.GroundwaterObs,\n subdir='Grondwaterstanden_Put', suffix='1.csv', keep_all_obs=False,\n 
verbose=False)\n", (2818, 2955), True, 'from observations import obs_collection as oc\n'), ((3119, 3281), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'dirname': 'dinozip', 'ObsClass': 'obs.GroundwaterObs', 'subdir': '"""Grondwaterstanden_Put"""', 'suffix': '"""1.csv"""', 'keep_all_obs': '(True)', 'verbose': '(False)'}), "(dirname=dinozip, ObsClass=obs.GroundwaterObs,\n subdir='Grondwaterstanden_Put', suffix='1.csv', keep_all_obs=True,\n verbose=False)\n", (3145, 3281), True, 'from observations import obs_collection as oc\n'), ((3415, 3538), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'dirname': 'dinozip', 'ObsClass': 'obs.WaterlvlObs', 'subdir': '"""Peilschaal"""', 'suffix': '""".csv"""', 'verbose': '(True)'}), "(dirname=dinozip, ObsClass=obs.WaterlvlObs,\n subdir='Peilschaal', suffix='.csv', verbose=True)\n", (3441, 3538), True, 'from observations import obs_collection as oc\n'), ((3676, 3834), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'dirname': 'dinozip', 'ObsClass': 'obs.GroundwaterQualityObs', 'subdir': '"""Grondwatersamenstellingen_Put"""', 'suffix': '""".txt"""', 'verbose': '(True)'}), "(dirname=dinozip, ObsClass=obs.\n GroundwaterQualityObs, subdir='Grondwatersamenstellingen_Put', suffix=\n '.txt', verbose=True)\n", (3702, 3834), True, 'from observations import obs_collection as oc\n'), ((4056, 4144), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'extent': 'extent', 'ObsClass': 'obs.GroundwaterObs', 'verbose': '(True)'}), '(extent=extent, ObsClass=obs.GroundwaterObs,\n verbose=True)\n', (4082, 4144), True, 'from observations import obs_collection as oc\n'), ((4330, 4388), 'numpy.array', 'np.array', (['[191608.334, 409880.402, 193072.317, 411477.894]'], {}), '([191608.334, 409880.402, 193072.317, 411477.894])\n', (4338, 4388), True, 'import numpy as np\n'), ((4408, 4493), 
'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'bbox': 'bbox', 'ObsClass': 'obs.GroundwaterObs', 'verbose': '(True)'}), '(bbox=bbox, ObsClass=obs.GroundwaterObs, verbose=True\n )\n', (4434, 4493), True, 'from observations import obs_collection as oc\n'), ((4728, 4779), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'bbox': 'bbox', 'verbose': '(True)'}), '(bbox=bbox, verbose=True)\n', (4754, 4779), True, 'from observations import obs_collection as oc\n'), ((4806, 4877), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'bbox': 'bbox', 'keep_all_obs': '(False)', 'verbose': '(True)'}), '(bbox=bbox, keep_all_obs=False, verbose=True)\n', (4832, 4877), True, 'from observations import obs_collection as oc\n'), ((5245, 5330), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'bbox': 'bbox', 'ObsClass': 'obs.GroundwaterObs', 'verbose': '(True)'}), '(bbox=bbox, ObsClass=obs.GroundwaterObs, verbose=True\n )\n', (5271, 5330), True, 'from observations import obs_collection as oc\n'), ((5524, 5575), 'observations.obs_collection.ObsCollection.from_dino', 'oc.ObsCollection.from_dino', ([], {'bbox': 'bbox', 'verbose': '(True)'}), '(bbox=bbox, verbose=True)\n', (5550, 5575), True, 'from observations import obs_collection as oc\n'), ((5912, 6124), 'observations.obs_collection.ObsCollection.from_fews', 'oc.ObsCollection.from_fews', (['"""./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip"""'], {'translate_dic': "{'locationId': 'locatie'}", 'verbose': '(True)', 'to_mnap': '(False)', 'remove_nan': '(False)', 'low_memory': '(False)'}), "(\n './tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',\n translate_dic={'locationId': 'locatie'}, verbose=True, to_mnap=False,\n remove_nan=False, low_memory=False)\n", (5938, 6124), True, 'from observations import obs_collection as oc\n'), ((6248, 6397), 
'observations.obs_collection.ObsCollection.from_fews', 'oc.ObsCollection.from_fews', (['"""./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip"""'], {'verbose': '(True)', 'locations': 'None', 'low_memory': '(True)'}), "(\n './tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',\n verbose=True, locations=None, low_memory=True)\n", (6274, 6397), True, 'from observations import obs_collection as oc\n'), ((6509, 6649), 'observations.obs_collection.ObsCollection.from_fews', 'oc.ObsCollection.from_fews', (['"""./tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip"""'], {'verbose': '(True)', 'locations': "('MPN-N-2',)"}), "(\n './tests/data/2019-FEWS-test/WaalenBurg_201810-20190215_prod.zip',\n verbose=True, locations=('MPN-N-2',))\n", (6535, 6649), True, 'from observations import obs_collection as oc\n'), ((6761, 7045), 'observations.observation.GroundwaterObs.from_wiski', 'obs.GroundwaterObs.from_wiski', (['"""./tests/data/2019-WISKI-test/1016_PBF.csv"""'], {'sep': '"""\\\\s+"""', 'header_sep': '""":"""', 'header_identifier': '""":"""', 'parse_dates': "{'datetime': [0, 1]}", 'index_col': "['datetime']", 'translate_dic': "{'name': 'Station Number', 'x': 'GlobalX', 'y': 'GlobalY'}", 'verbose': '(True)'}), "('./tests/data/2019-WISKI-test/1016_PBF.csv',\n sep='\\\\s+', header_sep=':', header_identifier=':', parse_dates={\n 'datetime': [0, 1]}, index_col=['datetime'], translate_dic={'name':\n 'Station Number', 'x': 'GlobalX', 'y': 'GlobalY'}, verbose=True)\n", (6790, 7045), True, 'from observations import observation as obs\n'), ((7213, 7510), 'observations.obs_collection.ObsCollection.from_wiski', 'oc.ObsCollection.from_wiski', (['"""./tests/data/2019-WISKI-test/1016_PBF.zip"""'], {'translate_dic': "{'name': 'Station Number', 'x': 'GlobalX', 'y': 'GlobalY'}", 'sep': '"""\\\\s+"""', 'header_sep': '""":"""', 'dayfirst': '(True)', 'header_identifier': '""":"""', 'parse_dates': "{'datetime': [0, 1]}", 'index_col': "['datetime']", 'verbose': 
'(True)'}), "('./tests/data/2019-WISKI-test/1016_PBF.zip',\n translate_dic={'name': 'Station Number', 'x': 'GlobalX', 'y': 'GlobalY'\n }, sep='\\\\s+', header_sep=':', dayfirst=True, header_identifier=':',\n parse_dates={'datetime': [0, 1]}, index_col=['datetime'], verbose=True)\n", (7240, 7510), True, 'from observations import obs_collection as oc\n'), ((8206, 8246), 'observations.obs_collection.ObsCollection.from_pastas_project', 'oc.ObsCollection.from_pastas_project', (['pr'], {}), '(pr)\n', (8242, 8246), True, 'from observations import obs_collection as oc\n'), ((8542, 8621), 'observations.obs_collection.ObsCollection.from_pystore', 'oc.ObsCollection.from_pystore', (['"""test_pystore"""', '"""./tests/data/2019-Pystore-test"""'], {}), "('test_pystore', './tests/data/2019-Pystore-test')\n", (8571, 8621), True, 'from observations import obs_collection as oc\n'), ((8708, 8810), 'observations.obs_collection.ObsCollection.from_pystore', 'oc.ObsCollection.from_pystore', (['"""test_pystore"""', '"""./tests/data/2019-Pystore-test"""'], {'read_series': '(False)'}), "('test_pystore',\n './tests/data/2019-Pystore-test', read_series=False)\n", (8737, 8810), True, 'from observations import obs_collection as oc\n'), ((8959, 9080), 'observations.obs_collection.ObsCollection.from_pystore', 'oc.ObsCollection.from_pystore', (['"""test_pystore"""', '"""./tests/data/2019-Pystore-test"""'], {'extent': '[115534, 115539, 0, 10000000]'}), "('test_pystore',\n './tests/data/2019-Pystore-test', extent=[115534, 115539, 0, 10000000])\n", (8988, 9080), True, 'from observations import obs_collection as oc\n'), ((9275, 9382), 'observations.obs_collection.ObsCollection.from_pystore', 'oc.ObsCollection.from_pystore', (['"""test_pystore"""', '"""./tests/data/2019-Pystore-test"""'], {'item_names': "['MPN-N-2']"}), "('test_pystore',\n './tests/data/2019-Pystore-test', item_names=['MPN-N-2'])\n", (9304, 9382), True, 'from observations import obs_collection as oc\n'), ((9578, 9628), 
'pystore.set_path', 'pystore.set_path', (['"""./tests/data/2019-Pystore-test"""'], {}), "('./tests/data/2019-Pystore-test')\n", (9594, 9628), False, 'import pystore\n'), ((9641, 9670), 'pystore.store', 'pystore.store', (['"""test_pystore"""'], {}), "('test_pystore')\n", (9654, 9670), False, 'import pystore\n'), ((9778, 9820), 'observations.observation.GroundwaterObs.from_pystore_item', 'obs.GroundwaterObs.from_pystore_item', (['item'], {}), '(item)\n', (9814, 9820), True, 'from observations import observation as obs\n'), ((9887, 9919), 'observations.observation.KnmiObs.from_knmi', 'obs.KnmiObs.from_knmi', (['(829)', '"""RD"""'], {}), "(829, 'RD')\n", (9908, 9919), True, 'from observations import observation as obs\n'), ((9962, 10011), 'observations.observation.KnmiObs.from_nearest_xy', 'obs.KnmiObs.from_nearest_xy', (['(100000)', '(350000)', '"""RD"""'], {}), "(100000, 350000, 'RD')\n", (9989, 10011), True, 'from observations import observation as obs\n'), ((10086, 10142), 'observations.observation.KnmiObs.from_obs', 'obs.KnmiObs.from_obs', (['pb', '"""EV24"""'], {'fill_missing_obs': '(False)'}), "(pb, 'EV24', fill_missing_obs=False)\n", (10106, 10142), True, 'from observations import observation as obs\n'), ((10242, 10279), 'observations.obs_collection.ObsCollection.from_waterinfo', 'oc.ObsCollection.from_waterinfo', (['path'], {}), '(path)\n', (10273, 10279), True, 'from observations import obs_collection as oc\n')] |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.DataType import DataType
from bifrost.libbifrost import _bf
from datetime import datetime
import numpy as np
import hickle as hkl
from copy import deepcopy
class BeanfarmerDp4aBlock(bf.pipeline.TransformBlock):
def __init__(self, iring, n_avg=1, n_beam=32, n_chan=512, n_pol=2, n_ant=12, weights_file='',
*args, **kwargs):
super(BeanfarmerDp4aBlock, self).__init__(iring, *args, **kwargs)
self.n_avg = n_avg
self.n_beam = n_beam
self.n_pol = n_pol
self.n_chan = n_chan
self.n_ant = n_ant
self.frame_count = 0
self.weights_file = weights_file
def define_valid_input_spaces(self):
"""Return set of valid spaces (or 'any') for each input"""
return ('cuda',)
def on_sequence(self, iseq):
self.frame_count = 0
ihdr = iseq.header
itensor = ihdr['_tensor']
to_raise = False
if self.weights_file in ('', None):
to_raise = True
print('ERR: need to specify weights hickle file')
else:
w = hkl.load(self.weights_file)
try:
assert w.shape == (self.n_chan, self.n_beam, self.n_pol, self.n_ant)
assert w.dtype.names[0] == 're'
assert w.dtype.names[1] == 'im'
assert str(w.dtype[0]) == 'int8'
except AssertionError:
print('ERR: beam weight shape/dtype is incorrect')
print('ERR: beam weights shape is: %s' % str(w.shape))
print('ERR: shape should be %s' % str((self.n_chan, self.n_beam, self.n_pol, self.n_ant, 2)))
print('ERR: dtype should be int8, dtype: %s' % w.dtype.str)
to_raise = True
#w = np.ones((self.n_chan, self.n_beam, self.n_pol, self.n_ant), dtype='int8')
self.weights = bf.ndarray(w, dtype='ci8', space='cuda')
try:
assert(itensor['labels'] == ['time', 'freq', 'fine_time', 'pol', 'station'])
assert(itensor['dtype'] == 'ci8')
assert(ihdr['gulp_nframe'] == 1)
except AssertionError:
print('ERR: gulp_nframe %s (must be 1!)' % str(ihdr['gulp_nframe']))
print('ERR: Frame shape %s' % str(itensor['shape']))
print('ERR: Frame labels %s' % str(itensor['labels']))
print('ERR: Frame dtype %s' % itensor['dtype'])
to_raise = True
if to_raise:
raise RuntimeError('Correlator block misconfiguration. Check tensor labels, dtype, shape, gulp size).')
ohdr = deepcopy(ihdr)
otensor = ohdr['_tensor']
otensor['dtype'] = 'cf32'
# output is (time, channel, beam, fine_time)
ft0, fts = itensor['scales'][2]
otensor['shape'] = [itensor['shape'][0], itensor['shape'][1], self.n_beam, itensor['shape'][2] // self.n_avg]
otensor['labels'] = ['time', 'freq', 'beam', 'fine_time']
otensor['scales'] = [itensor['scales'][0], itensor['scales'][1], [0, 0], [ft0, fts / self.n_avg]]
otensor['units'] = [itensor['units'][0], itensor['units'][1], None, itensor['units'][2]]
otensor['dtype'] = 'f32'
return ohdr
def on_data(self, ispan, ospan):
    """Beamform one gulp: read from ``ispan``, write results to ``ospan``.

    Invokes the BeanFarmer GPU kernel with the pre-loaded weights and the
    fine_time averaging factor configured on the block.

    Returns:
        int: Number of frames committed to the output ring.
    """
    idata = ispan.data
    odata = ospan.data
    # Run the beamformer
    #print(idata.shape, self.weights.shape, odata.shape, self.n_avg)
    # NOTE(review): `res` is presumably a status/return code from the kernel
    # launch but is never checked — confirm whether errors should be handled.
    res = _bf.BeanFarmer(idata.as_BFarray(), self.weights.as_BFarray(), odata.as_BFarray(), np.int32(self.n_avg))
    ncommit = ispan.data.shape[0]
    return ncommit
def beanfarmer(iring, n_avg=1, n_beam=32, n_chan=512, n_pol=2, n_ant=12, weights_file='', *args, **kwargs):
    """ Beamform, detect + integrate (filterbank) array using GPU.

    ** Tensor Semantics **
    Input:  [time, freq, fine_time, pol, station]
    Output: [time, freq, beam, fine_time]
    Notes: Averages across fine_time.

    Limitations:
        * Requires 8-bit complex data input
        * Currently only works if gulp_nframe = 1

    Args:
        iring (Ring or Block): Input data source.
        n_avg (int): Number of frames to average together. 1 = no averaging.
        n_beam (int): Number of beams to form.
        n_chan (int): Number of channels.
        n_pol (int): Number of polarizations for antennas (1 or 2).
        n_ant (int): Number of antennas/stands (n_ant=12 and n_pol=2 means 24 inputs).
        weights_file (str): Path to hickle file in which beam weights are stored.
            Beam weights must have shape (n_chan, n_beam, n_pol, n_ant) with
            structured 're'/'im' int8 fields.
        *args: Arguments to ``bifrost.pipeline.TransformBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.

    Returns:
        BeanfarmerDp4aBlock: A new beanfarmer block instance.
    """
    return BeanfarmerDp4aBlock(iring, n_avg, n_beam, n_chan, n_pol, n_ant, weights_file, *args, **kwargs)
| [
"hickle.load",
"bifrost.ndarray",
"copy.deepcopy",
"numpy.int32"
] | [((3476, 3516), 'bifrost.ndarray', 'bf.ndarray', (['w'], {'dtype': '"""ci8"""', 'space': '"""cuda"""'}), "(w, dtype='ci8', space='cuda')\n", (3486, 3516), True, 'import bifrost as bf\n'), ((4205, 4219), 'copy.deepcopy', 'deepcopy', (['ihdr'], {}), '(ihdr)\n', (4213, 4219), False, 'from copy import deepcopy\n'), ((2743, 2770), 'hickle.load', 'hkl.load', (['self.weights_file'], {}), '(self.weights_file)\n', (2751, 2770), True, 'import hickle as hkl\n'), ((5116, 5136), 'numpy.int32', 'np.int32', (['self.n_avg'], {}), '(self.n_avg)\n', (5124, 5136), True, 'import numpy as np\n')] |
import os
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import spacy
from data.loader import DataLoader
from model.rnn import SubjectObjectRelationModel
from utils import scorer, constant, helper
from utils.vocab import Vocab
# BUG FIX: CUDA_LAUNCH_BLOCKING only has an effect as an *environment
# variable* read by the CUDA runtime; a plain Python variable of that name
# does nothing. Export it instead (it must be set before the CUDA context
# is initialised for synchronous launches to take effect).
os.environ.setdefault('CUDA_LAUNCH_BLOCKING', '1')

# Command-line configuration for training.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset')
parser.add_argument('--vocab_dir', type=str, default='vocab')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--dep_dim', type=int, default=30, help='DEP embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')
# NOTE: the default 1e10 is a float; argparse only applies type=int to
# command-line strings, not to defaults.
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N embeddings.')
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
parser.add_argument('--attn', dest='attn', action='store_true', help='Use attention layer.')
parser.add_argument('--no-attn', dest='attn', action='store_false')
parser.set_defaults(attn=True)
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--pe_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--lr', type=float, default=1, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--optim', type=str, default='sgd', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=5, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='02', help='Model ID under which to save models.')
parser.add_argument('--seed', type=int, default=1234)
# BUG FIX: the default was the literal string "torch.cuda.is_available()",
# which is always truthy — call the function instead. (argparse's type=bool
# is itself unreliable on the CLI: any non-empty string, including "False",
# parses as True. The flag is kept unchanged for backward compatibility;
# use --cpu to force CPU.)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
# Seed all RNGs for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# NOTE(review): Python's RNG is seeded with a hard-coded 1234 rather than
# args.seed — confirm whether this is intentional.
random.seed(1234)
if args.cpu:
    args.cuda = False
elif args.cuda:
    torch.cuda.manual_seed(args.seed)
# make opt: a plain dict of all hyperparameters, shared with model/data code.
opt = vars(args)
opt['num_class'] = len(constant.LABEL_TO_ID)
# load vocab
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
# Pre-trained embeddings must line up with the vocabulary and configured dim.
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']
# load spacy model for pos tags and dependency tags
spacy_model = spacy.load("en_core_web_lg")
# load data
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, spacy_model, evaluation=False)
# NOTE(review): the dev loader is built with evaluation=False — verify this
# is intended (evaluation=True would typically disable shuffling/dropout of
# examples for the dev set).
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, spacy_model, evaluation=False)
# Output directory layout: <save_dir>/<id>/{config.json, vocab.pkl, logs, checkpoints}
model_id = opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)
# save config
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_f1")
# print model info
helper.print_config(opt)
# model
model = SubjectObjectRelationModel(opt, emb_matrix=emb_matrix)
# Inverse maps: model output index -> class id -> human-readable label.
class2id = dict([(v, k) for k, v in constant.ID_TO_CLASS.items()])
id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])
dev_f1_history = []
current_lr = opt['lr']
global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# start training: one pass over train data per epoch, then dev evaluation,
# checkpointing, and a simple F1-based learning-rate decay schedule.
for epoch in range(1, opt['num_epoch'] + 1):
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = model.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            # `duration` only measures the current batch, not the whole log interval.
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch, \
                opt['num_epoch'], loss, duration, current_lr))
    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = model.predict(batch)
        predictions += preds
        dev_loss += loss
    # Two-step decode: output index -> class id -> label string (scorer expects labels).
    predictions = [class2id[p] for p in predictions]
    predictions = [id2label[p] for p in predictions]
    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
    train_loss = train_loss / train_batch.num_examples * opt['batch_size']  # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch, \
        train_loss, dev_loss, dev_f1))
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_f1))
    # save a checkpoint every epoch; keep it only on save_epoch boundaries,
    # and promote it to best_model.pt if dev F1 improved.
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    model.save(model_file, epoch)
    if epoch == 1 or dev_f1 > max(dev_f1_history):
        copyfile(model_file, model_save_dir + '/best_model.pt')
        print("new best model saved.")
    if epoch % opt['save_epoch'] != 0:
        os.remove(model_file)
    # lr schedule: decay when dev F1 stops improving (SGD/Adagrad only).
    if len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1] and \
            opt['optim'] in ['sgd', 'adagrad']:
        current_lr *= opt['lr_decay']
        model.update_lr(current_lr)
    dev_f1_history += [dev_f1]
    print("")

print("Training ended with {} epochs.".format(epoch))
| [
"numpy.load",
"os.remove",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.constant.LABEL_TO_ID.items",
"spacy.load",
"random.seed",
"utils.helper.ensure_dir",
"utils.constant.ID_TO_CLASS.items",
"utils.helper.save_config",
"shutil.copyfile",
"datetime.datetime.now",
"utils.helper.Fil... | [((362, 387), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (385, 387), False, 'import argparse\n'), ((3033, 3061), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3050, 3061), False, 'import torch\n'), ((3063, 3088), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3077, 3088), True, 'import numpy as np\n'), ((3090, 3107), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (3101, 3107), False, 'import random\n'), ((3354, 3382), 'utils.vocab.Vocab', 'Vocab', (['vocab_file'], {'load': '(True)'}), '(vocab_file, load=True)\n', (3359, 3382), False, 'from utils.vocab import Vocab\n'), ((3477, 3494), 'numpy.load', 'np.load', (['emb_file'], {}), '(emb_file)\n', (3484, 3494), True, 'import numpy as np\n'), ((3655, 3683), 'spacy.load', 'spacy.load', (['"""en_core_web_lg"""'], {}), "('en_core_web_lg')\n", (3665, 3683), False, 'import spacy\n'), ((3812, 3921), 'data.loader.DataLoader', 'DataLoader', (["(opt['data_dir'] + '/train.json')", "opt['batch_size']", 'opt', 'vocab', 'spacy_model'], {'evaluation': '(False)'}), "(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab,\n spacy_model, evaluation=False)\n", (3822, 3921), False, 'from data.loader import DataLoader\n'), ((3931, 4038), 'data.loader.DataLoader', 'DataLoader', (["(opt['data_dir'] + '/dev.json')", "opt['batch_size']", 'opt', 'vocab', 'spacy_model'], {'evaluation': '(False)'}), "(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab,\n spacy_model, evaluation=False)\n", (3941, 4038), False, 'from data.loader import DataLoader\n'), ((4151, 4198), 'utils.helper.ensure_dir', 'helper.ensure_dir', (['model_save_dir'], {'verbose': '(True)'}), '(model_save_dir, verbose=True)\n', (4168, 4198), False, 'from utils import scorer, constant, helper\n'), ((4217, 4287), 'utils.helper.save_config', 'helper.save_config', (['opt', "(model_save_dir + '/config.json')"], {'verbose': '(True)'}), "(opt, 
model_save_dir + '/config.json', verbose=True)\n", (4235, 4287), False, 'from utils import scorer, constant, helper\n'), ((4346, 4451), 'utils.helper.FileLogger', 'helper.FileLogger', (["(model_save_dir + '/' + opt['log'])"], {'header': '"""# epoch\ttrain_loss\tdev_loss\tdev_f1"""'}), "(model_save_dir + '/' + opt['log'], header=\n '# epoch\\ttrain_loss\\tdev_loss\\tdev_f1')\n", (4363, 4451), False, 'from utils import scorer, constant, helper\n'), ((4470, 4494), 'utils.helper.print_config', 'helper.print_config', (['opt'], {}), '(opt)\n', (4489, 4494), False, 'from utils import scorer, constant, helper\n'), ((4515, 4569), 'model.rnn.SubjectObjectRelationModel', 'SubjectObjectRelationModel', (['opt'], {'emb_matrix': 'emb_matrix'}), '(opt, emb_matrix=emb_matrix)\n', (4541, 4569), False, 'from model.rnn import SubjectObjectRelationModel\n'), ((4793, 4804), 'time.time', 'time.time', ([], {}), '()\n', (4802, 4804), False, 'import time\n'), ((3171, 3204), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3193, 3204), False, 'import torch\n'), ((5098, 5109), 'time.time', 'time.time', ([], {}), '()\n', (5107, 5109), False, 'import time\n'), ((6555, 6610), 'shutil.copyfile', 'copyfile', (['model_file', "(model_save_dir + '/best_model.pt')"], {}), "(model_file, model_save_dir + '/best_model.pt')\n", (6563, 6610), False, 'from shutil import copyfile\n'), ((6700, 6721), 'os.remove', 'os.remove', (['model_file'], {}), '(model_file)\n', (6709, 6721), False, 'import os\n'), ((4609, 4637), 'utils.constant.ID_TO_CLASS.items', 'constant.ID_TO_CLASS.items', ([], {}), '()\n', (4635, 4637), False, 'from utils import scorer, constant, helper\n'), ((4677, 4705), 'utils.constant.LABEL_TO_ID.items', 'constant.LABEL_TO_ID.items', ([], {}), '()\n', (4703, 4705), False, 'from utils import scorer, constant, helper\n'), ((5272, 5283), 'time.time', 'time.time', ([], {}), '()\n', (5281, 5283), False, 'import time\n'), ((5334, 5348), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (5346, 5348), False, 'from datetime import datetime\n')] |
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
from skimage.transform import AffineTransform
import cv2
import numpy as np
import torch
class SingleDataset(BaseDataset):
    """This dataset class can load a set of images specified by the path --dataroot /path/to/data.

    For each image a small random affine perturbation (scale, rotation, shear,
    translation) is applied repeatedly to build a stack of slightly shifted
    frames, concatenated along the channel dimension. It can be used for
    generating CycleGAN results only for one side with the model option
    '-model test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.transform = get_transform(opt, grayscale=(input_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A and A_paths
            A(tensor) - - a stack of n_inputs perturbed copies of the image,
                concatenated along the channel axis
            A_paths(str) - - the path of the image
        """
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert('RGB')
        n_inputs = int(self.opt.input_nc / 3)  # each stacked frame contributes 3 (RGB) channels
        # Draw one small random affine jitter, shared by every frame in the stack.
        scale = np.random.choice(np.linspace(0.999, 1.001, 30))
        rot = np.random.choice(np.linspace(-1 * (np.pi / 300), np.pi / 300, 30))
        shear = np.random.choice(np.linspace(-1 * (np.pi / 300), np.pi / 300, 30))
        shift = np.random.choice(np.linspace(-1 * 2, 2, 30))  # pixels, applied to both axes
        tform = AffineTransform(scale=scale, rotation=rot, shear=shear, translation=shift)
        # cv2.warpAffine warps with the forward map, so pass the inverse of
        # the skimage transform (top two rows of the 3x3 homogeneous matrix).
        matrix = np.linalg.inv(tform.params)[:2]
        A_img = np.asarray(A_img)
        # BUG FIX: cv2.warpAffine expects dsize as (width, height), i.e.
        # (shape[1], shape[0]); the original passed (shape[0], shape[1]),
        # which is only correct for square images.
        dsize = (A_img.shape[1], A_img.shape[0])
        A_all = []
        A_img_prev = A_img
        for n in range(n_inputs):
            # Re-apply the same perturbation to the previous frame so the
            # jitter accumulates across the stack.
            A_img_transformed = cv2.warpAffine(A_img_prev, matrix, dsize)
            A_img_prev = A_img_transformed.copy()
            A_img_transformed = Image.fromarray(A_img_transformed)
            # A_img_transformed.show()
            A = self.transform(A_img_transformed)
            A_all.append(A)
        A_all = torch.cat(A_all, 0)
        return {'A': A_all, 'A_paths': A_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.A_paths)
| [
"data.base_dataset.BaseDataset.__init__",
"numpy.asarray",
"torch.cat",
"PIL.Image.open",
"PIL.Image.fromarray",
"cv2.warpAffine",
"numpy.linalg.inv",
"numpy.linspace",
"numpy.random.choice",
"skimage.transform.AffineTransform",
"data.image_folder.make_dataset",
"data.base_dataset.get_transfor... | [((677, 708), 'data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (697, 708), False, 'from data.base_dataset import BaseDataset, get_transform\n'), ((907, 950), 'data.base_dataset.get_transform', 'get_transform', (['opt'], {'grayscale': '(input_nc == 1)'}), '(opt, grayscale=input_nc == 1)\n', (920, 950), False, 'from data.base_dataset import BaseDataset, get_transform\n'), ((1451, 1480), 'numpy.linspace', 'np.linspace', (['(0.999)', '(1.001)', '(30)'], {}), '(0.999, 1.001, 30)\n', (1462, 1480), True, 'import numpy as np\n'), ((1497, 1521), 'numpy.random.choice', 'np.random.choice', (['scales'], {}), '(scales)\n', (1513, 1521), True, 'import numpy as np\n'), ((1537, 1585), 'numpy.linspace', 'np.linspace', (['(-1 * (np.pi / 300))', '(np.pi / 300)', '(30)'], {}), '(-1 * (np.pi / 300), np.pi / 300, 30)\n', (1548, 1585), True, 'import numpy as np\n'), ((1600, 1622), 'numpy.random.choice', 'np.random.choice', (['rots'], {}), '(rots)\n', (1616, 1622), True, 'import numpy as np\n'), ((1640, 1688), 'numpy.linspace', 'np.linspace', (['(-1 * (np.pi / 300))', '(np.pi / 300)', '(30)'], {}), '(-1 * (np.pi / 300), np.pi / 300, 30)\n', (1651, 1688), True, 'import numpy as np\n'), ((1705, 1729), 'numpy.random.choice', 'np.random.choice', (['shears'], {}), '(shears)\n', (1721, 1729), True, 'import numpy as np\n'), ((1747, 1773), 'numpy.linspace', 'np.linspace', (['(-1 * 2)', '(2)', '(30)'], {}), '(-1 * 2, 2, 30)\n', (1758, 1773), True, 'import numpy as np\n'), ((1790, 1814), 'numpy.random.choice', 'np.random.choice', (['shifts'], {}), '(shifts)\n', (1806, 1814), True, 'import numpy as np\n'), ((1831, 1905), 'skimage.transform.AffineTransform', 'AffineTransform', ([], {'scale': 'scale', 'rotation': 'rot', 'shear': 'shear', 'translation': 'shift'}), '(scale=scale, rotation=rot, shear=shear, translation=shift)\n', (1846, 1905), False, 'from skimage.transform import AffineTransform\n'), ((2008, 
2025), 'numpy.asarray', 'np.asarray', (['A_img'], {}), '(A_img)\n', (2018, 2025), True, 'import numpy as np\n'), ((2584, 2603), 'torch.cat', 'torch.cat', (['A_all', '(0)'], {}), '(A_all, 0)\n', (2593, 2603), False, 'import torch\n'), ((739, 787), 'data.image_folder.make_dataset', 'make_dataset', (['opt.dataroot', 'opt.max_dataset_size'], {}), '(opt.dataroot, opt.max_dataset_size)\n', (751, 787), False, 'from data.image_folder import make_dataset\n'), ((1923, 1950), 'numpy.linalg.inv', 'np.linalg.inv', (['tform.params'], {}), '(tform.params)\n', (1936, 1950), True, 'import numpy as np\n'), ((2416, 2450), 'PIL.Image.fromarray', 'Image.fromarray', (['A_img_transformed'], {}), '(A_img_transformed)\n', (2431, 2450), False, 'from PIL import Image\n'), ((1350, 1368), 'PIL.Image.open', 'Image.open', (['A_path'], {}), '(A_path)\n', (1360, 1368), False, 'from PIL import Image\n'), ((2149, 2212), 'cv2.warpAffine', 'cv2.warpAffine', (['A_img', 'matrix', '(A_img.shape[0], A_img.shape[1])'], {}), '(A_img, matrix, (A_img.shape[0], A_img.shape[1]))\n', (2163, 2212), False, 'import cv2\n'), ((2267, 2335), 'cv2.warpAffine', 'cv2.warpAffine', (['A_img_prev', 'matrix', '(A_img.shape[0], A_img.shape[1])'], {}), '(A_img_prev, matrix, (A_img.shape[0], A_img.shape[1]))\n', (2281, 2335), False, 'import cv2\n')] |
from datetime import datetime
from typing import Any, Dict, Generic, List, Optional, Set, Type, TypeVar, Union
import numpy as np
from paralleldomain.model.image import DecoderImage, Image, ImageDecoderProtocol
from paralleldomain.model.point_cloud import DecoderPointCloud, PointCloud, PointCloudDecoderProtocol
from paralleldomain.utilities.projection import DistortionLookupTable, project_points_3d_to_2d
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
from paralleldomain.constants import CAMERA_MODEL_OPENCV_FISHEYE, CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_PD_FISHEYE
from paralleldomain.model.annotation import AnnotationType
from paralleldomain.model.type_aliases import AnnotationIdentifier, FrameId, SensorName
from paralleldomain.utilities.transformation import Transformation
# Generic helpers: T is an arbitrary annotation payload type; TDateTime is
# either None or a datetime, depending on whether the dataset provides
# per-frame timestamps.
T = TypeVar("T")
TDateTime = TypeVar("TDateTime", bound=Union[None, datetime])
class CameraModel:
    """Camera Model short hands for value-safe access.

    Attributes:
        OPENCV_PINHOLE: Returns internally used string-representation for OpenCV Pinhole camera model.
            Accepts distortion parameters `(k1,k2,p1,p2[,k3[,k4,k5,k6]])` and uses projection (+ distortion) function
            as described in the
            `OpenCV Pinhole documentation <https://docs.opencv.org/4.5.3/d9/d0c/group__calib3d.html>`_
        OPENCV_FISHEYE: Returns internally used string-representation for OpenCV Fisheye camera model
            Accepts distortion parameters `(k1,k2,k3,k4)` and uses projection (+ distortion) function
            as described in the
            `OpenCV Fisheye documentation <https://docs.opencv.org/4.5.3/db/d58/group__calib3d__fisheye.html>`_
        PD_FISHEYE: Returns internally used string-representation for Parallel Domain Fisheye camera model
            Uses custom distortion lookup table for translation between non-distorted and distorted angles.
    """

    # Values are re-exported from paralleldomain.constants so callers can use
    # this class as the single, typo-safe access point.
    OPENCV_PINHOLE: str = CAMERA_MODEL_OPENCV_PINHOLE
    OPENCV_FISHEYE: str = CAMERA_MODEL_OPENCV_FISHEYE
    PD_FISHEYE: str = CAMERA_MODEL_PD_FISHEYE
class SensorFrameDecoderProtocol(Protocol[TDateTime]):
    """Structural interface a decoder must satisfy to back a :class:`SensorFrame`.

    Implementations perform the actual (possibly lazy) data loading; the
    frame objects delegate every accessor to these methods.
    """

    def get_extrinsic(self, sensor_name: SensorName, frame_id: FrameId) -> "SensorExtrinsic":
        pass

    def get_sensor_pose(self, sensor_name: SensorName, frame_id: FrameId) -> "SensorPose":
        pass

    def get_annotations(
        self, sensor_name: SensorName, frame_id: FrameId, identifier: AnnotationIdentifier, annotation_type: T
    ) -> List[T]:
        pass

    def get_available_annotation_types(
        self, sensor_name: SensorName, frame_id: FrameId
    ) -> Dict[AnnotationType, AnnotationIdentifier]:
        pass

    def get_metadata(self, sensor_name: SensorName, frame_id: FrameId) -> Dict[str, Any]:
        pass

    def get_date_time(self, sensor_name: SensorName, frame_id: FrameId) -> TDateTime:
        pass
class SensorFrame(Generic[TDateTime]):
    """A single capture of one sensor at one frame, backed by a decoder.

    All data access is delegated to the decoder, so construction is cheap
    and loading happens lazily on attribute access.
    """

    def __init__(
        self,
        sensor_name: SensorName,
        frame_id: FrameId,
        decoder: SensorFrameDecoderProtocol[TDateTime],
    ):
        self._sensor_name = sensor_name
        self._frame_id = frame_id
        self._decoder = decoder

    @property
    def sensor_name(self) -> str:
        """Name of the sensor this frame belongs to."""
        return self._sensor_name

    @property
    def frame_id(self) -> FrameId:
        """Identifier of the frame this capture belongs to."""
        return self._frame_id

    @property
    def extrinsic(self) -> "SensorExtrinsic":
        """Sensor extrinsic calibration, fetched from the decoder."""
        return self._decoder.get_extrinsic(sensor_name=self.sensor_name, frame_id=self.frame_id)

    @property
    def pose(self) -> "SensorPose":
        """Sensor pose at this frame, fetched from the decoder."""
        return self._decoder.get_sensor_pose(sensor_name=self.sensor_name, frame_id=self.frame_id)

    @property
    def metadata(self) -> Dict[str, Any]:
        """Free-form metadata dictionary for this sensor frame."""
        return self._decoder.get_metadata(sensor_name=self.sensor_name, frame_id=self.frame_id)

    @property
    def _annotation_type_identifiers(self) -> Dict[AnnotationType, AnnotationIdentifier]:
        # Mapping of available annotation types to their storage identifiers.
        return self._decoder.get_available_annotation_types(sensor_name=self.sensor_name, frame_id=self.frame_id)

    @property
    def available_annotation_types(self) -> List[AnnotationType]:
        """Annotation types that can be requested via :meth:`get_annotations`."""
        return list(self._annotation_type_identifiers.keys())

    def get_annotations(self, annotation_type: Type[T]) -> T:
        """Load the annotations of the given type for this sensor frame.

        Raises:
            ValueError: If the annotation type is not available on this frame.
        """
        identifiers = self._annotation_type_identifiers
        if annotation_type not in identifiers:
            raise ValueError(f"The annotation type {annotation_type} is not available in this sensor frame!")
        return self._decoder.get_annotations(
            sensor_name=self.sensor_name,
            frame_id=self.frame_id,
            identifier=identifiers[annotation_type],
            annotation_type=annotation_type,
        )

    @property
    def date_time(self) -> TDateTime:
        """Capture timestamp, or ``None`` for unordered datasets."""
        return self._decoder.get_date_time(sensor_name=self.sensor_name, frame_id=self.frame_id)

    def __lt__(self, other: "SensorFrame[TDateTime]"):
        mine, theirs = self.date_time, other.date_time
        if mine is None or theirs is None:
            # No timestamps: fall back to an arbitrary-but-stable ordering.
            return id(self) < id(other)
        return mine < theirs
class LidarSensorFrameDecoderProtocol(SensorFrameDecoderProtocol[TDateTime], PointCloudDecoderProtocol):
    # Decoder protocol for LiDAR frames: generic sensor-frame decoding plus
    # point-cloud access; no additional methods of its own.
    ...
class LidarSensorFrame(SensorFrame[TDateTime]):
    """Sensor frame for a LiDAR sensor; additionally exposes the point cloud."""

    def __init__(
        self,
        sensor_name: SensorName,
        frame_id: FrameId,
        decoder: LidarSensorFrameDecoderProtocol[TDateTime],
    ):
        super().__init__(sensor_name=sensor_name, frame_id=frame_id, decoder=decoder)
        # Re-bind with the narrower protocol type so point-cloud access type-checks.
        self._decoder = decoder

    @property
    def point_cloud(self) -> PointCloud:
        """Lazily-decoded point cloud for this frame."""
        return DecoderPointCloud(decoder=self._decoder, sensor_name=self.sensor_name, frame_id=self.frame_id)
class CameraSensorFrameDecoderProtocol(SensorFrameDecoderProtocol[TDateTime], ImageDecoderProtocol):
    """Decoder protocol for camera frames: adds intrinsics to frame + image decoding."""

    def get_intrinsic(self, sensor_name: SensorName, frame_id: FrameId) -> "SensorIntrinsic":
        pass
class CameraSensorFrame(SensorFrame[TDateTime]):
    """Sensor frame for a camera; exposes the image, intrinsics and 3D→2D projection."""

    def __init__(
        self,
        sensor_name: SensorName,
        frame_id: FrameId,
        decoder: CameraSensorFrameDecoderProtocol[TDateTime],
    ):
        super().__init__(sensor_name=sensor_name, frame_id=frame_id, decoder=decoder)
        # Re-bind with the narrower protocol type for image/intrinsic access.
        self._decoder = decoder

    @property
    def image(self) -> Image:
        """Lazily-decoded image for this frame."""
        return DecoderImage(decoder=self._decoder, frame_id=self.frame_id, sensor_name=self.sensor_name)

    @property
    def intrinsic(self) -> "SensorIntrinsic":
        """Camera intrinsics, fetched from the decoder on every access."""
        return self._decoder.get_intrinsic(sensor_name=self.sensor_name, frame_id=self.frame_id)

    def project_points_from_3d(
        self, points_3d: np.ndarray, distortion_lookup: Optional[DistortionLookupTable] = None
    ) -> np.ndarray:
        """Project 3D points in the camera coordinate system to 2D pixel coordinates.

        Args:
            points_3d: Array of 3D points to project.
            distortion_lookup: Optional distortion lookup table (used by the
                PD fisheye camera model).

        Returns:
            Array of projected 2D points.
        """
        # PERF: fetch the intrinsics once instead of three times — every
        # access of the `intrinsic` property goes back to the decoder.
        intrinsic = self.intrinsic
        return project_points_3d_to_2d(
            k_matrix=intrinsic.camera_matrix,
            camera_model=intrinsic.camera_model,
            distortion_parameters=intrinsic.distortion_parameters,
            distortion_lookup=distortion_lookup,
            points_3d=points_3d,
        )
# Any concrete SensorFrame subtype (camera or lidar) handled by a Sensor.
TSensorFrameType = TypeVar("TSensorFrameType", bound=SensorFrame)


class SensorDecoderProtocol(Protocol[TSensorFrameType]):
    """Structural interface a decoder must satisfy to back a :class:`Sensor`."""

    def get_sensor_frame(self, frame_id: FrameId, sensor_name: SensorName) -> TSensorFrameType:
        pass

    def get_frame_ids(self, sensor_name: SensorName) -> Set[FrameId]:
        pass
class Sensor(Generic[TSensorFrameType]):
    """A named sensor whose per-frame data is served by a decoder."""

    def __init__(
        self,
        sensor_name: SensorName,
        decoder: SensorDecoderProtocol[TSensorFrameType],
    ):
        self._decoder = decoder
        self._sensor_name = sensor_name

    @property
    def name(self) -> str:
        """Name of this sensor."""
        return self._sensor_name

    @property
    def frame_ids(self) -> Set[FrameId]:
        """All frame ids for which this sensor has data."""
        return self._decoder.get_frame_ids(sensor_name=self.name)

    def get_frame(self, frame_id: FrameId) -> TSensorFrameType:
        """Return the sensor frame for the given frame id."""
        return self._decoder.get_sensor_frame(frame_id=frame_id, sensor_name=self._sensor_name)
class CameraSensor(Sensor[CameraSensorFrame[TDateTime]]):
    """Camera specialization of :class:`Sensor`; narrows the frame type for typing."""

    def get_frame(self, frame_id: FrameId) -> CameraSensorFrame[TDateTime]:
        return self._decoder.get_sensor_frame(frame_id=frame_id, sensor_name=self._sensor_name)


class LidarSensor(Sensor[LidarSensorFrame[TDateTime]]):
    """LiDAR specialization of :class:`Sensor`; narrows the frame type for typing."""

    def get_frame(self, frame_id: FrameId) -> LidarSensorFrame[TDateTime]:
        return self._decoder.get_sensor_frame(frame_id=frame_id, sensor_name=self._sensor_name)
class SensorPose(Transformation):
    """Sensor pose in world coordinates (type alias of :class:`Transformation`)."""

    ...


class SensorExtrinsic(Transformation):
    """Sensor extrinsic calibration (type alias of :class:`Transformation`)."""

    ...
class SensorIntrinsic:
    """Camera intrinsic parameters: pinhole matrix entries plus distortion
    coefficients for the selected camera model."""

    def __init__(
        self,
        cx=0.0,
        cy=0.0,
        fx=0.0,
        fy=0.0,
        k1=0.0,
        k2=0.0,
        p1=0.0,
        p2=0.0,
        k3=0.0,
        k4=0.0,
        k5=0.0,
        k6=0.0,
        skew=0.0,
        fov=0.0,
        camera_model=CameraModel.OPENCV_PINHOLE,
    ):
        # Principal point (cx, cy), focal lengths (fx, fy) and skew define
        # the 3x3 K matrix; the k*/p* coefficients parameterize distortion.
        self.cx = cx
        self.cy = cy
        self.fx = fx
        self.fy = fy
        self.k1 = k1
        self.k2 = k2
        self.p1 = p1
        self.p2 = p2
        self.k3 = k3
        self.k4 = k4
        self.k5 = k5
        self.k6 = k6
        self.skew = skew
        self.fov = fov
        self.camera_model = camera_model

    @property
    def camera_matrix(self) -> np.ndarray:
        """3x3 intrinsic camera matrix K."""
        top = [self.fx, self.skew, self.cx]
        middle = [0, self.fy, self.cy]
        bottom = [0, 0, 1]
        return np.array([top, middle, bottom])

    @property
    def distortion_parameters(self) -> Optional[np.ndarray]:
        """Distortion coefficients for the active camera model.

        Returns ``None`` for the PD fisheye model, which uses a lookup table
        instead of coefficients.

        Raises:
            NotImplementedError: If the camera model is unknown.
        """
        model = self.camera_model
        if model == CAMERA_MODEL_PD_FISHEYE:
            return None
        if model == CAMERA_MODEL_OPENCV_PINHOLE:
            coefficients = [self.k1, self.k2, self.p1, self.p2, self.k3, self.k4, self.k5, self.k6]
        elif model == CAMERA_MODEL_OPENCV_FISHEYE:
            coefficients = [self.k1, self.k2, self.k3, self.k4]
        else:
            raise NotImplementedError(f"No distortion parameters implemented for camera model {self.camera_model}")
        return np.array(coefficients)
| [
"paralleldomain.model.point_cloud.DecoderPointCloud",
"numpy.array",
"paralleldomain.utilities.projection.project_points_3d_to_2d",
"typing.TypeVar",
"paralleldomain.model.image.DecoderImage"
] | [((866, 878), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (873, 878), False, 'from typing import Any, Dict, Generic, List, Optional, Set, Type, TypeVar, Union\n'), ((891, 940), 'typing.TypeVar', 'TypeVar', (['"""TDateTime"""'], {'bound': 'Union[None, datetime]'}), "('TDateTime', bound=Union[None, datetime])\n", (898, 940), False, 'from typing import Any, Dict, Generic, List, Optional, Set, Type, TypeVar, Union\n'), ((7056, 7102), 'typing.TypeVar', 'TypeVar', (['"""TSensorFrameType"""'], {'bound': 'SensorFrame'}), "('TSensorFrameType', bound=SensorFrame)\n", (7063, 7102), False, 'from typing import Any, Dict, Generic, List, Optional, Set, Type, TypeVar, Union\n'), ((5634, 5732), 'paralleldomain.model.point_cloud.DecoderPointCloud', 'DecoderPointCloud', ([], {'decoder': 'self._decoder', 'sensor_name': 'self.sensor_name', 'frame_id': 'self.frame_id'}), '(decoder=self._decoder, sensor_name=self.sensor_name,\n frame_id=self.frame_id)\n', (5651, 5732), False, 'from paralleldomain.model.point_cloud import DecoderPointCloud, PointCloud, PointCloudDecoderProtocol\n'), ((6329, 6423), 'paralleldomain.model.image.DecoderImage', 'DecoderImage', ([], {'decoder': 'self._decoder', 'frame_id': 'self.frame_id', 'sensor_name': 'self.sensor_name'}), '(decoder=self._decoder, frame_id=self.frame_id, sensor_name=\n self.sensor_name)\n', (6341, 6423), False, 'from paralleldomain.model.image import DecoderImage, Image, ImageDecoderProtocol\n'), ((6741, 6978), 'paralleldomain.utilities.projection.project_points_3d_to_2d', 'project_points_3d_to_2d', ([], {'k_matrix': 'self.intrinsic.camera_matrix', 'camera_model': 'self.intrinsic.camera_model', 'distortion_parameters': 'self.intrinsic.distortion_parameters', 'distortion_lookup': 'distortion_lookup', 'points_3d': 'points_3d'}), '(k_matrix=self.intrinsic.camera_matrix, camera_model\n =self.intrinsic.camera_model, distortion_parameters=self.intrinsic.\n distortion_parameters, distortion_lookup=distortion_lookup, points_3d=\n 
points_3d)\n', (6764, 6978), False, 'from paralleldomain.utilities.projection import DistortionLookupTable, project_points_3d_to_2d\n'), ((9266, 9341), 'numpy.array', 'np.array', (['[[self.fx, self.skew, self.cx], [0, self.fy, self.cy], [0, 0, 1]]'], {}), '([[self.fx, self.skew, self.cx], [0, self.fy, self.cy], [0, 0, 1]])\n', (9274, 9341), True, 'import numpy as np\n'), ((9583, 9669), 'numpy.array', 'np.array', (['[self.k1, self.k2, self.p1, self.p2, self.k3, self.k4, self.k5, self.k6]'], {}), '([self.k1, self.k2, self.p1, self.p2, self.k3, self.k4, self.k5,\n self.k6])\n', (9591, 9669), True, 'import numpy as np\n'), ((9957, 10003), 'numpy.array', 'np.array', (['[self.k1, self.k2, self.k3, self.k4]'], {}), '([self.k1, self.k2, self.k3, self.k4])\n', (9965, 10003), True, 'import numpy as np\n')] |
# scalings.py shows how the prediction for the thermals height, density, and radius align with the simulation results
import xarray as xr
import numpy as np
import matplotlib
from faceted import faceted
from matplotlib import ticker
matplotlib.rcParams['mathtext.fontset'] = 'cm'
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from lighten_color import lighten_color

# --- Laminar simulation output ---
# NOTE(review): all paths are hard-coded absolute paths; consider a single
# data-root constant if this script is to run elsewhere.
lam_ds = xr.open_mfdataset('/work/bnm/buoyant_entrainment/data/lam/rho_u_v_w/slice*.nc',concat_dim='t')
lam_omega = xr.open_dataarray('/work/bnm/buoyant_entrainment/data/lam/vort_phi/azi_lam_vort.nc')
lam_mask = xr.open_dataarray('/work/bnm/buoyant_entrainment/data/lam/mask/laminar_mask.nc',engine='scipy')
lam_track = xr.open_dataset('/work/bnm/buoyant_entrainment/data/lam/mask/thermal_boundary.nc')
lam_w_avg = xr.open_dataset('/work/bnm/buoyant_entrainment/data/lam/mask/w_avg.nc').w
lam_rho_avg = xr.open_dataarray('/work/bnm/buoyant_entrainment/data/lam/mask/rho_avg.nc')
# Thermal radius: maximum boundary radius over height at each time.
lam_r = lam_track.r.max('z')
lam_z = xr.open_dataset('/work/bnm/buoyant_entrainment/data/lam/mask/z_top.nc').z
# Cloud-top track from a text file; presumably columns are [time, height] — TODO confirm.
lam_z2 = np.loadtxt('/work/bnm/buoyant_entrainment/data/lam/mask/cloud_top_1e3_sim_5.txt')

# --- Turbulent simulation output (same layout as the laminar run) ---
turb_ds = xr.open_mfdataset('/work/bnm/buoyant_entrainment/data/turb/rho_u_v_w/slice*.nc', concat_dim='t')
turb_omega = xr.open_mfdataset('/work/bnm/buoyant_entrainment/data/turb/vort_phi/turb*.nc',concat_dim='t').omega_phi
turb_mask = xr.open_dataarray('/work/bnm/buoyant_entrainment/data/turb/mask/mask_correct.nc',engine='scipy')
turb_track = xr.open_dataset('/work/bnm/buoyant_entrainment/data/turb/mask/thermal_boundary.nc')
turb_w_avg = xr.open_dataset('/work/bnm/buoyant_entrainment/data/turb/mask/w_avg.nc').w
turb_rho_avg = xr.open_dataarray('/work/bnm/buoyant_entrainment/data/turb/mask/rho_avg.nc')
turb_r = turb_track.r.max('z')
turb_z = xr.open_dataset('/work/bnm/buoyant_entrainment/data/turb/mask/z_top.nc').z
turb_z2 = np.loadtxt('/work/bnm/buoyant_entrainment/data/turb/mask/cloud_top_1e4_sim_5.txt')

# initial values: reference state at t = t0 used to normalize the scalings.
t0 = 4
lrho0 = lam_rho_avg.sel(t=t0,method='nearest').values
lw0 = lam_w_avg.sel(t=t0,method='nearest').values
lr0 = lam_r.sel(t=t0,method='nearest').values
# lz0 = lam_z.sel(t=t0,method='nearest').values
# Height taken from the text-file track instead of the z_top dataset:
# first cloud-top sample at or after t0.
lz0 = lam_z2[1][lam_z2[0] >= t0][0]
trho0 = turb_rho_avg.sel(t=t0,method='nearest').values
tw0 = turb_w_avg.sel(t=t0,method='nearest').values
tr0 = turb_r.sel(t=t0,method='nearest').values
# tz0 = turb_z.sel(t=t0,method='nearest').values
tz0 = turb_z2[1][lam_z2[0] >= t0][0]
# abbreviate time for readability
lam_t = lam_ds.t.values
# lam_zt = lam_z.t.values
lam_zt = lam_z2[0]
turb_t = turb_ds.t.values
# turb_zt = turb_z.t.values
turb_zt = turb_z2[0]
matplotlib.rcParams['axes.linewidth'] = 2 #set the value globally
matplotlib.rcParams['axes.titlepad'] = 31
tick_locator = ticker.MaxNLocator(nbins=2)
fig, axes = faceted(3, 2, width=5, aspect=1.0, bottom_pad=0.75, internal_pad=(0.,0.),sharex=False,sharey=False)
xlabels = ['','','','',r'$\tau$',r'$\tau$']
ylabels = [r'$\frac{a}{a_o}$','',r'$\frac{z}{z_o}$','',r'$\frac{\langle \rho^{\prime} \rangle}{ \langle \rho_o^{\prime} \rangle}$','']
# datum = [lam_r/lr0, turb_r/tr0, lam_z/lz0, turb_z/tz0, lam_rho_avg/lrho0, turb_rho_avg/trho0]
datum = [lam_r/lr0, turb_r/tr0, lam_z2[1]/lz0, turb_z2[1]/tz0, lam_rho_avg/lrho0, turb_rho_avg/trho0]
times = [lam_t/t0, turb_t/t0, lam_zt/t0, turb_zt/t0, lam_t/t0, turb_t/t0]
theory = [(lam_t/t0)**(1/2), (turb_t/t0)**(1/2), 1+2*lw0*t0/lz0*((lam_zt/t0)**(1/2)-1),
1+2*tw0*t0/tz0*((turb_zt/t0)**(1/2)-1), (lam_t/t0)**(-3/2), (turb_t/t0)**(-3/2)]
labels = [r'$(a)$', r'$(b)$', r'$(c)$',r'$(d)$', r'$(e)$', r'$(f)$']
# xlims = [[1,3.45],[1,4.9],[1,3.45],[1,4.9],[1,3.45],[1,4.9]]
xlims = [[1,4.9],[1,4.9],[1,4.9],[1,4.9],[1,4.9],[1,4.9]]
# xticka = [1,1.5,2,2.5,3]
xticka = [1,2,3,4]
xtickb = [1,2,3,4,4.9]
xticks = [xticka, xtickb, xticka, xtickb, xticka, xtickb]
# xticklabela1 = ['1','','2','','3']
# xticklabela2 = [' ','',' ','',' ']
xticklabela1 = ['1','2','3','4']
xticklabela2 = [' ',' ','',' ']
xticklabelb1 = ['1','2','3','4','5']
xticklabelb2 = [' ',' ',' ',' ','']
xticklabels = [xticklabela2, xticklabelb2,xticklabela2, xticklabelb2, xticklabela1, xticklabelb1]
ylims = [[1,2.3],[1,2.3],[1,2.6],[1,2.6],[0,1],[0,1]]
yticka = [1.0,1.4,1.8,2.2]
ytickb = [0,0.5,1.0]
yticks = [yticka,yticka,yticka,yticka,ytickb,ytickb]
yticklabela1 = ['1','1.4','1.8','2.2']
yticklabela2 = ['', '',' ','',' ']
yticklabelb1 = ['0','0.5','1']
yticklabelb2 = [' ',' ',' ',' ']
yticklabels = [yticklabela1, yticklabela2, yticklabela1, yticklabela2, yticklabelb1, yticklabelb2]
titles = [r'$\mathrm{Re} = 630$',r'$\mathrm{Re} = 6300$', '','','','']
label1 = [r'$simulation$','','','','','']
label2 = [r'$theory$','','','','','',]
colors = ['darkgoldenrod','rebeccapurple','darkgoldenrod','rebeccapurple','darkgoldenrod','rebeccapurple']
for i, ax in enumerate(axes):
# ax.plot(times[i],datum[i],color=lighten_color(colors[i]),marker='.',markersize=4,linewidth=1.5,label='simulation')
ax.plot(times[i],datum[i],color=lighten_color(colors[i]),linewidth=2.5,label='simulation')
ax.plot(times[i],theory[i],color=colors[i],linewidth=2.5,linestyle='dashed',label='theory')
ax.set_xlim(xlims[i])
ax.set_ylim(ylims[i])
ax.set_xlabel(xlabels[i],fontsize=18)
ax.set_ylabel(ylabels[i],fontsize=22)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in')
ax.set_xticks(xticks[i])
ax.set_xticklabels(xticklabels[i],fontsize=13)
ax.set_yticks(yticks[i])
ax.set_yticklabels(yticklabels[i],fontsize=13)
ax.text(0.05, .9, labels[i],transform=ax.transAxes,fontsize=15)
ax.set_title(titles[i],fontsize=16)
ax.update_datalim
# ax.legend(loc='upper center')
lines2= [Line2D([0],[0],color='darkgoldenrod',linewidth=2.5,linestyle='dashed'),
Line2D([0],[0],color=lighten_color('darkgoldenrod'),linewidth=2.5)]
labels2 = [r'$\mathrm{theory}$',r'$\mathrm{sim}$']
lines1 = [Line2D([0],[0],color='rebeccapurple',linewidth=2.5,linestyle='dashed'),
Line2D([0],[0],color=lighten_color('rebeccapurple'),linewidth=2.5)]
labels1 = [r'$\mathrm{theory}$',r'$\mathrm{sim}$']
leg = plt.legend(lines1, labels1,ncol=2,loc='upper left',bbox_to_anchor=(0, 3.20),fontsize=12.5,frameon=False)
ax.add_artist(leg)
plt.legend(lines2, labels2,ncol=2,loc='upper right',bbox_to_anchor=(0, 3.20),fontsize=12.5,frameon=False)
plt.show()
| [
"matplotlib.pyplot.show",
"faceted.faceted",
"matplotlib.lines.Line2D",
"matplotlib.ticker.MaxNLocator",
"xarray.open_dataset",
"matplotlib.pyplot.legend",
"numpy.loadtxt",
"xarray.open_mfdataset",
"xarray.open_dataarray",
"lighten_color.lighten_color"
] | [((400, 499), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['"""/work/bnm/buoyant_entrainment/data/lam/rho_u_v_w/slice*.nc"""'], {'concat_dim': '"""t"""'}), "('/work/bnm/buoyant_entrainment/data/lam/rho_u_v_w/slice*.nc',\n concat_dim='t')\n", (417, 499), True, 'import xarray as xr\n'), ((507, 596), 'xarray.open_dataarray', 'xr.open_dataarray', (['"""/work/bnm/buoyant_entrainment/data/lam/vort_phi/azi_lam_vort.nc"""'], {}), "(\n '/work/bnm/buoyant_entrainment/data/lam/vort_phi/azi_lam_vort.nc')\n", (524, 596), True, 'import xarray as xr\n'), ((603, 704), 'xarray.open_dataarray', 'xr.open_dataarray', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/laminar_mask.nc"""'], {'engine': '"""scipy"""'}), "('/work/bnm/buoyant_entrainment/data/lam/mask/laminar_mask.nc'\n , engine='scipy')\n", (620, 704), True, 'import xarray as xr\n'), ((711, 798), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/thermal_boundary.nc"""'], {}), "(\n '/work/bnm/buoyant_entrainment/data/lam/mask/thermal_boundary.nc')\n", (726, 798), True, 'import xarray as xr\n'), ((894, 969), 'xarray.open_dataarray', 'xr.open_dataarray', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/rho_avg.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/lam/mask/rho_avg.nc')\n", (911, 969), True, 'import xarray as xr\n'), ((1090, 1176), 'numpy.loadtxt', 'np.loadtxt', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/cloud_top_1e3_sim_5.txt"""'], {}), "(\n '/work/bnm/buoyant_entrainment/data/lam/mask/cloud_top_1e3_sim_5.txt')\n", (1100, 1176), True, 'import numpy as np\n'), ((1183, 1284), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['"""/work/bnm/buoyant_entrainment/data/turb/rho_u_v_w/slice*.nc"""'], {'concat_dim': '"""t"""'}), "('/work/bnm/buoyant_entrainment/data/turb/rho_u_v_w/slice*.nc'\n , concat_dim='t')\n", (1200, 1284), True, 'import xarray as xr\n'), ((1409, 1516), 'xarray.open_dataarray', 'xr.open_dataarray', 
(['"""/work/bnm/buoyant_entrainment/data/turb/mask/mask_correct.nc"""'], {'engine': '"""scipy"""'}), "(\n '/work/bnm/buoyant_entrainment/data/turb/mask/mask_correct.nc', engine=\n 'scipy')\n", (1426, 1516), True, 'import xarray as xr\n'), ((1519, 1607), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/turb/mask/thermal_boundary.nc"""'], {}), "(\n '/work/bnm/buoyant_entrainment/data/turb/mask/thermal_boundary.nc')\n", (1534, 1607), True, 'import xarray as xr\n'), ((1706, 1782), 'xarray.open_dataarray', 'xr.open_dataarray', (['"""/work/bnm/buoyant_entrainment/data/turb/mask/rho_avg.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/turb/mask/rho_avg.nc')\n", (1723, 1782), True, 'import xarray as xr\n'), ((1908, 1995), 'numpy.loadtxt', 'np.loadtxt', (['"""/work/bnm/buoyant_entrainment/data/turb/mask/cloud_top_1e4_sim_5.txt"""'], {}), "(\n '/work/bnm/buoyant_entrainment/data/turb/mask/cloud_top_1e4_sim_5.txt')\n", (1918, 1995), True, 'import numpy as np\n'), ((2795, 2822), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'nbins': '(2)'}), '(nbins=2)\n', (2813, 2822), False, 'from matplotlib import ticker\n'), ((2836, 2944), 'faceted.faceted', 'faceted', (['(3)', '(2)'], {'width': '(5)', 'aspect': '(1.0)', 'bottom_pad': '(0.75)', 'internal_pad': '(0.0, 0.0)', 'sharex': '(False)', 'sharey': '(False)'}), '(3, 2, width=5, aspect=1.0, bottom_pad=0.75, internal_pad=(0.0, 0.0),\n sharex=False, sharey=False)\n', (2843, 2944), False, 'from faceted import faceted\n'), ((6199, 6312), 'matplotlib.pyplot.legend', 'plt.legend', (['lines1', 'labels1'], {'ncol': '(2)', 'loc': '"""upper left"""', 'bbox_to_anchor': '(0, 3.2)', 'fontsize': '(12.5)', 'frameon': '(False)'}), "(lines1, labels1, ncol=2, loc='upper left', bbox_to_anchor=(0, \n 3.2), fontsize=12.5, frameon=False)\n", (6209, 6312), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6437), 'matplotlib.pyplot.legend', 'plt.legend', (['lines2', 'labels2'], {'ncol': '(2)', 'loc': 
'"""upper right"""', 'bbox_to_anchor': '(0, 3.2)', 'fontsize': '(12.5)', 'frameon': '(False)'}), "(lines2, labels2, ncol=2, loc='upper right', bbox_to_anchor=(0, \n 3.2), fontsize=12.5, frameon=False)\n", (6333, 6437), True, 'import matplotlib.pyplot as plt\n'), ((6429, 6439), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6437, 6439), True, 'import matplotlib.pyplot as plt\n'), ((806, 877), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/w_avg.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/lam/mask/w_avg.nc')\n", (821, 877), True, 'import xarray as xr\n'), ((1007, 1078), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/lam/mask/z_top.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/lam/mask/z_top.nc')\n", (1022, 1078), True, 'import xarray as xr\n'), ((1293, 1391), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['"""/work/bnm/buoyant_entrainment/data/turb/vort_phi/turb*.nc"""'], {'concat_dim': '"""t"""'}), "('/work/bnm/buoyant_entrainment/data/turb/vort_phi/turb*.nc',\n concat_dim='t')\n", (1310, 1391), True, 'import xarray as xr\n'), ((1616, 1688), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/turb/mask/w_avg.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/turb/mask/w_avg.nc')\n", (1631, 1688), True, 'import xarray as xr\n'), ((1823, 1895), 'xarray.open_dataset', 'xr.open_dataset', (['"""/work/bnm/buoyant_entrainment/data/turb/mask/z_top.nc"""'], {}), "('/work/bnm/buoyant_entrainment/data/turb/mask/z_top.nc')\n", (1838, 1895), True, 'import xarray as xr\n'), ((5783, 5857), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""darkgoldenrod"""', 'linewidth': '(2.5)', 'linestyle': '"""dashed"""'}), "([0], [0], color='darkgoldenrod', linewidth=2.5, linestyle='dashed')\n", (5789, 5857), False, 'from matplotlib.lines import Line2D\n'), ((5993, 6067), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': 
'"""rebeccapurple"""', 'linewidth': '(2.5)', 'linestyle': '"""dashed"""'}), "([0], [0], color='rebeccapurple', linewidth=2.5, linestyle='dashed')\n", (5999, 6067), False, 'from matplotlib.lines import Line2D\n'), ((5066, 5090), 'lighten_color.lighten_color', 'lighten_color', (['colors[i]'], {}), '(colors[i])\n', (5079, 5090), False, 'from lighten_color import lighten_color\n'), ((5884, 5914), 'lighten_color.lighten_color', 'lighten_color', (['"""darkgoldenrod"""'], {}), "('darkgoldenrod')\n", (5897, 5914), False, 'from lighten_color import lighten_color\n'), ((6094, 6124), 'lighten_color.lighten_color', 'lighten_color', (['"""rebeccapurple"""'], {}), "('rebeccapurple')\n", (6107, 6124), False, 'from lighten_color import lighten_color\n')] |
import numpy as np
import pytest
from kickscore.kernel import *
from kickscore.kernel.kernel import Kernel
# Timestamps shared by every kernel-matrix test below.
TS = np.array([1.26, 1.46, 2.67])
# One instance of each kernel under test; hyperparameters match the GPy
# kernels used to generate GROUND_TRUTH (see the commented script below).
KERNEL = {
    "constant": Constant(2.5),
    "exponential": Exponential(1.1, 2.2),
    "matern32": Matern32(1.5, 0.7),
    "matern52": Matern52(0.2, 5.0),
    # In this case it's actually linear.
    "affine": Affine(var_offset=0.0, var_slope=2.0, t0=0.0),
    "wiener": Wiener(1.2, 0.0),
    "add": Matern32(1.5, 0.7) + Matern52(0.2, 5.0),
}
# GPy code used to generate comparison values.
#
# import numpy as np
# from GPy.kern import Bias, Exponential, Matern32, Matern52, Linear, Brownian
# ts = np.array([1.26, 1.46, 2.67]).reshape(-1, 1)
#
# kernel = {
# "constant": Bias(input_dim=1, variance=2.5),
# "exponential": Exponential(input_dim=1, variance=1.1, lengthscale=2.2),
# "matern32": Matern32(input_dim=1, variance=1.5, lengthscale=0.7),
# "matern52": Matern52(input_dim=1, variance=0.2, lengthscale=5.0),
# "affine": Linear(input_dim=1, variances=2.0),
# "wiener": Brownian(input_dim=1, variance=1.2),
# "add": Matern32(input_dim=1, variance=1.5, lengthscale=0.7)
# + Matern52(input_dim=1, variance=0.2, lengthscale=5.0),
# }
#
# for name, k in kernel.items():
# print(name)
# print(k.K(ts))
# Reference kernel matrices K(TS, TS), computed with GPy using the script in
# the comment above.  Keys mirror those of KERNEL.
GROUND_TRUTH = {
    "constant": np.array([
        [ 2.5, 2.5, 2.5],
        [ 2.5, 2.5, 2.5],
        [ 2.5, 2.5, 2.5]]),
    "exponential": np.array([
        [ 1.1, 1.00441079, 0.57949461],
        [ 1.00441079, 1.1, 0.63464479],
        [ 0.57949461, 0.63464479, 1.1 ]]),
    "matern32": np.array([
        [ 1.5, 1.36702084, 0.20560784],
        [ 1.36702084, 1.5, 0.3000753 ],
        [ 0.20560784, 0.3000753, 1.5 ]]),
    "matern52": np.array([
        [ 0.2, 0.19973384, 0.18769647],
        [ 0.19973384, 0.2, 0.1907786 ],
        [ 0.18769647, 0.1907786, 0.2 ]]),
    "affine": np.array([
        [ 3.1752, 3.6792, 6.7284],
        [ 3.6792, 4.2632, 7.7964],
        [ 6.7284, 7.7964, 14.2578]]),
    "wiener": np.array([
        [ 1.512, 1.512, 1.512],
        [ 1.512, 1.752, 1.752],
        [ 1.512, 1.752, 3.204]]),
    "add": np.array([
        [ 1.7, 1.56675469, 0.39330431],
        [ 1.56675469, 1.7, 0.4908539 ],
        [ 0.39330431, 0.4908539, 1.7 ]]),
}
@pytest.mark.parametrize("name", KERNEL.keys())
def test_kernel_matrix(name):
    """The kernel matrix over TS must reproduce the GPy reference values."""
    computed = KERNEL[name].k_mat(TS)
    expected = GROUND_TRUTH[name]
    assert np.allclose(computed, expected)
@pytest.mark.parametrize("kernel", KERNEL.values())
def test_kernel_diag(kernel):
    """`k_diag` must agree with the diagonal of the full kernel matrix."""
    ts = 10 * np.random.random(10)
    full_matrix = kernel.k_mat(ts)
    assert np.allclose(kernel.k_diag(ts), np.diag(full_matrix))
@pytest.mark.parametrize("kernel", KERNEL.values())
def test_kernel_order(kernel):
    """All SSM matrices & vectors must be sized by the kernel's order."""
    order = kernel.order
    # Static quantities.
    assert kernel.measurement_vector.shape == (order,)
    assert kernel.feedback.shape == (order, order)
    assert kernel.noise_effect.shape[0] == order
    # Time-dependent quantities.
    assert kernel.state_mean(0.0).shape == (order,)
    assert kernel.state_cov(0.0).shape == (order, order)
    assert kernel.transition(0.0, 1.0).shape == (order, order)
    assert kernel.noise_cov(0.0, 1.0).shape == (order, order)
@pytest.mark.parametrize("kernel", KERNEL.values())
def test_ssm_variance(kernel):
    """The measured state variance h' P h must match `k_diag`."""
    ts = 10 * np.random.random(10)
    h = kernel.measurement_vector
    measured = [h @ kernel.state_cov(t) @ h for t in ts]
    assert np.allclose(measured, kernel.k_diag(ts))
@pytest.mark.parametrize("kernel", KERNEL.values())
def test_ssm_matrices(kernel):
    """`transition` and `noise_cov` must match the base-class numerical solution."""
    for delta in (0.01, 1.0, 10.0):
        # Compare each analytic override against Kernel's generic computation.
        reference = Kernel.transition(kernel, 0.0, delta)
        assert np.allclose(reference, kernel.transition(0.0, delta))
        reference = Kernel.noise_cov(kernel, 0.0, delta)
        assert np.allclose(reference, kernel.noise_cov(0.0, delta))
def test_simulate_constant():
    """A sample from a constant GP should be constant."""
    ts = np.linspace(-2, 7, num=10)
    sample = Constant(1.0).simulate(ts)
    # Every entry must equal the first one.
    assert np.all(sample == sample[0])
def test_simulate_affine():
    """A sample from an affine GP should be affine."""
    ts = np.linspace(0, 10, num=10)
    sample = Affine(var_offset=1.0, t0=0.0, var_slope=1.0).simulate(ts)
    # Recover the slope from the endpoints and check the whole path is linear.
    slope = (sample[-1] - sample[0]) / 10
    assert np.allclose(sample, sample[0] + slope * ts)
| [
"kickscore.kernel.kernel.Kernel.noise_cov",
"numpy.allclose",
"kickscore.kernel.kernel.Kernel.transition",
"numpy.random.random",
"numpy.array",
"numpy.linspace"
] | [((115, 143), 'numpy.array', 'np.array', (['[1.26, 1.46, 2.67]'], {}), '([1.26, 1.46, 2.67])\n', (123, 143), True, 'import numpy as np\n'), ((1393, 1454), 'numpy.array', 'np.array', (['[[2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [2.5, 2.5, 2.5]]'], {}), '([[2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [2.5, 2.5, 2.5]])\n', (1401, 1454), True, 'import numpy as np\n'), ((1509, 1617), 'numpy.array', 'np.array', (['[[1.1, 1.00441079, 0.57949461], [1.00441079, 1.1, 0.63464479], [0.57949461,\n 0.63464479, 1.1]]'], {}), '([[1.1, 1.00441079, 0.57949461], [1.00441079, 1.1, 0.63464479], [\n 0.57949461, 0.63464479, 1.1]])\n', (1517, 1617), True, 'import numpy as np\n'), ((1685, 1791), 'numpy.array', 'np.array', (['[[1.5, 1.36702084, 0.20560784], [1.36702084, 1.5, 0.3000753], [0.20560784, \n 0.3000753, 1.5]]'], {}), '([[1.5, 1.36702084, 0.20560784], [1.36702084, 1.5, 0.3000753], [\n 0.20560784, 0.3000753, 1.5]])\n', (1693, 1791), True, 'import numpy as np\n'), ((1861, 1967), 'numpy.array', 'np.array', (['[[0.2, 0.19973384, 0.18769647], [0.19973384, 0.2, 0.1907786], [0.18769647, \n 0.1907786, 0.2]]'], {}), '([[0.2, 0.19973384, 0.18769647], [0.19973384, 0.2, 0.1907786], [\n 0.18769647, 0.1907786, 0.2]])\n', (1869, 1967), True, 'import numpy as np\n'), ((2035, 2129), 'numpy.array', 'np.array', (['[[3.1752, 3.6792, 6.7284], [3.6792, 4.2632, 7.7964], [6.7284, 7.7964, 14.2578]]'], {}), '([[3.1752, 3.6792, 6.7284], [3.6792, 4.2632, 7.7964], [6.7284, \n 7.7964, 14.2578]])\n', (2043, 2129), True, 'import numpy as np\n'), ((2182, 2261), 'numpy.array', 'np.array', (['[[1.512, 1.512, 1.512], [1.512, 1.752, 1.752], [1.512, 1.752, 3.204]]'], {}), '([[1.512, 1.512, 1.512], [1.512, 1.752, 1.752], [1.512, 1.752, 3.204]])\n', (2190, 2261), True, 'import numpy as np\n'), ((2308, 2414), 'numpy.array', 'np.array', (['[[1.7, 1.56675469, 0.39330431], [1.56675469, 1.7, 0.4908539], [0.39330431, \n 0.4908539, 1.7]]'], {}), '([[1.7, 1.56675469, 0.39330431], [1.56675469, 1.7, 0.4908539], [\n 0.39330431, 0.4908539, 
1.7]])\n', (2316, 2414), True, 'import numpy as np\n'), ((4356, 4382), 'numpy.linspace', 'np.linspace', (['(-2)', '(7)'], {'num': '(10)'}), '(-2, 7, num=10)\n', (4367, 4382), True, 'import numpy as np\n'), ((4593, 4619), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(10)'}), '(0, 10, num=10)\n', (4604, 4619), True, 'import numpy as np\n'), ((4694, 4729), 'numpy.allclose', 'np.allclose', (['xs', '(xs[0] + slope * ts)'], {}), '(xs, xs[0] + slope * ts)\n', (4705, 4729), True, 'import numpy as np\n'), ((2822, 2842), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (2838, 2842), True, 'import numpy as np\n'), ((3590, 3610), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (3606, 3610), True, 'import numpy as np\n'), ((4016, 4053), 'kickscore.kernel.kernel.Kernel.transition', 'Kernel.transition', (['kernel', '(0.0)', 'delta'], {}), '(kernel, 0.0, delta)\n', (4033, 4053), False, 'from kickscore.kernel.kernel import Kernel\n'), ((4146, 4182), 'kickscore.kernel.kernel.Kernel.noise_cov', 'Kernel.noise_cov', (['kernel', '(0.0)', 'delta'], {}), '(kernel, 0.0, delta)\n', (4162, 4182), False, 'from kickscore.kernel.kernel import Kernel\n')] |
import torch
import numpy as np
from collections import OrderedDict
from core.memories.base import BaseBuffer
from core.memories.greedy import GreedyBuffer
from core.memories.reservoir import ReservoirBuffer
class StratifiedBuffer(BaseBuffer):
    """Unbounded user replay buffer.

    Maintains one bounded sub-buffer per user (created lazily on first
    insert) and samples uniformly over *users*, so heavy users do not
    dominate sampling.
    """

    def __init__(
        self,
        buffer_size,
        *args,
        buffer_class='GreedyBuffer',
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # Lazy initialization: per-user buffers are created on first add_data().
        self.user_buffers = OrderedDict()
        self.user_num = 0
        # Maps a dense index (0..user_num-1) back to the user id, so a user
        # can be drawn uniformly at random in sample_data().
        self.inv_user_dict = {}
        # Resolve the buffer class by name from this module's namespace.
        # Safer than the previous eval(): only a plain identifier can be
        # looked up, no arbitrary expression is executed.
        self.buffer_class = globals()[buffer_class]
        self.buffer_args = args
        self.buffer_kwargs = kwargs
        self.per_user_buffer_size = buffer_size

    def add_data(self, wordpiece, wordend, user):
        """Add one sample to `user`'s buffer, creating the buffer if needed."""
        if self.user_buffers.get(user, None) is None:
            self.inv_user_dict[self.user_num] = user
            self.user_buffers[user] = self.buffer_class(
                self.per_user_buffer_size,
                *self.buffer_args, **self.buffer_kwargs)
            self.user_num += 1
        # Delegate storage to the user-specific buffer.
        return self.user_buffers[user].add_data(wordpiece, wordend, user)

    def sample_data(self):
        """Sample one item: draw a user uniformly, then sample from its buffer."""
        if self.user_num == 0:
            # Previously np.random.randint(0, 0) raised a cryptic ValueError;
            # keep the exception type but make the message explicit.
            raise ValueError('Cannot sample from an empty StratifiedBuffer.')
        user_idx = np.random.randint(0, self.user_num)
        sampled_user = self.inv_user_dict[user_idx]
        return self.sample_user_data(sampled_user)

    def sample_user_data(self, user):
        """Sample one item from a specific user's buffer."""
        if self.user_buffers.get(user, None) is not None:
            return self.user_buffers[user].sample_data()
        else:
            raise ValueError('Unencountered User.')

    def __len__(self):
        # Total number of stored samples across all user buffers.
        return sum(len(buf) for buf in self.user_buffers.values())
if __name__ == '__main__':
    import time
    from tqdm import tqdm
    # Bug fix: `launch_ipdb_on_exception` was used below without ever being
    # imported, so this benchmark crashed with a NameError.  Import it from
    # ipdb when available, otherwise fall back to a no-op context manager.
    try:
        from ipdb import launch_ipdb_on_exception
    except ImportError:
        from contextlib import nullcontext as launch_ipdb_on_exception

    with launch_ipdb_on_exception():
        # Benchmark insertion across many distinct users.
        print('Test buffer size increase...')
        buffer = StratifiedBuffer(5, 50)
        start_tm = time.time()
        for user in tqdm(range(917359)):
            for data in range(5):
                buffer.add_data(list(range(5)), list(range(5)), user)
        print('Add data: {} sec.'.format(time.time() - start_tm))
        # Benchmark batched sampling on GPU (sample_batch is presumably
        # provided by BaseBuffer — verify against core.memories.base).
        device = torch.device('cuda')
        sample_tm = time.time()
        for iter_id in tqdm(range(100)):
            buffer.sample_batch(256, device)
        print('Sample data: {} sec.'.format(time.time() - sample_tm))
| [
"collections.OrderedDict",
"numpy.random.randint",
"torch.device",
"time.time"
] | [((516, 529), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (527, 529), False, 'from collections import OrderedDict\n'), ((1284, 1319), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.user_num'], {}), '(0, self.user_num)\n', (1301, 1319), True, 'import numpy as np\n'), ((1953, 1964), 'time.time', 'time.time', ([], {}), '()\n', (1962, 1964), False, 'import time\n'), ((2194, 2214), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2206, 2214), False, 'import torch\n'), ((2235, 2246), 'time.time', 'time.time', ([], {}), '()\n', (2244, 2246), False, 'import time\n'), ((2151, 2162), 'time.time', 'time.time', ([], {}), '()\n', (2160, 2162), False, 'import time\n'), ((2377, 2388), 'time.time', 'time.time', ([], {}), '()\n', (2386, 2388), False, 'import time\n')] |
import tensorflow as tf
import random
import numpy as np
from LOADDATA import build_vocab,get_corpus_indices,data_format,get_data
from LSTM.LSTM import lstm
from DENSE.DENSE import dense
from EMBEDDING.EMBEDDING import embedding
random.seed(1)
class stentimentAnalysis:
    """Sentiment-analysis model: embedding -> custom LSTM -> attention-style
    weighted sum over cell outputs -> dense classifier (2 classes)."""
    def __init__(self,batch_size,lr,num_input,num_outputs,num_hiddens,vocabulary_size,embedding_size):
        '''
        Define all sub-modules: lstm, embedding and the dense layers.
        '''
        self.batch_size=batch_size
        self.lr=lr
        self.vocab_size=vocabulary_size
        self.lstm=lstm(num_input,num_hiddens,num_outputs)
        self.embedding=embedding(self.vocab_size,embedding_size)
        # Parameters of the fully-connected layers.
        self.dense1=dense(num_hiddens,num_outputs)
        self.dense2=dense(num_outputs,1)# 1 is fixed: produces a scalar score per step
        self.dense3=dense(2*num_outputs,256)# 256 is arbitrary and can be changed
        self.dense4=dense(256,2)# 2: sentiment is only positive/negative here
        # Stochastic gradient descent optimizer.
        self.opt=tf.keras.optimizers.SGD(learning_rate=lr)
    def __call__(self,data,state):
        '''
        Forward pass.

        data: one-hot encoded input batch.
        state: initial LSTM state tuple (H, C) from init_lstm_state().
        Returns softmax class probabilities.
        '''
        inputs=self.embedding.embedding_lookup(data)
        outputs,state=self.lstm(inputs,state)
        (H,_)=state
        outputs1=tf.concat(outputs,0)
        # Project the last LSTM cell's hidden state H through dense1.
        outputs3=self.dense1(H)
        # Scalar attention score per time step (dense2 output dim is 1).
        outputs5=self.dense2(outputs1)
        output_tran5=[]
        output_tran1=[]
        # Swap the first and second dimensions of outputs1 and outputs5
        # (manual transpose; assumes both are 3-D — time-major from the
        # custom lstm — TODO confirm against LSTM.LSTM).
        for i in range(outputs5.shape[1]):
            output_5=[]
            output_1=[]
            for j in range(outputs5.shape[0]):
                output_5.append(outputs5[j][i][:])
                output_1.append(outputs1[j][i][:])
            output_tran5.append(output_5)
            output_tran1.append(output_1)
        # Note: the shape arguments read the *old* tensors because the RHS
        # is evaluated before reassignment.
        outputs5=tf.reshape(output_tran5,[outputs5.shape[1],outputs5.shape[0],outputs5.shape[2]])
        outputs1=tf.reshape(output_tran1,[outputs1.shape[1],outputs1.shape[0],outputs1.shape[2]])
        # Normalize the attention weights over the time axis.
        outputs6=tf.nn.softmax(outputs5,1)
        outputs7=outputs1*outputs6
        # Weighted sum of all LSTM cell outputs using the weights outputs6.
        outputs8=tf.reduce_sum(outputs7,1)
        # Concatenate the final-cell projection (outputs3) with the
        # attention-pooled summary of all cells (outputs8).
        outputs9=tf.concat([outputs8,outputs3],1)
        # Fully-connected layer.
        outputs=self.dense3(outputs9)
        # Final fully-connected classification layer.
        outputs=self.dense4(outputs)
        return tf.nn.softmax(outputs)
    def loss(self,logist,label):
        '''
        Cross-entropy-style loss: -mean(log(p + eps) * one_hot_label).
        The 1e-10 epsilon guards against log(0).
        '''
        return -1*tf.reduce_mean(tf.multiply(tf.math.log(logist+1e-10),label))
    def get_params(self):
        '''
        Return all trainable parameters of the model.
        '''
        params=[]
        params.extend(self.lstm.get_params())
        params.extend(self.embedding.get_params())
        params.extend(self.dense1.get_params())
        params.extend(self.dense2.get_params())
        params.extend(self.dense3.get_params())
        params.extend(self.dense4.get_params())
        return params
    def update_params(self,grads,params):
        # Apply one SGD step; grads and params must be aligned element-wise.
        self.opt.apply_gradients(grads_and_vars=zip(grads,params))
def return_accuracy(temp_predict,temp_batch_label,batch_size):
    '''
    Compute classification accuracy over a batch.

    Args:
        temp_predict: predicted class scores, shape (N, num_classes).
        temp_batch_label: one-hot ground-truth labels, same shape.
        batch_size: number of rows N (kept as a parameter for interface
            compatibility with existing callers).

    Returns:
        Fraction (float) of rows whose predicted argmax matches the label.
    '''
    predicted = np.argmax(temp_predict, axis=1)
    actual = np.argmax(temp_batch_label, axis=1)
    # Vectorized mismatch count replaces the original "+1 / reshape /
    # boolean-mask / dot product" dance — same result, one readable line.
    mismatches = np.count_nonzero(predicted != actual)
    return (float(batch_size) - mismatches) / float(batch_size)
def train(model,params,vocabulary,labels,chars_to_idx,label_chars_to_idx,batch_size):
    '''
    Run one training epoch over the data and append the epoch accuracy
    to "acc.txt".

    NOTE(review): relies on module-level globals `num_hiddens`, `clipNorm`
    and `label_idx_to_chars` set in the __main__ block — verify they exist
    before calling this from elsewhere.
    '''
    acc=[]  # NOTE(review): never used — candidate for removal
    iter_data=get_data(data=vocabulary,labels=labels,chars_to_idx=chars_to_idx,label_chars_to_idx=label_chars_to_idx,batch_size=batch_size)
    outputs=[]
    Ys=[]
    for x,y in iter_data:
        state_lstm=model.lstm.init_lstm_state(len(y),num_hiddens)# initialize the LSTM's C and H
        X,Y=data_format(x,y)# format the raw batch
        X,Y=tf.concat(X,0),tf.concat(Y,0)# merge the formatted pieces into single tensors
        X=tf.one_hot(X,model.vocab_size) # one-hot encode inputs
        Y=tf.one_hot(Y,len(label_idx_to_chars))# one-hot encode labels
        Y=tf.reshape(Y,[Y.shape[0],Y.shape[-1]])# flatten labels to 2-D
        with tf.GradientTape() as tape:
            tape.watch(params)
            output=model(X,state_lstm)
            loss=model.loss(output,Y)# cross-entropy loss
        print("loss %f"%loss.numpy())
        grads=tape.gradient(loss, params)# compute gradients
        grads,globalNorm=tf.clip_by_global_norm(grads, clipNorm)# gradient clipping
        model.update_params(grads,params)# apply the SGD update
        Ys.append(Y)# collect all labels
        outputs.append(output)# collect all predictions
    outputs=tf.concat(outputs,0)
    Ys=tf.concat(Ys,0)
    # Append the epoch accuracy to a file in the current directory.
    filepath="acc.txt"
    flie=open(filepath,"a+")
    flie.write(str(tf.math.reduce_mean(return_accuracy(outputs,Ys,Ys.shape[0])).numpy())+"\n")
    flie.close()
    '''
    for k in range(len(params)):
        filepath="p"+str(k)+".txt"
        np.savetxt(filepath,(params[k].numpy()).reshape(1,-1))
    '''
def predict(model,params,test_vovab,test_labels,chars_to_idx,label_chars_to_idx,batch_size):
    '''
    Evaluate the model on the test set and append the accuracy to
    "testacc.txt".

    NOTE(review): like train(), this relies on module-level globals
    `num_hiddens` and `label_idx_to_chars` from the __main__ block.
    '''
    test_acc=[]
    iter_data=get_data(data=test_vovab,labels=test_labels,chars_to_idx=chars_to_idx,label_chars_to_idx=label_chars_to_idx,batch_size=batch_size)
    outputs=[]
    Ys=[]
    for x,y in iter_data:
        state_lstm=model.lstm.init_lstm_state(len(y),num_hiddens)# initialize the LSTM's C and H
        X,Y=data_format(x,y)# format the raw batch
        X,Y=tf.concat(X,0),tf.concat(Y,0)# merge the formatted pieces into single tensors
        X=tf.one_hot(X,model.vocab_size)# one-hot encode inputs
        Y=tf.one_hot(Y,len(label_idx_to_chars))# one-hot encode labels
        Y=tf.reshape(Y,[Y.shape[0],Y.shape[-1]])# flatten labels to 2-D
        output=model(X,state_lstm)# forward pass only — no gradient update
        Ys.append(Y)
        outputs.append(output)
    outputs=tf.concat(outputs,0)
    Ys=tf.concat(Ys,0)
    accT=return_accuracy(outputs,Ys,Ys.shape[0])
    # Append the test accuracy to a file in the current directory.
    test_acc.append(accT)
    filepath="testacc.txt"
    flie=open(filepath,"a+")
    flie.write(str(tf.math.reduce_mean(test_acc).numpy())+"\n")
    flie.close()
if __name__ == "__main__":
    # Hyperparameters (also read as globals by train()/predict()).
    embedding_size=20
    num_hiddens=128
    clipNorm=1.0
    batch_size=1
    vocabulary,labels ,test_labels,test_vovab,chars_to_idx,idx_to_chars,vocab_size,label_idx_to_chars,label_chars_to_idx,label_size=build_vocab('data//data_single.csv')
    sta=stentimentAnalysis(batch_size,1e-3,num_input=embedding_size,num_outputs=embedding_size,num_hiddens=num_hiddens,vocabulary_size=vocab_size,embedding_size=embedding_size)
    params=sta.get_params()
    epochs=30# number of training epochs
    '''
    isContinue=True
    if isContinue==True:
        for k in range(len(params)):
            filepath="p"+str(k)+".txt"
            params[k].assign((np.loadtxt(filepath,dtype=np.float32)).reshape(params[k].shape))
    '''
    # Training
    for i in range(epochs):
        train(sta,params,vocabulary,labels,chars_to_idx,label_chars_to_idx,batch_size)
    # Testing
    predict(sta,params,test_vovab,test_labels,chars_to_idx,label_chars_to_idx,batch_size)
| [
"tensorflow.reduce_sum",
"numpy.argmax",
"tensorflow.keras.optimizers.SGD",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.softmax",
"tensorflow.math.log",
"tensorflow.one_hot",
"tensorflow.concat",
"EMBEDDING.EMBEDDING.embedding",
"random.seed",
"LOADDA... | [((230, 244), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (241, 244), False, 'import random\n'), ((3428, 3455), 'numpy.ones', 'np.ones', (['[nonO.shape[1], 1]'], {}), '([nonO.shape[1], 1])\n', (3435, 3455), True, 'import numpy as np\n'), ((3465, 3486), 'numpy.dot', 'np.dot', (['exist', 'factor'], {}), '(exist, factor)\n', (3471, 3486), True, 'import numpy as np\n'), ((3710, 3843), 'LOADDATA.get_data', 'get_data', ([], {'data': 'vocabulary', 'labels': 'labels', 'chars_to_idx': 'chars_to_idx', 'label_chars_to_idx': 'label_chars_to_idx', 'batch_size': 'batch_size'}), '(data=vocabulary, labels=labels, chars_to_idx=chars_to_idx,\n label_chars_to_idx=label_chars_to_idx, batch_size=batch_size)\n', (3718, 3843), False, 'from LOADDATA import build_vocab, get_corpus_indices, data_format, get_data\n'), ((4719, 4740), 'tensorflow.concat', 'tf.concat', (['outputs', '(0)'], {}), '(outputs, 0)\n', (4728, 4740), True, 'import tensorflow as tf\n'), ((4747, 4763), 'tensorflow.concat', 'tf.concat', (['Ys', '(0)'], {}), '(Ys, 0)\n', (4756, 4763), True, 'import tensorflow as tf\n'), ((5245, 5383), 'LOADDATA.get_data', 'get_data', ([], {'data': 'test_vovab', 'labels': 'test_labels', 'chars_to_idx': 'chars_to_idx', 'label_chars_to_idx': 'label_chars_to_idx', 'batch_size': 'batch_size'}), '(data=test_vovab, labels=test_labels, chars_to_idx=chars_to_idx,\n label_chars_to_idx=label_chars_to_idx, batch_size=batch_size)\n', (5253, 5383), False, 'from LOADDATA import build_vocab, get_corpus_indices, data_format, get_data\n'), ((5866, 5887), 'tensorflow.concat', 'tf.concat', (['outputs', '(0)'], {}), '(outputs, 0)\n', (5875, 5887), True, 'import tensorflow as tf\n'), ((5894, 5910), 'tensorflow.concat', 'tf.concat', (['Ys', '(0)'], {}), '(Ys, 0)\n', (5903, 5910), True, 'import tensorflow as tf\n'), ((6393, 6429), 'LOADDATA.build_vocab', 'build_vocab', (['"""data//data_single.csv"""'], {}), "('data//data_single.csv')\n", (6404, 6429), False, 'from LOADDATA import 
build_vocab, get_corpus_indices, data_format, get_data\n'), ((550, 591), 'LSTM.LSTM.lstm', 'lstm', (['num_input', 'num_hiddens', 'num_outputs'], {}), '(num_input, num_hiddens, num_outputs)\n', (554, 591), False, 'from LSTM.LSTM import lstm\n'), ((613, 655), 'EMBEDDING.EMBEDDING.embedding', 'embedding', (['self.vocab_size', 'embedding_size'], {}), '(self.vocab_size, embedding_size)\n', (622, 655), False, 'from EMBEDDING.EMBEDDING import embedding\n'), ((693, 724), 'DENSE.DENSE.dense', 'dense', (['num_hiddens', 'num_outputs'], {}), '(num_hiddens, num_outputs)\n', (698, 724), False, 'from DENSE.DENSE import dense\n'), ((744, 765), 'DENSE.DENSE.dense', 'dense', (['num_outputs', '(1)'], {}), '(num_outputs, 1)\n', (749, 765), False, 'from DENSE.DENSE import dense\n'), ((792, 819), 'DENSE.DENSE.dense', 'dense', (['(2 * num_outputs)', '(256)'], {}), '(2 * num_outputs, 256)\n', (797, 819), False, 'from DENSE.DENSE import dense\n'), ((850, 863), 'DENSE.DENSE.dense', 'dense', (['(256)', '(2)'], {}), '(256, 2)\n', (855, 863), False, 'from DENSE.DENSE import dense\n'), ((918, 959), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (941, 959), True, 'import tensorflow as tf\n'), ((1199, 1220), 'tensorflow.concat', 'tf.concat', (['outputs', '(0)'], {}), '(outputs, 0)\n', (1208, 1220), True, 'import tensorflow as tf\n'), ((1778, 1866), 'tensorflow.reshape', 'tf.reshape', (['output_tran5', '[outputs5.shape[1], outputs5.shape[0], outputs5.shape[2]]'], {}), '(output_tran5, [outputs5.shape[1], outputs5.shape[0], outputs5.\n shape[2]])\n', (1788, 1866), True, 'import tensorflow as tf\n'), ((1876, 1964), 'tensorflow.reshape', 'tf.reshape', (['output_tran1', '[outputs1.shape[1], outputs1.shape[0], outputs1.shape[2]]'], {}), '(output_tran1, [outputs1.shape[1], outputs1.shape[0], outputs1.\n shape[2]])\n', (1886, 1964), True, 'import tensorflow as tf\n'), ((2001, 2027), 'tensorflow.nn.softmax', 'tf.nn.softmax', 
(['outputs5', '(1)'], {}), '(outputs5, 1)\n', (2014, 2027), True, 'import tensorflow as tf\n'), ((2120, 2146), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['outputs7', '(1)'], {}), '(outputs7, 1)\n', (2133, 2146), True, 'import tensorflow as tf\n'), ((2229, 2263), 'tensorflow.concat', 'tf.concat', (['[outputs8, outputs3]', '(1)'], {}), '([outputs8, outputs3], 1)\n', (2238, 2263), True, 'import tensorflow as tf\n'), ((2390, 2412), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['outputs'], {}), '(outputs)\n', (2403, 2412), True, 'import tensorflow as tf\n'), ((3194, 3225), 'numpy.argmax', 'np.argmax', (['temp_predict'], {'axis': '(1)'}), '(temp_predict, axis=1)\n', (3203, 3225), True, 'import numpy as np\n'), ((3239, 3274), 'numpy.argmax', 'np.argmax', (['temp_batch_label'], {'axis': '(1)'}), '(temp_batch_label, axis=1)\n', (3248, 3274), True, 'import numpy as np\n'), ((3987, 4004), 'LOADDATA.data_format', 'data_format', (['x', 'y'], {}), '(x, y)\n', (3998, 4004), False, 'from LOADDATA import build_vocab, get_corpus_indices, data_format, get_data\n'), ((4097, 4128), 'tensorflow.one_hot', 'tf.one_hot', (['X', 'model.vocab_size'], {}), '(X, model.vocab_size)\n', (4107, 4128), True, 'import tensorflow as tf\n'), ((4218, 4258), 'tensorflow.reshape', 'tf.reshape', (['Y', '[Y.shape[0], Y.shape[-1]]'], {}), '(Y, [Y.shape[0], Y.shape[-1]])\n', (4228, 4258), True, 'import tensorflow as tf\n'), ((5517, 5534), 'LOADDATA.data_format', 'data_format', (['x', 'y'], {}), '(x, y)\n', (5528, 5534), False, 'from LOADDATA import build_vocab, get_corpus_indices, data_format, get_data\n'), ((5611, 5642), 'tensorflow.one_hot', 'tf.one_hot', (['X', 'model.vocab_size'], {}), '(X, model.vocab_size)\n', (5621, 5642), True, 'import tensorflow as tf\n'), ((5720, 5760), 'tensorflow.reshape', 'tf.reshape', (['Y', '[Y.shape[0], Y.shape[-1]]'], {}), '(Y, [Y.shape[0], Y.shape[-1]])\n', (5730, 5760), True, 'import tensorflow as tf\n'), ((4031, 4046), 'tensorflow.concat', 'tf.concat', (['X', '(0)'], {}), 
'(X, 0)\n', (4040, 4046), True, 'import tensorflow as tf\n'), ((4046, 4061), 'tensorflow.concat', 'tf.concat', (['Y', '(0)'], {}), '(Y, 0)\n', (4055, 4061), True, 'import tensorflow as tf\n'), ((4277, 4294), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4292, 4294), True, 'import tensorflow as tf\n'), ((4541, 4580), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'clipNorm'], {}), '(grads, clipNorm)\n', (4563, 4580), True, 'import tensorflow as tf\n'), ((5552, 5567), 'tensorflow.concat', 'tf.concat', (['X', '(0)'], {}), '(X, 0)\n', (5561, 5567), True, 'import tensorflow as tf\n'), ((5567, 5582), 'tensorflow.concat', 'tf.concat', (['Y', '(0)'], {}), '(Y, 0)\n', (5576, 5582), True, 'import tensorflow as tf\n'), ((2540, 2567), 'tensorflow.math.log', 'tf.math.log', (['(logist + 1e-10)'], {}), '(logist + 1e-10)\n', (2551, 2567), True, 'import tensorflow as tf\n'), ((6077, 6106), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['test_acc'], {}), '(test_acc)\n', (6096, 6106), True, 'import tensorflow as tf\n')] |
"""
"""
import numpy as np
from dyconnmap import sliding_window_indx
if __name__ == '__main__':
    # dummy multichannel signal: 4 channels, 100 samples
    signal = np.zeros((4, 100))
    window_length = 10

    # same window length, three different overlap fractions
    half_overlap = sliding_window_indx(signal, window_length=window_length, overlap=0.5)
    three_quarter_overlap = sliding_window_indx(signal, window_length=window_length, overlap=0.75)
    nine_tenths_overlap = sliding_window_indx(signal, window_length=window_length, overlap=0.90)

    print(half_overlap)
    print(three_quarter_overlap)
    print(nine_tenths_overlap)
| [
"dyconnmap.sliding_window_indx",
"numpy.zeros"
] | [((111, 129), 'numpy.zeros', 'np.zeros', (['(4, 100)'], {}), '((4, 100))\n', (119, 129), True, 'import numpy as np\n'), ((160, 216), 'dyconnmap.sliding_window_indx', 'sliding_window_indx', (['ts'], {'window_length': 'wlen', 'overlap': '(0.5)'}), '(ts, window_length=wlen, overlap=0.5)\n', (179, 216), False, 'from dyconnmap import sliding_window_indx\n'), ((233, 290), 'dyconnmap.sliding_window_indx', 'sliding_window_indx', (['ts'], {'window_length': 'wlen', 'overlap': '(0.75)'}), '(ts, window_length=wlen, overlap=0.75)\n', (252, 290), False, 'from dyconnmap import sliding_window_indx\n'), ((306, 362), 'dyconnmap.sliding_window_indx', 'sliding_window_indx', (['ts'], {'window_length': 'wlen', 'overlap': '(0.9)'}), '(ts, window_length=wlen, overlap=0.9)\n', (325, 362), False, 'from dyconnmap import sliding_window_indx\n')] |
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import uuid
import numpy as np
from enum import Enum
from copy import deepcopy
from p3iv_types.motion import MotionStateArray
class ManeuverIntentions(Enum):
    """Intention of object to host"""

    # Discrete intention labels for another traffic participant; presumably
    # expressed relative to the host vehicle ("follow" behind it, "lead" ahead
    # of it) -- confirm against the code that consumes these values.
    follow = 0
    lead = 1
    unclear = 2
class ManeuverProbability(object):
    """Probability factors attached to a maneuver hypothesis.

    Holds three independent factors (route, intention, maneuver); instances
    can be summed with ``+``, which accumulates factor-wise.
    """

    def __init__(self):
        # route and intention start fully certain, maneuver unassigned
        self.route = 1.0
        self.intention = 1.0
        self.maneuver = 0.0

    def __add__(self, other):
        # NOTE: accumulator-style addition -- mutates and returns *self*
        # rather than producing a new object.
        assert isinstance(other, ManeuverProbability)
        self.route = self.route + other.route
        self.intention = self.intention + other.intention
        self.maneuver = self.maneuver + other.maneuver
        return self
class ManeuverBounds(object):
    """Position and speed bounds over a fixed prediction horizon.

    Re-assigning an upper bound keeps the element-wise minimum of the old and
    new values; re-assigning a lower bound keeps the element-wise maximum, so
    the bounds can only be tightened once set.
    """

    def __init__(self, dt, N):
        self.dt = dt  # timestep duration
        self.N = N    # number of timesteps
        self.time_horizon = np.arange(self.N) * self.dt
        self._upper_pos_bound = None
        self._lower_pos_bound = None
        self._upper_spd_bound = None
        self._lower_spd_bound = None
        self.applied_intention = None

    def __eq__(self, other):
        # element-wise comparison with a small numeric tolerance
        def close(a, b):
            return (np.abs(a - b) < 1e-6).all()

        return (
            self.dt == other.dt
            and self.N == other.N
            and close(self._upper_pos_bound, other.upper_pos_bound)
            and close(self._lower_pos_bound, other.lower_pos_bound)
            and close(self._upper_spd_bound, other.upper_spd_bound)
            and close(self._lower_spd_bound, other.lower_spd_bound)
        )

    @property
    def upper_pos_bound(self):
        return self._upper_pos_bound

    @upper_pos_bound.setter
    def upper_pos_bound(self, v):
        self._tighten("_upper_pos_bound", v, np.min)

    @property
    def lower_pos_bound(self):
        return self._lower_pos_bound

    @lower_pos_bound.setter
    def lower_pos_bound(self, v):
        self._tighten("_lower_pos_bound", v, np.max)

    @property
    def upper_spd_bound(self):
        return self._upper_spd_bound

    @upper_spd_bound.setter
    def upper_spd_bound(self, v):
        self._tighten("_upper_spd_bound", v, np.min)

    @property
    def lower_spd_bound(self):
        return self._lower_spd_bound

    @lower_spd_bound.setter
    def lower_spd_bound(self, v):
        self._tighten("_lower_spd_bound", v, np.max)

    def _tighten(self, attribute, value, combine):
        # keep the more restrictive of the current and incoming bound;
        # combine is np.min for upper bounds and np.max for lower bounds
        current = getattr(self, attribute)
        if current is not None:
            value = combine([current, value], axis=0)
        setattr(self, attribute, value)
class ManeuverHypothesis(object):
    """
    Contains (applies) assumptions on the dynamics of other vehicles; i.e. intentions
    Current position is the reference coordinate frame (0.0, 0.0)
    """

    # __slots__ keeps per-instance memory small; hypotheses are created in bulk.
    __slots__ = [
        "id",
        "path",
        "dt",
        "N",
        "horizon",
        "motion",
        "progress",
        "overlap",
        "probability",
        "speed_limit",
        "maneuver_bounds",
    ]

    def __init__(self, current_state, progress, laneletpath, speed_limits, dt, N, horizon):
        # unique id; also used by __eq__ to identify the driving corridor
        self.id = uuid.uuid4()
        self.path = laneletpath
        self.dt = dt
        self.N = N
        self.horizon = horizon
        # motion holds N+1 states; index 0 is seeded with the current state
        self.motion = MotionStateArray()
        self.motion.resize(self.N + 1)
        self.motion.position.mean[0] = current_state.position.mean
        self.motion.velocity.mean[0] = current_state.velocity.mean
        self.progress = np.zeros(self.N + 1)
        self.progress[0] = progress
        self.overlap = [False] * (self.N + 1)  # if position has any overlap with own ego/host-route
        self.probability = ManeuverProbability()
        # NOTE(review): only the first entry of speed_limits is used -- confirm
        # whether per-lanelet limits along the path should be considered.
        self.speed_limit = speed_limits[0]
        # variables to create an output motion; bounds assume driving at the
        # speed limit from standstill at the current position
        self.maneuver_bounds = ManeuverBounds(self.dt, self.N)
        self.maneuver_bounds.upper_pos_bound = np.arange(1, self.N + 1) * self.speed_limit * self.dt
        self.maneuver_bounds.upper_spd_bound = np.ones(self.N) * self.speed_limit  # todo: consider current speed
        self.maneuver_bounds.lower_pos_bound = np.zeros(self.N)
        self.maneuver_bounds.lower_spd_bound = np.zeros(self.N)

    def __eq__(self, other):
        # if both maneuver bounds and the id, which indicates driving corridor,
        # are the same, two maneuvers are considered equal
        if self.maneuver_bounds == other.maneuver_bounds and self.id == other.id:
            return True
        else:
            return False

    def clone(self):
        # deep copy with a fresh id, so the clone compares unequal to the original
        clone_ = deepcopy(self)
        clone_.id = uuid.uuid4()
        return clone_
class ManeuverHypotheses(object):
    """Plain container for the maneuver hypotheses of a single vehicle."""

    def __init__(self):
        self._hypotheses = []

    def __len__(self):
        return len(self._hypotheses)

    def add(self, maneuver_hypotheses):
        # append every hypothesis from the given iterable
        self._hypotheses.extend(maneuver_hypotheses)

    @property
    def hypotheses(self):
        return self._hypotheses

    @hypotheses.setter
    def hypotheses(self, h):
        self._hypotheses = h
| [
"copy.deepcopy",
"uuid.uuid4",
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"p3iv_types.motion.MotionStateArray"
] | [((3420, 3432), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3430, 3432), False, 'import uuid\n'), ((3558, 3576), 'p3iv_types.motion.MotionStateArray', 'MotionStateArray', ([], {}), '()\n', (3574, 3576), False, 'from p3iv_types.motion import MotionStateArray\n'), ((3774, 3794), 'numpy.zeros', 'np.zeros', (['(self.N + 1)'], {}), '(self.N + 1)\n', (3782, 3794), True, 'import numpy as np\n'), ((4397, 4413), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (4405, 4413), True, 'import numpy as np\n'), ((4461, 4477), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (4469, 4477), True, 'import numpy as np\n'), ((4831, 4845), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (4839, 4845), False, 'from copy import deepcopy\n'), ((4866, 4878), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4876, 4878), False, 'import uuid\n'), ((961, 978), 'numpy.arange', 'np.arange', (['self.N'], {}), '(self.N)\n', (970, 978), True, 'import numpy as np\n'), ((4283, 4298), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (4290, 4298), True, 'import numpy as np\n'), ((4182, 4206), 'numpy.arange', 'np.arange', (['(1)', '(self.N + 1)'], {}), '(1, self.N + 1)\n', (4191, 4206), True, 'import numpy as np\n'), ((1307, 1360), 'numpy.abs', 'np.abs', (['(self._upper_pos_bound - other.upper_pos_bound)'], {}), '(self._upper_pos_bound - other.upper_pos_bound)\n', (1313, 1360), True, 'import numpy as np\n'), ((1392, 1445), 'numpy.abs', 'np.abs', (['(self._lower_pos_bound - other.lower_pos_bound)'], {}), '(self._lower_pos_bound - other.lower_pos_bound)\n', (1398, 1445), True, 'import numpy as np\n'), ((1477, 1530), 'numpy.abs', 'np.abs', (['(self._upper_spd_bound - other.upper_spd_bound)'], {}), '(self._upper_spd_bound - other.upper_spd_bound)\n', (1483, 1530), True, 'import numpy as np\n'), ((1562, 1615), 'numpy.abs', 'np.abs', (['(self._lower_spd_bound - other.lower_spd_bound)'], {}), '(self._lower_spd_bound - other.lower_spd_bound)\n', (1568, 1615), True, 'import 
numpy as np\n')] |
import numpy as np
from optfolio.nsga2 import non_dominated_fronts
def test_non_dominated_fronts():
    # concentric quarter-circle arcs with radii 1..5, ten points per arc,
    # angles in [pi/2, pi] so the points on one arc do not dominate each other
    radii = np.arange(5) + 1
    angles = np.linspace(np.pi / 2, np.pi, 10)
    points = np.asarray([
        [r * np.cos(phi), r * np.sin(phi)]
        for r in radii
        for phi in angles
    ])

    fronts, _ = non_dominated_fronts(points[:, 1], points[:, 0], np.zeros(len(points), dtype=np.float32))

    # front 0 is expected to be the outermost arc (ids 40..49), front 1 the
    # next one in, and so on down to the innermost arc
    for front_id, arc in enumerate(reversed(range(5))):
        expected_ids = np.arange(arc * 10, (arc + 1) * 10)
        members = np.argwhere(fronts == front_id).reshape((-1,))
        assert np.all(members == expected_ids)
| [
"numpy.sin",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.argwhere",
"numpy.all"
] | [((450, 481), 'numpy.arange', 'np.arange', (['(i * 10)', '((i + 1) * 10)'], {}), '(i * 10, (i + 1) * 10)\n', (459, 481), True, 'import numpy as np\n'), ((563, 596), 'numpy.all', 'np.all', (['(front_ids == expected_ids)'], {}), '(front_ids == expected_ids)\n', (569, 596), True, 'import numpy as np\n'), ((227, 260), 'numpy.linspace', 'np.linspace', (['(np.pi / 2)', 'np.pi', '(10)'], {}), '(np.pi / 2, np.pi, 10)\n', (238, 260), True, 'import numpy as np\n'), ((500, 531), 'numpy.argwhere', 'np.argwhere', (['(fronts == front_id)'], {}), '(fronts == front_id)\n', (511, 531), True, 'import numpy as np\n'), ((142, 153), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (148, 153), True, 'import numpy as np\n'), ((159, 170), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (165, 170), True, 'import numpy as np\n'), ((190, 202), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (199, 202), True, 'import numpy as np\n')] |
# coding: utf-8
import sys
import numpy
import matplotlib.pyplot
def analyse(filename, outfile=None):
    """Plot per-column mean, max and min of a comma-separated data file.

    If *outfile* is None the figure is shown interactively, otherwise it is
    saved to that path.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')

    # one wide figure holding three side-by-side panels
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
    panels = [
        ('average', numpy.mean(data, axis=0)),
        ('maximum', numpy.max(data, axis=0)),
        ('minimum', numpy.min(data, axis=0)),
    ]
    for position, (label, series) in enumerate(panels, start=1):
        axes = fig.add_subplot(1, 3, position)
        axes.set_ylabel(label)
        axes.plot(series)

    if outfile is None:
        matplotlib.pyplot.show()
    else:
        matplotlib.pyplot.savefig(outfile)
def detect_problems(filename):
    """Some of our temperature files have problems; check for these.

    This function reads a file (filename argument) and reports on odd looking
    maxima and minima that add up to zero.  This seems to happen when the
    sensors break.  The function does not return any data.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    # Hoisted: the original computed numpy.max(data, axis=0) twice.
    day_max = numpy.max(data, axis=0)
    if day_max[0] == 0 and day_max[20] == 20:
        # message typo fixed ("Suspiciuous" -> "Suspicious")
        print("Suspicious looking maxima")
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
        print("Minima add up to zero")
    else:
        print("Data looks OK")
if __name__== "__main__":
    # CLI usage: argv[1] = input CSV of readings, argv[2] = output figure path
    print("Running ", sys.argv[0])
    print (sys.argv[1])
    analyse (sys.argv[1], outfile=sys.argv[2])
    detect_problems(sys.argv[1])
| [
"numpy.max",
"numpy.mean",
"numpy.min",
"numpy.loadtxt"
] | [((119, 163), 'numpy.loadtxt', 'numpy.loadtxt', ([], {'fname': 'filename', 'delimiter': '""","""'}), "(fname=filename, delimiter=',')\n", (132, 163), False, 'import numpy\n'), ((1171, 1215), 'numpy.loadtxt', 'numpy.loadtxt', ([], {'fname': 'filename', 'delimiter': '""","""'}), "(fname=filename, delimiter=',')\n", (1184, 1215), False, 'import numpy\n'), ((481, 505), 'numpy.mean', 'numpy.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (491, 505), False, 'import numpy\n'), ((563, 586), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (572, 586), False, 'import numpy\n'), ((644, 667), 'numpy.min', 'numpy.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (653, 667), False, 'import numpy\n'), ((1228, 1251), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1237, 1251), False, 'import numpy\n'), ((1264, 1287), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1273, 1287), False, 'import numpy\n'), ((1362, 1385), 'numpy.min', 'numpy.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1371, 1385), False, 'import numpy\n')] |
import numpy as np
import pandas as pd
"""
Parse groups size DataFrame into the initial values required for the model
Params
------
groupsize_df: DataFrame
Includes columns for 'Population_Size', 'Recovery_Rate',
'Initial_Infection_Rate' for each of the groups.
return
-------
gn: array with the group names (index of the input dataframe)
gs: array with the group sizes
y0: tuple with Susceptible, Infection, Recovered at time zero. The length of each
array is equal to the number of groups
Initially no one has recovered, and all but the initially infected are susceptible
"""
def build_initial_values(groupsize_df):
    """Unpack the group-size DataFrame into the model's initial state.

    Returns (group_names, group_sizes, (S0, I0, R0), recovery_rates); at time
    zero no one has recovered, and everyone who is not initially infected is
    susceptible.
    """
    group_names = groupsize_df.index.values
    group_sizes = groupsize_df['Population_Size'].values
    recovery_rates = groupsize_df['Recovery_Rate'].values

    # initial infected count per group; no recoveries yet
    infected_0 = groupsize_df['Initial_Infection_Rate'].values * group_sizes
    recovered_0 = np.zeros(len(group_names))
    susceptible_0 = group_sizes - infected_0

    return group_names, group_sizes, (susceptible_0, infected_0, recovered_0), recovery_rates
"""
Calculate the k value for exponential growth in prison rate infections for the
two prison population sizes.
params
------
group_size: pandas series with sizes of each group
prison_peak_date: Int
number of days until prison infection rate hits its peak
prison_index1: Int
index where the prison population for whites is in group size vector
prison_index2: Int
index where prison population for blacks is in group size vector
prison_peak_rate: Float
the rate that the prison infection rate will end at by the prison_peak_date
return
------
k1: The exponential constant of increase for the first group (Whites)
I_1 = I^(k1 * i) where i is in the days since the model started.
k2: The exponential constant of increase for the second group (Blacks)
"""
def prison_rate_build(
        group_size, prison_peak_date, prison_index1, prison_index2, prison_peak_rate):
    """Exponential growth constants for the two prison populations.

    Solves exp(k * prison_peak_date) == group_size * prison_peak_rate for k,
    so the seeded infected count reaches its peak on prison_peak_date.
    """
    peak_count_1 = group_size[prison_index1] * prison_peak_rate
    peak_count_2 = group_size[prison_index2] * prison_peak_rate
    return np.log(peak_count_1) / prison_peak_date, np.log(peak_count_2) / prison_peak_date
"""Build model for policy intervention 1
This function is the same as build_model in build_models.py with the exception
of the additional parameter, group_size_p1. This is the group_df_p1 data.frame
from generate_matrix.py that reduces the prison release population a certain
number of days after the SIP_DATE.
"""
def build_model_p1(group_size_data, TIME, SIP_DATE, contact_matrix1, contact_matrix2,
    transmission_rate, post_sip_transmission_rate, prison_peak_rate, prison_peak_date, jail_release_shrink, jail_release_date):
    """Run the group-structured SIR model with policy intervention 1.

    Same dynamics as ``build_model``, but after the shelter-in-place date the
    jail portion of the incarcerated populations is shrunk linearly
    (``jail_release_shrink`` is the total fraction released).

    Returns three DataFrames (susceptible, infected, recovered), each with one
    row per day (TIME + 1 rows including day 0) and one column per group plus
    a 'Day' column.
    """
    Group_Names, Group_Size, initial_sizes, recovery_rates = build_initial_values(group_size_data)
    susceptible_rows = []
    infected_rows = []
    recovered_rows = []
    # force-of-infection matrix before the shelter-in-place (SIP) order
    lambda_matrix = contact_matrix1 * transmission_rate
    S_t, I_t, R_t = initial_sizes
    susceptible_rows.append(S_t)
    infected_rows.append(I_t)
    recovered_rows.append(R_t)
    white_prison_i = np.where(Group_Names == 'White_Prison')
    black_prison_i = np.where(Group_Names == 'Black_Prison')
    # exponential constants that drive the seeded prison outbreaks
    k1, k2 = prison_rate_build(
        Group_Size, prison_peak_date, white_prison_i, black_prison_i, prison_peak_rate)
    # daily shrink fraction for use in shrinking jail size
    # NOTE(review): this is negative whenever jail_release_date > SIP_DATE --
    # confirm the intended ordering of the two dates.
    jail_release_shrink_by_day = jail_release_shrink/(SIP_DATE - jail_release_date)
    orig_prison_pop_white = Group_Size[white_prison_i]
    orig_prison_pop_black = Group_Size[black_prison_i]
    JAIL_OF_CORRECTIONS = 27296/(27296+1704)  # fraction of jail/prison releases that are jail releases;
    # comes from Calculations tab, cells C24 and C27
    # delta_i[d] holds the new infections of day d (index 0 is a zero placeholder)
    delta_i = [R_t]
    days_since_lockdown = 1
    for i in range(0, TIME):
        if i == SIP_DATE - 1:
            # shelter-in-place starts: lockdown contacts and transmission rate
            lambda_matrix = contact_matrix2 * post_sip_transmission_rate
        # BUG FIX: the original wrote 'i >= SIP_DATE & i <= jail_release_date - 1';
        # bitwise '&' binds tighter than comparisons, so that evaluated the chained
        # comparison 'i >= (SIP_DATE & i) <= (jail_release_date - 1)' instead of a
        # logical conjunction.
        if i >= SIP_DATE and i <= jail_release_date - 1:  # after SIP and before/on final jail release date
            Group_Size[white_prison_i] = orig_prison_pop_white*(1-(
                JAIL_OF_CORRECTIONS*jail_release_shrink_by_day*days_since_lockdown))
            Group_Size[black_prison_i] = orig_prison_pop_black*(1-(
                JAIL_OF_CORRECTIONS*jail_release_shrink_by_day*days_since_lockdown))
            k1, k2 = prison_rate_build(Group_Size, prison_peak_date, white_prison_i,
                black_prison_i, prison_peak_rate)
            days_since_lockdown += 1
        # force of infection: (k x k contact matrix) . (k-vector of infected share)
        l = np.squeeze(np.asarray(np.dot(lambda_matrix, I_t/Group_Size)))
        # new infections = force of infection * number susceptible by group
        contacts = l * S_t
        delta_i.append(contacts)
        # recoveries are drawn from the cohort infected 14 days earlier;
        # before day 14 this falls back to R_t[0] (zero at that point)
        I_14 = R_t[0]
        if i >= 14:
            I_14 = delta_i[i-14]
        dSdt = - contacts
        dIdt = contacts - recovery_rates * I_14
        dRdt = recovery_rates * I_14
        S_t = S_t + dSdt
        I_t = I_t + dIdt
        R_t = R_t + dRdt
        if i <= prison_peak_date:
            # override the prison groups with the seeded exponential outbreak
            I_t[white_prison_i] = np.exp(i*k1)
            I_t[black_prison_i] = np.exp(i*k2)
            S_t[white_prison_i] = Group_Size[white_prison_i] - np.exp(i*k1)
            S_t[black_prison_i] = Group_Size[black_prison_i] - np.exp(i*k2)
        susceptible_rows.append(S_t)
        infected_rows.append(I_t)
        recovered_rows.append(R_t)
    s = pd.DataFrame(susceptible_rows, columns=Group_Names)
    i = pd.DataFrame(infected_rows, columns=Group_Names)
    r = pd.DataFrame(recovered_rows, columns=Group_Names)
    s['Day'] = s.index
    i['Day'] = i.index
    r['Day'] = r.index
    return s,i,r
"""Build model for policy intervention 2
This function is the same as build_model in build_models.py with the exception
of the additional parameter that it requires an updated infection rate for prison churn.
"""
def build_model_p2(group_size_data, TIME, contact_matrix1, contact_matrix2, params, policy2_params):
    """Run the group-structured SIR model with policy intervention 2.

    Same dynamics as ``build_model``, except that after the shelter-in-place
    date the prison groups' infection levels are pinned to the fixed
    post-SIP rates supplied in ``policy2_params``.

    Returns three DataFrames (susceptible, infected, recovered), each with one
    row per day (TIME + 1 rows including day 0) and one column per group plus
    a 'Day' column.
    """
    Group_Names, Group_Size, initial_sizes, recovery_rates = build_initial_values(group_size_data)
    susceptible_rows = []
    infected_rows = []
    recovered_rows = []
    # force-of-infection matrix before the shelter-in-place order
    lambda_matrix = contact_matrix1 * params.transmission_rate
    S_t, I_t, R_t = initial_sizes
    susceptible_rows.append(S_t)
    infected_rows.append(I_t)
    recovered_rows.append(R_t)
    white_prison_i = np.where(Group_Names == 'White_Prison')
    black_prison_i = np.where(Group_Names == 'Black_Prison')
    # exponential constants that drive the seeded prison outbreaks
    k1, k2 = prison_rate_build(
        Group_Size, params.prison_peak_date, white_prison_i, black_prison_i, params.prison_infection_rate)
    # delta_i[d] represents new infections of day d (index 0 is a zero placeholder)
    delta_i = [R_t]
    for i in range(0, TIME):
        if i == params.sip_start_date - 1:
            # shelter-in-place starts: lockdown contacts and transmission rate
            lambda_matrix = contact_matrix2 * params.post_sip_transmission_rate
        # multiplying k*k contact matrix * k*1 vector of proportion of group infected
        # l is a vector with length k (force of infection per group)
        l = np.squeeze(np.asarray(np.dot(lambda_matrix, I_t/Group_Size)))
        # this is the number of new infections
        contacts = l * S_t #force of infection * number Susceptible by group
        delta_i.append(contacts)
        # recoveries drawn from the cohort infected 14 days earlier;
        # before day 14 this falls back to R_t[0] (zero at that point)
        I_14 = R_t[0]
        if i >= 14:
            I_14 = delta_i[i-14]
        dSdt = - contacts
        dIdt = contacts - recovery_rates * I_14
        dRdt = recovery_rates * I_14
        S_t = S_t + dSdt
        I_t = I_t + dIdt
        R_t = R_t + dRdt
        if i <= params.prison_peak_date:
            if i <= params.sip_start_date - 1:
                # pre-SIP: override prison groups with the seeded exponential outbreak
                I_t[white_prison_i] = np.exp(i*k1)
                I_t[black_prison_i] = np.exp(i*k2)
                S_t[white_prison_i] = Group_Size[white_prison_i] - np.exp(i*k1)
                S_t[black_prison_i] = Group_Size[black_prison_i] - np.exp(i*k2)
            else:
                # NOTE(review): leftover debug print -- consider removing or logging
                print(f'{i}: now reducing prison rates')
                # post-SIP: pin prison infections to the fixed policy-2 rates
                I_t[white_prison_i] = (policy2_params.prison_sip_i_white + policy2_params.jail_sip_i_white)*Group_Size[white_prison_i]
                I_t[black_prison_i] = (policy2_params.prison_sip_i_black + policy2_params.jail_sip_i_black)*Group_Size[black_prison_i]
                S_t[white_prison_i] = Group_Size[white_prison_i] - (policy2_params.prison_sip_i_white + policy2_params.jail_sip_i_white)*Group_Size[white_prison_i]
                S_t[black_prison_i] = Group_Size[black_prison_i] - (policy2_params.prison_sip_i_black + policy2_params.jail_sip_i_black)*Group_Size[black_prison_i]
        susceptible_rows.append(S_t)
        infected_rows.append(I_t)
        recovered_rows.append(R_t)
    s = pd.DataFrame(susceptible_rows, columns=Group_Names)
    i = pd.DataFrame(infected_rows, columns=Group_Names)
    r = pd.DataFrame(recovered_rows, columns=Group_Names)
    s['Day'] = s.index
    i['Day'] = i.index
    r['Day'] = r.index
    return s,i,r
"""
Build a model for new infections each day given initial group size, contacts rates,
transmision rates etc for k groups. Specifically, output a DataFrame with the
daily number of infections, recovered, susceptible
params
--------
group_size_data: array of length k
population size for each population sub-group with names for each sub-group
TIME: Int
total number of days to model
SIP_DATE: Int
number of days until shelter in place order -- will use contact_matrix1 for
contact rates until this number is reached
contact_matrix1: k*k matrix
expected number of contacts that each group makes with every other group
BEFORE the shelter in place order
contact_matrix2: k*k matrix
expected number of contacts that each group makes with every other group
AFTER the shelter in place order
transmission_rate: float
scalar, represents the likelihood of being infected by each contact
prison_peak_rate: Float
the rate that the prison infection rate will end at by the prison_peak_date
prison_peak_date: Int
number of days until prison infection rate hits its peak
return
------
susceptible: k*TIME DataFrame where each column is a sub-group, and each row is
the number of susceptible people on that day for each group
infected: DataFrame with daily number of active infections for each sub-group and
each day
recovered_rows: DataFrame with number of recovered individuals in each sub-group
on each day.
Note: the sum of cells in each corresponding cell in the three DataFrames is the
number of total people in that group.
"""
def build_model(group_size_data, TIME, contact_matrix1, contact_matrix2,
    params):
    """Run the group-structured SIR model for TIME days.

    Returns three DataFrames (susceptible, infected, recovered), each with one
    row per day (TIME + 1 rows including day 0) and one column per group plus
    a 'Day' column; corresponding cells across the three frames sum to the
    group size (except where the prison seeding overrides apply).
    """
    Group_Names, Group_Size, initial_sizes, recovery_rates = build_initial_values(group_size_data)
    susceptible_rows = []
    infected_rows = []
    recovered_rows = []
    # force-of-infection matrix before the shelter-in-place order
    lambda_matrix = contact_matrix1 * params.transmission_rate
    S_t, I_t, R_t = initial_sizes
    susceptible_rows.append(S_t)
    infected_rows.append(I_t)
    recovered_rows.append(R_t)
    white_prison_i = np.where(Group_Names == 'White_Prison')
    black_prison_i = np.where(Group_Names == 'Black_Prison')
    # exponential constants that drive the seeded prison outbreaks
    k1, k2 = prison_rate_build(
        Group_Size, params.prison_peak_date,
        white_prison_i, black_prison_i, params.prison_infection_rate)
    # daily_new_infections[d] holds day d's new infections (index 0 is a zero placeholder)
    daily_new_infections = [R_t]
    for day in range(TIME):
        if day == params.sip_start_date - 1:
            # shelter-in-place starts: lockdown contacts and transmission rate
            lambda_matrix = contact_matrix2 * params.post_sip_transmission_rate
        # force of infection per group: (k x k contacts) . (infected share)
        force = np.squeeze(np.asarray(np.dot(lambda_matrix, I_t / Group_Size)))
        new_infections = force * S_t
        daily_new_infections.append(new_infections)
        # recoveries come from the cohort infected 14 days earlier;
        # before day 14 this falls back to R_t[0] (zero at that point)
        recovering = R_t[0] if day < 14 else daily_new_infections[day - 14]
        dSdt = - new_infections
        dIdt = new_infections - recovery_rates * recovering
        dRdt = recovery_rates * recovering
        S_t = S_t + dSdt
        I_t = I_t + dIdt
        R_t = R_t + dRdt
        if day <= params.prison_peak_date:
            # override the prison groups with the seeded exponential outbreak
            I_t[white_prison_i] = np.exp(day * k1)
            I_t[black_prison_i] = np.exp(day * k2)
            S_t[white_prison_i] = Group_Size[white_prison_i] - np.exp(day * k1)
            S_t[black_prison_i] = Group_Size[black_prison_i] - np.exp(day * k2)
        susceptible_rows.append(S_t)
        infected_rows.append(I_t)
        recovered_rows.append(R_t)
    susceptible = pd.DataFrame(susceptible_rows, columns=Group_Names)
    infected = pd.DataFrame(infected_rows, columns=Group_Names)
    recovered = pd.DataFrame(recovered_rows, columns=Group_Names)
    for frame in (susceptible, infected, recovered):
        frame['Day'] = frame.index
    return susceptible, infected, recovered
| [
"pandas.DataFrame",
"numpy.log",
"numpy.zeros",
"numpy.where",
"numpy.exp",
"numpy.dot"
] | [((3216, 3255), 'numpy.where', 'np.where', (["(Group_Names == 'White_Prison')"], {}), "(Group_Names == 'White_Prison')\n", (3224, 3255), True, 'import numpy as np\n'), ((3277, 3316), 'numpy.where', 'np.where', (["(Group_Names == 'Black_Prison')"], {}), "(Group_Names == 'Black_Prison')\n", (3285, 3316), True, 'import numpy as np\n'), ((5793, 5844), 'pandas.DataFrame', 'pd.DataFrame', (['susceptible_rows'], {'columns': 'Group_Names'}), '(susceptible_rows, columns=Group_Names)\n', (5805, 5844), True, 'import pandas as pd\n'), ((5854, 5902), 'pandas.DataFrame', 'pd.DataFrame', (['infected_rows'], {'columns': 'Group_Names'}), '(infected_rows, columns=Group_Names)\n', (5866, 5902), True, 'import pandas as pd\n'), ((5911, 5960), 'pandas.DataFrame', 'pd.DataFrame', (['recovered_rows'], {'columns': 'Group_Names'}), '(recovered_rows, columns=Group_Names)\n', (5923, 5960), True, 'import pandas as pd\n'), ((6777, 6816), 'numpy.where', 'np.where', (["(Group_Names == 'White_Prison')"], {}), "(Group_Names == 'White_Prison')\n", (6785, 6816), True, 'import numpy as np\n'), ((6838, 6877), 'numpy.where', 'np.where', (["(Group_Names == 'Black_Prison')"], {}), "(Group_Names == 'Black_Prison')\n", (6846, 6877), True, 'import numpy as np\n'), ((9068, 9119), 'pandas.DataFrame', 'pd.DataFrame', (['susceptible_rows'], {'columns': 'Group_Names'}), '(susceptible_rows, columns=Group_Names)\n', (9080, 9119), True, 'import pandas as pd\n'), ((9129, 9177), 'pandas.DataFrame', 'pd.DataFrame', (['infected_rows'], {'columns': 'Group_Names'}), '(infected_rows, columns=Group_Names)\n', (9141, 9177), True, 'import pandas as pd\n'), ((9186, 9235), 'pandas.DataFrame', 'pd.DataFrame', (['recovered_rows'], {'columns': 'Group_Names'}), '(recovered_rows, columns=Group_Names)\n', (9198, 9235), True, 'import pandas as pd\n'), ((11449, 11488), 'numpy.where', 'np.where', (["(Group_Names == 'White_Prison')"], {}), "(Group_Names == 'White_Prison')\n", (11457, 11488), True, 'import numpy as np\n'), ((11510, 
11549), 'numpy.where', 'np.where', (["(Group_Names == 'Black_Prison')"], {}), "(Group_Names == 'Black_Prison')\n", (11518, 11549), True, 'import numpy as np\n'), ((13009, 13060), 'pandas.DataFrame', 'pd.DataFrame', (['susceptible_rows'], {'columns': 'Group_Names'}), '(susceptible_rows, columns=Group_Names)\n', (13021, 13060), True, 'import pandas as pd\n'), ((13069, 13117), 'pandas.DataFrame', 'pd.DataFrame', (['infected_rows'], {'columns': 'Group_Names'}), '(infected_rows, columns=Group_Names)\n', (13081, 13117), True, 'import pandas as pd\n'), ((13126, 13175), 'pandas.DataFrame', 'pd.DataFrame', (['recovered_rows'], {'columns': 'Group_Names'}), '(recovered_rows, columns=Group_Names)\n', (13138, 13175), True, 'import pandas as pd\n'), ((1018, 1034), 'numpy.zeros', 'np.zeros', (['groups'], {}), '(groups)\n', (1026, 1034), True, 'import numpy as np\n'), ((2187, 2197), 'numpy.log', 'np.log', (['t1'], {}), '(t1)\n', (2193, 2197), True, 'import numpy as np\n'), ((2227, 2237), 'numpy.log', 'np.log', (['t2'], {}), '(t2)\n', (2233, 2237), True, 'import numpy as np\n'), ((5424, 5438), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (5430, 5438), True, 'import numpy as np\n'), ((5471, 5485), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (5477, 5485), True, 'import numpy as np\n'), ((12672, 12686), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (12678, 12686), True, 'import numpy as np\n'), ((12719, 12733), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (12725, 12733), True, 'import numpy as np\n'), ((4850, 4889), 'numpy.dot', 'np.dot', (['lambda_matrix', '(I_t / Group_Size)'], {}), '(lambda_matrix, I_t / Group_Size)\n', (4856, 4889), True, 'import numpy as np\n'), ((5547, 5561), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (5553, 5561), True, 'import numpy as np\n'), ((5623, 5637), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (5629, 5637), True, 'import numpy as np\n'), ((7416, 7455), 'numpy.dot', 'np.dot', 
(['lambda_matrix', '(I_t / Group_Size)'], {}), '(lambda_matrix, I_t / Group_Size)\n', (7422, 7455), True, 'import numpy as np\n'), ((8048, 8062), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (8054, 8062), True, 'import numpy as np\n'), ((8099, 8113), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (8105, 8113), True, 'import numpy as np\n'), ((12091, 12130), 'numpy.dot', 'np.dot', (['lambda_matrix', '(I_t / Group_Size)'], {}), '(lambda_matrix, I_t / Group_Size)\n', (12097, 12130), True, 'import numpy as np\n'), ((12795, 12809), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (12801, 12809), True, 'import numpy as np\n'), ((12871, 12885), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (12877, 12885), True, 'import numpy as np\n'), ((8179, 8193), 'numpy.exp', 'np.exp', (['(i * k1)'], {}), '(i * k1)\n', (8185, 8193), True, 'import numpy as np\n'), ((8259, 8273), 'numpy.exp', 'np.exp', (['(i * k2)'], {}), '(i * k2)\n', (8265, 8273), True, 'import numpy as np\n')] |
import cv2
import glob
import os
import numpy as np
import argparse
from collections import defaultdict
import pickle
from utils_for_mtmct import DataPacker
# Build a square 0/1 "overlapping in time on the same camera" mask over all
# tracklets and dump it to disk for downstream multi-camera matching.
tracklets_info = []          # rows of [camera id, global tracklet id, first frame, last frame]
tracklet_global_id = 1       # 1-based id assigned in scan order across all cameras
root = '/data3/shensj/datasets/my_files/gps/GPSReID/MOT/GPSReID/crop_images/yolo_nms03_all_dataset_2_ft1/'

# Pass 1: walk each camera's tracklet folders and record the frame span of
# every tracklet that has more than two crops.
for camid in range(6):
    tracklet_paths = os.path.join(root, '{:02d}'.format(camid + 1))
    for tracklet_id in sorted(os.listdir(tracklet_paths)):
        img_paths = sorted(glob.glob(os.path.join(tracklet_paths, tracklet_id, '*.jpg')))
        # Too few detections to be a reliable tracklet.
        if len(img_paths) <= 2:
            continue
        # The frame index is the third '_'-separated token of the file name,
        # minus a one-character prefix.
        first_name = img_paths[0].split('/')[-1]
        last_name = img_paths[-1].split('/')[-1]
        start_frame = int(first_name.split('_')[2][1:])
        end_frame = int(last_name.split('_')[2][1:])
        tracklets_info.append([camid + 1, tracklet_global_id, start_frame, end_frame])
        tracklet_global_id += 1

tracklets_info = np.array(tracklets_info)

# Pass 2: mark every pair of same-camera tracklets whose frame spans overlap.
mask_for_timestamp = np.zeros((tracklet_global_id - 1, tracklet_global_id - 1), dtype=np.int64)
for tracklet_id in range(1, tracklet_global_id):
    tracklet_info = tracklets_info[tracklet_id - 1]
    camid = tracklet_info[0]
    start_frame = tracklet_info[2]
    end_frame = tracklet_info[3]
    tracklets_with_same_camid = tracklets_info[tracklets_info[:, 0] == camid]
    for other in tracklets_with_same_camid:
        # Intervals [s1, e1] and [s2, e2] overlap iff s2 <= e1 and e2 >= s1.
        if other[2] <= end_frame and other[3] >= start_frame:
            mask_for_timestamp[tracklet_id - 1, other[1] - 1] = 1

DataPacker.dump(mask_for_timestamp, os.path.join(root, 'mask_for_timestamp.pkl'))
| [
"numpy.zeros",
"numpy.array",
"os.listdir",
"os.path.join"
] | [((969, 993), 'numpy.array', 'np.array', (['tracklets_info'], {}), '(tracklets_info)\n', (977, 993), True, 'import numpy as np\n'), ((1016, 1090), 'numpy.zeros', 'np.zeros', (['(tracklet_global_id - 1, tracklet_global_id - 1)'], {'dtype': 'np.int64'}), '((tracklet_global_id - 1, tracklet_global_id - 1), dtype=np.int64)\n', (1024, 1090), True, 'import numpy as np\n'), ((431, 457), 'os.listdir', 'os.listdir', (['tracklet_paths'], {}), '(tracklet_paths)\n', (441, 457), False, 'import os\n'), ((1653, 1697), 'os.path.join', 'os.path.join', (['root', '"""mask_for_timestamp.pkl"""'], {}), "(root, 'mask_for_timestamp.pkl')\n", (1665, 1697), False, 'import os\n'), ((552, 602), 'os.path.join', 'os.path.join', (['tracklet_paths', 'tracklet_id', '"""*.jpg"""'], {}), "(tracklet_paths, tracklet_id, '*.jpg')\n", (564, 602), False, 'import os\n')] |
# coding:utf-8
import torch
import numpy as np
class Vocab:
    """Vocabulary wrapper: maps words to ids (and back) and converts a blog
    record into the tensors consumed by training/evaluation."""

    def __init__(self, embed, word2id):
        """Store the embedding matrix and word<->id maps.

        Ids 0 and 1 are reserved for the PAD and UNK tokens respectively.
        """
        self.embed = embed
        self.word2id = word2id
        self.id2word = {v: k for k, v in word2id.items()}
        # word2id must be injective, otherwise id2word loses entries.
        assert len(self.word2id) == len(self.id2word)
        self.PAD_IDX = 0
        self.UNK_IDX = 1
        self.PAD_TOKEN = 'PAD_TOKEN'
        self.UNK_TOKEN = 'UNK_TOKEN'

    def __len__(self):
        """Vocabulary size."""
        return len(self.word2id)

    def i2w(self, idx):
        """Id -> word (raises KeyError for unknown ids)."""
        return self.id2word[idx]

    def w2i(self, w):
        """Word -> id, falling back to UNK_IDX for out-of-vocabulary words."""
        if w in self.word2id:
            return self.word2id[w]
        else:
            return self.UNK_IDX

    # Return input and target tensors for training and blog content info for evaluation.
    def make_tensors(self, blog, args):
        """Convert one blog record into sentence/document/event tensors.

        Returns (sents, sents_target, doc_lens, doc_targets, events,
        event_targets, event_tfs, event_prs, event_lens, event_sent_lens,
        sents_content, summary).
        """
        summary = ' '.join(blog['summary'])
        doc_targets = []
        for doc in blog['documents']:
            doc_targets.append(doc['doc_label'])
        sents = []
        sents_target = []
        sents_content = []
        doc_lens = []
        for doc in blog['documents']:
            sents.extend(doc['text'])
            sents_target.extend(doc['sent_label'])
            sents_content.extend(doc['text'])
            doc_lens.append(len(doc['text']))
        # Fix each sentence to exactly sent_trunc words: truncate longer
        # sentences, pad shorter ones with PAD_TOKEN.
        sent_trunc = args.sent_trunc
        for i, sent in enumerate(sents):
            sent = sent.split()
            cur_sent_len = len(sent)
            if cur_sent_len > sent_trunc:
                sent = sent[:sent_trunc]
            else:
                sent += (sent_trunc - cur_sent_len) * [self.PAD_TOKEN]
            sent = [self.w2i(_) for _ in sent]
            sents[i] = sent
        sents = torch.LongTensor(sents)
        sents_target = torch.FloatTensor(sents_target)
        doc_targets = torch.FloatTensor(doc_targets)
        events = []  # all events, i.e. the SRL 4-tuples
        event_targets = []  # per-event score, computed against the summary's events
        event_tfs = []  # per-event term-frequency value
        event_prs = []  # per-event PageRank value
        event_lens = []  # number of events per document
        event_sent_lens = []  # number of events per sentence
        for doc in blog['documents']:
            cur_len = 0
            cur_pr = []
            for sent_events in doc['events']:
                cur_len += len(sent_events)
                event_sent_lens.append(len(sent_events))
                for event in sent_events:
                    events.append(event['tuple'])
                    event_targets.append(event['score'])
                    event_tfs.append(event['tf'])
                    cur_pr.append(event['pr'])
            # NOTE(review): each PageRank value is *multiplied* by the sum of
            # the document's values; normalization usually divides — confirm
            # this scaling is intended.
            norm = np.array(cur_pr).sum()
            cur_pr = [t * norm for t in cur_pr]
            event_prs.extend(cur_pr)
            event_lens.append(cur_len)
        # Tokenize events: '-' placeholders become PAD_TOKEN, fields are
        # tab-separated, then each token is mapped to its id.
        for i, event in enumerate(events):
            event = event.replace('-', self.PAD_TOKEN)
            event = event.strip().split('\t')
            event = [self.w2i(_) for _ in event]
            events[i] = event
        events = torch.LongTensor(events)
        event_targets = torch.FloatTensor(event_targets)
        event_tfs = torch.FloatTensor(event_tfs)
        event_prs = torch.FloatTensor(event_prs)
        # event_sim_matrix = torch.FloatTensor(blog["sim_matrix"])
        return sents, sents_target, doc_lens, doc_targets, events, event_targets, event_tfs, event_prs, event_lens, event_sent_lens, sents_content, summary
| [
"numpy.array",
"torch.FloatTensor",
"torch.LongTensor"
] | [((1699, 1722), 'torch.LongTensor', 'torch.LongTensor', (['sents'], {}), '(sents)\n', (1715, 1722), False, 'import torch\n'), ((1746, 1777), 'torch.FloatTensor', 'torch.FloatTensor', (['sents_target'], {}), '(sents_target)\n', (1763, 1777), False, 'import torch\n'), ((1800, 1830), 'torch.FloatTensor', 'torch.FloatTensor', (['doc_targets'], {}), '(doc_targets)\n', (1817, 1830), False, 'import torch\n'), ((3002, 3026), 'torch.LongTensor', 'torch.LongTensor', (['events'], {}), '(events)\n', (3018, 3026), False, 'import torch\n'), ((3051, 3083), 'torch.FloatTensor', 'torch.FloatTensor', (['event_targets'], {}), '(event_targets)\n', (3068, 3083), False, 'import torch\n'), ((3104, 3132), 'torch.FloatTensor', 'torch.FloatTensor', (['event_tfs'], {}), '(event_tfs)\n', (3121, 3132), False, 'import torch\n'), ((3153, 3181), 'torch.FloatTensor', 'torch.FloatTensor', (['event_prs'], {}), '(event_prs)\n', (3170, 3181), False, 'import torch\n'), ((2614, 2630), 'numpy.array', 'np.array', (['cur_pr'], {}), '(cur_pr)\n', (2622, 2630), True, 'import numpy as np\n')] |
# Copyright 2017 Regents of the University of Colorado. All Rights Reserved.
# Released under the MIT license.
# This software was developed at the University of Colorado's Laboratory for Atmospheric and Space Physics.
# Verify current version before use at: https://github.com/MAVENSDC/Pydivide
import re
import os
from . import download_files_utilities as utils
from .file_regex import kp_regex, l2_regex
import numpy as np
import collections
def param_list(kp):
    '''
    Return a listing of all parameters present in the given
    insitu data dictionary/structure.

    Input:
        kp: insitu kp data structure/dictionary read from file(s)
    Output:
        A list of "#<index> <name>" strings, one per contained item.
    '''
    import pandas as pd
    entries = []
    counter = 1
    for base_tag, value in kp.items():
        if isinstance(value, pd.DataFrame):
            # One entry per column, qualified by the instrument tag.
            for obs_tag in value.columns:
                entries.append("#%3d %s.%s" % (counter, base_tag, obs_tag))
                counter += 1
        elif isinstance(value, (pd.Series, pd.Index)):
            entries.append("#%3d %s" % (counter, base_tag))
            counter += 1
        else:
            print('*****WARNING*****')
            print('Returning INCOMPLETE Parameter List')
            print('Base tag neither DataFrame nor Series')
            print('Plese check read_insitu_file definition')
    return entries
# ---------------------------------------------------------------------
def param_range(kp, iuvs=None):
    '''
    Print the range of times and orbits for the provided insitu data.
    If iuvs data are also provided, print their orbit range and warn when
    the two data sets do not overlap in orbit number.

    Input:
        kp: insitu kp data structure/dictionary
        iuvs: IUVS kp data strucure/dictionary
    Output:
        None: prints information to screen
    '''
    # Case 1: the insitu coverage.
    times = np.array(kp['TimeString'])
    orbits = np.array(kp['Orbit'])
    print("The loaded insitu KP data set contains data between")
    print(" %s and %s" % (times[0], times[-1]))
    print("Equivalently, this corresponds to orbits")
    print(" %6d and %6d." % (orbits[0], orbits[-1]))
    # Case 2: kp itself holds IUVS data.
    iuvs_data = False  # retained from the original implementation (unused)
    iuvs_tags = ['CORONA_LO_HIGH', 'CORONA_LO_LIMB', 'CORONA_LO_DISK',
                 'CORONA_E_HIGH', 'CORONA_E_LIMB', 'CORONA_E_DISK',
                 'APOAPSE', 'PERIAPSE', 'STELLAR_OCC']
    # NOTE: for a plain dict this membership test compares a dict_keys view
    # against strings and is always False — retained as-is.
    if kp.keys() in iuvs_tags:
        print("The loaded IUVS KP data set contains data between orbits")
        print(" %6d and %6d." % (orbits[0], orbits[-1]))
    # Case 3: both insitu and IUVS supplied.
    if iuvs is not None:
        iuvs_orbits = np.array(iuvs['Orbit'])
        print("The loaded IUVS KP data set contains data between orbits")
        print(" %6d and %6d." % (iuvs_orbits[0], iuvs_orbits[-1]))
        insitu_min = np.nanmin([kp['Orbit']])
        insitu_max = np.nanmax([kp['Orbit']])
        if np.nanmax([iuvs['Orbit']]) < insitu_min or np.nanmin([iuvs['Orbit']]) > insitu_max:
            print("*** WARNING ***")
            print("There is NO overlap between the supplied insitu and IUVS")
            print(" data structures. We cannot guarantee your safety ")
            print(" should you attempt to display these IUVS data against")
            print(" these insitu-supplied emphemeris data.")
    return  # No information to return
# --------------------------------------------------------------------------
def range_select(kp, time=None, parameter=None, maximum=None, minimum=None):
    '''
    Returns a subset of the input data based on the provided time
    and/or parameter criteria. If neither Time nor Parameter filter
    information is provided, then no subselection of data will occur.
    Any parameter used as a filtering criterion must be paired with
    either a maximum and/or a minimum value; a value of None leaves
    that side of the range unbounded.

    Input:
        kp: insitu kp data structure/dictionary read from file(s)
        time: two-element time range, either strings of format
            'yyyy-mm-dd hh:mm:ss' or integers (orbit numbers)
        parameter: Element of the provided data structure/dictionary by
            which to filter data. Parameter(s) must be either integer
            type (search by index) or string type (search by instrument
            name and observation type). Multiple Parameters must be
            provided as a list (mixing data types within the list is
            permitted).
        maximum: maximum value(s) of Parameter on which to filter; must
            have one element per Parameter when a list is given.
        minimum: minimum value(s) of Parameter on which to filter; must
            have one element per Parameter when a list is given.
    Output: a dictionary/structure containing the same elements as the
        provided one, but filtered according to the Time and Parameter
        options.

    Fixes relative to the original implementation:
      * the "same type" test on the two time elements used
        isinstance(type(a), type(b)), which is False for every ordinary
        pair, so every well-formed time range fell into the error branch
        and time filtering silently never happened;
      * parameter-only filtering collected instrument/observation labels
        but never appended the corresponding filters, so it had no effect;
      * np.Infinity (removed in NumPy 2.0) replaced by np.inf;
      * an empty filter list now returns the input unchanged instead of
        reducing an empty array.
    '''
    from datetime import datetime

    filter_list = []

    # Nothing to filter on at all.
    if time is None and parameter is None:
        insufficient_input_range_select()
        print('Neither Time nor Parameter provided')
        return kp

    # ---------------------------- time criterion ----------------------------
    if time is not None:
        if len(time) != 2:
            if parameter is not None:
                print('*****WARNING*****')
                print('Time must be provided as a two-element list')
                print('of either strings (yyyy-mm-dd hh:mm:ss) ')
                print('or orbits. Since a Parameter *was* provided,')
                print('I will filter on that, but ignore the time input.')
            else:
                insufficient_input_range_select()
                print('Time malformed must be either a string of format')
                print('yyyy-mm-ddThh:mm:ss or integer orbit)')
                print('and no Parameter criterion given')
                return kp
        elif type(time[0]) is not type(time[1]):
            # BUG FIX: original tested isinstance(type(a), type(b)).
            if parameter is not None:
                print('*****WARNING*****')
                print('Both elements of time must be same type')
                print('Only strings of format yyyy-mm-dd hh:mm:ss')
                print('or integers (orbit numbers) are allowed.')
                print('Ignoring time inputs; will filter ONLY')
                print('on Parameter inputs.')
            else:
                print('*****ERROR*****')
                print('Both elements of Time must be same type')
                print('Only Strings of format yyyy-mm-dd hh:mm:ss')
                print('or integers (orbit numbers) are allowed.')
                print('Returning original unchanged data dictionary')
                return kp
        elif type(time[0]) is int:
            # Orbit-number range.
            filter_list.append(kp['Orbit'] >= min(time))
            filter_list.append(kp['Orbit'] <= max(time))
        elif isinstance(time[0], str):
            # Date-string range: parse both bounds and every data timestamp.
            time_dt = [datetime.strptime(i, '%Y-%m-%d %H:%M:%S') for i in time]
            min_dt = min(time_dt)
            max_dt = max(time_dt)
            kp_dt = [datetime.strptime(i, '%Y-%m-%dT%H:%M:%S')
                     for i in kp['TimeString']]
            delta_tmin = np.array([(i - min_dt).total_seconds() for i in kp_dt])
            delta_tmax = np.array([(i - max_dt).total_seconds() for i in kp_dt])
            filter_list.append(delta_tmin >= 0)
            filter_list.append(delta_tmax <= 0)
        else:
            # Time elements are neither int nor str.
            if parameter is not None:
                print('*****WARNING*****')
                print('Both elements of time must be same type')
                print('Only strings of format yyyy-mm-dd hh:mm:ss')
                print('or integers (orbit numbers) are allowed.')
                print('Ignoring time inputs; will filter ONLY')
                print('on Parameter inputs.')
            else:
                print('*****ERROR*****')
                print('Both elements of Time must be same type')
                print('Only Strings of format yyyy-mm-dd hh:mm:ss')
                print('or integers (orbit numbers) are allowed.')
                print('Returning original unchanged data dictionary')
                return kp

    # -------------------------- parameter criterion --------------------------
    if parameter is not None:
        inst = []
        obs = []
        minima = []
        maxima = []
        if isinstance(parameter, (int, str)):
            if minimum is None and maximum is None:
                insufficient_input_range_select()
                print('No bounds set for parameter: %s' % parameter)
                if time is None:
                    return kp
                print('Applying only Time filtering')
                parameter = None
            else:
                # Open-ended bounds.
                if minimum is None:
                    minimum = -np.inf
                if maximum is None:
                    maximum = np.inf
                a, b = get_inst_obs_labels(kp, parameter)
                inst.append(a)
                obs.append(b)
                minima = [minimum]
                maxima = [maximum]
        elif type(parameter) is list:
            if (minimum is None or maximum is None
                    or len(parameter) != len(minimum)
                    or len(parameter) != len(maximum)):
                print('*****ERROR*****')
                print('---range_select---')
                print('Number of minima and maxima provided')
                print('MUST match number of Parameters provided')
                print('You provided %4d Parameters' % len(parameter))
                print('             %4d minima' % (0 if minimum is None else len(minimum)))
                print('         and %4d maxima' % (0 if maximum is None else len(maximum)))
                if time is None:
                    return kp
                print('Filtering only on Time')
                parameter = None
            else:
                for param in parameter:
                    a, b = get_inst_obs_labels(kp, param)
                    inst.append(a)
                    obs.append(b)
                minima = list(minimum)
                maxima = list(maximum)
        else:
            print('*****ERROR*****')
            print('Cannot identify given parameter: %s' % parameter)
            print('Suggest using param_list(kp) to identify Parameter')
            print('by index or by name')
            if time is None:
                print('Returning complete original data dictionary')
                return kp
            print('Filtering only on Time')
            parameter = None
        # BUG FIX: apply the parameter filters for BOTH the parameter-only
        # and the time+parameter paths (the original only did the latter).
        if parameter is not None:
            for inst_i, obs_i, min_obs, max_obs in zip(inst, obs, minima, maxima):
                filter_list.append(kp[inst_i][obs_i] >= min_obs)
                filter_list.append(kp[inst_i][obs_i] <= max_obs)

    # ------------------------------ apply -----------------------------------
    if not filter_list:
        # No usable criterion survived validation.
        return kp
    filter = np.all(filter_list, axis=0)
    new = {}
    for i in kp:
        new.update({i: kp[i][filter]})
    return new
# --------------------------------------------------------------------------
def insufficient_input_range_select():
    '''
    Print the standard error banner shown when range_select receives
    neither a valid two-element time range nor a parameter with at
    least one bound.
    '''
    for message in ('*****ERROR*****',
                    'Either a time criterion with two values.',
                    ' or a parameter name with maximum and/or',
                    ' minimum values must be provided.',
                    'Returning the complete original data dictionary'):
        print(message)
# --------------------------------------------------------------------------
def get_inst_obs_labels(kp, name):
    '''
    Split a parameter identifier into its instrument and observation parts
    for use in accessing the relevant part of the data structure.
    E.g.: 'LPW.EWAVE_LOW_FREQ' is returned as ['LPW', 'EWAVE_LOW_FREQ'].

    Input:
        kp: insitu kp data structure/dictionary read from file(s)
        name: string (or integer index) identifying a parameter.
    Output:
        [instrument, observation] on success, None otherwise.
    '''
    # Force to string so integer indices are handled uniformly.
    name = '%s' % name
    pieces = name.split('.')
    if len(pieces) == 2:
        # Already in '<instrument>.<observation>' form.
        return pieces
    if len(pieces) == 1:
        # A single token must be an integer index into the parameter list.
        try:
            int(pieces[0])
            return (find_param_from_index(kp, pieces[0])).split('.')
        except:
            print('*****ERROR*****')
            print('%s is an invalid parameter' % name)
            print('If only one value is provided, it must be an integer')
            return
    print('*****ERROR*****')
    print('%s is not a valid parameter' % name)
    print('because it has %1d elements' % len(pieces))
    print('Only 1 integer or string of form "a.b" are allowed.')
    print('Please use .param_list attribute to find valid parameters')
    return
def find_param_from_index(kp, index):
    '''
    Given an integer index, find the name of the parameter.

    Input:
        kp: insitu kp data structure/dictionary read from file(s)
        index: the index of the desired parameter (integer type)
    Output:
        A string of form <instrument>.<observation>
        (e.g., LPW.EWAVE_LOW_FREQ), or None if the index is unknown.
    '''
    # Reproduce the "#<idx>" prefix used by param_list entries.
    tag = '#%3d' % int(index)
    for entry in param_list(kp):
        if re.search(tag, entry):
            return entry[5:]  # clip the '#123 ' string
    print('*****ERROR*****')
    print('%s not a valid index.' % tag)
    print('Use param_list to list options')
    return
def remove_inst_tag(df):
    '''
    Strip the leading instrument identifier from each dotted column name
    of an insitu KP DataFrame.

    Input:
        A DataFrame produced from the insitu KP data.
    Output:
        A list of column names with the instrument prefix removed;
        columns without at least one '.' are omitted.
    '''
    stripped = []
    for column in df.columns:
        pieces = column.split('.')
        if len(pieces) >= 2:
            stripped.append('.'.join(pieces[1:]))
    return stripped
def get_latest_files_from_date_range(date1, date2):
    """Return the newest insitu KP files for every day in [date1, date2].

    For each day, the <root>/maven/data/sci/kp/insitu/<year>/<month>
    directory is scanned and — separately for the regular and the
    '_crustal' products — only the file with the highest version (and,
    within it, highest revision) is kept. Result is sorted by path.
    """
    from datetime import timedelta
    mvn_root_data_dir = utils.get_root_data_dir()
    maven_data_dir = os.path.join(mvn_root_data_dir, 'maven', 'data', 'sci', 'kp', 'insitu')
    # Each file starts at midnight, so lets cut off the hours and just pay attention to the days
    date1 = date1.replace(hour=0, minute=0, second=0)
    date2 = date2.replace(hour=0, minute=0, second=0) + timedelta(days=1)
    time_spanned = date2 - date1
    num_days = time_spanned.days
    filenames = []
    for i in range(num_days):
        current_date = date1 + timedelta(days=i)
        year = str(current_date.year)
        month = str('%02d' % current_date.month)
        day = str('%02d' % current_date.day)
        full_path = os.path.join(maven_data_dir, year, month)
        if os.path.exists(full_path):
            # Grab only the most recent version/revision of regular and crustal insitu files for each
            # day
            insitu = {}
            c_insitu = {}
            for f in os.listdir(full_path):
                # print(f)
                # No 'description' group -> regular product; '_crustal' -> crustal product.
                if kp_regex.match(f).group('day') == day and not kp_regex.match(f).group('description'):
                    v = kp_regex.match(f).group('version')
                    r = kp_regex.match(f).group('revision')
                    insitu[f] = [v, r]
                elif kp_regex.match(f).group('day') == day and kp_regex.match(f).group('description') == '_crustal':
                    v = kp_regex.match(f).group('version')
                    r = kp_regex.match(f).group('revision')
                    c_insitu[f] = [v, r]
            if insitu:
                # Get max version
                insitu_file = max(insitu.keys(), key=(lambda k: insitu[k][0]))
                max_v = re.search('v\d{2}', insitu_file).group(0)
                # Get max revision
                max_r = max([re.search('r\d{2}', k).group(0) for k in insitu if max_v in k])
                # Get most recent insitu file (based on max version, and then max revision values)
                most_recent_insitu = [f for f in insitu.keys() if max_r in f and max_v in f]
                filenames.append(os.path.join(full_path, most_recent_insitu[0]))
            if c_insitu:
                # Get max version
                c_insitu_file = max(c_insitu.keys(), key=(lambda k: c_insitu[k][0]))
                c_max_v = re.search('v\d{2}', c_insitu_file).group(0)
                # Get max revision
                c_max_r = max([re.search('r\d{2}', k).group(0) for k in c_insitu if c_max_v in k])
                # Get most recent insitu file (based on max version, and then max revision values)
                most_recent_c_insitu = [f for f in c_insitu.keys() if c_max_r in f and c_max_v in f]
                filenames.append(os.path.join(full_path, most_recent_c_insitu[0]))
    filenames = sorted(filenames)
    return filenames
def get_latest_iuvs_files_from_date_range(date1, date2):
    """Return the newest IUVS KP files for every day in [date1, date2].

    Files that share the same observation basename (mode + timestamp) are
    deduplicated by keeping only the highest version and, within that
    version, the highest revision. Result is sorted by path.
    """
    from datetime import timedelta
    mvn_root_data_dir = utils.get_root_data_dir()
    maven_data_dir = os.path.join(mvn_root_data_dir, 'maven', 'data', 'sci', 'kp', 'iuvs')
    # Each file starts at midnight, so lets cut off the hours and just pay attention to the days
    date1 = date1.replace(hour=0, minute=0, second=0)
    date2 = date2.replace(day=date2.day, hour=0, minute=0, second=0) + timedelta(days=1)
    time_spanned = date2 - date1
    num_days = time_spanned.days
    files_to_return = []
    for i in range(num_days):
        current_date = date1 + timedelta(days=i)
        year = str(current_date.year)
        month = str('%02d' % current_date.month)
        day = str('%02d' % current_date.day)
        full_path = os.path.join(maven_data_dir, year, month)
        if os.path.exists(full_path):
            basenames = []
            # Obtain a list of all the basenames for the day
            for f in os.listdir(full_path):
                if kp_regex.match(f).group('day') == day:
                    description = kp_regex.match(f).group('description')
                    year = kp_regex.match(f).group('year')
                    month = kp_regex.match(f).group('month')
                    day = kp_regex.match(f).group('day')
                    time = kp_regex.match(f).group('time')
                    seq = ('mvn', 'kp', 'iuvs' + description, year + month + day + time)
                    basenames.append('_'.join(seq))
            basenames = list(set(basenames))
            for bn in basenames:
                version = 0
                revision = 0
                # First pass: highest version carried by this basename.
                for f in os.listdir(full_path):
                    description = kp_regex.match(f).group('description')
                    year = kp_regex.match(f).group('year')
                    month = kp_regex.match(f).group('month')
                    day = kp_regex.match(f).group('day')
                    time = kp_regex.match(f).group('time')
                    seq = ('mvn', 'kp', 'iuvs' + description, year + month + day + time)
                    if bn == '_'.join(seq):
                        v = kp_regex.match(f).group('version')
                        if int(v) > int(version):
                            version = v
                # Second pass: highest revision at that version.
                for f in os.listdir(full_path):
                    description = kp_regex.match(f).group('description')
                    year = kp_regex.match(f).group('year')
                    month = kp_regex.match(f).group('month')
                    day = kp_regex.match(f).group('day')
                    time = kp_regex.match(f).group('time')
                    file_version = kp_regex.match(f).group('version')
                    seq = ('mvn', 'kp', 'iuvs' + description, year + month + day + time)
                    if bn == '_'.join(seq) and file_version == version:
                        r = kp_regex.match(f).group('revision')
                        if int(r) > int(revision):
                            revision = r
                # Reconstruct the winning filename from its parts.
                if int(version) > 0:
                    seq = (bn, 'v' + str(version), 'r' + str(revision) + '.tab')
                    files_to_return.append(os.path.join(full_path, '_'.join(seq)))
    files_to_return = sorted(files_to_return)
    return files_to_return
def get_l2_files_from_date(date1, instrument):
    """Return the sorted full paths of l2 files for *instrument* whose
    'day' regex group matches the day of *date1*."""
    maven_data_dir = os.path.join(utils.get_root_data_dir(), 'maven', 'data',
                                  'sci', instrument, 'l2')
    # Files are day-granular; the time of day is irrelevant.
    date1 = date1.replace(hour=0, minute=0, second=0)
    day = '%02d' % date1.day
    full_path = os.path.join(maven_data_dir, str(date1.year), '%02d' % date1.month)
    matches = []
    if os.path.exists(full_path):
        for entry in os.listdir(full_path):
            if l2_regex.match(entry).group('day') == day:
                matches.append(os.path.join(full_path, entry))
    return sorted(matches)
def get_header_info(filename):
    """Parse the commented header of an insitu KP ASCII file.

    Reads the '#'-prefixed header lines, extracts the fixed-width (16-char)
    column-description rows, and builds the dotted
    '<instrument>.<observation>' column names used by the toolkit.
    Returns (names, inst) where names[0] is always 'Time'.
    """
    # Determine number of header lines
    nheader = 0
    with open(filename) as f:
        for line in f:
            if line.startswith('#'):
                nheader += 1
    # Parse the header (still needs special case work)
    read_param_list = False
    start_temp = False
    index_list = []
    with open(filename) as fin:
        icol = -2  # Counting header lines detailing column names
        iname = 1  # for counting seven lines with name info
        ncol = -1  # Dummy value to allow reading of early headerlines?
        col_regex = '#\s(.{16}){%3d}' % ncol  # needed for column names
        # Crustal files carry fewer descriptor rows and shifted field indices.
        crustal = False
        if 'crustal' in filename:
            crustal = True
        for iline in range(nheader):
            line = fin.readline()
            # Define the proper indices change depending on the file type and row
            i = [2, 2, 1] if crustal else [1, 1, 1]
            if re.search('Number of parameter columns', line):
                ncol = int(re.split("\s{3}", line)[i[0]])
                # needed for column names
                col_regex = '#\s(.{16}){%2d}' % ncol if crustal else '#\s(.{16}){%3d}' % ncol
            elif re.search('Line on which data begins', line):
                # NOTE(review): nhead_test is assigned but never used here.
                nhead_test = int(re.split("\s{3}", line)[i[1]]) - 1
            elif re.search('Number of lines', line):
                # NOTE(review): ndata is assigned but never used here.
                ndata = int(re.split("\s{3}", line)[i[2]])
            elif re.search('PARAMETER', line):
                read_param_list = True
                param_head = iline
            elif read_param_list:
                icol += 1
                if icol > ncol:
                    read_param_list = False
                elif re.match(col_regex, line):
                    # OK, verified match now get the values
                    temp = re.findall('(.{16})', line[3:])
                    # Descriptor rows start once the index row shows column 1.
                    if temp[0] == '               1':
                        start_temp = True
                    if start_temp:
                        # Crustal files do not have as much variable info as other insitu files, need
                        # to modify the lines below
                        if crustal:
                            if iname == 1:
                                index = temp
                            elif iname == 2:
                                obs1 = temp
                            elif iname == 3:
                                obs2 = temp
                            elif iname == 4:
                                unit = temp
                                # crustal files don't come with this field
                                # throwing it in here for consistency with other insitu files
                                inst = [' MODELED_MAG'] * 13
                            else:
                                print('More lines in data descriptor than expected.')
                                print('Line %d' % iline)
                        else:
                            if iname == 1:
                                index = temp
                            elif iname == 2:
                                obs1 = temp
                            elif iname == 3:
                                obs2 = temp
                            elif iname == 4:
                                obs3 = temp
                            elif iname == 5:
                                inst = temp
                            elif iname == 6:
                                unit = temp
                            elif iname == 7:
                                format_code = temp
                            else:
                                print('More lines in data descriptor than expected.')
                                print('Line %d' % iline)
                        iname += 1
            else:
                pass
    # Generate the names list.
    # NB, there are special case redundancies in there
    # (e.g., LPW: Electron Density Quality (min and max))
    # ****SWEA FLUX electron QUALITY *****
    first = True
    parallel = None
    names = []
    if crustal:
        for h, i, j in zip(inst, obs1, obs2):
            combo_name = (' '.join([i.strip(), j.strip()])).strip()
            # Add inst to names to avoid ambiguity
            # Will need to remove these after splitting
            names.append('.'.join([h.strip(), combo_name]))
        names[0] = 'Time'
    else:
        for h, i, j, k in zip(inst, obs1, obs2, obs3):
            combo_name = (' '.join([i.strip(), j.strip(), k.strip()])).strip()
            if re.match('^LPW$', h.strip()):
                # Max and min error bars use same name in column
                # SIS says first entry is min and second is max
                if re.match('(Electron|Spacecraft)(.+)Quality', combo_name):
                    if first:
                        combo_name = combo_name + ' Min'
                        first = False
                    else:
                        combo_name = combo_name + ' Max'
                        first = True
            elif re.match('^SWEA$', h.strip()):
                # electron flux qual flags do not indicate whether parallel or anti
                # From context it is clear; but we need to specify in name
                if re.match('.+Parallel.+', combo_name):
                    parallel = True
                elif re.match('.+Anti-par', combo_name):
                    parallel = False
                else:
                    pass
                if re.match('Flux, e-(.+)Quality', combo_name):
                    if parallel:
                        p = re.compile('Flux, e- ')
                        combo_name = p.sub('Flux, e- Parallel ', combo_name)
                    else:
                        p = re.compile('Flux, e- ')
                        combo_name = p.sub('Flux, e- Anti-par ', combo_name)
                if re.match('Electron eflux (.+)Quality', combo_name):
                    if parallel:
                        p = re.compile('Electron eflux ')
                        combo_name = p.sub('Electron eflux Parallel ', combo_name)
                    else:
                        p = re.compile('Electron eflux ')
                        combo_name = p.sub('Electron eflux Anti-par ', combo_name)
            # Add inst to names to avoid ambiguity
            # Will need to remove these after splitting
            names.append('.'.join([h.strip(), combo_name]))
        names[0] = 'Time'
    return names, inst
def initialize_list(the_list):
    """Recursively replace every leaf element of a nested sequence with an
    empty list, mutating *the_list* in place and returning it."""
    for pos, element in enumerate(the_list):
        # Anything with a length is treated as a nested container.
        if hasattr(element, "__len__"):
            the_list[pos] = initialize_list(element)
        else:
            the_list[pos] = []
    return the_list
def place_values_in_list(the_list, location, to_append):
    """Append *to_append* to the sub-list found at *location*, where
    *location* is either a single index or a sequence of nested indices."""
    target = the_list
    if hasattr(location, "__len__"):
        # Walk down through each level of nesting.
        for idx in location:
            target = target[idx]
    else:
        target = target[location]
    target.append(to_append)
def get_values_from_list(the_list, location):
    """Return the element found at *location*, where *location* is either a
    single index or a sequence of nested indices."""
    target = the_list
    if hasattr(location, "__len__"):
        # Walk down through each level of nesting.
        for idx in location:
            target = target[idx]
        return target
    return target[location]
def orbit_time(begin_orbit, end_orbit=None):
    """Map orbit numbers to a [begin_time, end_time] string pair using the
    'maven_orb_rec.orb' ephemeris file bundled next to this module.

    end_orbit defaults to begin_orbit; end_time is the start of the orbit
    *after* end_orbit. Returns [None, None] when a number is not found.
    """
    orb_file = os.path.join(os.path.dirname(__file__),
                            'maven_orb_rec.orb')
    with open(orb_file, "r") as f:
        if end_orbit is None:
            end_orbit = begin_orbit
        orbit_num = []
        time = []
        # Skip the two header lines of the .orb file.
        f.readline()
        f.readline()
        for line in f:
            # Only the first 28 characters hold the orbit number and epoch.
            line = line[0:28]
            line = line.split(' ')
            line = [x for x in line if x != '']
            orbit_num.append(int(line[0]))
            time.append(line[1] + "-" + month_to_num(line[2]) + "-" + line[3] + "T" + line[4])
        try:
            if orbit_num.index(begin_orbit) > len(time) or orbit_num.index(end_orbit) + 1 > len(time):
                # NOTE(review): print is given two arguments here (comma, not
                # '%'), so it prints the format string and value side by side.
                print("Orbit numbers not found. Please choose a number between 1 and %s.", orbit_num[-1])
                return [None, None]
            else:
                begin_time = time[orbit_num.index(begin_orbit)]
                end_time = time[orbit_num.index(end_orbit) + 1]
        except ValueError:
            # Orbit number absent from the ephemeris file.
            return [None, None]
    return [begin_time, end_time]
def month_to_num(month_string):
    """Translate a three-letter upper-case month abbreviation (e.g. 'JAN')
    to its two-digit month string; returns None for unrecognized input."""
    months = {'JAN': '01', 'FEB': '02', 'MAR': '03', 'APR': '04',
              'MAY': '05', 'JUN': '06', 'JUL': '07', 'AUG': '08',
              'SEP': '09', 'OCT': '10', 'NOV': '11', 'DEC': '12'}
    return months.get(month_string)
def _bracketing_indices(dims, coord):
    """Indices of the two grid planes nearest *coord*, ordered so the
    lower-coordinate plane comes first."""
    nearest = np.argsort(np.abs(dims - coord))[:2]
    if dims[nearest[0]] <= dims[nearest[1]]:
        return nearest[0], nearest[1]
    return nearest[1], nearest[0]


def mvn_kp_sc_traj_xyz(dims_x, dims_y, dims_z, values, x_array, y_array, z_array, nn='linear'):
    """Sample a 3-D gridded field along a trajectory.

    Input:
        dims_x, dims_y, dims_z: 1-D arrays of grid coordinates per axis.
        values: 3-D array indexed as values[ix, iy, iz].
        x_array, y_array, z_array: trajectory coordinates to sample at.
        nn: 'nearest' for nearest-neighbor lookup; anything else selects
            trilinear interpolation.
    Output:
        List with one sampled value per trajectory point; in interpolation
        mode, points outside the grid bounds yield np.nan.

    Fixes relative to the original implementation:
      * bounds were computed from the *query* arrays instead of the grid,
        which made every out-of-range check dead code;
      * the NaN branches did not `continue`, so an extra (extrapolated)
        value was appended for each out-of-range point;
      * interpolation weights were paired with the nearest-first plane
        order instead of the low/high plane order, giving wrong results
        whenever the nearest plane was the upper one;
      * np.NaN (removed in NumPy 2.0) replaced by np.nan.
    """
    data = []
    if nn == 'nearest':
        for x, y, z in zip(x_array, y_array, z_array):
            ix = np.abs(dims_x - x).argmin()
            iy = np.abs(dims_y - y).argmin()
            iz = np.abs(dims_z - z).argmin()
            data.append(values[ix, iy, iz])
        return data

    # Bounds come from the grid itself, not from the queried trajectory.
    min_x, max_x = np.min(dims_x), np.max(dims_x)
    min_y, max_y = np.min(dims_y), np.max(dims_y)
    min_z, max_z = np.min(dims_z), np.max(dims_z)
    for x, y, z in zip(x_array, y_array, z_array):
        if not (min_x <= x <= max_x and min_y <= y <= max_y and min_z <= z <= max_z):
            # Outside the grid: emit exactly one NaN and move on.
            data.append(np.nan)
            continue
        jx1, jx2 = _bracketing_indices(dims_x, x)
        jy1, jy2 = _bracketing_indices(dims_y, y)
        jz1, jz2 = _bracketing_indices(dims_z, z)
        # Fractional position of the query point within the bounding cell.
        nx = (x - dims_x[jx1]) / (dims_x[jx2] - dims_x[jx1])
        ny = (y - dims_y[jy1]) / (dims_y[jy2] - dims_y[jy1])
        nz = (z - dims_z[jz1]) / (dims_z[jz2] - dims_z[jz1])
        # Standard trilinear interpolation over the 8 cell corners.
        data.append(values[jx1, jy1, jz1] * (1 - nx) * (1 - ny) * (1 - nz) +
                    values[jx2, jy1, jz1] * nx * (1 - ny) * (1 - nz) +
                    values[jx1, jy2, jz1] * (1 - nx) * ny * (1 - nz) +
                    values[jx1, jy1, jz2] * (1 - nx) * (1 - ny) * nz +
                    values[jx2, jy1, jz2] * nx * (1 - ny) * nz +
                    values[jx1, jy2, jz2] * (1 - nx) * ny * nz +
                    values[jx2, jy2, jz1] * nx * ny * (1 - nz) +
                    values[jx2, jy2, jz2] * nx * ny * nz)
    return data
def read_iuvs_file(file):
    """Read a MAVEN IUVS Key Parameter ASCII file into a nested dict.

    The file is scanned line by line; a line starting with ``'*'`` marks the
    beginning of a section header.  Each section is parsed according to the
    observation mode named in its header:

    * ``PERIAPSE``          -> ``iuvs_dict['periapse<N>']`` (numbered per file)
    * ``OCCULTATION``       -> ``iuvs_dict['occultation<N>']``
    * ``CORONA_LORES_HIGH`` -> ``iuvs_dict['corona_lores_high']``
    * ``APOAPSE``           -> ``iuvs_dict['apoapse']``

    Every entry contains the section's header metadata plus OrderedDicts
    mapping column label -> list of values for the tabulated quantities.
    The fill value ``'-9.9999990E+09'`` is converted to NaN.

    Bug fix relative to the previous revision: the periapse ``density_unc``
    and occultation ``retrieval_unc`` tables were parsed but then thrown
    away (the ``*_sys_unc`` dict was stored a second time under the wrong
    key).  They are now stored under ``'density_unc'`` / ``'retrieval_unc'``.

    Parameters
    ----------
    file : str
        Path to the IUVS KP ASCII file.

    Returns
    -------
    dict
        Parsed contents; empty if the file contains no ``'*'`` headers.
    """
    iuvs_dict = {}
    periapse_num = 0
    occ_num = 0
    with open(file) as f:
        line = f.readline()
        while line != '':
            if line.startswith('*'):
                # Read the header.  Header lines are fixed-width
                # "name = value" records; the value field starts at column 19.
                line = f.readline()
                obs_mode = line[19:len(line) - 1].strip()
                header = {}
                f.readline()
                line = f.readline()
                header['time_start'] = line[19:len(line) - 1].strip()
                line = f.readline()
                header['time_stop'] = line[19:len(line) - 1].strip()
                line = f.readline()
                if obs_mode == "OCCULTATION":
                    # Occultation sections carry an extra target-name record.
                    header['target_name'] = line[19:len(line) - 1].strip()
                    line = f.readline()
                header['sza'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['local_time'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['lat'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['lon'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['lat_mso'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['lon_mso'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['orbit_number'] = int(line[19:len(line) - 1].strip())
                line = f.readline()
                header['mars_season'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_geo_x'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_geo_y'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_geo_z'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_mso_x'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_mso_y'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_mso_z'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_geo_x'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_geo_y'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_geo_z'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_geo_lat'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_geo_lon'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_mso_lat'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sun_mso_lon'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['subsol_geo_lon'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['subsol_geo_lat'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_sza'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_local_time'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['sc_alt'] = float(line[19:len(line) - 1].strip())
                line = f.readline()
                header['mars_sun_dist'] = float(line[19:len(line) - 1].strip())
                if obs_mode == "PERIAPSE":
                    periapse_num += 1
                    line = f.readline()
                    n_alt_bins = int(line[19:len(line) - 1].strip())
                    header['n_alt_bins'] = float(n_alt_bins)
                    line = f.readline()
                    n_alt_den_bins = int(line[19:len(line) - 1].strip())
                    header['n_alt_den_bins'] = float(n_alt_den_bins)
                    iuvs_dict['periapse' + str(periapse_num)] = {}
                    iuvs_dict['periapse' + str(periapse_num)].update(header)
                    # Empty space
                    f.readline()
                    # Read the Temperature
                    line = f.readline()
                    temp_labels = line[19:len(line) - 1].strip().split()
                    temperature = collections.OrderedDict((x, []) for x in temp_labels)
                    temperature_unc = collections.OrderedDict((x, []) for x in temp_labels)
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        temperature[list(temperature.keys())[index]].append(val)
                        index += 1
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        temperature_unc[list(temperature_unc.keys())[index]].append(val)
                        index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['temperature'] = temperature
                    iuvs_dict['periapse' + str(periapse_num)]['temperature_unc'] = temperature_unc
                    # Empty space
                    f.readline()
                    # Read the Scale Heights
                    line = f.readline()
                    scale_height_labels = line[19:len(line) - 1].strip().split()
                    scale_height = collections.OrderedDict((x, []) for x in scale_height_labels)
                    scale_height_unc = collections.OrderedDict((x, []) for x in scale_height_labels)
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        scale_height[list(scale_height.keys())[index]].append(val)
                        index += 1
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        scale_height_unc[list(scale_height_unc.keys())[index]].append(val)
                        index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['scale_height'] = scale_height
                    iuvs_dict['periapse' + str(periapse_num)]['scale_height_unc'] = scale_height_unc
                    # Empty space
                    f.readline()
                    f.readline()
                    # Read in the density
                    line = f.readline()
                    density_labels = line.strip().split()
                    density = collections.OrderedDict((x, []) for x in density_labels)
                    for i in range(0, n_alt_den_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            density[list(density.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['density'] = density
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the density systematic uncertainty
                    density_sys_unc = collections.OrderedDict((x, []) for x in density_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        # index + 1 skips the altitude column, which the
                        # systematic-uncertainty record does not repeat.
                        density_sys_unc[list(density.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['density_sys_unc'] = density_sys_unc
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the density uncertainty
                    density_unc = collections.OrderedDict((x, []) for x in density_labels)
                    for i in range(0, n_alt_den_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            density_unc[list(density.keys())[index]].append(val)
                            index += 1
                    # Fixed: previously re-stored density_sys_unc under
                    # 'density_sys_unc' here, so the uncertainty table parsed
                    # just above was silently discarded.
                    iuvs_dict['periapse' + str(periapse_num)]['density_unc'] = density_unc
                    f.readline()
                    f.readline()
                    line = f.readline()
                    radiance_labels = line.strip().split()
                    if "Cameron" in radiance_labels:
                        radiance_labels.remove('Cameron')
                    radiance = collections.OrderedDict((x, []) for x in radiance_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            radiance[list(radiance.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['radiance'] = radiance
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the radiance systematic uncertainty
                    radiance_sys_unc = collections.OrderedDict((x, []) for x in radiance_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        radiance_sys_unc[list(radiance.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['radiance_sys_unc'] = radiance_sys_unc
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the radiance uncertainty
                    radiance_unc = collections.OrderedDict((x, []) for x in radiance_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            radiance_unc[list(radiance.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['periapse' + str(periapse_num)]['radiance_unc'] = radiance_unc
                elif obs_mode == "OCCULTATION":
                    occ_num += 1
                    line = f.readline()
                    n_alt_den_bins = int(line[19:len(line) - 1].strip())
                    header['n_alt_den_bins'] = float(n_alt_den_bins)
                    iuvs_dict['occultation' + str(occ_num)] = {}
                    iuvs_dict['occultation' + str(occ_num)].update(header)
                    # Empty space
                    f.readline()
                    # Read the Scale Heights
                    line = f.readline()
                    scale_height_labels = line[19:len(line) - 1].strip().split()
                    scale_height = collections.OrderedDict((x, []) for x in scale_height_labels)
                    scale_height_unc = collections.OrderedDict((x, []) for x in scale_height_labels)
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        scale_height[list(scale_height.keys())[index]].append(val)
                        index += 1
                    line = f.readline()
                    vals = line[20:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        scale_height_unc[list(scale_height_unc.keys())[index]].append(val)
                        index += 1
                    iuvs_dict['occultation' + str(occ_num)]['scale_height'] = scale_height
                    iuvs_dict['occultation' + str(occ_num)]['scale_height_unc'] = scale_height_unc
                    # Empty space
                    f.readline()
                    f.readline()
                    # Read in the retrieval
                    line = f.readline()
                    retrieval_labels = line.strip().split()
                    retrieval = collections.OrderedDict((x, []) for x in retrieval_labels)
                    for i in range(0, n_alt_den_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            retrieval[list(retrieval.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['occultation' + str(occ_num)]['retrieval'] = retrieval
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the retrieval systematic uncertainty
                    retrieval_sys_unc = collections.OrderedDict((x, []) for x in retrieval_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        retrieval_sys_unc[list(retrieval.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['occultation' + str(occ_num)]['retrieval_sys_unc'] = retrieval_sys_unc
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the retrieval uncertainty
                    retrieval_unc = collections.OrderedDict((x, []) for x in retrieval_labels)
                    for i in range(0, n_alt_den_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            retrieval_unc[list(retrieval.keys())[index]].append(val)
                            index += 1
                    # Fixed: previously re-stored retrieval_sys_unc under
                    # 'retrieval_sys_unc' here, discarding the retrieval
                    # uncertainty table parsed just above.
                    iuvs_dict['occultation' + str(occ_num)]['retrieval_unc'] = retrieval_unc
                elif obs_mode == "CORONA_LORES_HIGH":
                    line = f.readline()
                    n_alt_bins = int(line[19:len(line) - 1].strip())
                    header['n_alt_bins'] = float(n_alt_bins)
                    iuvs_dict['corona_lores_high'] = {}
                    iuvs_dict['corona_lores_high'].update(header)
                    f.readline()
                    # Read the Half int
                    line = f.readline()
                    half_int_dist_labels = line[19:len(line) - 1].strip().split()
                    half_int_dist = collections.OrderedDict((x, []) for x in half_int_dist_labels)
                    half_int_dist_unc = collections.OrderedDict((x, []) for x in half_int_dist_labels)
                    line = f.readline()
                    vals = line[26:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        half_int_dist[list(half_int_dist.keys())[index]].append(val)
                        index += 1
                    line = f.readline()
                    vals = line[26:len(line) - 1].strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        half_int_dist_unc[list(half_int_dist_unc.keys())[index]].append(val)
                        index += 1
                    iuvs_dict['corona_lores_high']['half_int_dist'] = half_int_dist
                    iuvs_dict['corona_lores_high']['half_int_dist_unc'] = half_int_dist_unc
                    # Blank space
                    f.readline()
                    f.readline()
                    # Read in the density
                    line = f.readline()
                    density_labels = line.strip().split()
                    density = collections.OrderedDict((x, []) for x in density_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            density[list(density.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['corona_lores_high']['density'] = density
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the density systematic uncertainty
                    density_sys_unc = collections.OrderedDict((x, []) for x in density_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        density_sys_unc[list(density.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['corona_lores_high']['density_sys_unc'] = density_sys_unc
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the density uncertainty
                    density_unc = collections.OrderedDict((x, []) for x in density_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            density_unc[list(density.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['corona_lores_high']['density_unc'] = density_unc
                    f.readline()
                    f.readline()
                    line = f.readline()
                    radiance_labels = line.strip().split()
                    if "Cameron" in radiance_labels:
                        radiance_labels.remove('Cameron')
                    radiance = collections.OrderedDict((x, []) for x in radiance_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            radiance[list(radiance.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['corona_lores_high']['radiance'] = radiance
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the radiance systematic uncertainty
                    radiance_sys_unc = collections.OrderedDict((x, []) for x in radiance_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        radiance_sys_unc[list(radiance.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['corona_lores_high']['radiance_sys_unc'] = radiance_sys_unc
                    # Not needed lines
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the radiance uncertainty
                    radiance_unc = collections.OrderedDict((x, []) for x in radiance_labels)
                    for i in range(0, n_alt_bins):
                        line = f.readline()
                        vals = line.strip().split()
                        index = 0
                        for val in vals:
                            if val == '-9.9999990E+09':
                                val = float('nan')
                            else:
                                val = float(val)
                            radiance_unc[list(radiance.keys())[index]].append(val)
                            index += 1
                    iuvs_dict['corona_lores_high']['radiance_unc'] = radiance_unc
                elif obs_mode == 'APOAPSE':
                    f.readline()
                    maps = {}
                    # 17 gridded map variables, each a 45-row lat x lon table.
                    for j in range(0, 17):
                        var = f.readline().strip()
                        line = f.readline()
                        lons = line.strip().split()
                        lons = [float(x) for x in lons]
                        lats = []
                        data = []
                        for k in range(0, 45):
                            line = f.readline().strip().split()
                            lats.append(float(line[0]))
                            line_data = line[1:]
                            line_data = [float(x) if x != '-9.9999990E+09' else float('nan') for x in line_data]
                            data.append(line_data)
                        maps[var] = data
                        f.readline()
                    maps['latitude'] = lats
                    maps['longitude'] = lons
                    iuvs_dict['apoapse'] = {}
                    iuvs_dict['apoapse'].update(header)
                    iuvs_dict['apoapse'].update(maps)
                    f.readline()
                    f.readline()
                    f.readline()
                    # Read in the radiance systematic uncertainty
                    line = f.readline()
                    radiance_labels = line.strip().split()
                    radiance_sys_unc = collections.OrderedDict((x, []) for x in radiance_labels)
                    line = f.readline()
                    vals = line.strip().split()
                    index = 0
                    # NOTE(review): 'radiance' below is only defined if a
                    # PERIAPSE or CORONA_LORES_HIGH section appeared earlier in
                    # this file; an APOAPSE-first file would raise NameError.
                    # Preserved as-is pending confirmation against the file
                    # format specification.
                    for val in vals:
                        if val == '-9.9999990E+09':
                            val = float('nan')
                        else:
                            val = float(val)
                        radiance_sys_unc[list(radiance.keys())[index + 1]].append(val)
                        index += 1
                    iuvs_dict['apoapse']['radiance_sys_unc'] = radiance_sys_unc
            line = f.readline()
    return iuvs_dict
# Mapping from the human-readable parameter names used in the in-situ KP
# file column headers to the corresponding internal variable identifiers.
# Keys and values are preserved verbatim; entries are grouped by the theme
# apparent in the key names.
param_dict = {
    # Electron density / temperature and spacecraft potential
    'Electron Density': 'ELECTRON_DENSITY',
    'Electron Density Quality Min': 'ELECTRON_DENSITY_QUAL_MIN',
    'Electron Density Quality Max': 'ELECTRON_DENSITY_QUAL_MAX',
    'Electron Temperature': 'ELECTRON_TEMPERATURE',
    'Electron Temperature Quality Min': 'ELECTRON_TEMPERATURE_QUAL_MIN',
    'Electron Temperature Quality Max': 'ELECTRON_TEMPERATURE_QUAL_MAX',
    'Spacecraft Potential': 'SPACECRAFT_POTENTIAL',
    'Spacecraft Potential Quality Min': 'SPACECRAFT_POTENTIAL_QUAL_MIN',
    'Spacecraft Potential Quality Max': 'SPACECRAFT_POTENTIAL_QUAL_MAX',
    # E-field wave power
    'E-field Power 2-100 Hz': 'EWAVE_LOW_FREQ',
    'E-field 2-100 Hz Quality': 'EWAVE_LOW_FREQ_QUAL_QUAL',
    'E-field Power 100-800 Hz': 'EWAVE_MID_FREQ',
    'E-field 100-800 Hz Quality': 'EWAVE_MID_FREQ_QUAL_QUAL',
    'E-field Power 0.8-1.0 Mhz': 'EWAVE_HIGH_FREQ',
    'E-field 0.8-1.0 Mhz Quality': 'EWAVE_HIGH_FREQ_QUAL_QUAL',
    # EUV irradiance
    'EUV Irradiance 0.1-7.0 nm': 'IRRADIANCE_LOW',
    'Irradiance 0.1-7.0 nm Quality': 'IRRADIANCE_LOW_QUAL',
    'EUV Irradiance 17-22 nm': 'IRRADIANCE_MID',
    'Irradiance 17-22 nm Quality': 'IRRADIANCE_MID_QUAL',
    'EUV Irradiance Lyman-alpha': 'IRRADIANCE_LYMAN',
    'Irradiance Lyman-alpha Quality': 'IRRADIANCE_LYMAN_QUAL',
    # Solar-wind electrons
    'Solar Wind Electron Density': 'SOLAR_WIND_ELECTRON_DENSITY',
    'Solar Wind E- Density Quality': 'SOLAR_WIND_ELECTRON_DENSITY_QUAL',
    'Solar Wind Electron Temperature': 'SOLAR_WIND_ELECTRON_TEMPERATURE',
    'Solar Wind E- Temperature Quality': 'SOLAR_WIND_ELECTRON_TEMPERATURE_QUAL',
    # Electron fluxes (two naming variants map to the same identifiers)
    'Flux, e- Parallel (5-100 ev)': 'ELECTRON_PARALLEL_FLUX_LOW',
    'Flux, e- Parallel (5-100 ev) Quality': 'ELECTRON_PARALLEL_FLUX_LOW_QUAL',
    'Flux, e- Parallel (100-500 ev)': 'ELECTRON_PARALLEL_FLUX_MID',
    'Flux, e- Parallel (100-500 ev) Quality': 'ELECTRON_PARALLEL_FLUX_MID_QUAL',
    'Flux, e- Parallel (500-1000 ev)': 'ELECTRON_PARALLEL_FLUX_HIGH',
    'Flux, e- Parallel (500-1000 ev) Quality': 'ELECTRON_PARALLEL_FLUX_HIGH_QUAL',
    'Flux, e- Anti-par (5-100 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_LOW',
    'Flux, e- Anti-par (5-100 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_LOW_QUAL',
    'Flux, e- Anti-par (100-500 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_MID',
    'Flux, e- Anti-par (100-500 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_MID_QUAL',
    'Flux, e- Anti-par (500-1000 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_HIGH',
    'Flux, e- Anti-par (500-1000 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_HIGH_QUAL',
    'Electron eflux Parallel (5-100 ev)': 'ELECTRON_PARALLEL_FLUX_LOW',
    'Electron eflux Parallel (5-100 ev) Quality': 'ELECTRON_PARALLEL_FLUX_LOW_QUAL',
    'Electron eflux Parallel (100-500 ev)': 'ELECTRON_PARALLEL_FLUX_MID',
    'Electron eflux Parallel (100-500 ev) Quality': 'ELECTRON_PARALLEL_FLUX_MID_QUAL',
    'Electron eflux Parallel (500-1000 ev)': 'ELECTRON_PARALLEL_FLUX_HIGH',
    'Electron eflux Parallel (500-1000 ev) Quality': 'ELECTRON_PARALLEL_FLUX_HIGH_QUAL',
    'Electron eflux Anti-par (5-100 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_LOW',
    'Electron eflux Anti-par (5-100 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_LOW_QUAL',
    'Electron eflux Anti-par (100-500 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_MID',
    'Electron eflux Anti-par (100-500 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_MID_QUAL',
    'Electron eflux Anti-par (500-1000 ev)': 'ELECTRON_ANTI_PARALLEL_FLUX_HIGH',
    'Electron eflux Anti-par (500-1000 ev) Quality': 'ELECTRON_ANTI_PARALLEL_FLUX_HIGH_QUAL',
    'Electron Spectrum Shape': 'ELECTRON_SPECTRUM_SHAPE_PARAMETER',
    'Spectrum Shape Quality': 'ELECTRON_SPECTRUM_SHAPE_PARAMETER_QUAL',
    # H+ moments and solar-wind pressure
    'H+ Density': 'HPLUS_DENSITY',
    'H+ Density Quality': 'HPLUS_DENSITY_QUAL',
    'H+ Flow Velocity MSO X': 'HPLUS_FLOW_VELOCITY_MSO_X',
    'H+ Flow MSO X Quality': 'HPLUS_FLOW_VELOCITY_MSO_X_QUAL',
    'H+ Flow Velocity MSO Y': 'HPLUS_FLOW_VELOCITY_MSO_Y',
    'H+ Flow MSO Y Quality': 'HPLUS_FLOW_VELOCITY_MSO_Y_QUAL',
    'H+ Flow Velocity MSO Z': 'HPLUS_FLOW_VELOCITY_MSO_Z',
    'H+ Flow MSO Z Quality': 'HPLUS_FLOW_VELOCITY_MSO_Z_QUAL',
    'H+ Temperature': 'HPLUS_TEMPERATURE',
    'H+ Temperature Quality': 'HPLUS_TEMPERATURE_QUAL',
    'Solar Wind Dynamic Pressure': 'SOLAR_WIND_DYNAMIC_PRESSURE',
    'Solar Wind Pressure Quality': 'SOLAR_WIND_DYNAMIC_PRESSURE_QUAL',
    # Ion moments and flow velocities
    'STATIC Quality Flag': 'STATIC_QUALITY_FLAG',
    'O+ Density': 'OPLUS_DENSITY',
    'O+ Density Quality': 'OPLUS_DENSITY_QUAL',
    'O2+ Density': 'O2PLUS_DENSITY',
    'O2+ Density Quality': 'O2PLUS_DENSITY_QUAL',
    'O+ Temperature': 'OPLUS_TEMPERATURE',
    'O+ Temperature Quality': 'OPLUS_TEMPERATURE_QUAL',
    'O2+ Temperature': 'O2PLUS_TEMPERATURE',
    'O2+ Temperature Quality': 'O2PLUS_TEMPERATURE_QUAL',
    'O2+ Flow Velocity MAVEN_APP X': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_X',
    'O2+ Flow MAVEN_APP X Quality': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_X_QUAL',
    'O2+ Flow Velocity MAVEN_APP Y': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_Y',
    'O2+ Flow MAVEN_APP Y Quality': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_Y_QUAL',
    'O2+ Flow Velocity MAVEN_APP Z': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_Z',
    'O2+ Flow MAVEN_APP Z Quality': 'O2PLUS_FLOW_VELOCITY_MAVEN_APP_Z_QUAL',
    'O2+ Flow Velocity MSO X': 'O2PLUS_FLOW_VELOCITY_MSO_X',
    'O2+ Flow MSO X Quality': 'O2PLUS_FLOW_VELOCITY_MSO_X_QUAL',
    'O2+ Flow Velocity MSO Y': 'O2PLUS_FLOW_VELOCITY_MSO_Y',
    'O2+ Flow MSO Y Quality': 'O2PLUS_FLOW_VELOCITY_MSO_Y_QUAL',
    'O2+ Flow Velocity MSO Z': 'O2PLUS_FLOW_VELOCITY_MSO_Z',
    'O2+ Flow MSO Z Quality': 'O2PLUS_FLOW_VELOCITY_MSO_Z_QUAL',
    # Omnidirectional fluxes and characteristic energies
    'H+ Omni Flux': 'HPLUS_OMNI_DIRECTIONAL_FLUX',
    'H+ Energy': 'HPLUS_CHARACTERISTIC_ENERGY',
    'H+ Energy Quality': 'HPLUS_CHARACTERISTIC_ENERGY_QUAL',
    'He++ Omni Flux': 'HEPLUS_OMNI_DIRECTIONAL_FLUX',
    'He++ Energy': 'HEPLUS_CHARACTERISTIC_ENERGY',
    'He++ Energy Quality': 'HEPLUS_CHARACTERISTIC_ENERGY_QUAL',
    'O+ Omni Flux': 'OPLUS_OMNI_DIRECTIONAL_FLUX',
    'O+ Energy': 'OPLUS_CHARACTERISTIC_ENERGY',
    'O+ Energy Quality': 'OPLUS_CHARACTERISTIC_ENERGY_QUAL',
    'O2+ Omni Flux': 'O2PLUS_OMNI_DIRECTIONAL_FLUX',
    'O2+ Energy': 'O2PLUS_CHARACTERISTIC_ENERGY',
    'O2+ Energy Quality': 'O2PLUS_CHARACTERISTIC_ENERGY_QUAL',
    # Characteristic directions and pickup ions
    'H+ Direction MSO X': 'HPLUS_CHARACTERISTIC_DIRECTION_MSO_X',
    'H+ Direction MSO Y': 'HPLUS_CHARACTERISTIC_DIRECTION_MSO_Y',
    'H+ Direction MSO Z': 'HPLUS_CHARACTERISTIC_DIRECTION_MSO_Z',
    'H+ Angular Width': 'HPLUS_CHARACTERISTIC_ANGULAR_WIDTH',
    'H+ Width Quality': 'HPLUS_CHARACTERISTIC_ANGULAR_WIDTH_QUAL',
    'Pickup Ion Direction MSO X': 'DOMINANT_PICKUP_ION_CHARACTERISTIC_DIRECTION_MSO_X',
    'Pickup Ion Direction MSO Y': 'DOMINANT_PICKUP_ION_CHARACTERISTIC_DIRECTION_MSO_Y',
    'Pickup Ion Direction MSO Z': 'DOMINANT_PICKUP_ION_CHARACTERISTIC_DIRECTION_MSO_Z',
    'Pickup Ion Angular Width': 'DOMINANT_PICKUP_ION_CHARACTERISTIC_ANGULAR_WIDTH',
    'Pickup Ion Width Quality': 'DOMINANT_PICKUP_ION_CHARACTERISTIC_ANGULAR_WIDTH_QUAL',
    # Per-field-of-view energy fluxes and look directions
    'Ion Flux FOV 1 F': 'ION_ENERGY_FLUX__FOV_1_F',
    'Ion Flux FOV 1F Quality': 'ION_ENERGY_FLUX__FOV_1_F_QUAL',
    'Ion Flux FOV 1 R': 'ION_ENERGY_FLUX__FOV_1_R',
    'Ion Flux FOV 1R Quality': 'ION_ENERGY_FLUX__FOV_1_R_QUAL',
    'Ion Flux FOV 2 F': 'ION_ENERGY_FLUX__FOV_2_F',
    'Ion Flux FOV 2F Quality': 'ION_ENERGY_FLUX__FOV_2_F_QUAL',
    'Ion Flux FOV 2 R': 'ION_ENERGY_FLUX__FOV_2_R',
    'Ion Flux FOV 2R Quality': 'ION_ENERGY_FLUX__FOV_2_R_QUAL',
    'Electron Flux FOV 1 F': 'ELECTRON_ENERGY_FLUX___FOV_1_F',
    'Electron Flux FOV 1F Quality': 'ELECTRON_ENERGY_FLUX___FOV_1_F_QUAL',
    'Electron Flux FOV 1 R': 'ELECTRON_ENERGY_FLUX___FOV_1_R',
    'Electron Flux FOV 1R Quality': 'ELECTRON_ENERGY_FLUX___FOV_1_R_QUAL',
    'Electron Flux FOV 2 F': 'ELECTRON_ENERGY_FLUX___FOV_2_F',
    'Electron Flux FOV 2F Quality': 'ELECTRON_ENERGY_FLUX___FOV_2_F_QUAL',
    'Electron Flux FOV 2 R': 'ELECTRON_ENERGY_FLUX___FOV_2_R',
    'Electron Flux FOV 2R Quality': 'ELECTRON_ENERGY_FLUX___FOV_2_R_QUAL',
    'Look Direction 1-F MSO X': 'LOOK_DIRECTION_1_F_MSO_X',
    'Look Direction 1-F MSO Y': 'LOOK_DIRECTION_1_F_MSO_Y',
    'Look Direction 1-F MSO Z': 'LOOK_DIRECTION_1_F_MSO_Z',
    'Look Direction 1-R MSO X': 'LOOK_DIRECTION_1_R_MSO_X',
    'Look Direction 1-R MSO Y': 'LOOK_DIRECTION_1_R_MSO_Y',
    'Look Direction 1-R MSO Z': 'LOOK_DIRECTION_1_R_MSO_Z',
    'Look Direction 2-F MSO X': 'LOOK_DIRECTION_2_F_MSO_X',
    'Look Direction 2-F MSO Y': 'LOOK_DIRECTION_2_F_MSO_Y',
    'Look Direction 2-F MSO Z': 'LOOK_DIRECTION_2_F_MSO_Z',
    'Look Direction 2-R MSO X': 'LOOK_DIRECTION_2_R_MSO_X',
    'Look Direction 2-R MSO Y': 'LOOK_DIRECTION_2_R_MSO_Y',
    'Look Direction 2-R MSO Z': 'LOOK_DIRECTION_2_R_MSO_Z',
    # Magnetic field
    'Magnetic Field MSO X': 'MSO_X',
    'Magnetic MSO X Quality': 'MSO_X_QUAL',
    'Magnetic Field MSO Y': 'MSO_Y',
    'Magnetic MSO Y Quality': 'MSO_Y_QUAL',
    'Magnetic Field MSO Z': 'MSO_Z',
    'Magnetic MSO Z Quality': 'MSO_Z_QUAL',
    'Magnetic Field GEO X': 'GEO_X',
    'Magnetic GEO X Quality': 'GEO_X_QUAL',
    'Magnetic Field GEO Y': 'GEO_Y',
    'Magnetic GEO Y Quality': 'GEO_Y_QUAL',
    'Magnetic Field GEO Z': 'GEO_Z',
    'Magnetic GEO Z Quality': 'GEO_Z_QUAL',
    'Magnetic Field RMS Dev': 'RMS_DEVIATION',
    'Magnetic RMS Quality': 'RMS_DEVIATION_QUAL',
    # Neutral densities
    'Density He': 'HE_DENSITY',
    'Density He Precision': 'HE_DENSITY_PRECISION',
    'Density He Quality': 'HE_DENSITY_QUAL',
    'Density O': 'O_DENSITY',
    'Density O Precision': 'O_DENSITY_PRECISION',
    'Density O Quality': 'O_DENSITY_QUAL',
    'Density CO': 'CO_DENSITY',
    'Density CO Precision': 'CO_DENSITY_PRECISION',
    'Density CO Quality': 'CO_DENSITY_QUAL',
    'Density N2': 'N2_DENSITY',
    'Density N2 Precision': 'N2_DENSITY_PRECISION',
    'Density N2 Quality': 'N2_DENSITY_QUAL',
    'Density NO': 'NO_DENSITY',
    'Density NO Precision': 'NO_DENSITY_PRECISION',
    'Density NO Quality': 'NO_DENSITY_QUAL',
    'Density Ar': 'AR_DENSITY',
    'Density Ar Precision': 'AR_DENSITY_PRECISION',
    'Density Ar Quality': 'AR_DENSITY_QUAL',
    'Density CO2': 'CO2_DENSITY',
    'Density CO2 Precision': 'CO2_DENSITY_PRECISION',
    'Density CO2 Quality': 'CO2_DENSITY_QUAL',
    # Ion densities by mass (amu)
    'Density 32+': 'O2PLUS_DENSITY',
    'Density 32+ Precision': 'O2PLUS_DENSITY_PRECISION',
    'Density 32+ Quality': 'O2PLUS_DENSITY_QUAL',
    'Density 44+': 'CO2PLUS_DENSITY',
    'Density 44+ Precision': 'CO2PLUS_DENSITY_PRECISION',
    'Density 44+ Quality': 'CO2PLUS_DENSITY_QUAL',
    'Density 30+': 'NOPLUS_DENSITY',
    'Density 30+ Precision': 'NOPLUS_DENSITY_PRECISION',
    'Density 30+ Quality': 'NOPLUS_DENSITY_QUAL',
    'Density 16+': 'OPLUS_DENSITY',
    'Density 16+ Precision': 'OPLUS_DENSITY_PRECISION',
    'Density 16+ Quality': 'OPLUS_DENSITY_QUAL',
    'Density 28+': 'CO2PLUS_N2PLUS_DENSITY',
    'Density 28+ Precision': 'CO2PLUS_N2PLUS_DENSITY_PRECISION',
    'Density 28+ Quality': 'CO2PLUS_N2PLUS_DENSITY_QUAL',
    'Density 12+': 'CPLUS_DENSITY',
    'Density 12+ Precision': 'CPLUS_DENSITY_PRECISION',
    'Density 12+ Quality': 'CPLUS_DENSITY_QUAL',
    'Density 17+': 'OHPLUS_DENSITY',
    'Density 17+ Precision': 'OHPLUS_DENSITY_PRECISION',
    'Density 17+ Quality': 'OHPLUS_DENSITY_QUAL',
    'Density 14+': 'NPLUS_DENSITY',
    'Density 14+ Precision': 'NPLUS_DENSITY_PRECISION',
    'Density 14+ Quality': 'NPLUS_DENSITY_QUAL',
    # Spacecraft / APP ephemeris and attitude
    'APP Attitude GEO X': 'ATTITUDE_GEO_X',
    'APP Attitude GEO Y': 'ATTITUDE_GEO_Y',
    'APP Attitude GEO Z': 'ATTITUDE_GEO_Z',
    'APP Attitude MSO X': 'ATTITUDE_MSO_X',
    'APP Attitude MSO Y': 'ATTITUDE_MSO_Y',
    'APP Attitude MSO Z': 'ATTITUDE_MSO_Z',
    'Spacecraft GEO X': 'GEO_X',
    'Spacecraft GEO Y': 'GEO_Y',
    'Spacecraft GEO Z': 'GEO_Z',
    'Spacecraft MSO X': 'MSO_X',
    'Spacecraft MSO Y': 'MSO_Y',
    'Spacecraft MSO Z': 'MSO_Z',
    'Spacecraft GEO Longitude': 'SUB_SC_LONGITUDE',
    'Spacecraft GEO Latitude': 'SUB_SC_LATITUDE',
    'Spacecraft Solar Zenith Angle': 'SZA',
    'Spacecraft Local Time': 'LOCAL_TIME',
    'Spacecraft Altitude Aeroid': 'ALTITUDE',
    'Spacecraft Attitude GEO X': 'ATTITUDE_GEO_X',
    'Spacecraft Attitude GEO Y': 'ATTITUDE_GEO_Y',
    'Spacecraft Attitude GEO Z': 'ATTITUDE_GEO_Z',
    'Spacecraft Attitude MSO X': 'ATTITUDE_MSO_X',
    'Spacecraft Attitude MSO Y': 'ATTITUDE_MSO_Y',
    'Spacecraft Attitude MSO Z': 'ATTITUDE_MSO_Z',
    'Mars Season (Ls)': 'MARS_SEASON',
    'Mars-Sun Distance': 'MARS_SUN_DISTANCE',
    'Subsolar Point GEO Longitude': 'SUBSOLAR_POINT_GEO_LONGITUDE',
    'Subsolar Point GEO Latitude': 'SUBSOLAR_POINT_GEO_LATITUDE',
    'Sub-Mars Point on the Sun Longitude': 'SUBMARS_POINT_SOLAR_LONGITUDE',
    'Sub-Mars Point on the Sun Latitude': 'SUBMARS_POINT_SOLAR_LATITUDE',
    # Rotation matrices
    'Rot matrix MARS -> MSO Row 1, Col 1': 'T11',
    'Rot matrix MARS -> MSO Row 1, Col 2': 'T12',
    'Rot matrix MARS -> MSO Row 1, Col 3': 'T13',
    'Rot matrix MARS -> MSO Row 2, Col 1': 'T21',
    'Rot matrix MARS -> MSO Row 2, Col 2': 'T22',
    'Rot matrix MARS -> MSO Row 2, Col 3': 'T23',
    'Rot matrix MARS -> MSO Row 3, Col 1': 'T31',
    'Rot matrix MARS -> MSO Row 3, Col 2': 'T32',
    'Rot matrix MARS -> MSO Row 3, Col 3': 'T33',
    'Rot matrix SPCCRFT -> MSO Row 1, Col 1': 'SPACECRAFT_T11',
    'Rot matrix SPCCRFT -> MSO Row 1, Col 2': 'SPACECRAFT_T12',
    'Rot matrix SPCCRFT -> MSO Row 1, Col 3': 'SPACECRAFT_T13',
    'Rot matrix SPCCRFT -> MSO Row 2, Col 1': 'SPACECRAFT_T21',
    'Rot matrix SPCCRFT -> MSO Row 2, Col 2': 'SPACECRAFT_T22',
    'Rot matrix SPCCRFT -> MSO Row 2, Col 3': 'SPACECRAFT_T23',
    'Rot matrix SPCCRFT -> MSO Row 3, Col 1': 'SPACECRAFT_T31',
    'Rot matrix SPCCRFT -> MSO Row 3, Col 2': 'SPACECRAFT_T32',
    'Rot matrix SPCCRFT -> MSO Row 3, Col 3': 'SPACECRAFT_T33',
}
| [
"numpy.abs",
"os.path.join",
"os.path.dirname",
"os.path.exists",
"numpy.max",
"re.findall",
"datetime.timedelta",
"re.search",
"re.split",
"re.match",
"numpy.min",
"datetime.datetime.strptime",
"os.listdir",
"numpy.nanmax",
"numpy.all",
"re.compile",
"numpy.nanmin",
"numpy.array",... | [((13424, 13451), 'numpy.all', 'np.all', (['filter_list'], {'axis': '(0)'}), '(filter_list, axis=0)\n', (13430, 13451), True, 'import numpy as np\n'), ((17126, 17197), 'os.path.join', 'os.path.join', (['mvn_root_data_dir', '"""maven"""', '"""data"""', '"""sci"""', '"""kp"""', '"""insitu"""'], {}), "(mvn_root_data_dir, 'maven', 'data', 'sci', 'kp', 'insitu')\n", (17138, 17197), False, 'import os\n'), ((20062, 20131), 'os.path.join', 'os.path.join', (['mvn_root_data_dir', '"""maven"""', '"""data"""', '"""sci"""', '"""kp"""', '"""iuvs"""'], {}), "(mvn_root_data_dir, 'maven', 'data', 'sci', 'kp', 'iuvs')\n", (20074, 20131), False, 'import os\n'), ((23336, 23409), 'os.path.join', 'os.path.join', (['mvn_root_data_dir', '"""maven"""', '"""data"""', '"""sci"""', 'instrument', '"""l2"""'], {}), "(mvn_root_data_dir, 'maven', 'data', 'sci', instrument, 'l2')\n", (23348, 23409), False, 'import os\n'), ((23698, 23739), 'os.path.join', 'os.path.join', (['maven_data_dir', 'year', 'month'], {}), '(maven_data_dir, year, month)\n', (23710, 23739), False, 'import os\n'), ((23747, 23772), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (23761, 23772), False, 'import os\n'), ((16261, 16280), 're.search', 're.search', (['index', 'i'], {}), '(index, i)\n', (16270, 16280), False, 'import re\n'), ((17406, 17423), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (17415, 17423), False, 'from datetime import timedelta\n'), ((17743, 17784), 'os.path.join', 'os.path.join', (['maven_data_dir', 'year', 'month'], {}), '(maven_data_dir, year, month)\n', (17755, 17784), False, 'import os\n'), ((17796, 17821), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (17810, 17821), False, 'import os\n'), ((20355, 20372), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (20364, 20372), False, 'from datetime import timedelta\n'), ((20697, 20738), 'os.path.join', 'os.path.join', 
(['maven_data_dir', 'year', 'month'], {}), '(maven_data_dir, year, month)\n', (20709, 20738), False, 'import os\n'), ((20750, 20775), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (20764, 20775), False, 'import os\n'), ((23791, 23812), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (23801, 23812), False, 'import os\n'), ((31456, 31481), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (31471, 31481), False, 'import os\n'), ((33542, 33557), 'numpy.max', 'np.max', (['x_array'], {}), '(x_array)\n', (33548, 33557), True, 'import numpy as np\n'), ((33574, 33589), 'numpy.min', 'np.min', (['x_array'], {}), '(x_array)\n', (33580, 33589), True, 'import numpy as np\n'), ((33606, 33621), 'numpy.max', 'np.max', (['y_array'], {}), '(y_array)\n', (33612, 33621), True, 'import numpy as np\n'), ((33638, 33653), 'numpy.min', 'np.min', (['y_array'], {}), '(y_array)\n', (33644, 33653), True, 'import numpy as np\n'), ((33670, 33685), 'numpy.max', 'np.max', (['z_array'], {}), '(z_array)\n', (33676, 33685), True, 'import numpy as np\n'), ((33702, 33717), 'numpy.min', 'np.min', (['z_array'], {}), '(z_array)\n', (33708, 33717), True, 'import numpy as np\n'), ((3283, 3307), 'numpy.nanmin', 'np.nanmin', (["[kp['Orbit']]"], {}), "([kp['Orbit']])\n", (3292, 3307), True, 'import numpy as np\n'), ((3309, 3333), 'numpy.nanmax', 'np.nanmax', (["[kp['Orbit']]"], {}), "([kp['Orbit']])\n", (3318, 3333), True, 'import numpy as np\n'), ((17573, 17590), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (17582, 17590), False, 'from datetime import timedelta\n'), ((18014, 18035), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (18024, 18035), False, 'import os\n'), ((20527, 20544), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (20536, 20544), False, 'from datetime import timedelta\n'), ((20886, 20907), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', 
(20896, 20907), False, 'import os\n'), ((24927, 24973), 're.search', 're.search', (['"""Number of parameter columns"""', 'line'], {}), "('Number of parameter columns', line)\n", (24936, 24973), False, 'import re\n'), ((3346, 3372), 'numpy.nanmax', 'np.nanmax', (["[iuvs['Orbit']]"], {}), "([iuvs['Orbit']])\n", (3355, 3372), True, 'import numpy as np\n'), ((3389, 3415), 'numpy.nanmin', 'np.nanmin', (["[iuvs['Orbit']]"], {}), "([iuvs['Orbit']])\n", (3398, 3415), True, 'import numpy as np\n'), ((21579, 21600), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (21589, 21600), False, 'import os\n'), ((22223, 22244), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (22233, 22244), False, 'import os\n'), ((25186, 25230), 're.search', 're.search', (['"""Line on which data begins"""', 'line'], {}), "('Line on which data begins', line)\n", (25195, 25230), False, 'import re\n'), ((34223, 34241), 'numpy.abs', 'np.abs', (['(dims_x - x)'], {}), '(dims_x - x)\n', (34229, 34241), True, 'import numpy as np\n'), ((34486, 34504), 'numpy.abs', 'np.abs', (['(dims_y - y)'], {}), '(dims_y - y)\n', (34492, 34504), True, 'import numpy as np\n'), ((34749, 34767), 'numpy.abs', 'np.abs', (['(dims_z - z)'], {}), '(dims_z - z)\n', (34755, 34767), True, 'import numpy as np\n'), ((2326, 2352), 'numpy.array', 'np.array', (["kp['TimeString']"], {}), "(kp['TimeString'])\n", (2334, 2352), True, 'import numpy as np\n'), ((2357, 2383), 'numpy.array', 'np.array', (["kp['TimeString']"], {}), "(kp['TimeString'])\n", (2365, 2383), True, 'import numpy as np\n'), ((2475, 2496), 'numpy.array', 'np.array', (["kp['Orbit']"], {}), "(kp['Orbit'])\n", (2483, 2496), True, 'import numpy as np\n'), ((2501, 2522), 'numpy.array', 'np.array', (["kp['Orbit']"], {}), "(kp['Orbit'])\n", (2509, 2522), True, 'import numpy as np\n'), ((19160, 19206), 'os.path.join', 'os.path.join', (['full_path', 'most_recent_insitu[0]'], {}), '(full_path, most_recent_insitu[0])\n', (19172, 19206), False, 
'import os\n'), ((19790, 19838), 'os.path.join', 'os.path.join', (['full_path', 'most_recent_c_insitu[0]'], {}), '(full_path, most_recent_c_insitu[0])\n', (19802, 19838), False, 'import os\n'), ((23901, 23927), 'os.path.join', 'os.path.join', (['full_path', 'f'], {}), '(full_path, f)\n', (23913, 23927), False, 'import os\n'), ((25317, 25351), 're.search', 're.search', (['"""Number of lines"""', 'line'], {}), "('Number of lines', line)\n", (25326, 25351), False, 'import re\n'), ((28653, 28709), 're.match', 're.match', (['"""(Electron|Spacecraft)(.+)Quality"""', 'combo_name'], {}), "('(Electron|Spacecraft)(.+)Quality', combo_name)\n", (28661, 28709), False, 'import re\n'), ((33354, 33372), 'numpy.abs', 'np.abs', (['(dims_x - x)'], {}), '(dims_x - x)\n', (33360, 33372), True, 'import numpy as np\n'), ((33399, 33417), 'numpy.abs', 'np.abs', (['(dims_y - y)'], {}), '(dims_y - y)\n', (33405, 33417), True, 'import numpy as np\n'), ((33444, 33462), 'numpy.abs', 'np.abs', (['(dims_z - z)'], {}), '(dims_z - z)\n', (33450, 33462), True, 'import numpy as np\n'), ((40902, 40955), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in temp_labels)'], {}), '((x, []) for x in temp_labels)\n', (40925, 40955), False, 'import collections\n'), ((40994, 41047), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in temp_labels)'], {}), '((x, []) for x in temp_labels)\n', (41017, 41047), False, 'import collections\n'), ((42442, 42503), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in scale_height_labels)'], {}), '((x, []) for x in scale_height_labels)\n', (42465, 42503), False, 'import collections\n'), ((42543, 42604), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in scale_height_labels)'], {}), '((x, []) for x in scale_height_labels)\n', (42566, 42604), False, 'import collections\n'), ((44010, 44066), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], 
{}), '((x, []) for x in density_labels)\n', (44033, 44066), False, 'import collections\n'), ((44925, 44981), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], {}), '((x, []) for x in density_labels)\n', (44948, 44981), False, 'import collections\n'), ((45759, 45815), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], {}), '((x, []) for x in density_labels)\n', (45782, 45815), False, 'import collections\n'), ((46760, 46817), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (46783, 46817), False, 'import collections\n'), ((47679, 47736), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (47702, 47736), False, 'import collections\n'), ((48520, 48577), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (48543, 48577), False, 'import collections\n'), ((2937, 2958), 'numpy.array', 'np.array', (["kp['Orbit']"], {}), "(kp['Orbit'])\n", (2945, 2958), True, 'import numpy as np\n'), ((2963, 2984), 'numpy.array', 'np.array', (["kp['Orbit']"], {}), "(kp['Orbit'])\n", (2971, 2984), True, 'import numpy as np\n'), ((3191, 3214), 'numpy.array', 'np.array', (["iuvs['Orbit']"], {}), "(iuvs['Orbit'])\n", (3199, 3214), True, 'import numpy as np\n'), ((3219, 3242), 'numpy.array', 'np.array', (["iuvs['Orbit']"], {}), "(iuvs['Orbit'])\n", (3227, 3242), True, 'import numpy as np\n'), ((18765, 18798), 're.search', 're.search', (['"""v\\\\d{2}"""', 'insitu_file'], {}), "('v\\\\d{2}', insitu_file)\n", (18774, 18798), False, 'import re\n'), ((19379, 19414), 're.search', 're.search', (['"""v\\\\d{2}"""', 'c_insitu_file'], {}), "('v\\\\d{2}', c_insitu_file)\n", (19388, 19414), False, 'import re\n'), ((25002, 25026), 're.split', 're.split', 
(['"""\\\\s{3}"""', 'line'], {}), "('\\\\s{3}', line)\n", (25010, 25026), False, 'import re\n'), ((25429, 25457), 're.search', 're.search', (['"""PARAMETER"""', 'line'], {}), "('PARAMETER', line)\n", (25438, 25457), False, 'import re\n'), ((29222, 29258), 're.match', 're.match', (['""".+Parallel.+"""', 'combo_name'], {}), "('.+Parallel.+', combo_name)\n", (29230, 29258), False, 'import re\n'), ((29480, 29523), 're.match', 're.match', (['"""Flux, e-(.+)Quality"""', 'combo_name'], {}), "('Flux, e-(.+)Quality', combo_name)\n", (29488, 29523), False, 'import re\n'), ((29889, 29939), 're.match', 're.match', (['"""Electron eflux (.+)Quality"""', 'combo_name'], {}), "('Electron eflux (.+)Quality', combo_name)\n", (29897, 29939), False, 'import re\n'), ((49881, 49942), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in scale_height_labels)'], {}), '((x, []) for x in scale_height_labels)\n', (49904, 49942), False, 'import collections\n'), ((49982, 50043), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in scale_height_labels)'], {}), '((x, []) for x in scale_height_labels)\n', (50005, 50043), False, 'import collections\n'), ((51451, 51509), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in retrieval_labels)'], {}), '((x, []) for x in retrieval_labels)\n', (51474, 51509), False, 'import collections\n'), ((52378, 52436), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in retrieval_labels)'], {}), '((x, []) for x in retrieval_labels)\n', (52401, 52436), False, 'import collections\n'), ((53224, 53282), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in retrieval_labels)'], {}), '((x, []) for x in retrieval_labels)\n', (53247, 53282), False, 'import collections\n'), ((29325, 29359), 're.match', 're.match', (['""".+Anti-par"""', 'combo_name'], {}), "('.+Anti-par', combo_name)\n", (29333, 29359), False, 'import re\n'), ((54505, 54567), 'collections.OrderedDict', 
'collections.OrderedDict', (['((x, []) for x in half_int_dist_labels)'], {}), '((x, []) for x in half_int_dist_labels)\n', (54528, 54567), False, 'import collections\n'), ((54608, 54670), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in half_int_dist_labels)'], {}), '((x, []) for x in half_int_dist_labels)\n', (54631, 54670), False, 'import collections\n'), ((56062, 56118), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], {}), '((x, []) for x in density_labels)\n', (56085, 56118), False, 'import collections\n'), ((56963, 57019), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], {}), '((x, []) for x in density_labels)\n', (56986, 57019), False, 'import collections\n'), ((57786, 57842), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in density_labels)'], {}), '((x, []) for x in density_labels)\n', (57809, 57842), False, 'import collections\n'), ((58765, 58822), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (58788, 58822), False, 'import collections\n'), ((59673, 59730), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (59696, 59730), False, 'import collections\n'), ((60503, 60560), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (60526, 60560), False, 'import collections\n'), ((9774, 9815), 'datetime.datetime.strptime', 'datetime.strptime', (['i', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(i, '%Y-%m-%d %H:%M:%S')\n", (9791, 9815), False, 'from datetime import datetime\n'), ((9932, 9973), 'datetime.datetime.strptime', 'datetime.strptime', (['i', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(i, '%Y-%m-%dT%H:%M:%S')\n", (9949, 9973), False, 'from datetime import datetime\n'), 
((18871, 18894), 're.search', 're.search', (['"""r\\\\d{2}"""', 'k'], {}), "('r\\\\d{2}', k)\n", (18880, 18894), False, 'import re\n'), ((19489, 19512), 're.search', 're.search', (['"""r\\\\d{2}"""', 'k'], {}), "('r\\\\d{2}', k)\n", (19498, 19512), False, 'import re\n'), ((25265, 25289), 're.split', 're.split', (['"""\\\\s{3}"""', 'line'], {}), "('\\\\s{3}', line)\n", (25273, 25289), False, 'import re\n'), ((25381, 25405), 're.split', 're.split', (['"""\\\\s{3}"""', 'line'], {}), "('\\\\s{3}', line)\n", (25389, 25405), False, 'import re\n'), ((25686, 25711), 're.match', 're.match', (['col_regex', 'line'], {}), '(col_regex, line)\n', (25694, 25711), False, 'import re\n'), ((29594, 29617), 're.compile', 're.compile', (['"""Flux, e- """'], {}), "('Flux, e- ')\n", (29604, 29617), False, 'import re\n'), ((29761, 29784), 're.compile', 're.compile', (['"""Flux, e- """'], {}), "('Flux, e- ')\n", (29771, 29784), False, 'import re\n'), ((30010, 30039), 're.compile', 're.compile', (['"""Electron eflux """'], {}), "('Electron eflux ')\n", (30020, 30039), False, 'import re\n'), ((30190, 30219), 're.compile', 're.compile', (['"""Electron eflux """'], {}), "('Electron eflux ')\n", (30200, 30219), False, 'import re\n'), ((62612, 62669), 'collections.OrderedDict', 'collections.OrderedDict', (['((x, []) for x in radiance_labels)'], {}), '((x, []) for x in radiance_labels)\n', (62635, 62669), False, 'import collections\n'), ((25792, 25823), 're.findall', 're.findall', (['"""(.{16})"""', 'line[3:]'], {}), "('(.{16})', line[3:])\n", (25802, 25823), False, 'import re\n')] |
import numpy as np
class StoneAgeGame(object):
    """A stripped-down, two-player Stone Age-style worker-placement game.

    Players 1 and 2 alternately place their meeples (workers) on four board
    spots -- farm (0), mating hut (1), wood (2) and food (3) -- during phase 1,
    then resolve the placed meeples during phase 2.  When phase 2 finishes,
    workers are fed and a new round starts.  Choices come from ``policy`` and
    the resulting (state, action, reward) triples are recorded for training.
    """
    def __init__(self, policy, player_types):
        # TODO: Keep food limit in mind for the GUI
        # TODO: Clean this in initialization
        # Capacity of each board spot (how many meeples fit on it).
        self.placements_max = {'Farm': 1, 'Mating': 2, 'Wood': 7, 'Food': 20}
        self.spots_to_index = {'Farm': 0, 'Mating': 1, 'Wood': 2, 'Food': 3}
        # spots[i][j] holds the owning player id (1 or 2), or 0 when free.
        self.spots = [[0] * x for x in self.placements_max.values()]
        # self.meeple_player_sources = {1: 'red_meeple.png', 2: 'blue_meeple.png'}
        # base_players keeps the round's turn order; players is the working
        # copy that shrinks as players finish the current phase.
        self.base_players = [1, 2]
        self.players = self.base_players.copy()
        self.player_types = player_types
        self.phase = 1
        self.round = 1
        # Starting player is drawn at random.
        self.current_player = np.random.choice(self.players)
        # print('Starting player: ', self.current_player)
        # Per-player resources, indexed by (player id - 1).
        self.farms = [0, 0]
        self.meeples = [5, 5]
        self.food = [12, 12]
        self.points = [0, 0]
        # Meeples left to place (phase 1) and placed spots left to resolve
        # (phase 2) for the current player.
        self.placements = 5
        self.actions = 0
        # S/A/R history recorded by track() for policy training.
        self.states_list, self.action_list, self.reward_list = [], [], []
        self.policy = policy

    def track(self, state, choice, reward):
        """
        This function tracks the S, A and R for the game.
        :param state: State before evolution
        :param choice: Choice made with the policy, state and possible actions
        :param reward: Reward gained from making choice in state
        :return:
        """
        # TODO: Only track the points of one player
        # TODO: Fix tracking on phase switches
        self.states_list.append(state)
        self.action_list.append(choice)
        self.reward_list.append(reward)

    def end_of_game(self):
        """Return True once round 10 has been completed."""
        return self.round > 10

    def play(self, **kwargs):
        """
        Advance the game by one choice of the current player.

        This functions needs to work when this object is cast to the StoneAgeGUI-Class and allow for both player and AI:
        a human move may be supplied via kwargs['Choice'], otherwise the choice is taken from the policy.
        :param self:
        :return:
        """
        state = self.get_state()
        if 'Choice' in kwargs:
            choice = kwargs['Choice']
        else:  # TODO: Might not work when AI has its turn and player presses
            possible_actions = self.check_possible_actions(state)
            choice = self.policy.take_choice(state, possible_actions)
        # print('CHOICE', choice)
        new_state, reward, done = self.step(choice)
        if done:
            # print('GAME OVER')
            highest_player = np.argmax(self.points)
            # Terminal bonus/penalty from player 1's perspective.
            if self.points[0] > self.points[1]:
                reward += 100
            elif self.points[0] < self.points[1]:
                reward -= 100
            else:
                reward += 0
            # print(highest_player, ' HAS WON!!!')
        # print('NEW_STATE', new_state)
        self.track(state, choice, reward)
        # while not self.end_of_game():  # TODO: Used to be while for training

    def step(self, action):
        """Apply ``action`` for the current player in the current phase.

        Returns (new_state, reward, done).  Note that ``reward`` stays 0 in the
        active code path here (the opponent/self-play reward accumulation below
        is commented out via the bare string literal).
        """
        reward = 0
        # if self.current_player == 1:
        if self.phase == 1:  # Evolve state P1
            self.place_meeple(action)
        elif self.phase == 2:  # Evolve state P2
            self.take_action(action)
            # reward += self.take_action(action)
        # print('Reward after own', reward)
        # TODO: Do something about commenting/uncommenting below when playing/training
        """
        while self.current_player == 2:
            # Opponent using same policy
            state = self.get_state()
            possible_actions = self.check_possible_actions(state)
            action = self.policy.take_choice(state, possible_actions)
            # print('Chosen Action opponent by policy', action)
            if self.phase == 1: # Evolve state P1
                self.place_meeple(action)
                reward -= 0
            elif self.phase == 2: # Evolve state P2
                opponent_reward = self.take_action(action)
                # print('Current player in current player == 2 and self.phase == 2', self.current_player)
                # print('Opponent reward', opponent_reward)
                reward += -1*opponent_reward
        # print('Reward before return', reward)
        """
        return self.get_state(), reward, self.end_of_game()

    def get_state(self):
        """
        Build the game state as a dict, seen from the current player's perspective
        ('Self*' keys are the current player, 'Opp*' the other one).
        :return: State of the game
        """
        # Future: Use game state to make GUI
        board_states = []
        # board_states = [self farm/hut/chop/hunt counts, then opponent's].
        if self.current_player == 1:
            self_ind = 0
            opp_ind = 1
            for player in [1, 2]:
                board_states.extend([self.spots[choice].count(player) for choice in range(0, 4)])
        elif self.current_player == 2:
            self_ind = 1
            opp_ind = 0
            for player in [2, 1]:
                board_states.extend([self.spots[choice].count(player) for choice in range(0, 4)])
        # 'Round': self.round,
        # 'Player': self.current_player,
        # 'Phase': self.phase
        state = {'Placements': self.placements, 'Actions': self.actions,
                 'SFood': self.food_state_encoding(self_ind), 'OFood': self.food_state_encoding(opp_ind),
                 'SelfAgri': self.farms[self_ind], 'OppAgri': self.farms[opp_ind],
                 'SelfWorkers': self.meeples[self_ind], 'OppWorkers': self.meeples[opp_ind],
                 'SelfFarm': board_states[0], 'OppFarm': board_states[4],
                 'SelfHut': board_states[1], 'OppHut': board_states[5],
                 'SelfChop': board_states[2], 'OppChop': board_states[6],
                 'SelfHunt': board_states[3], 'OppHunt': board_states[7]}
        return state

    def food_state_encoding(self, ind):
        """Bucket the food stock of player index ``ind`` into 0..3 (<4, <8, <12, >=12)."""
        food = self.food[ind]
        if food < 4:
            food_code = 0
        elif food < 8:
            food_code = 1
        elif food < 12:
            food_code = 2
        else:
            food_code = 3
        return food_code

    def check_possible_actions(self, state):
        """
        Return the list of legal choices (subset of [0, 1, 2, 3]) for ``state``.

        Notes for this important function:
        - Implement the 'no-meeples-left' by simply auto-evolving upon reaching min placements or 'actions' (lifts?/takes?)
        If len(possible_states) == 0 -> evolve?
        """
        if self.phase == 1:
            actions = [0, 1, 2, 3]
            # Function for checking the patch is not 'full'
            if (state['SelfFarm'] + state['OppFarm']) == 1:
                actions.remove(0)
            if (state['SelfHut'] + state['OppHut']) == 2:
                actions.remove(1)
            if (state['SelfChop'] + state['OppChop']) == 7:
                actions.remove(2)
            # No restriction for food
            # TODO: Might want to let the game learn that there are no benefits here beyond 10.
            # Below try/except is because above might've already removed
            # TODO: Does this work for opposite now as well?
            # NOTE(review): 'SelfFarm' is the meeple count on the farm spot (max 1
            # by placements_max); the farm *level*, capped at 10 in evolve_state,
            # is 'SelfAgri' -- this check looks like it should read
            # state['SelfAgri'] == 10.  Confirm intended behaviour.
            if state['SelfFarm'] == 10:
                try:
                    actions.remove(0)
                except ValueError:
                    pass
            if state['SelfWorkers'] == 10:
                try:
                    actions.remove(1)
                except ValueError:
                    pass
        elif self.phase == 2:  # Make sure actions without meeples are not being picked
            # Function for checking where your meeples are, using spots!
            placement_counts = [self.spots[choice].count(self.current_player) for choice in range(0, 4)]
            actions = [action for action, placement_count in enumerate(placement_counts) if placement_count != 0]
        else:
            raise Exception
        return actions

    def place_meeple(self, choice):
        """Place one meeple of the current player on spot ``choice`` (phase 1).

        If the spot is full the placement is silently skipped.  When the
        player's placements run out, the phase evolves.
        """
        if self.placements > 0:
            sub_spots = self.spots[choice]  # List of fills with that choice
            if 0 in sub_spots:
                self.placements -= 1  # If meeple placeable -> 1 less placements
                first_zero_index = [i for i, x in enumerate(self.spots[choice]) if x == 0][0]
                self.spots[choice][first_zero_index] = self.current_player
            else:
                pass
        if self.placements == 0:
            self.evolve_phase()

    def take_action(self, choice):
        """
        Functions which counts the number of actions left for active player.
        Then counts the number of placements on different spots for the active player.
        If the actions is bigger than zero, these choices are put into actions, evolving game state.
        If actions are zero -> next phase
        :param choice: Chosen choice by policy
        """
        # NOTE(review): when actions remain after evolve_state, the returned
        # reward can be None (evolve_state returns nothing for choices 0/1).
        # The active caller in step() discards the return value.
        if self.actions > 0:
            placements = self.spots[choice].count(self.current_player)  # Number of meeples on that choice
            if placements > 0:
                # self.reset_info()
                pass
            reward = self.evolve_state(choice, placements)
        if self.actions == 0:
            reward = self.evolve_phase()
        return reward

    def evolve_state(self, choice, placements):
        """
        Function which maps choice taken to evolutions in game state
        :param choice: Chosen choice by policy
        :param placements: Number of placements, since some choice are number of places dependent
        """
        # Choice mapping: 0 = farm level (+1, cap 10), 1 = mating hut (+1 meeple
        # if 2 meeples placed, cap 10), 2 = wood (-> points), 3 = food.
        # NOTE(review): choices 0 and 1 neither clear their spots nor consume an
        # action, and for choice 2 only player 1's meeples are removed from the
        # board -- confirm this asymmetry is intended.
        if placements > 0:
            if choice == 0:
                farm_var = self.farms[self.current_player - 1]
                if farm_var < 10:
                    self.farms[self.current_player - 1] += 1
                else:
                    pass
            elif choice == 1:
                if placements == 2:
                    meeple_var = self.meeples[self.current_player - 1]
                    if meeple_var < 10:
                        self.meeples[self.current_player - 1] += 1
                    else:
                        pass
            elif choice == 2:
                self.points[self.current_player - 1] += placements
                if self.current_player == 1:
                    self.clean_action_source_spots(choice)
                return placements
            elif choice == 3:
                self.food[self.current_player - 1] += placements
                self.clean_action_source_spots(choice)
                return 0
        else:
            # self.info = "Can't take that action"
            pass

    def clean_action_source_spots(self, choice):
        """Remove the current player's meeples from spot ``choice`` and consume one action."""
        self.actions -= 1
        self.spots[choice] = [0 if x == self.current_player else x for x in self.spots[choice]]

    def feed(self):
        """Feed every worker at the end of a round.

        Each player pays one food per meeple, minus one per farm level.  A
        player that cannot pay loses 10 points; the returned reward mirrors the
        penalty for player 1 only.  Food is clamped at 0 afterwards.
        """
        self.food = [self.food[i] - self.meeples[i] + self.farms[i] for i, x in enumerate(self.food)]
        self.points = [self.points[i] - 10 if self.food[i] < 0 else self.points[i] for i, x in enumerate(self.food)]
        if self.food[0] < 0:
            reward = -10
        else:
            reward = 0
        if self.food[0] < 0:
            self.food[0] = 0
        if self.food[1] < 0:
            self.food[1] = 0
        return reward

    def evolve_phase(self):
        """Hand the turn to the next player, or advance phase/round when all are done.

        Returns the feeding reward (non-zero only when the round ends and
        player 1 starved), otherwise 0.
        """
        reward = 0
        self.players.remove(self.current_player)
        if len(self.players) > 0:
            # Someone still has to act in this phase.
            self.current_player = self.players[0]
            if self.phase == 1:
                self.placements = self.meeples[self.current_player - 1]
            elif self.phase == 2:
                self.actions = sum(self.current_player in sub for sub in self.spots)
        else:
            # Phase finished for everyone: move to the next phase.
            self.phase += 1
            self.players = self.base_players.copy()
            self.current_player = self.players[0]
            self.actions = sum(self.current_player in sub for sub in self.spots)
            if self.phase == 3:
                # End of round: feed workers and rotate the starting player.
                reward = self.feed()
                self.phase = 1
                self.base_players = self.base_players[1:] + self.base_players[:1]
                self.round += 1
                self.players = self.base_players.copy()
                self.current_player = self.players[0]
                self.placements = self.meeples[self.current_player - 1]
        return reward
| [
"numpy.argmax",
"numpy.random.choice"
] | [((718, 748), 'numpy.random.choice', 'np.random.choice', (['self.players'], {}), '(self.players)\n', (734, 748), True, 'import numpy as np\n'), ((2428, 2450), 'numpy.argmax', 'np.argmax', (['self.points'], {}), '(self.points)\n', (2437, 2450), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
class ORACCDataset(torch.utils.data.Dataset):
    """Torch dataset over the non-empty lines of a text file.

    Each line is tokenized (padded/truncated to ``block_size``) and given
    per-token labels built by :func:`get_enc_labels`, where positions to be
    ignored during training carry the value -100.
    """

    def __init__(self, file_path, tokenizer, block_size, missing_sign_encoding: int,
                 encode_only_first_token_in_word: bool = False,
                 ignore_missing=True):
        # Fail fast if the input file does not exist.
        assert os.path.isfile(file_path), f"Input file path {file_path} not found"

        # Keep only lines that contain non-whitespace content.
        with open(file_path, 'r', encoding="utf-8") as f:
            lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]

        # Tokenize all lines in one batch call; presumably a Hugging Face
        # tokenizer (the result must expose .encodings and .items()) -- confirm.
        self.batch_encodings = tokenizer(lines, add_special_tokens=True, padding=True, truncation=True, max_length=block_size)
        self.labels = get_enc_labels(
            encodings=self.batch_encodings,
            missing_sign_encoding=missing_sign_encoding,
            encode_only_first_token_in_word=encode_only_first_token_in_word,
            ignore_missing=ignore_missing
        )

    def __getitem__(self, idx):
        """Return sample ``idx``: every encoding field plus its 'labels' tensor."""
        item = {key: torch.tensor(val[idx]) for key, val in self.batch_encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        """Number of (non-empty) lines read from the source file."""
        return len(self.labels)
def get_enc_labels(encodings, missing_sign_encoding: int, encode_only_first_token_in_word: bool = False,
                   ignore_missing=True):
    """Build per-token label lists for every encoding in a tokenized batch.

    Positions that should be ignored during training are set to -100; all
    other positions carry the token id itself.

    :param encodings: batch object exposing ``.encodings``, each with
        ``.offsets`` (list of (start, end) pairs) and ``.ids`` (token ids)
    :param missing_sign_encoding: token id treated as "missing sign"
    :param encode_only_first_token_in_word: keep only a word's first sub-token
        (offset starting at 0 with a non-zero end)
    :param ignore_missing: mask out tokens whose id equals
        ``missing_sign_encoding``
    :return: list of label lists, one per encoding
    """
    all_labels = []
    for encoding in encodings.encodings:
        ids = np.array(encoding.ids)
        offsets = np.array(encoding.offsets)
        n_tokens = len(encoding.offsets)

        # Boolean mask of the positions whose label is kept.
        keep = np.ones(n_tokens, dtype=bool)
        if ignore_missing:
            keep &= ids != missing_sign_encoding
        if encode_only_first_token_in_word:
            keep &= (offsets[:, 0] == 0) & (offsets[:, 1] != 0)

        # Start from all-ignored (-100) and fill in the kept token ids.
        row = np.full(n_tokens, -100, dtype=int)
        row[keep] = ids[keep]
        all_labels.append(row.tolist())
    return all_labels
| [
"os.path.isfile",
"numpy.array",
"torch.tensor"
] | [((294, 319), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (308, 319), False, 'import os\n'), ((1068, 1098), 'torch.tensor', 'torch.tensor', (['self.labels[idx]'], {}), '(self.labels[idx])\n', (1080, 1098), False, 'import torch\n'), ((974, 996), 'torch.tensor', 'torch.tensor', (['val[idx]'], {}), '(val[idx])\n', (986, 996), False, 'import torch\n'), ((1562, 1586), 'numpy.array', 'np.array', (['labels.offsets'], {}), '(labels.offsets)\n', (1570, 1586), True, 'import numpy as np\n'), ((1588, 1608), 'numpy.array', 'np.array', (['labels.ids'], {}), '(labels.ids)\n', (1596, 1608), True, 'import numpy as np\n')] |
import argparse
from datetime import datetime
import pandas as pd
import re
import os
from tabulate import tabulate
from ast import literal_eval
import numpy as np
def init_data(
) -> pd.DataFrame:
    """
    Return
    -------
    Plan: pd.DataFrame. Item of the planner

    Notes
    -------
    Loads the plan from "<cwd>/../data/data.csv".  When the "data" folder
    and/or "data.csv" are missing they are created and an empty plan (with
    the canonical columns) is written and returned.
    """
    # Canonical planner columns.
    columns = ["title", "note", "date", "tags"]
    empty_plan = pd.DataFrame(columns=columns)

    # Resolve "<cwd>/../data/data.csv".
    current_dir = os.path.abspath(os.getcwd())
    data_dir = os.path.abspath(os.path.join(current_dir, "..", "data"))
    data_file = os.path.abspath(os.path.join(data_dir, "data.csv"))

    # Missing folder: create it together with an empty data file.
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
        empty_plan.to_csv(data_file, index=False)
        return empty_plan
    # Folder exists but the data file does not: seed it.
    if not os.path.exists(data_file):
        empty_plan.to_csv(data_file, index=False)
        return empty_plan
    # Both exist: load the stored plan.
    return pd.read_csv(data_file, index_col=False)
def update_data(
        plan: pd.DataFrame
):
    """
    Parameters
    ----------
    plan: pd.DataFrame. Contains all the notes

    Notes
    ----
    Sorts the plan chronologically and overwrites the stored copy in
    "<cwd>/../data/data.csv".  The "date" column of the caller's frame is
    normalised to ``datetime.date`` objects as a side effect.
    """
    # Resolve "<cwd>/../data/data.csv".
    data_path = os.path.abspath(
        os.path.join(os.path.abspath(os.getcwd()), "..", "data", "data.csv")
    )
    # Normalise dates (unparseable values become NaT) and sort by them.
    parsed_dates = pd.to_datetime(plan["date"], format = '%Y-%m-%d', errors='coerce')
    plan["date"] = parsed_dates.dt.date
    # Persist the chronologically sorted plan.
    plan.sort_values(by="date").to_csv(data_path, index=False)
def add_note(
        args: argparse.Namespace,  # parser arguments
        plan: pd.DataFrame  # DataFrame to be updated
):
    """
    Parameters
    ----------
    args: argparse.Namespace. Contains one value per plan column
        ("title", "note", "date" and "tags")
    plan: pd.DataFrame. Contains all the notes

    Notes
    -----
    Builds a one-row frame from ``args``, appends it to the planner and
    persists the result via ``update_data``.  The "tags" cell is stored as a
    list (read it back with ``ast.literal_eval``).

    Warnings
    --------
    This function must be updated everytime the columns of the plan are changed
    """
    item = {}
    for name in plan.columns:
        if str(name) != "tags":
            item[str(name)] = vars(args)[str(name)]
    # Placeholder: a pd.DataFrame cannot be initialised with a nested (list)
    # cell directly, so the real tag list is written with .at below.
    item["tags"] = "..."
    data = pd.DataFrame(item, index=[0])
    data.at[0, "tags"] = vars(args)[str("tags")]  # use literal_eval('[1.23, 2.34]') to read this data
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported
    # replacement and behaves identically here.
    plan = pd.concat([plan, data])
    update_data(plan)
def add_note_verbose(
        plan: pd.DataFrame  # DataFrame to be updated
):
    """
    Parameters
    ----------
    plan: pd.DataFrame

    Notes
    -----
    Interactively asks for title, note, date and tags on stdin, appends the
    resulting note to the planner and persists it via ``update_data``.
    Convenient for larger notes or notes with tags.

    Warnings
    --------
    This function must be updated everytime the columns of the plan are changed
    """
    item = {}  # initializing the new note
    # title
    item["title"] = input("Please, insert the title: ")
    # body
    item["note"] = input("It's time to write your note: ")
    # date; an empty answer means "today"
    date = input("Insert the date 'Y-m-d'. Press Enter to use the current date: ")
    if date == '':
        date = datetime.today().strftime('%Y-%m-%d')
    item["date"] = date
    # tags, split on any non-word character (spaces or commas both work)
    tags = input("Insert the tags (separated by a space or a comma): ")
    # Placeholder: a pd.DataFrame cannot be initialised with a nested (list)
    # cell directly, so the real tag list is written with .at below.
    item["tags"] = "..."
    data = pd.DataFrame(item, index=[0])
    data.at[0, "tags"] = re.sub(r"[^\w]", " ", tags).split()  # use literal_eval(list in format <str>) to read this
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported
    # replacement and behaves identically here.
    plan = pd.concat([plan, data])
    update_data(plan)
def print_planner(
        plan: pd.DataFrame
):
    """
    Parameters
    ----------
    plan: pd.DataFrame. Contains the notes to display (possibly a filtered subset)

    Notes
    ----
    Pretty-prints the planner as a table on the terminal.  The "tags" column
    is converted in place from its stored representation (a list, or the
    string repr of a list) to a comma-separated string first.
    """
    # Actual index labels: the frame may be a filtered view of the full plan
    # (e.g. from search_word), so positions and labels can differ.
    idx_plan = plan.index.to_list()
    # Normalise every "tags" cell to a readable comma-separated string; it
    # accepts both a list and a string convertible to a list.
    for i, tag in enumerate(plan["tags"]):
        if type(tag) is str:
            plan.at[idx_plan[i], "tags"] = ', '.join(literal_eval(plan.at[idx_plan[i], 'tags']))
        elif type(tag) is list:
            plan.at[idx_plan[i], "tags"] = ', '.join(tag)
    # Derive the headers from the actual columns instead of hard-coding
    # exactly four names: the table keeps working if the plan schema changes.
    print(tabulate(plan,
                   headers=list(plan.columns),
                   tablefmt="fancy_grid",
                   showindex=False))
def search_word(
        args: argparse.Namespace,  # parser arguments
        plan: pd.DataFrame  # DataFrame where search -word-
) -> pd.DataFrame:  # DataFrame with only the notes with -word-
    """Return the subset of ``plan`` whose title or note contains ``args.word``.

    The comparison is case-insensitive and matches substrings.
    """
    needle = args.word.lower()

    def _matches(row) -> bool:
        # Columns 0 and 1 are the title and the note.
        return needle in str(row[0]).lower() or needle in str(row[1]).lower()

    # Iterate the raw values (faster than row-wise DataFrame iteration) and
    # collect the positional indices of the matching rows.
    matching_rows = [idx for idx, row in enumerate(plan.values) if _matches(row)]
    return plan.iloc[matching_rows, :]
def search_by_tag(
        args: argparse.Namespace,  # parser arguments
        plan: pd.DataFrame  # DataFrame where search the tags
) -> pd.DataFrame:  # DataFrame with only the notes with the correct tags
    """
    Parameters
    ----------
    plan: pd.DataFrame. Contains all the notes
    args: parse.parse_args(). ``args.tags`` holds the tags every returned note
        must contain, ``args.notag`` the tags no returned note may contain.
        Both are single strings; commas or spaces work as separators.

    Returns
    -------
    selected_plan : pd.DataFrame. Contains all the notes with the searched tags

    Notes
    ----
    The function returns a pd.DataFrame with all the notes that contain the
    -tags- and do not contain the -notags-.  Unlike the previous
    implementation, the caller's DataFrame is left untouched: the stored tag
    strings are parsed into local variables instead of being written back
    into ``plan["tags"].values`` (which silently mutated the caller's frame).
    """
    def _split_tags(raw: str) -> list:
        # Commas or spaces both act as separators.
        return raw.split(sep=",") if "," in raw else raw.split()

    tag_to_search = _split_tags(args.tags)
    no_tags = set(_split_tags(args.notag))

    # Positional indices of the notes satisfying both conditions.
    rows_idx = []
    for idx, cell in enumerate(plan["tags"].values):
        # Parse into a local: cells are usually the string repr of a list, but
        # an already-parsed list is tolerated too.
        note_tags = literal_eval(cell) if isinstance(cell, str) else cell
        has_all_wanted = all(item in note_tags for item in tag_to_search)
        has_no_rejected = not no_tags.intersection(note_tags)
        if has_all_wanted and has_no_rejected:
            rows_idx.append(idx)

    selected_plan = plan.iloc[rows_idx, :]
    return selected_plan
| [
"pandas.DataFrame",
"os.mkdir",
"datetime.datetime.today",
"os.getcwd",
"pandas.read_csv",
"os.path.exists",
"pandas.to_datetime",
"ast.literal_eval",
"numpy.intersect1d",
"os.path.join",
"re.sub"
] | [((648, 678), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'features'}), '(columns=features)\n', (660, 678), True, 'import pandas as pd\n'), ((2111, 2175), 'pandas.to_datetime', 'pd.to_datetime', (["plan['date']"], {'format': '"""%Y-%m-%d"""', 'errors': '"""coerce"""'}), "(plan['date'], format='%Y-%m-%d', errors='coerce')\n", (2125, 2175), True, 'import pandas as pd\n'), ((3236, 3265), 'pandas.DataFrame', 'pd.DataFrame', (['item'], {'index': '[0]'}), '(item, index=[0])\n', (3248, 3265), True, 'import pandas as pd\n'), ((4686, 4715), 'pandas.DataFrame', 'pd.DataFrame', (['item'], {'index': '[0]'}), '(item, index=[0])\n', (4698, 4715), True, 'import pandas as pd\n'), ((746, 757), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (755, 757), False, 'import os\n'), ((848, 883), 'os.path.join', 'os.path.join', (['loc_dir', '""".."""', '"""data"""'], {}), "(loc_dir, '..', 'data')\n", (860, 883), False, 'import os\n'), ((948, 982), 'os.path.join', 'os.path.join', (['dir_path', '"""data.csv"""'], {}), "(dir_path, 'data.csv')\n", (960, 982), False, 'import os\n'), ((1035, 1059), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (1049, 1059), False, 'import os\n'), ((1069, 1087), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (1077, 1087), False, 'import os\n'), ((1809, 1820), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1818, 1820), False, 'import os\n'), ((1914, 1949), 'os.path.join', 'os.path.join', (['loc_dir', '""".."""', '"""data"""'], {}), "(loc_dir, '..', 'data')\n", (1926, 1949), False, 'import os\n'), ((2018, 2052), 'os.path.join', 'os.path.join', (['dir_path', '"""data.csv"""'], {}), "(dir_path, 'data.csv')\n", (2030, 2052), False, 'import os\n'), ((8465, 8483), 'ast.literal_eval', 'literal_eval', (['tags'], {}), '(tags)\n', (8477, 8483), False, 'from ast import literal_eval\n'), ((1235, 1260), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (1249, 1260), False, 'import os\n'), ((1382, 1421), 
'pandas.read_csv', 'pd.read_csv', (['data_path'], {'index_col': '(False)'}), '(data_path, index_col=False)\n', (1393, 1421), True, 'import pandas as pd\n'), ((4745, 4772), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'tags'], {}), "('[^\\\\w]', ' ', tags)\n", (4751, 4772), False, 'import re\n'), ((4321, 4337), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (4335, 4337), False, 'from datetime import datetime\n'), ((5505, 5547), 'ast.literal_eval', 'literal_eval', (["plan.at[idx_plan[i], 'tags']"], {}), "(plan.at[idx_plan[i], 'tags'])\n", (5517, 5547), False, 'from ast import literal_eval\n'), ((8594, 8633), 'numpy.intersect1d', 'np.intersect1d', (['no_tags', 'list_plan[idx]'], {}), '(no_tags, list_plan[idx])\n', (8608, 8633), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from time import time
from tensorflow.contrib.distributions import Normal
from stein.samplers import SteinSampler
from stein.optimizers import AdamGradientDescent
# Import data.
data_X = np.loadtxt("./data/data_X.csv", delimiter=",")
# Coerce a single-feature dataset into a column matrix.
if len(data_X.shape) == 1:
    data_X = np.atleast_2d(data_X).T
# True weights and targets are stored one-per-row after transposition.
data_w = np.atleast_2d(np.loadtxt("./data/data_w.csv", delimiter=",")).T
data_y = np.atleast_2d(np.loadtxt("./data/data_y.csv", delimiter=",")).T
n_samples, n_feats = data_X.shape
# Build the Bayesian linear regression graph (TensorFlow 1.x graph mode).
with tf.variable_scope("model"):
    # Placeholders for features and targets.
    model_X = tf.placeholder(tf.float32, shape=[None, n_feats])
    model_y = tf.placeholder(tf.float32, shape=[None, 1])
    model_w = tf.Variable(tf.zeros([n_feats, 1]))
    # Compute prior.
    with tf.variable_scope("priors"):
        # Standard normal prior over the regression weights.
        w_prior = Normal(tf.zeros([n_feats, 1]), 1.)
    # Compute likelihood function.
    with tf.variable_scope("likelihood"):
        y_hat = tf.matmul(model_X, model_w)
        # Gaussian log-likelihood up to an additive constant (unit noise).
        log_l = -0.5 * tf.reduce_sum(tf.square(y_hat - model_y))
    # Compute the log-posterior of the model.
    log_p = log_l + tf.reduce_sum(w_prior.log_prob(model_w))
# Record time elapsed.
start_time = time()
# Number of learning iterations.
n_iters = 500
# Sample from the posterior using Stein variational gradient descent.
n_particles = 50
gd = AdamGradientDescent(learning_rate=1e-1)
sampler = SteinSampler(n_particles, log_p, gd)
# Perform learning iterations.
for i in range(n_iters):
    start_iter = time()
    sampler.train_on_batch({model_X: data_X, model_y: data_y})
    end_iter = time()
    print("Iteration {}. Time to complete iteration: {:.4f}".format(
        i, end_iter - start_iter
    ))
# Show diagnostics.
# Posterior mean over particles of the (single) sampled variable.
est = np.array(list(sampler.theta.values()))[0].mean(axis=0).ravel()
print("True coefficients: {}".format(data_w.ravel()))
print("Est. coefficients: {}".format(est))
print("Time elapsed: {}".format(time() - start_time))
# Visualize.
# Only a 1-D feature space can be plotted as data + posterior draws.
if n_feats == 1:
    r = np.atleast_2d(np.linspace(-4., 4., num=100)).T
    Y = r.dot(sampler.theta[model_w][:, :, 0].T).T
    plt.figure(figsize=(8, 6))
    plt.plot(data_X.ravel(), data_y.ravel(), "r.", alpha=0.3)
    for i in range(n_particles):
        plt.plot(r.ravel(), Y[i], "b-", alpha=0.1)
    plt.grid()
    plt.xlim((-4., 4.))
    plt.show()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"stein.samplers.SteinSampler",
"tensorflow.variable_scope",
"stein.optimizers.AdamGradientDescent",
"time.time",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"tensorflow.zeros",
"tensorflow.matmul",
"numpy.loadtxt",
"numpy.linspace",... | [((264, 310), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data_X.csv"""'], {'delimiter': '""","""'}), "('./data/data_X.csv', delimiter=',')\n", (274, 310), True, 'import numpy as np\n'), ((1248, 1254), 'time.time', 'time', ([], {}), '()\n', (1252, 1254), False, 'from time import time\n'), ((1394, 1432), 'stein.optimizers.AdamGradientDescent', 'AdamGradientDescent', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1413, 1432), False, 'from stein.optimizers import AdamGradientDescent\n'), ((1444, 1480), 'stein.samplers.SteinSampler', 'SteinSampler', (['n_particles', 'log_p', 'gd'], {}), '(n_particles, log_p, gd)\n', (1456, 1480), False, 'from stein.samplers import SteinSampler\n'), ((561, 587), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), "('model')\n", (578, 587), True, 'import tensorflow as tf\n'), ((648, 697), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, n_feats]'}), '(tf.float32, shape=[None, n_feats])\n', (662, 697), True, 'import tensorflow as tf\n'), ((712, 755), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (726, 755), True, 'import tensorflow as tf\n'), ((1554, 1560), 'time.time', 'time', ([], {}), '()\n', (1558, 1560), False, 'from time import time\n'), ((1639, 1645), 'time.time', 'time', ([], {}), '()\n', (1643, 1645), False, 'from time import time\n'), ((2137, 2163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2147, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2324), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2322, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2350), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-4.0, 4.0)'], {}), '((-4.0, 4.0))\n', (2337, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2353, 2363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2361, 2363), True, 
'import matplotlib.pyplot as plt\n'), ((351, 372), 'numpy.atleast_2d', 'np.atleast_2d', (['data_X'], {}), '(data_X)\n', (364, 372), True, 'import numpy as np\n'), ((398, 444), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data_w.csv"""'], {'delimiter': '""","""'}), "('./data/data_w.csv', delimiter=',')\n", (408, 444), True, 'import numpy as np\n'), ((471, 517), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data_y.csv"""'], {'delimiter': '""","""'}), "('./data/data_y.csv', delimiter=',')\n", (481, 517), True, 'import numpy as np\n'), ((782, 804), 'tensorflow.zeros', 'tf.zeros', (['[n_feats, 1]'], {}), '([n_feats, 1])\n', (790, 804), True, 'import tensorflow as tf\n'), ((836, 863), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""priors"""'], {}), "('priors')\n", (853, 863), True, 'import tensorflow as tf\n'), ((962, 993), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""likelihood"""'], {}), "('likelihood')\n", (979, 993), True, 'import tensorflow as tf\n'), ((1011, 1038), 'tensorflow.matmul', 'tf.matmul', (['model_X', 'model_w'], {}), '(model_X, model_w)\n', (1020, 1038), True, 'import tensorflow as tf\n'), ((890, 912), 'tensorflow.zeros', 'tf.zeros', (['[n_feats, 1]'], {}), '([n_feats, 1])\n', (898, 912), True, 'import tensorflow as tf\n'), ((1974, 1980), 'time.time', 'time', ([], {}), '()\n', (1978, 1980), False, 'from time import time\n'), ((2049, 2080), 'numpy.linspace', 'np.linspace', (['(-4.0)', '(4.0)'], {'num': '(100)'}), '(-4.0, 4.0, num=100)\n', (2060, 2080), True, 'import numpy as np\n'), ((1076, 1102), 'tensorflow.square', 'tf.square', (['(y_hat - model_y)'], {}), '(y_hat - model_y)\n', (1085, 1102), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# Imports
# Standard lib
import unittest
import pathlib
# 3rd party
import numpy as np
from PIL import Image
# Our own imports
from deep_hipsc_tracking.model import preproc
from deep_hipsc_tracking.model._preproc import composite_mask
from .. import helpers
# Helper Classes
class FakeDetector(object):
    """ Stand-in for a detector with a single scalar output per sample

    :param callable predict:
        Optional override for the predict function; when omitted, every
        batch element scores 1.0
    """

    def __init__(self, predict=None):
        # Fall back to the all-ones stub when no callable is supplied
        self.predict = self.predict_ones if predict is None else predict

    def predict_ones(self, batch_slab, batch_size):
        """ Return one float32 score of 1.0 per batch element, ignoring input """
        return np.ones((batch_size, 1), dtype=np.float32)
class FakeConvDetector(object):
    """ Stand-in for a convolutional detector that center-crops its input

    :param tuple in_shape:
        The (rows, cols) shape of each input window
    :param tuple out_shape:
        The (rows, cols) shape of each output window
    """

    def __init__(self, in_shape, out_shape):
        self.in_shape = in_shape
        self.out_shape = out_shape
        # Offsets of the centered output window inside the input window
        start_x = (in_shape[0] - out_shape[0]) // 2
        start_y = (in_shape[1] - out_shape[1]) // 2
        self.start_x = start_x
        self.start_y = start_y
        self.end_x = start_x + out_shape[0]
        self.end_y = start_y + out_shape[1]

    def predict(self, batch_slab):
        """ Return the centered crop of channel 0 for every batch element """
        assert batch_slab.ndim == 4
        assert batch_slab.shape[1:3] == self.in_shape
        crop = batch_slab[:, self.start_x:self.end_x, self.start_y:self.end_y, 0:1]
        return crop
# Tests
class TestPredictWithSteps(unittest.TestCase):
    """ Tests for tiled prediction over images of assorted geometries

    FakeConvDetector center-crops its input, so stitching the tiled
    predictions back together must reproduce the input image exactly.
    """

    def _check_roundtrip(self, img_shape, in_shape, out_shape, **kwargs):
        """ Predict over a random image and require an exact roundtrip

        :param tuple img_shape:
            Shape of the random test image
        :param tuple in_shape:
            Detector input window shape
        :param tuple out_shape:
            Detector output window shape
        :param \\*\\* kwargs:
            Extra arguments (e.g. overlap) for predict_with_steps
        """
        # FIX: np.random.ranf is a deprecated alias removed in NumPy 2.0;
        # np.random.random draws from the same [0, 1) distribution
        img = np.random.random(img_shape)
        detector = FakeConvDetector(in_shape, out_shape)
        res = preproc.predict_with_steps(img, detector, in_shape, out_shape, **kwargs)
        self.assertEqual(res.shape, img_shape)
        np.testing.assert_almost_equal(res, img)

    def test_predicts_same_size_input_output(self):
        self._check_roundtrip((256, 256), (256, 256), (256, 256))

    def test_predicts_one_off_input_output(self):
        self._check_roundtrip((257, 257), (256, 256), (256, 256))

    def test_predicts_input_output_all_different(self):
        self._check_roundtrip((257, 257), (256, 256), (225, 225))

    def test_predicts_input_output_countception_shape(self):
        self._check_roundtrip((260, 347), (256, 256), (225, 225))

    def test_predicts_input_output_unet_shape(self):
        self._check_roundtrip((260, 347), (256, 256), (68, 68))

    def test_predicts_input_output_with_small_overlap(self):
        self._check_roundtrip((260, 347), (256, 256), (68, 68), overlap=1)

    def test_predicts_input_output_with_large_overlap(self):
        self._check_roundtrip((260, 347), (256, 256), (68, 68), overlap=(10, 8))
class TestCalculatePeakImage(unittest.TestCase):
    """ Tests for converting point detections into a peak target image """

    def test_peaks_with_single_dot_equal_padding(self):
        # One point detection dead center of a 64x64 frame
        target_img = np.zeros((64, 64))
        target_img[32, 32] = 1

        # Expected response: a linear cone of radius 4 centered on the dot,
        # clipped to zero outside that radius
        coords = np.arange(64) - 32
        xx, yy = np.meshgrid(coords, coords)
        exp_img = 1 - np.sqrt(xx**2 + yy**2) / 4
        exp_img = np.where(exp_img < 0, 0, exp_img)

        peak_img = preproc.calculate_peak_image(target_img,
                                                img_rows=32, img_cols=32,
                                                zero_padding=32,
                                                peak_sharpness=8)
        self.assertEqual(peak_img.shape, target_img.shape)
        np.testing.assert_almost_equal(exp_img, peak_img)
class TestRandomSplit(unittest.TestCase):
    """ Tests for splitting an index array into a sample and a remainder """

    def test_without_replacement(self):
        ind = np.random.rand(16)
        ind.sort()

        samp, rem = preproc.random_split(ind, 8)

        self.assertEqual(samp.shape, (8, ))
        self.assertEqual(rem.shape, (8, ))

        # Together the two halves partition the original array
        recombined = np.sort(np.concatenate((samp, rem)))
        np.testing.assert_almost_equal(recombined, ind)

    def test_without_replacement_too_many_samples(self):
        ind = np.random.rand(16)
        ind.sort()

        # Asking for more samples than exist just returns everything
        samp, rem = preproc.random_split(ind, 20)

        self.assertEqual(samp.shape, (16, ))
        self.assertEqual(rem.shape, (0, ))

        recombined = np.sort(np.concatenate((samp, rem)))
        np.testing.assert_almost_equal(recombined, ind)

    def test_with_replacement(self):
        ind = np.random.rand(16)
        ind.sort()

        samp, rem = preproc.random_split(ind, 8, with_replacement=True)

        self.assertEqual(samp.shape, (8, ))
        self.assertEqual(rem.shape, (16, ))

        # Sampling with replacement leaves the source array untouched
        np.testing.assert_almost_equal(ind, rem)
        self.assertTrue(all(s in ind for s in samp))

    def test_with_replacement_too_many_samples(self):
        ind = np.random.rand(16)
        ind.sort()

        samp, rem = preproc.random_split(ind, 20, with_replacement=True)

        self.assertEqual(samp.shape, (20, ))
        self.assertEqual(rem.shape, (16, ))

        np.testing.assert_almost_equal(ind, rem)
        self.assertTrue(all(s in ind for s in samp))
class TestCompositeMask(unittest.TestCase):
    """ Tests for compositing sliding-window detector scores over an image

    FakeDetector scores every window as 1.0, so a 'mean' composite is all
    ones over covered pixels (NaN elsewhere), and 'peak'/'peaks' composites
    are 1.0 at window centers (NaN elsewhere).
    """

    def test_composite_one_sample_mean(self):
        # Image is exactly one window: a single score covers every pixel
        srows, scols = 16, 16
        img = np.random.rand(16, 16, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             mode='mean')
        exp = np.ones((16, 16))
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_mean(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             mode='mean')
        exp = np.ones((32, 32))
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_mean_small_batches(self):
        # Batch size must not change the composite, including when the
        # window count is not an exact multiple of the batch size
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             batch_size=2,
                             mode='mean')
        exp = np.ones((32, 32))
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             batch_size=3,
                             mode='mean')
        exp = np.ones((32, 32))
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_mean_strided(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=5,
                             batch_size=2,
                             mode='mean')
        # With stride 5 the last row and column are never covered -> NaN
        exp = np.ones((32, 32))
        exp[:, -1] = np.nan
        exp[-1, :] = np.nan
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_mean_masked(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        # FIX: the np.bool alias was removed in NumPy 1.24; the builtin
        # bool dtype is the documented replacement
        mask = np.zeros((32, 32), dtype=bool)
        mask[:4, :4] = 1
        mask[-4:, -4:] = 1
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             mask=mask,
                             srows=srows, scols=scols,
                             batch_stride=5,
                             batch_size=2,
                             mode='mean')
        # Only masked-in pixels keep their scores; everything else is NaN
        exp = np.ones((32, 32))
        exp[:, -1] = np.nan
        exp[-1, :] = np.nan
        exp[~mask] = np.nan
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_one_field_peaks(self):
        # A single window puts one peak at the image center
        srows, scols = 16, 16
        img = np.random.rand(16, 16, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             mode='peak')
        exp = np.full((16, 16), np.nan)
        exp[8, 8] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_peaks(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             mode='peaks')
        # Window centers sweep the central 17x17 region
        exp = np.full((32, 32), np.nan)
        exp[8:25, 8:25] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_peaks_rotations(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             mode='peaks',
                             transforms='rotations')
        # One composite per rotation; the expected peak block shifts by
        # one pixel between successive rotations
        exp0 = np.full((32, 32), np.nan)
        exp0[8:25, 8:25] = 1
        exp1 = np.full((32, 32), np.nan)
        exp1[8:25, 7:24] = 1
        exp2 = np.full((32, 32), np.nan)
        exp2[7:24, 7:24] = 1
        exp3 = np.full((32, 32), np.nan)
        exp3[7:24, 8:25] = 1
        exp = [exp0, exp1, exp2, exp3]
        self.assertEqual(len(res), len(exp))
        for r, e in zip(res, exp):
            np.testing.assert_almost_equal(r, e)

    def test_composite_full_field_peaks_small_batches(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             batch_size=2,
                             mode='peaks')
        exp = np.full((32, 32), np.nan)
        exp[8:25, 8:25] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=1,
                             batch_size=3,
                             mode='peaks')
        exp = np.full((32, 32), np.nan)
        exp[8:25, 8:25] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

    def test_composite_full_field_peaks_strided(self):
        srows, scols = 16, 16
        img = np.random.rand(32, 32, 3)
        detector = FakeDetector()
        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=5,
                             batch_size=2,
                             mode='peaks')
        # With stride 5 the expected peak coverage is the 6:26 block
        exp = np.full((32, 32), np.nan)
        exp[6:26, 6:26] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)

        res = composite_mask(img, detector,
                             srows=srows, scols=scols,
                             batch_stride=5,
                             batch_size=3,
                             mode='peaks')
        exp = np.full((32, 32), np.nan)
        exp[6:26, 6:26] = 1
        self.assertEqual(len(res), 1)
        np.testing.assert_almost_equal(res[0], exp)
class TestCompleteSampler(unittest.TestCase):
    """ Tests for the exhaustive sliding-window image sampler """

    def _make_sampler(self, cls=None, **kwargs):
        """ Build a sampler with shared defaults, overridable per test

        :param type cls:
            The sampler class to construct (default preproc.CompleteSampler)
        :param \\*\\* kwargs:
            Overrides for the shared constructor settings
        """
        if cls is None:
            cls = preproc.CompleteSampler
        settings = {
            'files': [],
            'image_layout': 'tensorflow',
            'batch_size': 1,
            'input_shape': (64, 96, 3),
            'size_range': (128, 256),
            'rotation_range': (-10, 10),
            'flip_horizontal': True,
            'noise_type': 'none',
            'noise_fraction': 0.1,
            'cache_size': None,
        }
        settings.update(kwargs)
        return cls(**settings)

    def test_samples_upper_corner(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler()
        sampler.current_index = 0
        sampler.current_slice = 0

        # First window: the upper-left 64x96 corner
        out_img = sampler.slice_next(1, img)
        self.assertEqual(sampler.current_index, 0)
        self.assertEqual(sampler.current_slice, 1)
        self.assertEqual(out_img.shape, (1, 64, 96, 3))
        np.testing.assert_almost_equal(out_img[0, ...], img[:64, :96, :])

        # Second window: shifted one column to the right
        out_img = sampler.slice_next(1, img)
        self.assertEqual(sampler.current_index, 0)
        self.assertEqual(sampler.current_slice, 2)
        self.assertEqual(out_img.shape, (1, 64, 96, 3))
        np.testing.assert_almost_equal(out_img[0, ...], img[:64, 1:97, :])

    def test_samples_over_whole_image(self):
        img = np.random.rand(100, 100, 3)
        sampler = self._make_sampler()
        sampler.current_index = 0
        sampler.current_slice = 0

        # A 100x100 image holds 37 x 5 = 185 distinct 64x96 windows
        out_img = sampler.slice_next(185, img)
        self.assertEqual(sampler.current_index, 1)
        self.assertEqual(sampler.current_slice, 0)
        self.assertEqual(out_img.shape, (185, 64, 96, 3))
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img[i:i+64, j:j+96, :])

    def test_samples_over_whole_image_color_to_gray(self):
        img = np.random.rand(100, 100, 3)
        img_gray = np.mean(img, axis=2)[..., np.newaxis]
        sampler = self._make_sampler(input_shape=(64, 96, 1))
        sampler.current_index = 0
        sampler.current_slice = 0

        # Single-channel input shape: windows come back as grayscale
        out_img = sampler.slice_next(185, img)
        self.assertEqual(sampler.current_index, 1)
        self.assertEqual(sampler.current_slice, 0)
        self.assertEqual(out_img.shape, (185, 64, 96, 1))
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img_gray[i:i+64, j:j+96, :])

    def test_samples_as_much_as_it_can(self):
        img = np.random.rand(100, 100, 3)
        sampler = self._make_sampler()
        sampler.current_index = 0
        sampler.current_slice = 0

        # Requesting more windows than exist (187 > 185) returns them all
        out_img = sampler.slice_next(187, img)
        self.assertEqual(sampler.current_index, 1)
        self.assertEqual(sampler.current_slice, 0)
        self.assertEqual(out_img.shape, (185, 64, 96, 3))
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img[i:i+64, j:j+96, :])

    def test_samples_as_much_as_it_can_with_an_offset(self):
        img = np.random.rand(100, 100, 3)
        sampler = self._make_sampler()
        sampler.current_index = 0
        sampler.current_slice = 5

        # Starting 5 windows in leaves only 180 windows to return
        out_img = sampler.slice_next(187, img)
        self.assertEqual(sampler.current_index, 1)
        self.assertEqual(sampler.current_slice, 0)
        self.assertEqual(out_img.shape, (180, 64, 96, 3))
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j - 5
                if idx < 0:
                    continue
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img[i:i+64, j:j+96, :])

    def test_samples_multiple_whole_images(self):
        img1 = np.random.rand(100, 100, 3)
        img2 = np.random.rand(100, 100, 3)
        sampler = self._make_sampler()
        sampler.current_index = 0
        sampler.current_slice = 0

        # Both images are windowed in lockstep with the same geometry
        out_img1, out_img2 = sampler.slice_next(185, img1, img2)
        self.assertEqual(sampler.current_index, 1)
        self.assertEqual(sampler.current_slice, 0)
        self.assertEqual(out_img1.shape, (185, 64, 96, 3))
        self.assertEqual(out_img2.shape, (185, 64, 96, 3))
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img1[idx, ...],
                                               img1[i:i+64, j:j+96, :])
                np.testing.assert_almost_equal(out_img2[idx, ...],
                                               img2[i:i+64, j:j+96, :])

    def test_resample_all_over_several_images(self):
        img1 = np.random.rand(100, 100, 3)
        img2 = np.random.rand(110, 110, 3)

        class FakeCompleteSampler(preproc.CompleteSampler):
            # Serve fixed in-memory images instead of reading from disk
            # ('002.jpg' deliberately loads as None)
            def load_file(self, filename):
                return {'001.jpg': img1, '003.jpg': img2}.get(filename.name)

        sampler = self._make_sampler(cls=FakeCompleteSampler,
                                     files=['001.jpg', '002.jpg', '003.jpg'],
                                     batch_size=1024)
        out_img = sampler.resample_all(1024)
        self.assertEqual(out_img.shape, (1024, 64, 96, 3))
        self.assertEqual(sampler.current_slice, 134)
        self.assertEqual(sampler.current_index, 0)
        # First all 185 windows of image 1...
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img1[i:i+64, j:j+96, :])
        # ...then the 705 windows of image 2...
        for i in range(47):
            for j in range(15):
                idx = i * 15 + j + 185
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img2[i:i+64, j:j+96, :])
        # ...then image 1 again until the 1024-window batch is full
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j + 890
                if idx >= 1024:
                    break
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img1[i:i+64, j:j+96, :])

    def test_resample_all_over_several_images_with_masks(self):
        img1 = np.random.rand(100, 100, 3)
        img2 = np.random.rand(110, 110, 3)
        mask1 = np.random.rand(100, 100, 1)
        mask2 = np.random.rand(110, 110, 1)

        class FakeCompleteSampler(preproc.CompleteSampler):
            # Serve fixed in-memory images and masks instead of disk files
            def load_file(self, filename):
                return {'001.jpg': img1, '003.jpg': img2}.get(filename.name)

            def load_mask(self, filename, img):
                return {'001.jpg': mask1, '003.jpg': mask2}.get(filename.name)

        sampler = self._make_sampler(cls=FakeCompleteSampler,
                                     files=['001.jpg', '002.jpg', '003.jpg'],
                                     masks=['001.npz', '002.npz', '003.npz'],
                                     batch_size=1024)
        out_img, out_mask = sampler.resample_all(1024)
        self.assertEqual(out_img.shape, (1024, 64, 96, 3))
        self.assertEqual(out_mask.shape, (1024, 64, 96, 1))
        self.assertEqual(sampler.current_slice, 134)
        self.assertEqual(sampler.current_index, 0)
        # Image and mask windows must stay aligned through the whole batch
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img1[i:i+64, j:j+96, :])
                np.testing.assert_almost_equal(out_mask[idx, ...],
                                               mask1[i:i+64, j:j+96, :])
        for i in range(47):
            for j in range(15):
                idx = i * 15 + j + 185
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img2[i:i+64, j:j+96, :])
                np.testing.assert_almost_equal(out_mask[idx, ...],
                                               mask2[i:i+64, j:j+96, :])
        for i in range(37):
            for j in range(5):
                idx = i * 5 + j + 890
                if idx >= 1024:
                    break
                np.testing.assert_almost_equal(out_img[idx, ...],
                                               img1[i:i+64, j:j+96, :])
                np.testing.assert_almost_equal(out_mask[idx, ...],
                                               mask1[i:i+64, j:j+96, :])
class TestRandomSampler(unittest.TestCase):
    """ Tests for the randomized augmentation sampler """

    def _make_sampler(self, **kwargs):
        """ Build a RandomSampler with shared defaults, overridable per test

        :param \\*\\* kwargs:
            Overrides for the shared constructor settings
        """
        settings = {
            'files': [],
            'image_layout': 'tensorflow',
            'batch_size': 1,
            'input_shape': (64, 96, 3),
            'size_range': (128, 256),
            'rotation_range': (-10, 10),
            'flip_horizontal': True,
            'noise_type': 'none',
            'noise_fraction': 0.1,
            'cache_size': None,
        }
        settings.update(kwargs)
        return preproc.RandomSampler(**settings)

    def test_load_mask(self):
        img = np.random.rand(300, 300, 3)
        # Fractional ROI rectangles keyed by file stem
        masks = {
            'foo': [
                [0.0, 0.0, 0.4, 0.5],
                [0.9, 0.9, 1.0, 1.0],
            ],
        }
        sampler = self._make_sampler(masks=masks, image_layout='theano')
        out_mask = sampler.load_mask(pathlib.Path('grr/foo.jpg'), img)
        self.assertEqual(out_mask.shape, (300, 300, 1))

        # FIX: the np.bool alias was removed in NumPy 1.24; the builtin
        # bool dtype is the documented replacement
        exp_mask = np.zeros((300, 300, 1), dtype=bool)
        exp_mask[150:, :120, :] = True
        exp_mask[:30, 270:, :] = True
        np.testing.assert_almost_equal(exp_mask, out_mask)

    def test_resample_image_theano(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(image_layout='theano')
        out_img = sampler.resample_image(img)
        # Theano layout: channels first
        self.assertEqual(out_img.shape, (3, 96, 64))

    def test_resample_image_tensorflow(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler()
        out_img = sampler.resample_image(img)
        # Tensorflow layout: channels last
        self.assertEqual(out_img.shape, (64, 96, 3))

    def test_resample_image_tensorflow_color_to_gray(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(64, 96, 1))
        out_img = sampler.resample_image(img)
        self.assertEqual(out_img.shape, (64, 96, 1))

    def test_can_resample_with_fixed_params_no_change(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(300, 300, 3))
        # Identity transform: full-size box, no rotation/shift/flip
        out_img = sampler.resample_image(
            img, size=300, theta=0, shift=[0, 0],
            flip_horizontal=False)
        np.testing.assert_almost_equal(img, out_img, decimal=4)

    def test_can_resample_with_fixed_params_zero_pad_no_change(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(300, 300, 3),
                                     zero_padding=10)
        # The (10, 10) shift exactly cancels the 10-pixel zero padding
        out_img = sampler.resample_image(
            img, size=300, theta=0, shift=[10, 10],
            flip_horizontal=False)
        np.testing.assert_almost_equal(img, out_img, decimal=4)

    def test_can_resample_with_fixed_params_shifts_flips(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(200, 200, 3),
                                     flip_vertical=True)
        out_img = sampler.resample_image(
            img, size=200, theta=0, shift=[10, 10],
            flip_horizontal=True, flip_vertical=True)
        # Crop the shifted 200x200 window, then flip both axes
        exp_img = img[10:-90, 10:-90, :]
        exp_img = exp_img[::-1, ::-1, :]
        np.testing.assert_almost_equal(exp_img, out_img, decimal=4)

    def test_can_resample_with_fixed_params_only_resize(self):
        img = np.random.rand(300, 300, 3)
        sampler = self._make_sampler()
        out_img = sampler.resample_image(
            img, size=300, theta=0, shift=[0, 0],
            flip_horizontal=False)
        # Must match a direct resample of the full, centered box
        exp_img = preproc.resample_in_box(
            img, 300, np.eye(2), np.array([[150.0], [150.0]]),
            input_shape=(64, 96, 3))
        np.testing.assert_almost_equal(exp_img, out_img)

    def test_can_resample_multiple_images_with_same_transform(self):
        img1 = np.random.rand(300, 300, 3)
        img2 = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(200, 200, 3))
        out_img1, out_img2 = sampler.resample_image(
            img1, img2, size=200, theta=0, shift=[10, 10],
            flip_horizontal=True)
        exp_img1 = img1[10:-90, 10:-90, :]
        exp_img1 = exp_img1[:, ::-1, :]
        np.testing.assert_almost_equal(exp_img1, out_img1, decimal=4)
        exp_img2 = img2[10:-90, 10:-90, :]
        exp_img2 = exp_img2[:, ::-1, :]
        np.testing.assert_almost_equal(exp_img2, out_img2, decimal=4)

    def test_can_resample_multiple_images_with_same_transform_padding(self):
        img1 = np.random.rand(300, 300, 3)
        img2 = np.random.rand(300, 300, 3)
        sampler = self._make_sampler(input_shape=(200, 200, 3),
                                     zero_padding=5)
        out_img1, out_img2 = sampler.resample_image(
            img1, img2, size=200, theta=0, shift=[10, 10],
            flip_horizontal=True)
        # The 5-pixel padding shifts the effective crop window
        exp_img1 = img1[5:205, 5:205, :]
        exp_img1 = exp_img1[:, ::-1, :]
        np.testing.assert_almost_equal(exp_img1, out_img1, decimal=4)
        exp_img2 = img2[5:205, 5:205, :]
        exp_img2 = exp_img2[:, ::-1, :]
        np.testing.assert_almost_equal(exp_img2, out_img2, decimal=4)

    def test_can_resample_multiple_images_random_transform(self):
        img1 = np.random.rand(300, 300, 3)
        img2 = img1.copy()
        sampler = self._make_sampler(input_shape=(200, 200, 3),
                                     flip_vertical=True)
        # Identical inputs must stay identical under a shared random transform
        out_img1, out_img2 = sampler.resample_image(
            img1, img2)
        np.testing.assert_almost_equal(out_img1, out_img2, decimal=4)

    def test_can_resample_mask_with_image_same_transform(self):
        img1 = np.random.rand(300, 300, 3)
        # FIX: np.bool alias removed in NumPy 1.24; use the builtin bool
        img2 = np.zeros((300, 300), dtype=bool)
        img2[10:-90, 10:-90] = False
        sampler = self._make_sampler(input_shape=(200, 200, 3))
        out_img1, out_img2 = sampler.resample_image(
            img1, img2, size=200, theta=0, shift=[10, 10],
            flip_horizontal=True)
        exp_img1 = img1[10:-90, 10:-90, :]
        exp_img1 = exp_img1[:, ::-1, :]
        np.testing.assert_almost_equal(exp_img1, out_img1, decimal=4)
        exp_img2 = img2[10:-90, 10:-90]
        exp_img2 = exp_img2[:, ::-1]
        np.testing.assert_almost_equal(exp_img2, out_img2, decimal=4)
        self.assertTrue(np.all(exp_img2 == 0))
class TestResampleInBox(unittest.TestCase):
    """ Tests for resample_in_box output shapes and error handling """

    def _resample(self, img, input_shape):
        """ Resample with the shared 2x scale, identity rotation, (1, 1) shift """
        rot = np.array([
            [1, 0],
            [0, 1],
        ])
        shift = np.array([
            [1], [1]
        ])
        return preproc.resample_in_box(
            img, 2, rot, shift, input_shape=input_shape)

    def test_resample_grayscale_2d(self):
        img = np.mean(np.random.random((512, 512, 3)), axis=2)
        out_img = self._resample(img, 256)
        self.assertEqual(out_img.shape, (256, 256))

    def test_resample_grayscale_3d(self):
        img = np.mean(np.random.random((512, 512, 3)), axis=2)
        img = img[:, :, np.newaxis]
        out_img = self._resample(img, 256)
        self.assertEqual(out_img.shape, (256, 256, 1))

    def test_resample_grayscale_3d_colors(self):
        img = np.random.random((512, 512, 3))
        out_img = self._resample(img, 256)
        self.assertEqual(out_img.shape, (256, 256, 3))

    def test_resample_grayscale_3d_colors_x_y_diff(self):
        img = np.random.random((512, 512, 3))
        out_img = self._resample(img, (256, 128))
        self.assertEqual(out_img.shape, (256, 128, 3))

    def test_resample_grayscale_2d_to_colors(self):
        # A 2D image resampled to a 3-channel shape gains channels
        img = np.random.random((512, 512, ))
        out_img = self._resample(img, (256, 128, 3))
        self.assertEqual(out_img.shape, (256, 128, 3))

    def test_resample_grayscale_3d_to_colors(self):
        img = np.random.random((512, 512, 1))
        out_img = self._resample(img, (256, 128, 3))
        self.assertEqual(out_img.shape, (256, 128, 3))

    def test_resample_colors_to_grayscale(self):
        img = np.random.random((512, 512, 3))
        out_img = self._resample(img, (256, 128, 1))
        self.assertEqual(out_img.shape, (256, 128, 1))

    def test_4d_input_shape_raises_errors(self):
        img = np.random.random((512, 512, 3))
        with self.assertRaises(ValueError):
            self._resample(img, (256, 128, 1, 1))

    def test_input_shape_with_crazy_dims_raises_errors(self):
        # Only 1 or 3 output channels are acceptable shapes here
        img = np.random.random((512, 512, 3))
        with self.assertRaises(ValueError):
            self._resample(img, (256, 128, 2))
class TestImageResampler(helpers.FileSystemTestCase):
    """ Tests for the ImageResampler pipeline: loading, splitting, sampling, caching """
    def fullpath(self, *args):
        """ Join path components under the test's temporary directory
        :param \\*args:
            Path components to append, in order
        :returns:
            A path under self.tempdir
        """
        r = self.tempdir
        for a in args:
            r = r / a
        return r
    def make_image(self, *args, **kwargs):
        """ Write a random-noise JPEG image under the temp directory
        :param \\*args:
            Path components for the image file under self.tempdir
        :param tuple size:
            Shape of the noise array (default (512, 512, 3))
        :returns:
            The path to the written image
        """
        image_path = self.fullpath(*args)
        size = kwargs.pop('size', (512, 512, 3))
        # Random noise image
        img = np.random.random(size)
        img = np.round(img * 255)
        # Clamp to the uint8 range before casting (defensive; noise is already in [0, 255])
        img[img < 0] = 0
        img[img > 255] = 255
        img = Image.fromarray(img.astype(np.uint8))
        image_path.parent.mkdir(exist_ok=True, parents=True)
        img.save(str(image_path))
        return image_path
    def make_mask(self, *args, **kwargs):
        """ Write a random boolean mask as an .npz file under the temp directory
        :param \\*args:
            Path components for the mask file under self.tempdir
        :param tuple size:
            Shape of the mask array (default (512, 512))
        :returns:
            The path to the written mask file
        """
        mask_path = self.fullpath(*args)
        size = kwargs.pop('size', (512, 512))
        # Random noise image
        mask = np.random.random(size) > 0.5
        mask_path.parent.mkdir(exist_ok=True, parents=True)
        # Store the same array under both keys the loader may read
        np.savez(str(mask_path), mask=mask, refined_mask=mask)
        return mask_path
    def make_resampler(self,
                       datadir=None,
                       data_finder=None,
                       mask_finder=None,
                       mask_type=None,
                       test_fraction=None,
                       validation_fraction=None,
                       **kwargs):
        """ Make the ImageResampler object
        :param Path datadir:
            The data directory or self.tempdir
        :param callable data_finder:
            Function yielding image files under datadir (default preproc.find_raw_data)
        :param callable mask_finder:
            Optional function yielding (key, mask_file) pairs; enables mask loading
        :param str mask_type:
            Mask type passed to set_mask_loader when mask_finder is given
        :param float test_fraction:
            Fraction of images in the test set (default None)
        :param float validation_fraction:
            How many images in the validation set (default 0)
        :param \\*\\* kwargs:
            Arguments to pass to the load_samplers method of the resampler object
        :returns:
            The loaded ImageResampler object
        """
        if datadir is None:
            datadir = self.tempdir
        if data_finder is None:
            data_finder = preproc.find_raw_data
        proc = preproc.ImageResampler()
        proc.set_data_loader(datadir, data_finder=data_finder)
        if mask_finder is not None:
            proc.set_mask_loader(mask_finder=mask_finder, mask_type=mask_type)
        proc.load_files()
        proc.calc_train_test_split(test_fraction=test_fraction,
                                   validation_fraction=validation_fraction)
        proc.load_samplers(**kwargs)
        return proc
    def test_is_split_under_datadir(self):
        """ Paths under a loaded split are recognized; unknown paths are not """
        self.make_image('foo', '001.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=1)
        self.assertTrue(proc.is_split_under_datadir(self.tempdir / 'foo'))
        self.assertFalse(proc.is_split_under_datadir(self.tempdir / 'bees'))
        self.assertFalse(proc.is_split_under_datadir(self.tempdir))  # FIXME: This should work
    def test_resample_one_image(self):
        """ A single image yields one (1, 1, 256, 256) training batch """
        self.make_image('foo', '001.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=1)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (1, 1, 256, 256))
    def test_resample_several_images(self):
        """ Train/validation split sizes and batch-size mismatch errors """
        self.make_image('foo', '001.jpg')
        self.make_image('foo', '002.jpg')
        self.make_image('foo', '003.jpg')
        # 1/3 of three images -> one validation image, two training images
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=0.333,
                                   batch_size=2)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (2, 1, 256, 256))
        # The validation split only has one image, so batch_size=2 must fail
        with self.assertRaises(ValueError):
            imgs = next(proc.validation_data)
        proc.validation_data.batch_size = 1
        imgs = next(proc.validation_data)
        self.assertEqual(imgs.shape, (1, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 2)
        self.assertEqual(len(proc.validation_data), 1)
    def test_resample_several_images_colored(self):
        """ Same as above but requesting three-channel (color) samples """
        self.make_image('foo', '001.jpg')
        self.make_image('foo', '002.jpg')
        self.make_image('foo', '003.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=0.333,
                                   batch_size=2,
                                   input_shape=(256, 256, 3))
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (2, 3, 256, 256))
        with self.assertRaises(ValueError):
            imgs = next(proc.validation_data)
        proc.validation_data.batch_size = 1
        imgs = next(proc.validation_data)
        self.assertEqual(imgs.shape, (1, 3, 256, 256))
        self.assertEqual(len(proc.train_data), 2)
        self.assertEqual(len(proc.validation_data), 1)
    def test_resample_several_images_one_deleted(self):
        """ Deleting a file on disk shrinks the sampler's file list """
        i1 = self.make_image('foo', '001.jpg')
        i2 = self.make_image('foo', '002.jpg')
        i3 = self.make_image('foo', '003.jpg')
        # cache_size=0 forces a re-read from disk, so deletions are noticed
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=0.0,
                                   batch_size=3,
                                   cache_size=0)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        i1.unlink()
        # Only two readable images remain, so a batch of 3 must fail
        with self.assertRaises(ValueError):
            next(proc.train_data)
        proc.train_data.batch_size = 2
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (2, 1, 256, 256))
        self.assertEqual(set(proc.train_data.files), {i2, i3})
        # The pruned file list is stable across subsequent batches
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (2, 1, 256, 256))
        self.assertEqual(set(proc.train_data.files), {i2, i3})
    def test_resample_several_images_several_deleted(self):
        """ Deleting two of three files leaves a single-image sampler """
        i1 = self.make_image('foo', '001.jpg')
        i2 = self.make_image('foo', '002.jpg')
        i3 = self.make_image('foo', '003.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=0.0,
                                   batch_size=3,
                                   cache_size=0)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        i1.unlink()
        i3.unlink()
        with self.assertRaises(ValueError):
            next(proc.train_data)
        proc.train_data.batch_size = 1
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (1, 1, 256, 256))
        self.assertEqual(proc.train_data.files, [i2])
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (1, 1, 256, 256))
        self.assertEqual(proc.train_data.files, [i2])
    def test_resample_several_images_large_cache(self):
        """ A cache larger than the data set holds every image """
        self.make_image('foo', '001.jpg')
        self.make_image('foo', '002.jpg')
        self.make_image('foo', '003.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=3,
                                   cache_size=5)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 3)
        self.assertEqual(len(proc.train_data.image_cache), 3)
    def test_resample_several_images_no_cache(self):
        """ cache_size=None disables the image cache entirely """
        self.make_image('foo', '001.jpg')
        self.make_image('foo', '002.jpg')
        self.make_image('foo', '003.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=3,
                                   cache_size=None)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 3)
        self.assertEqual(len(proc.train_data.image_cache), 0)
    def test_resample_several_images_small_cache(self):
        """ A cache smaller than the data set is bounded at cache_size """
        self.make_image('foo', '001.jpg')
        self.make_image('foo', '002.jpg')
        self.make_image('foo', '003.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=3,
                                   cache_size=2)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 3)
        self.assertEqual(len(proc.train_data.image_cache), 2)
    def test_resample_several_images_deduplicated_cache(self):
        """ Images with the same file name appear to share one cache entry
        NOTE(review): all three files have identical random content statistics but
        different paths; the single cache entry presumably keys on the file name stem
        -- confirm against the cache implementation
        """
        self.make_image('foo', '001.jpg')
        self.make_image('bar', '001.jpg')
        self.make_image('baz', '001.jpg')
        proc = self.make_resampler(test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=3,
                                   cache_size=5)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 3)
        self.assertEqual(len(proc.train_data.image_cache), 1)
    def test_resample_several_images_alternate_finder(self):
        """ A custom data_finder can walk a channel/tile directory layout """
        def find_data(datadir, blacklist=None):
            # Walk datadir/'TL Brightfield'/<tile>/<image> and yield each image
            channeldir = datadir / 'TL Brightfield'
            for tiledir in channeldir.iterdir():
                for image_file in tiledir.iterdir():
                    yield image_file
        self.make_image('TL Brightfield', 's01', 's01-001.jpg')
        self.make_image('TL Brightfield', 's02', 's02-001.jpg')
        self.make_image('TL Brightfield', 's02', 's02-002.jpg')
        proc = self.make_resampler(data_finder=find_data,
                                   test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=3,
                                   cache_size=0)
        imgs = next(proc.train_data)
        self.assertEqual(imgs.shape, (3, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 3)
    def test_resample_several_images_and_masks(self):
        """ Pairing images with file masks yields (image, mask) batches
        Only two of the three images have masks, so the sampler yields two pairs.
        """
        def find_data(datadir, blacklist=None):
            # Images live under datadir/'Corrected'/'TL Brightfield'/<tile>/
            channeldir = datadir / 'Corrected' / 'TL Brightfield'
            for tiledir in channeldir.iterdir():
                for image_file in tiledir.iterdir():
                    yield image_file
        def find_masks(datadir, blacklist=None):
            # Masks live under datadir/'colony_mask'/'TL Brightfield'/<tile>/
            # and are keyed by file stem so they can be matched to images
            channeldir = datadir / 'colony_mask' / 'TL Brightfield'
            for tiledir in channeldir.iterdir():
                for image_file in tiledir.iterdir():
                    yield image_file.stem, image_file
        self.make_image('Corrected', 'TL Brightfield', 's01', 's01-001.jpg')
        self.make_image('Corrected', 'TL Brightfield', 's02', 's02-001.jpg')
        self.make_image('Corrected', 'TL Brightfield', 's02', 's02-002.jpg')
        self.make_mask('colony_mask', 'TL Brightfield', 's01', 's01-001.npz')
        self.make_mask('colony_mask', 'TL Brightfield', 's02', 's02-001.npz')
        proc = self.make_resampler(data_finder=find_data,
                                   mask_finder=find_masks,
                                   mask_type='file',
                                   test_fraction=None,
                                   validation_fraction=None,
                                   batch_size=2,
                                   cache_size=0)
        imgs, masks = next(proc.train_data)
        self.assertEqual(imgs.shape, (2, 1, 256, 256))
        self.assertEqual(masks.shape, (2, 1, 256, 256))
        self.assertEqual(len(proc.train_data), 2)
| [
"numpy.ones",
"deep_hipsc_tracking.model._preproc.composite_mask",
"pathlib.Path",
"numpy.mean",
"numpy.arange",
"deep_hipsc_tracking.model.preproc.resample_in_box",
"deep_hipsc_tracking.model.preproc.CompleteSampler",
"numpy.round",
"numpy.full",
"numpy.meshgrid",
"numpy.testing.assert_almost_e... | [((611, 653), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {'dtype': 'np.float32'}), '((batch_size, 1), dtype=np.float32)\n', (618, 653), True, 'import numpy as np\n'), ((1391, 1417), 'numpy.random.ranf', 'np.random.ranf', (['(256, 256)'], {}), '((256, 256))\n', (1405, 1417), True, 'import numpy as np\n'), ((1492, 1557), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(256, 256)'], {}), '(img, detector, (256, 256), (256, 256))\n', (1518, 1557), False, 'from deep_hipsc_tracking.model import preproc\n'), ((1615, 1655), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (1645, 1655), True, 'import numpy as np\n'), ((1722, 1748), 'numpy.random.ranf', 'np.random.ranf', (['(257, 257)'], {}), '((257, 257))\n', (1736, 1748), True, 'import numpy as np\n'), ((1823, 1888), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(256, 256)'], {}), '(img, detector, (256, 256), (256, 256))\n', (1849, 1888), False, 'from deep_hipsc_tracking.model import preproc\n'), ((1946, 1986), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (1976, 1986), True, 'import numpy as np\n'), ((2059, 2085), 'numpy.random.ranf', 'np.random.ranf', (['(257, 257)'], {}), '((257, 257))\n', (2073, 2085), True, 'import numpy as np\n'), ((2160, 2225), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(225, 225)'], {}), '(img, detector, (256, 256), (225, 225))\n', (2186, 2225), False, 'from deep_hipsc_tracking.model import preproc\n'), ((2283, 2323), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (2313, 2323), True, 'import numpy as np\n'), ((2401, 2427), 'numpy.random.ranf', 
'np.random.ranf', (['(260, 347)'], {}), '((260, 347))\n', (2415, 2427), True, 'import numpy as np\n'), ((2502, 2567), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(225, 225)'], {}), '(img, detector, (256, 256), (225, 225))\n', (2528, 2567), False, 'from deep_hipsc_tracking.model import preproc\n'), ((2625, 2665), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (2655, 2665), True, 'import numpy as np\n'), ((2735, 2761), 'numpy.random.ranf', 'np.random.ranf', (['(260, 347)'], {}), '((260, 347))\n', (2749, 2761), True, 'import numpy as np\n'), ((2834, 2897), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(68, 68)'], {}), '(img, detector, (256, 256), (68, 68))\n', (2860, 2897), False, 'from deep_hipsc_tracking.model import preproc\n'), ((2955, 2995), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (2985, 2995), True, 'import numpy as np\n'), ((3073, 3099), 'numpy.random.ranf', 'np.random.ranf', (['(260, 347)'], {}), '((260, 347))\n', (3087, 3099), True, 'import numpy as np\n'), ((3172, 3246), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 256)', '(68, 68)'], {'overlap': '(1)'}), '(img, detector, (256, 256), (68, 68), overlap=1)\n', (3198, 3246), False, 'from deep_hipsc_tracking.model import preproc\n'), ((3304, 3344), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (3334, 3344), True, 'import numpy as np\n'), ((3422, 3448), 'numpy.random.ranf', 'np.random.ranf', (['(260, 347)'], {}), '((260, 347))\n', (3436, 3448), True, 'import numpy as np\n'), ((3521, 3606), 'deep_hipsc_tracking.model.preproc.predict_with_steps', 'preproc.predict_with_steps', (['img', 'detector', '(256, 
256)', '(68, 68)'], {'overlap': '(10, 8)'}), '(img, detector, (256, 256), (68, 68), overlap=(10, 8)\n )\n', (3547, 3606), False, 'from deep_hipsc_tracking.model import preproc\n'), ((3659, 3699), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'img'], {}), '(res, img)\n', (3689, 3699), True, 'import numpy as np\n'), ((3830, 3848), 'numpy.zeros', 'np.zeros', (['(64, 64)'], {}), '((64, 64))\n', (3838, 3848), True, 'import numpy as np\n'), ((3961, 3978), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3972, 3978), True, 'import numpy as np\n'), ((3992, 4018), 'numpy.sqrt', 'np.sqrt', (['(xx ** 2 + yy ** 2)'], {}), '(xx ** 2 + yy ** 2)\n', (3999, 4018), True, 'import numpy as np\n'), ((4098, 4203), 'deep_hipsc_tracking.model.preproc.calculate_peak_image', 'preproc.calculate_peak_image', (['target_img'], {'img_rows': '(32)', 'img_cols': '(32)', 'zero_padding': '(32)', 'peak_sharpness': '(8)'}), '(target_img, img_rows=32, img_cols=32,\n zero_padding=32, peak_sharpness=8)\n', (4126, 4203), False, 'from deep_hipsc_tracking.model import preproc\n'), ((4411, 4460), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img', 'peak_img'], {}), '(exp_img, peak_img)\n', (4441, 4460), True, 'import numpy as np\n'), ((4561, 4579), 'numpy.random.rand', 'np.random.rand', (['(16)'], {}), '(16)\n', (4575, 4579), True, 'import numpy as np\n'), ((4620, 4648), 'deep_hipsc_tracking.model.preproc.random_split', 'preproc.random_split', (['ind', '(8)'], {}), '(ind, 8)\n', (4640, 4648), False, 'from deep_hipsc_tracking.model import preproc\n'), ((4752, 4779), 'numpy.concatenate', 'np.concatenate', (['(samp, rem)'], {}), '((samp, rem))\n', (4766, 4779), True, 'import numpy as np\n'), ((4808, 4848), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'ind'], {}), '(res, ind)\n', (4838, 4848), True, 'import numpy as np\n'), ((4922, 4940), 'numpy.random.rand', 'np.random.rand', (['(16)'], {}), 
'(16)\n', (4936, 4940), True, 'import numpy as np\n'), ((4981, 5010), 'deep_hipsc_tracking.model.preproc.random_split', 'preproc.random_split', (['ind', '(20)'], {}), '(ind, 20)\n', (5001, 5010), False, 'from deep_hipsc_tracking.model import preproc\n'), ((5115, 5142), 'numpy.concatenate', 'np.concatenate', (['(samp, rem)'], {}), '((samp, rem))\n', (5129, 5142), True, 'import numpy as np\n'), ((5171, 5211), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'ind'], {}), '(res, ind)\n', (5201, 5211), True, 'import numpy as np\n'), ((5265, 5283), 'numpy.random.rand', 'np.random.rand', (['(16)'], {}), '(16)\n', (5279, 5283), True, 'import numpy as np\n'), ((5324, 5375), 'deep_hipsc_tracking.model.preproc.random_split', 'preproc.random_split', (['ind', '(8)'], {'with_replacement': '(True)'}), '(ind, 8, with_replacement=True)\n', (5344, 5375), False, 'from deep_hipsc_tracking.model import preproc\n'), ((5474, 5514), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ind', 'rem'], {}), '(ind, rem)\n', (5504, 5514), True, 'import numpy as np\n'), ((5640, 5658), 'numpy.random.rand', 'np.random.rand', (['(16)'], {}), '(16)\n', (5654, 5658), True, 'import numpy as np\n'), ((5699, 5751), 'deep_hipsc_tracking.model.preproc.random_split', 'preproc.random_split', (['ind', '(20)'], {'with_replacement': '(True)'}), '(ind, 20, with_replacement=True)\n', (5719, 5751), False, 'from deep_hipsc_tracking.model import preproc\n'), ((5850, 5890), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ind', 'rem'], {}), '(ind, rem)\n', (5880, 5890), True, 'import numpy as np\n'), ((6085, 6110), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)', '(3)'], {}), '(16, 16, 3)\n', (6099, 6110), True, 'import numpy as np\n'), ((6161, 6249), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'mode': '"""mean"""'}), "(img, 
detector, srows=srows, scols=scols, batch_stride=1,\n mode='mean')\n", (6175, 6249), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((6347, 6364), 'numpy.ones', 'np.ones', (['(16, 16)'], {}), '((16, 16))\n', (6354, 6364), True, 'import numpy as np\n'), ((6412, 6455), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (6442, 6455), True, 'import numpy as np\n'), ((6549, 6574), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (6563, 6574), True, 'import numpy as np\n'), ((6625, 6713), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'mode': '"""mean"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n mode='mean')\n", (6639, 6713), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((6811, 6828), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (6818, 6828), True, 'import numpy as np\n'), ((6876, 6919), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (6906, 6919), True, 'import numpy as np\n'), ((7027, 7052), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (7041, 7052), True, 'import numpy as np\n'), ((7103, 7205), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'batch_size': '(2)', 'mode': '"""mean"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n batch_size=2, mode='mean')\n", (7117, 7205), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((7332, 7349), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (7339, 7349), True, 'import numpy as np\n'), ((7397, 7440), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (7427, 7440), True, 'import numpy as np\n'), ((7456, 7558), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'batch_size': '(3)', 'mode': '"""mean"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n batch_size=3, mode='mean')\n", (7470, 7558), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((7685, 7702), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (7692, 7702), True, 'import numpy as np\n'), ((7750, 7793), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (7780, 7793), True, 'import numpy as np\n'), ((7895, 7920), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (7909, 7920), True, 'import numpy as np\n'), ((7971, 8073), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(5)', 'batch_size': '(2)', 'mode': '"""mean"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=5,\n batch_size=2, mode='mean')\n", (7985, 8073), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((8200, 8217), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (8207, 8217), True, 'import numpy as np\n'), ((8321, 8364), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (8351, 8364), True, 'import numpy as np\n'), ((8465, 8490), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (8479, 8490), True, 'import numpy as np\n'), ((8506, 8539), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {'dtype': 'np.bool'}), '((32, 32), dtype=np.bool)\n', (8514, 8539), True, 'import numpy as np\n'), ((8642, 8755), 
'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'mask': 'mask', 'srows': 'srows', 'scols': 'scols', 'batch_stride': '(5)', 'batch_size': '(2)', 'mode': '"""mean"""'}), "(img, detector, mask=mask, srows=srows, scols=scols,\n batch_stride=5, batch_size=2, mode='mean')\n", (8656, 8755), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((8911, 8928), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (8918, 8928), True, 'import numpy as np\n'), ((9060, 9103), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (9090, 9103), True, 'import numpy as np\n'), ((9197, 9222), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)', '(3)'], {}), '(16, 16, 3)\n', (9211, 9222), True, 'import numpy as np\n'), ((9273, 9361), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'mode': '"""peak"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n mode='peak')\n", (9287, 9361), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((9459, 9484), 'numpy.full', 'np.full', (['(16, 16)', 'np.nan'], {}), '((16, 16), np.nan)\n', (9466, 9484), True, 'import numpy as np\n'), ((9554, 9597), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (9584, 9597), True, 'import numpy as np\n'), ((9692, 9717), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (9706, 9717), True, 'import numpy as np\n'), ((9768, 9857), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'mode': '"""peaks"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n mode='peaks')\n", (9782, 9857), False, 'from 
deep_hipsc_tracking.model._preproc import composite_mask\n'), ((9955, 9980), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (9962, 9980), True, 'import numpy as np\n'), ((10056, 10099), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (10086, 10099), True, 'import numpy as np\n'), ((10204, 10229), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (10218, 10229), True, 'import numpy as np\n'), ((10280, 10393), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'mode': '"""peaks"""', 'transforms': '"""rotations"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n mode='peaks', transforms='rotations')\n", (10294, 10393), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((10522, 10547), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (10529, 10547), True, 'import numpy as np\n'), ((10593, 10618), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (10600, 10618), True, 'import numpy as np\n'), ((10664, 10689), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (10671, 10689), True, 'import numpy as np\n'), ((10735, 10760), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (10742, 10760), True, 'import numpy as np\n'), ((11068, 11093), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (11082, 11093), True, 'import numpy as np\n'), ((11144, 11247), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'batch_size': '(2)', 'mode': '"""peaks"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n batch_size=2, mode='peaks')\n", (11158, 
11247), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((11374, 11399), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (11381, 11399), True, 'import numpy as np\n'), ((11475, 11518), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (11505, 11518), True, 'import numpy as np\n'), ((11534, 11637), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(1)', 'batch_size': '(3)', 'mode': '"""peaks"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=1,\n batch_size=3, mode='peaks')\n", (11548, 11637), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((11764, 11789), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (11771, 11789), True, 'import numpy as np\n'), ((11865, 11908), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (11895, 11908), True, 'import numpy as np\n'), ((12011, 12036), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (12025, 12036), True, 'import numpy as np\n'), ((12087, 12190), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(5)', 'batch_size': '(2)', 'mode': '"""peaks"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=5,\n batch_size=2, mode='peaks')\n", (12101, 12190), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((12317, 12342), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (12324, 12342), True, 'import numpy as np\n'), ((12418, 12461), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (12448, 12461), True, 'import numpy as np\n'), 
((12477, 12580), 'deep_hipsc_tracking.model._preproc.composite_mask', 'composite_mask', (['img', 'detector'], {'srows': 'srows', 'scols': 'scols', 'batch_stride': '(5)', 'batch_size': '(3)', 'mode': '"""peaks"""'}), "(img, detector, srows=srows, scols=scols, batch_stride=5,\n batch_size=3, mode='peaks')\n", (12491, 12580), False, 'from deep_hipsc_tracking.model._preproc import composite_mask\n'), ((12707, 12732), 'numpy.full', 'np.full', (['(32, 32)', 'np.nan'], {}), '((32, 32), np.nan)\n', (12714, 12732), True, 'import numpy as np\n'), ((12808, 12851), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res[0]', 'exp'], {}), '(res[0], exp)\n', (12838, 12851), True, 'import numpy as np\n'), ((12957, 12984), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (12971, 12984), True, 'import numpy as np\n'), ((13004, 13243), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (13027, 13243), False, 'from deep_hipsc_tracking.model import preproc\n'), ((13889, 13954), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[0, ...]', 'img[:64, :96, :]'], {}), '(out_img[0, ...], img[:64, :96, :])\n', (13919, 13954), True, 'import numpy as np\n'), ((14168, 14234), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[0, ...]', 'img[:64, 1:97, :]'], {}), '(out_img[0, ...], img[:64, 1:97, :])\n', (14198, 14234), True, 'import numpy as np\n'), ((14296, 
14323), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (14310, 14323), True, 'import numpy as np\n'), ((14343, 14582), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (14366, 14582), False, 'from deep_hipsc_tracking.model import preproc\n'), ((15529, 15556), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (15543, 15556), True, 'import numpy as np\n'), ((15633, 15872), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 1)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 1), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (15656, 15872), False, 'from deep_hipsc_tracking.model import preproc\n'), ((16811, 16838), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (16825, 16838), True, 'import numpy as np\n'), ((16858, 17097), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 
'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (16881, 17097), False, 'from deep_hipsc_tracking.model import preproc\n'), ((18046, 18073), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (18060, 18073), True, 'import numpy as np\n'), ((18093, 18332), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (18116, 18332), False, 'from deep_hipsc_tracking.model import preproc\n'), ((19332, 19359), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (19346, 19359), True, 'import numpy as np\n'), ((19375, 19402), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (19389, 19402), True, 'import numpy as np\n'), ((19422, 19661), 'deep_hipsc_tracking.model.preproc.CompleteSampler', 'preproc.CompleteSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', 
batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (19445, 19661), False, 'from deep_hipsc_tracking.model import preproc\n'), ((20822, 20849), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (20836, 20849), True, 'import numpy as np\n'), ((20865, 20892), 'numpy.random.rand', 'np.random.rand', (['(110)', '(110)', '(3)'], {}), '(110, 110, 3)\n', (20879, 20892), True, 'import numpy as np\n'), ((22882, 22909), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(3)'], {}), '(100, 100, 3)\n', (22896, 22909), True, 'import numpy as np\n'), ((22925, 22952), 'numpy.random.rand', 'np.random.rand', (['(110)', '(110)', '(3)'], {}), '(110, 110, 3)\n', (22939, 22952), True, 'import numpy as np\n'), ((22970, 22997), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(1)'], {}), '(100, 100, 1)\n', (22984, 22997), True, 'import numpy as np\n'), ((23014, 23041), 'numpy.random.rand', 'np.random.rand', (['(110)', '(110)', '(1)'], {}), '(110, 110, 1)\n', (23028, 23041), True, 'import numpy as np\n'), ((25876, 25903), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (25890, 25903), True, 'import numpy as np\n'), ((26063, 26308), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'masks': 'masks', 'image_layout': '"""theano"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], masks=masks, image_layout='theano',\n batch_size=1, input_shape=(64, 96, 3), size_range=(128, 256),\n rotation_range=(-10, 10), flip_horizontal=True, noise_type='none',\n noise_fraction=0.1, cache_size=None)\n", (26084, 26308), False, 'from deep_hipsc_tracking.model 
import preproc\n'), ((26845, 26883), 'numpy.zeros', 'np.zeros', (['(300, 300, 1)'], {'dtype': 'np.bool'}), '((300, 300, 1), dtype=np.bool)\n', (26853, 26883), True, 'import numpy as np\n'), ((26970, 27020), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_mask', 'out_mask'], {}), '(exp_mask, out_mask)\n', (27000, 27020), True, 'import numpy as np\n'), ((27079, 27106), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (27093, 27106), True, 'import numpy as np\n'), ((27126, 27359), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""theano"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='theano', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (27147, 27359), False, 'from deep_hipsc_tracking.model import preproc\n'), ((27869, 27896), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (27883, 27896), True, 'import numpy as np\n'), ((27916, 28153), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (27937, 28153), False, 'from deep_hipsc_tracking.model import 
preproc\n'), ((28677, 28704), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (28691, 28704), True, 'import numpy as np\n'), ((28724, 28961), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 1)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 1), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (28745, 28961), False, 'from deep_hipsc_tracking.model import preproc\n'), ((29486, 29513), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (29500, 29513), True, 'import numpy as np\n'), ((29533, 29772), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(300, 300, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(300, 300, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (29554, 29772), False, 'from deep_hipsc_tracking.model import preproc\n'), ((30278, 30337), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img', 'out_img'], {'decimal': '(4)'}), '(exp_img, out_img, decimal=4)\n', (30308, 30337), True, 'import numpy as np\n'), ((30424, 30451), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (30438, 30451), True, 'import 
numpy as np\n'), ((30471, 30727), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(300, 300, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'zero_padding': '(10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(300, 300, 3), size_range=(128, 256), rotation_range=(-10, \n 10), zero_padding=10, flip_horizontal=True, noise_type='none',\n noise_fraction=0.1, cache_size=None)\n", (30492, 30727), False, 'from deep_hipsc_tracking.model import preproc\n'), ((31275, 31334), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img', 'out_img'], {'decimal': '(4)'}), '(exp_img, out_img, decimal=4)\n', (31305, 31334), True, 'import numpy as np\n'), ((31415, 31442), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (31429, 31442), True, 'import numpy as np\n'), ((31462, 31721), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(200, 200, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'flip_vertical': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(200, 200, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, flip_vertical=True, noise_type='none',\n noise_fraction=0.1, cache_size=None)\n", (31483, 31721), False, 'from deep_hipsc_tracking.model import preproc\n'), ((32348, 32407), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img', 'out_img'], {'decimal': '(4)'}), '(exp_img, out_img, decimal=4)\n', (32378, 32407), True, 
'import numpy as np\n'), ((32487, 32514), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (32501, 32514), True, 'import numpy as np\n'), ((32534, 32771), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(64, 96, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(64, 96, 3), size_range=(128, 256), rotation_range=(-10, 10\n ), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (32555, 32771), False, 'from deep_hipsc_tracking.model import preproc\n'), ((33399, 33447), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img', 'out_img'], {}), '(exp_img, out_img)\n', (33429, 33447), True, 'import numpy as np\n'), ((33534, 33561), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (33548, 33561), True, 'import numpy as np\n'), ((33577, 33604), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (33591, 33604), True, 'import numpy as np\n'), ((33624, 33863), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(200, 200, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(200, 200, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (33645, 33863), False, 'from deep_hipsc_tracking.model import 
preproc\n'), ((34449, 34510), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img1', 'out_img1'], {'decimal': '(4)'}), '(exp_img1, out_img1, decimal=4)\n', (34479, 34510), True, 'import numpy as np\n'), ((34604, 34665), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img2', 'out_img2'], {'decimal': '(4)'}), '(exp_img2, out_img2, decimal=4)\n', (34634, 34665), True, 'import numpy as np\n'), ((34760, 34787), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (34774, 34787), True, 'import numpy as np\n'), ((34803, 34830), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (34817, 34830), True, 'import numpy as np\n'), ((34850, 35105), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(200, 200, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None', 'zero_padding': '(5)'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(200, 200, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None, zero_padding=5)\n", (34871, 35105), False, 'from deep_hipsc_tracking.model import preproc\n'), ((35729, 35790), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img1', 'out_img1'], {'decimal': '(4)'}), '(exp_img1, out_img1, decimal=4)\n', (35759, 35790), True, 'import numpy as np\n'), ((35882, 35943), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img2', 'out_img2'], {'decimal': '(4)'}), '(exp_img2, out_img2, decimal=4)\n', (35912, 35943), True, 'import numpy as np\n'), ((36027, 36054), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), 
'(300, 300, 3)\n', (36041, 36054), True, 'import numpy as np\n'), ((36101, 36360), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(200, 200, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'flip_vertical': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(200, 200, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, flip_vertical=True, noise_type='none',\n noise_fraction=0.1, cache_size=None)\n", (36122, 36360), False, 'from deep_hipsc_tracking.model import preproc\n'), ((36834, 36895), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img1', 'out_img2'], {'decimal': '(4)'}), '(out_img1, out_img2, decimal=4)\n', (36864, 36895), True, 'import numpy as np\n'), ((36977, 37004), 'numpy.random.rand', 'np.random.rand', (['(300)', '(300)', '(3)'], {}), '(300, 300, 3)\n', (36991, 37004), True, 'import numpy as np\n'), ((37020, 37055), 'numpy.zeros', 'np.zeros', (['(300, 300)'], {'dtype': 'np.bool'}), '((300, 300), dtype=np.bool)\n', (37028, 37055), True, 'import numpy as np\n'), ((37112, 37351), 'deep_hipsc_tracking.model.preproc.RandomSampler', 'preproc.RandomSampler', ([], {'files': '[]', 'image_layout': '"""tensorflow"""', 'batch_size': '(1)', 'input_shape': '(200, 200, 3)', 'size_range': '(128, 256)', 'rotation_range': '(-10, 10)', 'flip_horizontal': '(True)', 'noise_type': '"""none"""', 'noise_fraction': '(0.1)', 'cache_size': 'None'}), "(files=[], image_layout='tensorflow', batch_size=1,\n input_shape=(200, 200, 3), size_range=(128, 256), rotation_range=(-10, \n 10), flip_horizontal=True, noise_type='none', noise_fraction=0.1,\n cache_size=None)\n", (37133, 37351), False, 'from deep_hipsc_tracking.model import preproc\n'), ((37937, 37998), 
'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img1', 'out_img1'], {'decimal': '(4)'}), '(exp_img1, out_img1, decimal=4)\n', (37967, 37998), True, 'import numpy as np\n'), ((38086, 38147), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['exp_img2', 'out_img2'], {'decimal': '(4)'}), '(exp_img2, out_img2, decimal=4)\n', (38116, 38147), True, 'import numpy as np\n'), ((38299, 38330), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (38315, 38330), True, 'import numpy as np\n'), ((38345, 38365), 'numpy.mean', 'np.mean', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (38352, 38365), True, 'import numpy as np\n'), ((38399, 38425), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (38407, 38425), True, 'import numpy as np\n'), ((38478, 38498), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (38486, 38498), True, 'import numpy as np\n'), ((38539, 38603), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256)'}), '(img, scale, rot, shift, input_shape=256)\n', (38562, 38603), False, 'from deep_hipsc_tracking.model import preproc\n'), ((38728, 38759), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (38744, 38759), True, 'import numpy as np\n'), ((38774, 38794), 'numpy.mean', 'np.mean', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (38781, 38794), True, 'import numpy as np\n'), ((38864, 38890), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (38872, 38890), True, 'import numpy as np\n'), ((38943, 38963), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (38951, 38963), True, 'import numpy as np\n'), ((39004, 39068), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256)'}), '(img, 
scale, rot, shift, input_shape=256)\n', (39027, 39068), False, 'from deep_hipsc_tracking.model import preproc\n'), ((39203, 39234), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (39219, 39234), True, 'import numpy as np\n'), ((39268, 39294), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (39276, 39294), True, 'import numpy as np\n'), ((39347, 39367), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (39355, 39367), True, 'import numpy as np\n'), ((39408, 39472), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256)'}), '(img, scale, rot, shift, input_shape=256)\n', (39431, 39472), False, 'from deep_hipsc_tracking.model import preproc\n'), ((39616, 39647), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (39632, 39647), True, 'import numpy as np\n'), ((39681, 39707), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (39689, 39707), True, 'import numpy as np\n'), ((39760, 39780), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (39768, 39780), True, 'import numpy as np\n'), ((39821, 39892), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128)'}), '(img, scale, rot, shift, input_shape=(256, 128))\n', (39844, 39892), False, 'from deep_hipsc_tracking.model import preproc\n'), ((40030, 40058), 'numpy.random.random', 'np.random.random', (['(512, 512)'], {}), '((512, 512))\n', (40046, 40058), True, 'import numpy as np\n'), ((40094, 40120), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (40102, 40120), True, 'import numpy as np\n'), ((40173, 40193), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (40181, 40193), True, 'import numpy as np\n'), ((40234, 40308), 
'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128, 3)'}), '(img, scale, rot, shift, input_shape=(256, 128, 3))\n', (40257, 40308), False, 'from deep_hipsc_tracking.model import preproc\n'), ((40446, 40477), 'numpy.random.random', 'np.random.random', (['(512, 512, 1)'], {}), '((512, 512, 1))\n', (40462, 40477), True, 'import numpy as np\n'), ((40511, 40537), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (40519, 40537), True, 'import numpy as np\n'), ((40590, 40610), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (40598, 40610), True, 'import numpy as np\n'), ((40651, 40725), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128, 3)'}), '(img, scale, rot, shift, input_shape=(256, 128, 3))\n', (40674, 40725), False, 'from deep_hipsc_tracking.model import preproc\n'), ((40860, 40891), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (40876, 40891), True, 'import numpy as np\n'), ((40925, 40951), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (40933, 40951), True, 'import numpy as np\n'), ((41004, 41024), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (41012, 41024), True, 'import numpy as np\n'), ((41065, 41139), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128, 1)'}), '(img, scale, rot, shift, input_shape=(256, 128, 1))\n', (41088, 41139), False, 'from deep_hipsc_tracking.model import preproc\n'), ((41274, 41305), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (41290, 41305), True, 'import numpy as np\n'), ((41339, 41365), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (41347, 41365), 
True, 'import numpy as np\n'), ((41418, 41438), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (41426, 41438), True, 'import numpy as np\n'), ((41690, 41721), 'numpy.random.random', 'np.random.random', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (41706, 41721), True, 'import numpy as np\n'), ((41755, 41781), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (41763, 41781), True, 'import numpy as np\n'), ((41834, 41854), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (41842, 41854), True, 'import numpy as np\n'), ((42381, 42403), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (42397, 42403), True, 'import numpy as np\n'), ((42418, 42437), 'numpy.round', 'np.round', (['(img * 255)'], {}), '(img * 255)\n', (42426, 42437), True, 'import numpy as np\n'), ((43905, 43929), 'deep_hipsc_tracking.model.preproc.ImageResampler', 'preproc.ImageResampler', ([], {}), '()\n', (43927, 43929), False, 'from deep_hipsc_tracking.model import preproc\n'), ((3893, 3906), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (3902, 3906), True, 'import numpy as np\n'), ((3924, 3937), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (3933, 3937), True, 'import numpy as np\n'), ((10923, 10959), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['r', 'e'], {}), '(r, e)\n', (10953, 10959), True, 'import numpy as np\n'), ((15576, 15596), 'numpy.mean', 'np.mean', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (15583, 15596), True, 'import numpy as np\n'), ((26734, 26761), 'pathlib.Path', 'pathlib.Path', (['"""grr/foo.jpg"""'], {}), "('grr/foo.jpg')\n", (26746, 26761), False, 'import pathlib\n'), ((33312, 33321), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (33318, 33321), True, 'import numpy as np\n'), ((33323, 33351), 'numpy.array', 'np.array', (['[[150.0], [150.0]]'], {}), '([[150.0], [150.0]])\n', (33331, 33351), True, 'import numpy as np\n'), ((38172, 38193), 
'numpy.all', 'np.all', (['(exp_img2 == 0)'], {}), '(exp_img2 == 0)\n', (38178, 38193), True, 'import numpy as np\n'), ((41517, 41594), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128, 1, 1)'}), '(img, scale, rot, shift, input_shape=(256, 128, 1, 1))\n', (41540, 41594), False, 'from deep_hipsc_tracking.model import preproc\n'), ((41933, 42007), 'deep_hipsc_tracking.model.preproc.resample_in_box', 'preproc.resample_in_box', (['img', 'scale', 'rot', 'shift'], {'input_shape': '(256, 128, 2)'}), '(img, scale, rot, shift, input_shape=(256, 128, 2))\n', (41956, 42007), False, 'from deep_hipsc_tracking.model import preproc\n'), ((42842, 42864), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (42858, 42864), True, 'import numpy as np\n'), ((15333, 15410), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img[i:i + 64, j:j + 96, :])\n', (15363, 15410), True, 'import numpy as np\n'), ((16623, 16710), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img_gray[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img_gray[i:i + 64, j:j + \n 96, :])\n', (16653, 16710), True, 'import numpy as np\n'), ((17848, 17925), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img[i:i + 64, j:j + 96, :])\n', (17878, 17925), True, 'import numpy as np\n'), ((19144, 19221), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img[i:i + 64, j:j + 96, :])\n', (19174, 19221), True, 'import numpy as np\n'), ((20490, 20569), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img1[idx, ...]', 'img1[i:i + 64, j:j + 96, 
:]'], {}), '(out_img1[idx, ...], img1[i:i + 64, j:j + 96, :])\n', (20520, 20569), True, 'import numpy as np\n'), ((20629, 20708), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img2[idx, ...]', 'img2[i:i + 64, j:j + 96, :]'], {}), '(out_img2[idx, ...], img2[i:i + 64, j:j + 96, :])\n', (20659, 20708), True, 'import numpy as np\n'), ((22147, 22225), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img1[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img1[i:i + 64, j:j + 96, :])\n', (22177, 22225), True, 'import numpy as np\n'), ((22385, 22463), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img2[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img2[i:i + 64, j:j + 96, :])\n', (22415, 22463), True, 'import numpy as np\n'), ((22679, 22757), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img1[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img1[i:i + 64, j:j + 96, :])\n', (22709, 22757), True, 'import numpy as np\n'), ((24710, 24788), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img1[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img1[i:i + 64, j:j + 96, :])\n', (24740, 24788), True, 'import numpy as np\n'), ((24848, 24933), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_mask[idx, ...]', 'mask1[i:i + 64, j:j + 96, :]'], {}), '(out_mask[idx, ...], mask1[i:i + 64, j:j + 96, :]\n )\n', (24878, 24933), True, 'import numpy as np\n'), ((25088, 25166), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img2[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img2[i:i + 64, j:j + 96, :])\n', (25118, 25166), True, 'import numpy as np\n'), ((25226, 25311), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_mask[idx, ...]', 'mask2[i:i + 64, j:j + 
96, :]'], {}), '(out_mask[idx, ...], mask2[i:i + 64, j:j + 96, :]\n )\n', (25256, 25311), True, 'import numpy as np\n'), ((25522, 25600), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_img[idx, ...]', 'img1[i:i + 64, j:j + 96, :]'], {}), '(out_img[idx, ...], img1[i:i + 64, j:j + 96, :])\n', (25552, 25600), True, 'import numpy as np\n'), ((25660, 25745), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['out_mask[idx, ...]', 'mask1[i:i + 64, j:j + 96, :]'], {}), '(out_mask[idx, ...], mask1[i:i + 64, j:j + 96, :]\n )\n', (25690, 25745), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
from abcpy.graphtools import GraphTools
from abcpy.acceptedparametersmanager import *
import numpy as np
from sklearn import linear_model
class Summaryselections(metaclass=ABCMeta):
    """Abstract base class describing how summary statistics are selected.

    Concrete subclasses learn a transformation of the raw statistics (e.g.
    from pilot simulations) and expose it through :meth:`transformation`.
    """

    @abstractmethod
    def __init__(self, model, statistics_calc, backend, n_samples=1000, seed=None):
        """Initialize the summary-statistics selection scheme.

        A subclass constructor must accept a non-optional model, statistics
        calculator and backend, which are stored to ``self.model``,
        ``self.statistics_calc`` and ``self.backend``. It further accepts two
        optional parameters controlling the pilot run.

        Parameters
        ----------
        model : abcpy.models.Model
            Model object that conforms to the Model class.
        statistics_calc : abcpy.statistics.Statistics
            Statistics object that conforms to the Statistics class.
        backend : abcpy.backends.Backend
            Backend object that conforms to the Backend class.
        n_samples : int, optional
            Number of (parameter, simulated data) tuples generated in the
            pilot step used to learn the summary statistics. Default 1000.
        seed : int, optional
            Initial seed for the random number generator. The default value
            is generated randomly.
        """
        raise NotImplementedError

    def __getstate__(self):
        # Exclude the backend from pickled state: backends hold
        # non-serializable resources (e.g. cluster handles).
        pickled_state = dict(self.__dict__)
        del pickled_state['backend']
        return pickled_state

    @abstractmethod
    def transformation(self, statistics):
        """Map raw statistics to the learnt summary statistics."""
        raise NotImplementedError
class Semiautomatic(Summaryselections, GraphTools):
    """Semi-automatic summary statistics choice of Fearnhead and Prangle [1].

    A pilot run of ``n_samples`` prior simulations is performed (in parallel
    on the backend); one linear regression per model parameter is then fitted,
    regressing the parameter on the raw statistics. The learnt coefficient
    matrix defines a linear projection applied by :meth:`transformation`.

    [1] Fearnhead, P., Prangle, D. 2012. Constructing summary statistics for
    approximate Bayesian computation: semi-automatic approximate Bayesian
    computation. J. Roy. Stat. Soc. B 74:419-474.
    """

    def __init__(self, model, statistics_calc, backend, n_samples=1000, n_samples_per_param=1, seed=None):
        """Learn the semi-automatic summary-statistics projection.

        Parameters
        ----------
        model : abcpy.models.Model
            Model object that conforms to the Model class.
        statistics_calc : abcpy.statistics.Statistics
            Statistics object that conforms to the Statistics class.
        backend : abcpy.backends.Backend
            Backend object that conforms to the Backend class.
        n_samples : int, optional
            Number of (parameter, simulated data) tuples generated in the
            pilot step. Default 1000.
        n_samples_per_param : int, optional
            Number of data points in each simulated data set. Default 1.
        seed : int, optional
            Initial seed for the random number generator; random if omitted.
        """
        self.model = model
        self.statistics_calc = statistics_calc
        self.backend = backend
        self.rng = np.random.RandomState(seed)
        self.n_samples_per_param = n_samples_per_param

        # An object managing the bds objects
        self.accepted_parameters_manager = AcceptedParametersManager(self.model)
        self.accepted_parameters_manager.broadcast(self.backend, [])

        # Pilot run: draw one independent RNG per sample so workers do not
        # share random state. (Loop variable renamed: it previously shadowed
        # the ``seed`` parameter.)
        seed_arr = self.rng.randint(1, n_samples * n_samples, size=n_samples, dtype=np.int32)
        rng_arr = np.array([np.random.RandomState(s) for s in seed_arr])
        rng_pds = self.backend.parallelize(rng_arr)

        sample_parameters_statistics_pds = self.backend.map(self._sample_parameter_statistics, rng_pds)
        sample_parameters_and_statistics = self.backend.collect(sample_parameters_statistics_pds)
        sample_parameters, sample_statistics = [list(t) for t in zip(*sample_parameters_and_statistics)]
        sample_parameters = np.array(sample_parameters)
        sample_statistics = np.concatenate(sample_statistics)

        # One regression per parameter dimension; the learnt coefficients form
        # the rows of the projection matrix.
        self.coefficients_learnt = np.zeros(shape=(sample_parameters.shape[1], sample_statistics.shape[1]))
        regr = linear_model.LinearRegression(fit_intercept=True)
        for ind in range(sample_parameters.shape[1]):
            regr.fit(sample_statistics, sample_parameters[:, ind])
            self.coefficients_learnt[ind, :] = regr.coef_

    def transformation(self, statistics):
        """Project raw ``statistics`` onto the learnt summary statistics.

        Parameters
        ----------
        statistics : np.ndarray
            2D array of raw statistics, one row per data set; the number of
            columns must match what the projection was learnt on.

        Returns
        -------
        np.ndarray
            ``statistics @ coefficients_learnt.T``.

        Raises
        ------
        ValueError
            If the statistics dimension does not match the learnt projection.
        """
        if statistics.shape[1] != self.coefficients_learnt.shape[1]:
            raise ValueError('Mismatch in dimension of summary statistics')
        return np.dot(statistics, np.transpose(self.coefficients_learnt))

    def _sample_parameter_statistics(self, rng=None):
        """Draw one parameter from the prior and simulate its statistics.

        Parameters
        ----------
        rng : np.random.RandomState, optional
            Random number generator to use; a fresh one is created when
            omitted. (``None`` default instead of ``np.random.RandomState()``:
            the latter is evaluated once at class definition, so all default
            calls would share a single mutable RNG instance.)

        Returns
        -------
        tuple
            (parameter, statistics) for the sampled parameter.

        Raises
        ------
        RuntimeError
            If the model simulation returns no data. (Previously this case
            fell through to the return and raised an UnboundLocalError.)
        """
        if rng is None:
            rng = np.random.RandomState()
        self.sample_from_prior(rng=rng)
        parameter = self.get_parameters()
        y_sim = self.simulate(self.n_samples_per_param, rng=rng)
        if y_sim is None:
            raise RuntimeError('Model simulation returned no data for the sampled parameter')
        statistics = self.statistics_calc.statistics(y_sim)
        return (parameter, statistics)
| [
"numpy.zeros",
"numpy.transpose",
"numpy.random.RandomState",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"numpy.concatenate"
] | [((2513, 2540), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2534, 2540), True, 'import numpy as np\n'), ((3380, 3407), 'numpy.array', 'np.array', (['sample_parameters'], {}), '(sample_parameters)\n', (3388, 3407), True, 'import numpy as np\n'), ((3436, 3469), 'numpy.concatenate', 'np.concatenate', (['sample_statistics'], {}), '(sample_statistics)\n', (3450, 3469), True, 'import numpy as np\n'), ((3506, 3578), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sample_parameters.shape[1], sample_statistics.shape[1])'}), '(shape=(sample_parameters.shape[1], sample_statistics.shape[1]))\n', (3514, 3578), True, 'import numpy as np\n'), ((3594, 3643), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (3623, 3643), False, 'from sklearn import linear_model\n'), ((4137, 4160), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (4158, 4160), True, 'import numpy as np\n'), ((4049, 4087), 'numpy.transpose', 'np.transpose', (['self.coefficients_learnt'], {}), '(self.coefficients_learnt)\n', (4061, 4087), True, 'import numpy as np\n'), ((2940, 2967), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2961, 2967), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from scipy.linalg import null_space
import util.geometry_util as geo_util
from solvers.rigidity_solver import algo_core
from solvers.rigidity_solver.constraints_3d import direction_for_relative_disallowed_motions
from solvers.rigidity_solver.models import Model, Joint, Beam
class TestJointsAsStiffness(unittest.TestCase):
    """Tests that joints can be modelled as stiffness-matrix contributions.

    NOTE(review): both test methods previously carried the identical name
    ``test_3d_sliding_joint_no_special_soft_constraints``, so the second
    definition silently replaced the first and that test never ran. The
    second one is renamed here so unittest discovers and runs both.
    """

    def test_3d_sliding_joint_no_special_soft_constraints(self):
        """Two tets connected through a sliding joint.

        All the DoFs apart from the allowed ones are of uniform stiffness.
        """
        source_points = np.eye(3)
        target_points = np.eye(3) + np.array([1, 0, 1])
        pivot = np.array([0, 1, 0])
        rotation_axes = None
        translation_vectors = np.array([[1, 1, 1]])
        disallowed_motions = direction_for_relative_disallowed_motions(source_points, target_points,
                                                                     rotation_axes, pivot, translation_vectors)

        # Two tetrahedra: the three joint points of each plus one apex each.
        part_stiffness = algo_core.spring_energy_matrix(
            np.vstack((source_points, target_points, np.ones((1, 3)) * 3, np.ones((1, 3)) * 4)),
            np.array([(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3),
                      (0, 6), (1, 6), (2, 6),
                      (3, 7), (4, 7), (5, 7)])
        )
        joint_stiffness = np.zeros_like(part_stiffness)
        joint_stiffness[:18, :18] = disallowed_motions.T @ disallowed_motions

        global_stiffness = part_stiffness + joint_stiffness

        joint_stiffness_rank = np.linalg.matrix_rank(joint_stiffness)
        part_stiffness_rank = np.linalg.matrix_rank(part_stiffness)
        global_stiffness_rank = np.linalg.matrix_rank(global_stiffness)

        self.assertEqual(joint_stiffness_rank, 5)     # get rid of 3 rotation and 2 translation, hence 5
        self.assertEqual(part_stiffness_rank, 6 + 6)  # each part: 4 * 3 - 6 = 6; two parts: 6 + 6
        self.assertEqual(global_stiffness_rank, 17)   # 5 + 12

    def test_3d_hinge_joint_soft_translation(self):
        """Two tet beams connected through a hinge with a soft translational DoF.

        With a zero softness coefficient the translation stays a zero-energy
        mode; with a small positive coefficient that mode gains nonzero energy.
        """
        points = np.array([
            [0, 0, 0],
            [1, 0, 0],
            [0, 1, 0],
        ]) * 10
        beams = [
            Beam.tetra(points[0], points[1]),
            Beam.tetra(points[0], points[2]),
        ]
        pivot = points[0]
        rotation_axes = np.array([0, 0, 1])

        for index, coeff in enumerate((0, 0.0000002)):
            hinge = Joint(beams[0], beams[1], pivot, rotation_axes=rotation_axes,
                          soft_translation=np.array([0, 0, 1]), soft_translation_coeff=np.array([coeff])
                          )
            model = Model()
            model.add_beams(beams)
            model.add_joints([hinge])

            pairs = model.soft_solve()
            if index == 0:
                # Zero coefficient: both the hinge rotation and the soft
                # translation are zero-energy modes.
                self.assertTrue(np.isclose(pairs[6][0], 0))
                self.assertTrue(np.isclose(pairs[7][0], 0))
            elif index == 1:
                # Positive coefficient: only one zero-energy mode remains.
                self.assertTrue(np.isclose(pairs[6][0], 0))
                self.assertFalse(np.isclose(pairs[7][0], 0))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.zeros_like",
"solvers.rigidity_solver.models.Model",
"solvers.rigidity_solver.models.Beam.tetra",
"solvers.rigidity_solver.constraints_3d.direction_for_relative_disallowed_motions",
"numpy.ones",
"numpy.isclose",
"numpy.linalg.matrix_rank",
"numpy.array",
"numpy.eye"
] | [((3275, 3290), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3288, 3290), False, 'import unittest\n'), ((608, 617), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (614, 617), True, 'import numpy as np\n'), ((690, 709), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (698, 709), True, 'import numpy as np\n'), ((769, 790), 'numpy.array', 'np.array', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (777, 790), True, 'import numpy as np\n'), ((821, 939), 'solvers.rigidity_solver.constraints_3d.direction_for_relative_disallowed_motions', 'direction_for_relative_disallowed_motions', (['source_points', 'target_points', 'rotation_axes', 'pivot', 'translation_vectors'], {}), '(source_points, target_points,\n rotation_axes, pivot, translation_vectors)\n', (862, 939), False, 'from solvers.rigidity_solver.constraints_3d import direction_for_relative_disallowed_motions\n'), ((1362, 1391), 'numpy.zeros_like', 'np.zeros_like', (['part_stiffness'], {}), '(part_stiffness)\n', (1375, 1391), True, 'import numpy as np\n'), ((1563, 1601), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['joint_stiffness'], {}), '(joint_stiffness)\n', (1584, 1601), True, 'import numpy as np\n'), ((1632, 1669), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['part_stiffness'], {}), '(part_stiffness)\n', (1653, 1669), True, 'import numpy as np\n'), ((1702, 1741), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['global_stiffness'], {}), '(global_stiffness)\n', (1723, 1741), True, 'import numpy as np\n'), ((2515, 2534), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2523, 2534), True, 'import numpy as np\n'), ((642, 651), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (648, 651), True, 'import numpy as np\n'), ((654, 673), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (662, 673), True, 'import numpy as np\n'), ((1174, 1285), 'numpy.array', 'np.array', (['[(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3), (0, 6), (1, 6), (2, 6), 
(3,\n 7), (4, 7), (5, 7)]'], {}), '([(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3), (0, 6), (1, 6), (\n 2, 6), (3, 7), (4, 7), (5, 7)])\n', (1182, 1285), True, 'import numpy as np\n'), ((2249, 2292), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 1, 0]])\n', (2257, 2292), True, 'import numpy as np\n'), ((2375, 2407), 'solvers.rigidity_solver.models.Beam.tetra', 'Beam.tetra', (['points[0]', 'points[1]'], {}), '(points[0], points[1])\n', (2385, 2407), False, 'from solvers.rigidity_solver.models import Model, Joint, Beam\n'), ((2421, 2453), 'solvers.rigidity_solver.models.Beam.tetra', 'Beam.tetra', (['points[0]', 'points[2]'], {}), '(points[0], points[2])\n', (2431, 2453), False, 'from solvers.rigidity_solver.models import Model, Joint, Beam\n'), ((2825, 2832), 'solvers.rigidity_solver.models.Model', 'Model', ([], {}), '()\n', (2830, 2832), False, 'from solvers.rigidity_solver.models import Model, Joint, Beam\n'), ((2715, 2734), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2723, 2734), True, 'import numpy as np\n'), ((2759, 2776), 'numpy.array', 'np.array', (['[coeff]'], {}), '([coeff])\n', (2767, 2776), True, 'import numpy as np\n'), ((3004, 3030), 'numpy.isclose', 'np.isclose', (['pairs[6][0]', '(0)'], {}), '(pairs[6][0], 0)\n', (3014, 3030), True, 'import numpy as np\n'), ((3064, 3090), 'numpy.isclose', 'np.isclose', (['pairs[7][0]', '(0)'], {}), '(pairs[7][0], 0)\n', (3074, 3090), True, 'import numpy as np\n'), ((1118, 1133), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1125, 1133), True, 'import numpy as np\n'), ((1139, 1154), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1146, 1154), True, 'import numpy as np\n'), ((3153, 3179), 'numpy.isclose', 'np.isclose', (['pairs[6][0]', '(0)'], {}), '(pairs[6][0], 0)\n', (3163, 3179), True, 'import numpy as np\n'), ((3214, 3240), 'numpy.isclose', 'np.isclose', (['pairs[7][0]', '(0)'], {}), '(pairs[7][0], 0)\n', (3224, 
3240), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: <NAME>, <EMAIL> at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, <NAME>
# Copyright (c) 2010-2013, <NAME> <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
# file = '/Users/daveism/Downloads/75.png'
# ds = gdal.Open(file, gdal.GA_Update)
# band1 = ds.GetRasterBand(1)
# band2 = ds.GetRasterBand(2)
# band3 = ds.GetRasterBand(3)
#
# band1.SetNoDataValue(0)
# band2.SetNoDataValue(0)
# band3.SetNoDataValue(0)
#
# walk_dir = '/Users/daveism/nfwf_test_tiles/south_florida'
# Root of the tile pyramid whose "empty" PNG tiles should be removed.
walk_dir = sys.argv[1]
print('walk_dir = ' + walk_dir)
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
# print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
for root, subdirs, files in os.walk(walk_dir):
    for filename in files:
        file_path = os.path.join(root, filename)
        ext = os.path.splitext(file_path)[-1].lower()
        if ext == ".png":
            ds = gdal.Open(file_path, gdal.GA_ReadOnly)
            data = ds.ReadAsArray()
            # A tile is considered empty when every non-zero pixel equals 255,
            # i.e. the set of non-zero values coincides with the 255-valued set.
            fullcount = numpy.count_nonzero(data)
            count255 = numpy.count_nonzero(data == 255)
            # Release the GDAL dataset handle before attempting deletion;
            # some platforms refuse to remove a file that is still open.
            ds = None
            if fullcount == count255:
                os.remove(file_path)
                print('Deleting all 255 file %s (full path: %s)' % (filename, file_path))
            # else:
            #     print('\t- mixed values file %s (full path: %s)' % (filename, file_path))
| [
"os.remove",
"numpy.count_nonzero",
"os.path.join",
"os.walk",
"os.path.splitext",
"osgeo.gdal.Open",
"osgeo.gdal.GetDriverByName"
] | [((3316, 3333), 'os.walk', 'os.walk', (['walk_dir'], {}), '(walk_dir)\n', (3323, 3333), False, 'import os\n'), ((3382, 3410), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (3394, 3410), False, 'import os\n'), ((3509, 3547), 'osgeo.gdal.Open', 'gdal.Open', (['file_path', 'gdal.GA_ReadOnly'], {}), '(file_path, gdal.GA_ReadOnly)\n', (3518, 3547), False, 'from osgeo import gdal\n'), ((3570, 3597), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""MEM"""'], {}), "('MEM')\n", (3590, 3597), False, 'from osgeo import gdal\n'), ((3798, 3823), 'numpy.count_nonzero', 'numpy.count_nonzero', (['data'], {}), '(data)\n', (3817, 3823), False, 'import numpy\n'), ((3847, 3879), 'numpy.count_nonzero', 'numpy.count_nonzero', (['(data == 255)'], {}), '(data == 255)\n', (3866, 3879), False, 'import numpy\n'), ((4091, 4111), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (4100, 4111), False, 'import os\n'), ((3425, 3452), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (3441, 3452), False, 'import os\n')] |
import numpy as np
from nearpy.distances.distance import Distance
def squared_euclidean_distance(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Return the squared Euclidean distance between *x* and *y*.

    Equivalent to summing ``(x - y) ** 2`` over all elements.
    """
    return np.square(x - y).sum()
class SquaredEuclideanDistance(Distance):
    """Squared Euclidean distance metric for nearpy data structures."""

    def distance(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Return the squared Euclidean distance between vectors *x* and *y* as a float."""
        delta = x - y
        return np.sum(delta ** 2)
| [
"numpy.sum"
] | [((170, 184), 'numpy.sum', 'np.sum', (['(d ** 2)'], {}), '(d ** 2)\n', (176, 184), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) 2011-2019, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import unittest
import numpy as np
from wradlib import adjust
# Arguments to be used throughout all test classes
# 4x4 regular grid of raw (radar) pixel coordinates, float32
raw_x, raw_y = np.meshgrid(np.arange(4).astype("f4"),
                           np.arange(4).astype("f4"))
# (16, 2) array of raw pixel coordinates
raw_coords = np.vstack((raw_x.ravel(), raw_y.ravel())).T
# coordinates of the four rain gauges
obs_coords = np.array([[1., 1.], [2., 1.], [1., 3.5], [3.5, 3.]])
# raw values at the 16 pixels; the second column duplicates the first
# so the 2-D (multi-timestep) code path is exercised with known values
raw = np.array([[1., 2., 1., 0., 1., 2., 1.,
           2., 1., 0., 0., 3., 4., 0., 4., 0.],
          [1., 2., 1., 0., 1., 2., 1.,
           2., 1., 0., 0., 3., 4., 0., 4., 0.]
          ]).T
# gauge observations at the four gauge locations, duplicated likewise
obs = np.array([[2., 3, 0., 4.], [2., 3, 0., 4.]]).T
# number of nearest raw neighbours used by the adjustment objects
nnear_raws = 2
# minimum number of valid gauges required by the adjustment objects
mingages = 3
class AdjustBaseTest(unittest.TestCase):
    """Skeleton tests for the shared ``adjust.AdjustBase`` interface.

    All test bodies are intentionally empty placeholders; they enumerate the
    base-class methods a complete suite should eventually cover.
    """
    def setUp(self):
        # Bind the module-level fixtures to the test instance.
        self.raw_coords = raw_coords
        self.obs_coords = obs_coords
        self.raw = raw
        self.obs = obs
        self.nnear_raws = nnear_raws
        self.mingages = mingages
    def test___init__(self):
        pass
    def test__checkip(self):
        pass
    def test__check_shape(self):
        pass
    def test___call__(self):
        pass
    def test__get_valid_pairs(self):
        pass
    def test_xvalidate(self):
        pass
class AdjustAddTest(unittest.TestCase):
    """Tests for additive gauge adjustment (``adjust.AdjustAdd``)."""
    def setUp(self):
        # Module-level fixtures: 4x4 raw grid, 4 gauges, two identical columns.
        self.raw_coords = raw_coords
        self.obs_coords = obs_coords
        self.raw = raw
        self.obs = obs
        self.nnear_raws = nnear_raws
        self.mingages = mingages
    def test_AdjustAdd_1(self):
        adj = adjust.AdjustAdd(self.obs_coords, self.raw_coords,
                               nnear_raws=self.nnear_raws,
                               mingages=self.mingages)
        res = adj(self.obs, self.raw)
        # Reference values; both columns are identical because both input
        # columns of `raw` and `obs` are identical.
        shouldbe = np.array([[1.62818784, 1.62818784],
                             [2.75926679, 2.75926679],
                             [2.09428144, 2.09428144],
                             [1.1466651, 1.1466651],
                             [1.51948941, 1.51948941],
                             [2.5, 2.5],
                             [2.5, 2.5],
                             [3.27498305, 3.27498305],
                             [1.11382822, 1.11382822],
                             [0.33900645, 0.33900645],
                             [0.89999998, 0.89999998],
                             [4.52409637, 4.52409637],
                             [3.08139533, 3.08139533],
                             [0., 0.],
                             [3.99180328, 3.99180328],
                             [2.16913891, 2.16913891]])
        self.assertTrue(np.allclose(res, shouldbe))
        # test in case only one dataset is passed
        res = adj(self.obs[:, 0], self.raw[:, 0])
        self.assertTrue(np.allclose(res, shouldbe[:, 0]))
class AdjustMultiplyTest(unittest.TestCase):
    """Tests for multiplicative gauge adjustment (``adjust.AdjustMultiply``)."""
    def setUp(self):
        # Module-level fixtures: 4x4 raw grid, 4 gauges, two identical columns.
        self.raw_coords = raw_coords
        self.obs_coords = obs_coords
        self.raw = raw
        self.obs = obs
        self.nnear_raws = nnear_raws
        self.mingages = mingages
    def test_AdjustMultiply_1(self):
        adj = adjust.AdjustMultiply(self.obs_coords, self.raw_coords,
                                    nnear_raws=self.nnear_raws,
                                    mingages=self.mingages)
        res = adj(self.obs, self.raw)
        # Reference values; zero raw pixels stay zero under a multiplicative
        # correction, unlike the additive variant above.
        shouldbe = np.array([[1.44937706, 1.44937706],
                             [3.04539442, 3.04539442],
                             [1.74463618, 1.74463618],
                             [0., 0.],
                             [1.37804615, 1.37804615],
                             [2.66666675, 2.66666675],
                             [2., 2.],
                             [3.74106812, 3.74106812],
                             [1.17057478, 1.17057478],
                             [0., 0.],
                             [0., 0.],
                             [6.14457822, 6.14457822],
                             [2.43439031, 2.43439031],
                             [0., 0.],
                             [4.60765028, 4.60765028],
                             [0., 0.]])
        self.assertTrue(np.allclose(res, shouldbe))
        # test in case only one dataset is passed
        res = adj(self.obs[:, 0], self.raw[:, 0])
        self.assertTrue(np.allclose(res, shouldbe[:, 0]))
class AdjustMixedTest(unittest.TestCase):
    """Tests for mixed additive/multiplicative adjustment (``adjust.AdjustMixed``)."""
    def setUp(self):
        # Module-level fixtures: 4x4 raw grid, 4 gauges, two identical columns.
        self.raw_coords = raw_coords
        self.obs_coords = obs_coords
        self.raw = raw
        self.obs = obs
        self.nnear_raws = nnear_raws
        self.mingages = mingages
    def test_AdjustMixed_1(self):
        adj = adjust.AdjustMixed(self.obs_coords, self.raw_coords,
                                 nnear_raws=self.nnear_raws,
                                 mingages=self.mingages)
        res = adj(self.obs, self.raw)
        # Reference values; note the mixed model can produce slightly
        # negative outputs (see row 14), unlike the other adjustments.
        shouldbe = np.array([[1.51427719, 1.51427719],
                             [2.95735525, 2.95735525],
                             [1.85710269, 1.85710269],
                             [0.36806121, 0.36806121],
                             [1.43181512, 1.43181512],
                             [2.61538471, 2.61538471],
                             [2.15384617, 2.15384617],
                             [3.59765723, 3.59765723],
                             [1.18370627, 1.18370627],
                             [0.15027952, 0.15027952],
                             [0.30825174, 0.30825174],
                             [5.63558862, 5.63558862],
                             [2.49066845, 2.49066845],
                             [-0.29200733, -0.29200733],
                             [4.31646909, 4.31646909],
                             [0.67854041, 0.67854041]])
        self.assertTrue(np.allclose(res, shouldbe))
        # test in case only one dataset is passed
        res = adj(self.obs[:, 0], self.raw[:, 0])
        self.assertTrue(np.allclose(res, shouldbe[:, 0]))
class AdjustMFBTest(unittest.TestCase):
    """Tests for mean-field-bias adjustment (``adjust.AdjustMFB``)."""

    def setUp(self):
        # Two raw pixels and two gauges; mingages=0 disables the gauge check.
        self.raw_coords = np.array([[0., 0.], [1., 1.]])
        self.obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
        self.raw = np.array([2., 2.])
        self.obs = np.array([4., 4.])
        self.nnear_raws = nnear_raws
        self.mingages = 0
        self.mfb_args = dict(method="mean")

    def test_AdjustMFB_1(self):
        adjuster = adjust.AdjustMFB(self.obs_coords, self.raw_coords,
                                    nnear_raws=self.nnear_raws,
                                    mingages=self.mingages, mfb_args=self.mfb_args)
        adjusted = adjuster(self.obs, self.raw)
        # Raw field is uniformly 2, gauges read 4: mean bias doubles the field.
        expected = np.array([4., 4.])
        self.assertTrue(np.allclose(adjusted, expected))
        # Exercise the alternative bias estimators; results are not asserted.
        adjuster = adjust.AdjustMFB(self.obs_coords, self.raw_coords,
                                    nnear_raws=self.nnear_raws,
                                    mingages=self.mingages,
                                    mfb_args=dict(method="median"))
        adjuster(self.obs, self.raw)
        adjuster = adjust.AdjustMFB(self.obs_coords, self.raw_coords,
                                    nnear_raws=self.nnear_raws,
                                    mingages=self.mingages,
                                    mfb_args=dict(method="linregr", minslope=1.0,
                                                  minr='0.7', maxp=0.5))
        adjuster(self.obs, self.raw)
class AdjustNoneTest(unittest.TestCase):
    """Tests for the no-op adjustment (``adjust.AdjustNone``)."""

    def setUp(self):
        # Two raw pixels and two gauges; mingages=0 disables the gauge check.
        self.raw_coords = np.array([[0., 0.], [1., 1.]])
        self.obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
        self.raw = np.array([2., 2.])
        self.obs = np.array([4., 4.])
        self.nnear_raws = nnear_raws
        self.mingages = 0
        self.mfb_args = dict(method="mean")

    def test_AdjustNone_1(self):
        adjuster = adjust.AdjustNone(self.obs_coords, self.raw_coords,
                                     nnear_raws=self.nnear_raws,
                                     mingages=self.mingages)
        adjusted = adjuster(self.obs, self.raw)
        # AdjustNone must hand back the raw values unchanged.
        expected = np.array([2., 2.])
        self.assertTrue(np.allclose(adjusted, expected))
class GageOnlyTest(unittest.TestCase):
    """Tests for gauge-only interpolation (``adjust.GageOnly``)."""

    def setUp(self):
        # Two raw pixels and two gauges; mingages=0 disables the gauge check.
        self.raw_coords = np.array([[0., 0.], [1., 1.]])
        self.obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
        self.raw = np.array([2., 2.])
        self.obs = np.array([4., 4.])
        self.nnear_raws = nnear_raws
        self.mingages = 0
        self.mfb_args = dict(method="mean")

    def test_GageOnly_1(self):
        adjuster = adjust.GageOnly(self.obs_coords, self.raw_coords,
                                   nnear_raws=self.nnear_raws,
                                   mingages=self.mingages)
        adjusted = adjuster(self.obs, self.raw)
        # The result must reproduce the gauge readings, ignoring the raw field.
        expected = np.array([4., 4.])
        self.assertTrue(np.allclose(adjusted, expected))
class AdjustHelperTest(unittest.TestCase):
    """Tests for the module-level helper functions of ``wradlib.adjust``."""

    def test__get_neighbours_ix(self):
        # Placeholder; not yet covered.
        pass

    def test__get_statfunc(self):
        # Known statistic names resolve without raising...
        adjust._get_statfunc('median')
        adjust._get_statfunc('best')
        # ...while an unknown name raises NameError.
        with self.assertRaises(NameError):
            adjust._get_statfunc('wradlib')

    def test_best(self):
        target = 7.5
        candidates = np.array([0., 1., 0., 1., 0., 7.7, 8., 8., 8., 8.])
        # 7.7 is the candidate value closest to 7.5.
        self.assertEqual(adjust.best(target, candidates), 7.7)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"wradlib.adjust._get_statfunc",
"wradlib.adjust.AdjustNone",
"numpy.allclose",
"wradlib.adjust.AdjustAdd",
"wradlib.adjust.GageOnly",
"numpy.array",
"numpy.arange",
"wradlib.adjust.AdjustMFB",
"wradlib.adjust.AdjustMultiply",
"wradlib.adjust.best",
"wradlib.adjust.AdjustMixed"... | [((432, 490), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 1.0], [1.0, 3.5], [3.5, 3.0]]'], {}), '([[1.0, 1.0], [2.0, 1.0], [1.0, 3.5], [3.5, 3.0]])\n', (440, 490), True, 'import numpy as np\n'), ((491, 673), 'numpy.array', 'np.array', (['[[1.0, 2.0, 1.0, 0.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0, 0.0, 3.0, 4.0, 0.0, 4.0,\n 0.0], [1.0, 2.0, 1.0, 0.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0, 0.0, 3.0, 4.0,\n 0.0, 4.0, 0.0]]'], {}), '([[1.0, 2.0, 1.0, 0.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0, 0.0, 3.0, 4.0,\n 0.0, 4.0, 0.0], [1.0, 2.0, 1.0, 0.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0, 0.0,\n 3.0, 4.0, 0.0, 4.0, 0.0]])\n', (499, 673), True, 'import numpy as np\n'), ((708, 758), 'numpy.array', 'np.array', (['[[2.0, 3, 0.0, 4.0], [2.0, 3, 0.0, 4.0]]'], {}), '([[2.0, 3, 0.0, 4.0], [2.0, 3, 0.0, 4.0]])\n', (716, 758), True, 'import numpy as np\n'), ((9236, 9251), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9249, 9251), False, 'import unittest\n'), ((1608, 1715), 'wradlib.adjust.AdjustAdd', 'adjust.AdjustAdd', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages)\n', (1624, 1715), False, 'from wradlib import adjust\n'), ((1830, 2237), 'numpy.array', 'np.array', (['[[1.62818784, 1.62818784], [2.75926679, 2.75926679], [2.09428144, \n 2.09428144], [1.1466651, 1.1466651], [1.51948941, 1.51948941], [2.5, \n 2.5], [2.5, 2.5], [3.27498305, 3.27498305], [1.11382822, 1.11382822], [\n 0.33900645, 0.33900645], [0.89999998, 0.89999998], [4.52409637, \n 4.52409637], [3.08139533, 3.08139533], [0.0, 0.0], [3.99180328, \n 3.99180328], [2.16913891, 2.16913891]]'], {}), '([[1.62818784, 1.62818784], [2.75926679, 2.75926679], [2.09428144, \n 2.09428144], [1.1466651, 1.1466651], [1.51948941, 1.51948941], [2.5, \n 2.5], [2.5, 2.5], [3.27498305, 3.27498305], [1.11382822, 1.11382822], [\n 0.33900645, 0.33900645], [0.89999998, 0.89999998], 
[4.52409637, \n 4.52409637], [3.08139533, 3.08139533], [0.0, 0.0], [3.99180328, \n 3.99180328], [2.16913891, 2.16913891]])\n', (1838, 2237), True, 'import numpy as np\n'), ((3166, 3278), 'wradlib.adjust.AdjustMultiply', 'adjust.AdjustMultiply', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages)\n', (3187, 3278), False, 'from wradlib import adjust\n'), ((3403, 3765), 'numpy.array', 'np.array', (['[[1.44937706, 1.44937706], [3.04539442, 3.04539442], [1.74463618, \n 1.74463618], [0.0, 0.0], [1.37804615, 1.37804615], [2.66666675, \n 2.66666675], [2.0, 2.0], [3.74106812, 3.74106812], [1.17057478, \n 1.17057478], [0.0, 0.0], [0.0, 0.0], [6.14457822, 6.14457822], [\n 2.43439031, 2.43439031], [0.0, 0.0], [4.60765028, 4.60765028], [0.0, 0.0]]'], {}), '([[1.44937706, 1.44937706], [3.04539442, 3.04539442], [1.74463618, \n 1.74463618], [0.0, 0.0], [1.37804615, 1.37804615], [2.66666675, \n 2.66666675], [2.0, 2.0], [3.74106812, 3.74106812], [1.17057478, \n 1.17057478], [0.0, 0.0], [0.0, 0.0], [6.14457822, 6.14457822], [\n 2.43439031, 2.43439031], [0.0, 0.0], [4.60765028, 4.60765028], [0.0, 0.0]])\n', (3411, 3765), True, 'import numpy as np\n'), ((4683, 4792), 'wradlib.adjust.AdjustMixed', 'adjust.AdjustMixed', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages)\n', (4701, 4792), False, 'from wradlib import adjust\n'), ((4911, 5369), 'numpy.array', 'np.array', (['[[1.51427719, 1.51427719], [2.95735525, 2.95735525], [1.85710269, \n 1.85710269], [0.36806121, 0.36806121], [1.43181512, 1.43181512], [\n 2.61538471, 2.61538471], [2.15384617, 2.15384617], [3.59765723, \n 3.59765723], [1.18370627, 1.18370627], [0.15027952, 0.15027952], [\n 0.30825174, 0.30825174], [5.63558862, 5.63558862], [2.49066845, 
\n 2.49066845], [-0.29200733, -0.29200733], [4.31646909, 4.31646909], [\n 0.67854041, 0.67854041]]'], {}), '([[1.51427719, 1.51427719], [2.95735525, 2.95735525], [1.85710269, \n 1.85710269], [0.36806121, 0.36806121], [1.43181512, 1.43181512], [\n 2.61538471, 2.61538471], [2.15384617, 2.15384617], [3.59765723, \n 3.59765723], [1.18370627, 1.18370627], [0.15027952, 0.15027952], [\n 0.30825174, 0.30825174], [5.63558862, 5.63558862], [2.49066845, \n 2.49066845], [-0.29200733, -0.29200733], [4.31646909, 4.31646909], [\n 0.67854041, 0.67854041]])\n', (4919, 5369), True, 'import numpy as np\n'), ((6075, 6109), 'numpy.array', 'np.array', (['[[0.0, 0.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [1.0, 1.0]])\n', (6083, 6109), True, 'import numpy as np\n'), ((6132, 6166), 'numpy.array', 'np.array', (['[[0.5, 0.5], [1.5, 1.5]]'], {}), '([[0.5, 0.5], [1.5, 1.5]])\n', (6140, 6166), True, 'import numpy as np\n'), ((6186, 6206), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (6194, 6206), True, 'import numpy as np\n'), ((6224, 6244), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (6232, 6244), True, 'import numpy as np\n'), ((6397, 6528), 'wradlib.adjust.AdjustMFB', 'adjust.AdjustMFB', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages', 'mfb_args': 'self.mfb_args'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages, mfb_args=self.mfb_args)\n', (6413, 6528), False, 'from wradlib import adjust\n'), ((6643, 6663), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (6651, 6663), True, 'import numpy as np\n'), ((7435, 7469), 'numpy.array', 'np.array', (['[[0.0, 0.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [1.0, 1.0]])\n', (7443, 7469), True, 'import numpy as np\n'), ((7492, 7526), 'numpy.array', 'np.array', (['[[0.5, 0.5], [1.5, 1.5]]'], {}), '([[0.5, 0.5], [1.5, 1.5]])\n', (7500, 7526), True, 'import numpy as np\n'), ((7546, 7566), 'numpy.array', 
'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (7554, 7566), True, 'import numpy as np\n'), ((7584, 7604), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (7592, 7604), True, 'import numpy as np\n'), ((7758, 7866), 'wradlib.adjust.AdjustNone', 'adjust.AdjustNone', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages)\n', (7775, 7866), False, 'from wradlib import adjust\n'), ((7983, 8003), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (7991, 8003), True, 'import numpy as np\n'), ((8142, 8176), 'numpy.array', 'np.array', (['[[0.0, 0.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [1.0, 1.0]])\n', (8150, 8176), True, 'import numpy as np\n'), ((8199, 8233), 'numpy.array', 'np.array', (['[[0.5, 0.5], [1.5, 1.5]]'], {}), '([[0.5, 0.5], [1.5, 1.5]])\n', (8207, 8233), True, 'import numpy as np\n'), ((8253, 8273), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (8261, 8273), True, 'import numpy as np\n'), ((8291, 8311), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (8299, 8311), True, 'import numpy as np\n'), ((8463, 8569), 'wradlib.adjust.GageOnly', 'adjust.GageOnly', (['self.obs_coords', 'self.raw_coords'], {'nnear_raws': 'self.nnear_raws', 'mingages': 'self.mingages'}), '(self.obs_coords, self.raw_coords, nnear_raws=self.\n nnear_raws, mingages=self.mingages)\n', (8478, 8569), False, 'from wradlib import adjust\n'), ((8682, 8702), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (8690, 8702), True, 'import numpy as np\n'), ((8893, 8923), 'wradlib.adjust._get_statfunc', 'adjust._get_statfunc', (['"""median"""'], {}), "('median')\n", (8913, 8923), False, 'from wradlib import adjust\n'), ((8932, 8960), 'wradlib.adjust._get_statfunc', 'adjust._get_statfunc', (['"""best"""'], {}), "('best')\n", (8952, 8960), False, 'from wradlib import 
adjust\n'), ((9102, 9162), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 0.0, 7.7, 8.0, 8.0, 8.0, 8.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 0.0, 7.7, 8.0, 8.0, 8.0, 8.0])\n', (9110, 9162), True, 'import numpy as np\n'), ((281, 293), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (290, 293), True, 'import numpy as np\n'), ((335, 347), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (344, 347), True, 'import numpy as np\n'), ((2670, 2696), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (2681, 2696), True, 'import numpy as np\n'), ((2822, 2854), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe[:, 0]'], {}), '(res, shouldbe[:, 0])\n', (2833, 2854), True, 'import numpy as np\n'), ((4193, 4219), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (4204, 4219), True, 'import numpy as np\n'), ((4345, 4377), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe[:, 0]'], {}), '(res, shouldbe[:, 0])\n', (4356, 4377), True, 'import numpy as np\n'), ((5800, 5826), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (5811, 5826), True, 'import numpy as np\n'), ((5952, 5984), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe[:, 0]'], {}), '(res, shouldbe[:, 0])\n', (5963, 5984), True, 'import numpy as np\n'), ((6686, 6712), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (6697, 6712), True, 'import numpy as np\n'), ((8026, 8052), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (8037, 8052), True, 'import numpy as np\n'), ((8725, 8751), 'numpy.allclose', 'np.allclose', (['res', 'shouldbe'], {}), '(res, shouldbe)\n', (8736, 8751), True, 'import numpy as np\n'), ((9016, 9047), 'wradlib.adjust._get_statfunc', 'adjust._get_statfunc', (['"""wradlib"""'], {}), "('wradlib')\n", (9036, 9047), False, 'from wradlib import adjust\n'), ((9179, 9196), 'wradlib.adjust.best', 'adjust.best', (['x', 'y'], {}), '(x, y)\n', (9190, 
9196), False, 'from wradlib import adjust\n')] |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The original file of this is
#
# https://github.com/google-research/disentanglement_lib/disentanglement_lib/data/ground_truth/dsprites.py
#
# and has been modified by koukyo1994 to add customizability.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pathlib import Path
from PIL import Image
from typing import Union
from pytorch_disentanglement_lib.datasets.base import DatasetBase
class DSprites(DatasetBase):
    """
    DSprites dataset.
    The data set was originally introduced in "beta-VAE: Learning Basic Visual
    Concepts with a Constrained Variational Framework" and can be downloaded from
    https://github.com/deepmind/dsprites-dataset.
    The ground-truth factors of variation are (in the default setting):
    0 - shape (3 different values)
    1 - scale (6 different values)
    2 - orientation (40 different values)
    3 - position x (32 different values)
    4 - position y (32 different values)
    """
    def __init__(self, state_space, dataset_path: Union[str, Path], latent_factor_indices=None):
        # By default, all factors (including shape) are considered as ground truth factors
        if latent_factor_indices is None:
            latent_factor_indices = list(range(6))
        self.latent_factor_indices = latent_factor_indices
        self.data_shape = [64, 64, 1]
        # NOTE(review): allow_pickle=True is needed to read the dsprites
        # metadata but is unsafe on untrusted files — only load trusted data.
        data = np.load(dataset_path, encoding="latin1", allow_pickle=True)
        self.images = np.array(data["imgs"])
        # Skip the first entry of latents_sizes (presumably the color latent
        # of size 1) — TODO confirm against the dsprites metadata layout.
        self.factor_sizes = data["metadata"][()]["latents_sizes"][1:]
        self.full_factor_sizes = [3, 6, 40, 32, 32]
        # Mixed-radix bases used to flatten a factor tuple into an image index.
        self.factor_bases = np.prod(self.factor_sizes) / np.cumprod(self.factor_sizes)
        self.state_space = state_space(factor_sizes=self.factor_sizes, latent_factor_indices=self.latent_factor_indices)
    @property
    def num_factors(self):
        # Number of latent factors exposed by the configured state space.
        return self.state_space.num_latent_factors
    @property
    def factors_num_values(self):
        # Cardinality of each selected ground-truth factor.
        return [self.full_factor_sizes[i] for i in self.latent_factor_indices]
    @property
    def observation_shape(self):
        # Shape of a single observation, e.g. [64, 64, 1].
        return self.data_shape
    def sample_factors(self, num: int, random_state: np.random.RandomState):
        """
        Sample a batch of factors Y.
        """
        return self.state_space.sample_latent_factors(num, random_state)
    def sample_observations_from_factors(self, factors: np.ndarray, random_state: np.random.RandomState):
        """Return the images for *factors* as float32 with a channel axis appended (axis=3)."""
        all_factors = self.state_space.sample_all_factors(factors, random_state)
        # Flatten each factor tuple into an index into the image array.
        indices = np.array(np.dot(all_factors, self.factor_bases), dtype=np.int64)
        return np.expand_dims(self.images[indices].astype(np.float32), axis=3)
class ColorDSprites(DSprites):
    """
    Color DSprites.
    This data set is the same as the original DSprites dataset except that when
    sampling the observation X, the sprites is colored in a randomly sampled color.
    The ground-truth factors of variation are (in the default setting):
    0 - shape (3 different values)
    1 - scale (6 different values)
    2 - orientation (40 different values)
    3 - position x (32 different values)
    4 - position y (32 different values)
    """
    def __init__(self, state_space, dataset_path: Union[str, Path], latent_factor_indices=None):
        super().__init__(state_space, dataset_path, latent_factor_indices)
        # Observations are RGB, not single-channel.
        self.data_shape = [64, 64, 3]

    # BUG FIX: this method was previously named `sample_obbservations_from_factors`
    # (typo), so it never overrode the base-class method and colored sampling
    # was never applied — the sibling classes below use the correct name.
    def sample_observations_from_factors(self, factors: np.ndarray, random_state: np.random.RandomState):
        """Sample observations where each sprite is tinted with one random RGB color."""
        no_color_observations = super().sample_observations_from_factors(factors, random_state)
        # Replicate the single channel into RGB before coloring.
        observations = np.repeat(no_color_observations, 3, axis=3)
        # One random color per sample (channels in [0.5, 1)), broadcast over
        # the full height and width via the nested repeats.
        color = np.repeat(
            np.repeat(
                random_state.uniform(0.5, 1, [observations.shape[0], 1, 1, 3]),
                observations.shape[1],
                axis=1
            ),
            observations.shape[2],
            axis=2
        )
        return observations * color

    # Backward-compatible alias for the old (misspelled) name.
    sample_obbservations_from_factors = sample_observations_from_factors
class NoisyDSprites(DSprites):
    """Noisy DSprites.

    Identical to the original DSprites data set, except that when an
    observation X is sampled, the background pixels are replaced with
    uniform random noise.
    The ground-truth factors of variation are (in the default setting):
    0 - shape (3 different values)
    1 - scale (6 different values)
    2 - orientation (40 different values)
    3 - position x (32 different values)
    4 - position y (32 different values)
    """

    def __init__(self, state_space, dataset_path: Union[str, Path], latent_factor_indices=None):
        super().__init__(state_space, dataset_path, latent_factor_indices)
        # Observations are emitted as 64x64 RGB.
        self.data_shape = [64, 64, 3]

    def sample_observations_from_factors(self, factors: np.ndarray, random_state: np.random.RandomState):
        """Sample observations and fill the background with uniform noise."""
        grayscale = super().sample_observations_from_factors(factors, random_state)
        rgb = np.repeat(grayscale, 3, axis=3)
        noise = random_state.uniform(0, 1, [rgb.shape[0], 64, 64, 3])
        # Sprite pixels are 1.0 already, so clipping keeps them saturated.
        return np.minimum(rgb + noise, 1.0)
class ScreamDSprites(DSprites):
    """
    Scream DSprites.
    This data set is the same as the original DSprites data set except that when
    sampling the observations X, a random patch of the Scream image is sampled as
    the background and the sprite is embedded into the image by inverting the
    color of the sampled patch at the pixels of the sprite.
    The ground-truth factors of variation are (in the default setting):
    0 - shape (3 different values)
    1 - scale (6 different values)
    2 - orientation (40 different values)
    3 - position x (32 different values)
    4 - position y (32 different values)
    """

    def __init__(self, state_space, dataset_path: Union[str, Path], scream_path: Union[str, Path], latent_factor_indices=None):
        super().__init__(state_space, dataset_path, latent_factor_indices)
        self.data_shape = [64, 64, 3]
        scream = Image.open(scream_path)
        scream.thumbnail((350, 274))
        # Normalize pixel values to [0, 1].
        self.scream = np.array(scream) * 1. / 255

    def sample_observations_from_factors(self, factors: np.ndarray, random_state: np.random.RandomState):
        """Sample observations on random Scream-patch backgrounds.

        BUG FIX: ``random_state.unifor`` was a typo that raised
        AttributeError at runtime; corrected to ``random_state.uniform``.
        """
        no_color_observations = super().sample_observations_from_factors(factors, random_state)
        observations = np.repeat(no_color_observations, 3, axis=3)
        for i in range(observations.shape[0]):
            # Random 64x64 crop of the Scream painting plus a random tint.
            x_crop = random_state.randint(0, self.scream.shape[0] - 64)
            y_crop = random_state.randint(0, self.scream.shape[1] - 64)
            background = (self.scream[x_crop: x_crop + 64, y_crop: y_crop + 64] +
                          random_state.uniform(0, 1, size=3)) / 2.0
            # Invert the background color at sprite pixels.
            mask = (observations[i] == 1)
            background[mask] = 1 - background[mask]
            observations[i] = background
        return observations
| [
"numpy.load",
"numpy.minimum",
"numpy.cumprod",
"PIL.Image.open",
"numpy.array",
"numpy.dot",
"numpy.prod",
"numpy.repeat"
] | [((2057, 2116), 'numpy.load', 'np.load', (['dataset_path'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(dataset_path, encoding='latin1', allow_pickle=True)\n", (2064, 2116), True, 'import numpy as np\n'), ((2139, 2161), 'numpy.array', 'np.array', (["data['imgs']"], {}), "(data['imgs'])\n", (2147, 2161), True, 'import numpy as np\n'), ((4290, 4333), 'numpy.repeat', 'np.repeat', (['no_color_observations', '(3)'], {'axis': '(3)'}), '(no_color_observations, 3, axis=3)\n', (4299, 4333), True, 'import numpy as np\n'), ((5578, 5621), 'numpy.repeat', 'np.repeat', (['no_color_observations', '(3)'], {'axis': '(3)'}), '(no_color_observations, 3, axis=3)\n', (5587, 5621), True, 'import numpy as np\n'), ((5716, 5753), 'numpy.minimum', 'np.minimum', (['(observations + color)', '(1.0)'], {}), '(observations + color, 1.0)\n', (5726, 5753), True, 'import numpy as np\n'), ((6651, 6674), 'PIL.Image.open', 'Image.open', (['scream_path'], {}), '(scream_path)\n', (6661, 6674), False, 'from PIL import Image\n'), ((6988, 7031), 'numpy.repeat', 'np.repeat', (['no_color_observations', '(3)'], {'axis': '(3)'}), '(no_color_observations, 3, axis=3)\n', (6997, 7031), True, 'import numpy as np\n'), ((2312, 2338), 'numpy.prod', 'np.prod', (['self.factor_sizes'], {}), '(self.factor_sizes)\n', (2319, 2338), True, 'import numpy as np\n'), ((2341, 2370), 'numpy.cumprod', 'np.cumprod', (['self.factor_sizes'], {}), '(self.factor_sizes)\n', (2351, 2370), True, 'import numpy as np\n'), ((3219, 3257), 'numpy.dot', 'np.dot', (['all_factors', 'self.factor_bases'], {}), '(all_factors, self.factor_bases)\n', (3225, 3257), True, 'import numpy as np\n'), ((6734, 6750), 'numpy.array', 'np.array', (['scream'], {}), '(scream)\n', (6742, 6750), True, 'import numpy as np\n')] |
# vim: fdm=indent
'''
author: <NAME>
date: 02/03/18
content: Initial quality control of samples.
'''
# Modules
import os
import sys
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
os.environ['SINGLET_CONFIG_FILENAME'] = 'singlet.yml'
sys.path.append('/home/fabio/university/postdoc/singlet')
from singlet.dataset import Dataset, CountsTable, SampleSheet, FeatureSheet
from mCMV.filenames import get_count_filenames
# Functions
# Script
if __name__ == '__main__':
    # --- Command line -----------------------------------------------------
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--n-reads-min', type=int, default=15000,
                        help='Minimal number of reads for good cells')
    args = parser.parse_args()

    expname = 'mouse_mCMV_1'
    samplename = 'all'

    # --- Load counts, gene metadata, and sample metadata -------------------
    print('Read counts table')
    fn = get_count_filenames(expname, samplename, fmt='dataframe')
    # Scale to counts per million up front; the table is flagged as
    # normalized right below so downstream code does not renormalize.
    counts = CountsTable(1e6 * pd.read_csv(
        fn,
        sep='\t',
        index_col=0,
        dtype={0: str},
        ))
    counts._normalized = 'counts_per_million'

    print('Read gene metadata')
    fn = '../../data/mouse_mCMV_1/feature_metadata.tsv'
    featuresheet = FeatureSheet(pd.read_csv(
        fn,
        sep='\t',
        index_col=0,
        dtype={0: str},
        ))

    print('Read sample metadata')
    fn = '../../data/mouse_mCMV_1/all/samplesheet.tsv'
    samplesheet = SampleSheet(pd.read_csv(
        fn,
        sep='\t',
        index_col=0,
        dtype={0: str},
        ))

    print('Build dataset')
    ds = Dataset(
        counts_table=counts,
        featuresheet=featuresheet,
        samplesheet=samplesheet,
        )

    # --- Quality control and normalization --------------------------------
    print('Add normalized virus counts')
    ds.samplesheet['virus_reads_per_million'] = 1e6 * ds.samplesheet['n_reads_virus'] / ds.samplesheet['n_reads']
    # 0.1 pseudocount avoids log10(0) for cells with no virus reads.
    ds.samplesheet['log_virus_reads_per_million'] = np.log10(0.1 + ds.samplesheet['virus_reads_per_million'])

    print('Filter low-quality cells')
    n_reads_min = args.n_reads_min
    ds.query_samples_by_metadata('n_reads > @n_reads_min', local_dict=locals(), inplace=True)

    print('Limit to decently expressed genes')
    # Keep genes with more than 10 counts in at least 10 cells.
    ind = (ds.counts > 10).sum(axis=1) >= 10
    ds.counts = ds.counts.loc[ind]

    print('Ignore genes with multiple IDs')
    from collections import Counter
    genec = Counter(ds.featuresheet['GeneName'].values)
    genes_multiple = [k for k, v in genec.items() if v > 1]
    ds.featuresheet = ds.featuresheet.loc[~ds.featuresheet['GeneName'].isin(genes_multiple)]

    print('Translate to gene names')
    ds.rename(axis='features', column='GeneName', inplace=True)

    print('Get average expression')
    ds.counts.log(inplace=True)
    # Counts are in log space from here on, so the arithmetic mean below is
    # a geometric mean of the raw counts.
    ds.featuresheet.loc[:, 'expression_geometric_mean'] = ds.counts.mean(axis=1)

    print('Divide features in host and pathogen')
    features_host = ds.featurenames[ds.featuresheet['Organism'] == 'mouse']
    features_virus = ds.featurenames[ds.featuresheet['Organism'] == 'mCMV']

    # --- Histogram of average viral gene expression ------------------------
    print('Plot average expression of viral genes')
    fig, ax = plt.subplots(figsize=(5, 3.5))
    ax.hist(
        ds.featuresheet.loc[features_virus, 'expression_geometric_mean'].values,
        bins=np.linspace(-1, 2, 20),
        align='mid',
        )
    ax.grid(True)
    ax.set_xlabel('Average expression')
    ax.set_ylabel('# viral genes')
    ax.set_ylim(ymin=0.1)
    ax.set_yscale('log')
    ax.set_xlim(-1.1, 2.6)
    # Tick labels render the log-space axis as raw counts.
    ax.set_xticks(np.arange(-1, 3))
    ax.set_xticklabels([
        '$0$',
        '$1$',
        '$10$',
        '$10^2$',
        ])
    plt.tight_layout()

    # --- Clustermap of virus genes across infected cells -------------------
    print('Check correlations between virus genes')
    dsv = ds.query_features_by_metadata('Organism == "mCMV"')
    dsv.query_samples_by_metadata('moi in ("low", "high")', inplace=True)
    dsv.query_samples_by_metadata('n_reads_virus > 0', inplace=True)
    cluster_samples = dsv.cluster.hierarchical(axis='samples', log_features=False, optimal_ordering=True)
    cluster_features = dsv.cluster.hierarchical(axis='features', log_features=False, optimal_ordering=True)
    g = dsv.plot.clustermap(
        cluster_samples=cluster_samples['linkage'],
        cluster_features=cluster_features['linkage'],
        labels_samples=False,
        annotate_samples={
            'log_virus_reads_per_million': 'viridis',
            'moi': 'Set1',
            'biosample': 'Set2',
            },
        annotate_features={
            'expression_geometric_mean': 'viridis',
            },
        colorbars=True,
        cbar_kws={'label': 'log10 expression'},
        figsize=[16.12, 9.82],
        )
    for tk in g.ax_row_colors.get_xticklabels():
        tk.set_rotation(0)
    for tk in g.ax_heatmap.get_yticklabels():
        tk.set_fontsize(4)
    g.ax_col_colors.set_yticklabels(['# virus reads', 'moi', 'biosample'])
    plt.subplots_adjust(bottom=0.03, right=0.93)
    # Manually placed guide lines marking the visually identified clusters.
    for hli in [40.5, 56.5, 70.5, 95]:
        g.ax_heatmap.axhline(hli, lw=2, color='steelblue', zorder=20)
    for vli in [760, 1150, 1320, 1540]:
        g.ax_heatmap.axvline(vli, lw=2, color='steelblue', zorder=20)

    # Virus gene modules were derived once from the clustering above (see the
    # commented-out code) and are now loaded from the cached JSON file.
    import json
    #modules = {
    #    '1': cluster_features['leaves'][71: 95],
    #    '2': cluster_features['leaves'][56: 71],
    #    '3': cluster_features['leaves'][40: 56],
    #    }
    #with open('../../data/mouse_mCMV_1/virus_gene_modules_1.json', 'wt') as f:
    #    json.dump(modules, f, indent=2)
    with open('../../data/mouse_mCMV_1/virus_gene_modules_1.json', 'rt') as f:
        modules = json.load(f)

    # --- Module expression vs virus load -----------------------------------
    print('Plot gene expression of modules')
    # Bins of virus load (reads per million); the first bin edge is 0.
    virus_bins = np.array([0] + list(np.logspace(1.5, 5.5, 7)))
    # Geometric bin centers; the lower edge is floored at 10**0.5 so the
    # 0 edge does not collapse the first center to 0.
    virus_center = np.sqrt(np.maximum(10**0.5, virus_bins[:-1]) * virus_bins[1:])
    vrpm = dsv.samplesheet['virus_reads_per_million']
    exp = []
    frac_cells_exp = []
    for ib in range(len(virus_bins) - 1):
        # NOTE(review): ib only reaches len(virus_bins) - 2, so this
        # condition is always True and the else branch is dead code —
        # confirm whether the last bin was meant to be open-ended.
        if ib != len(virus_bins) - 1:
            ind = (vrpm >= virus_bins[ib]) & (vrpm < virus_bins[ib + 1])
        else:
            ind = (vrpm >= virus_bins[ib])
        sn = dsv.samplenames[ind]
        exp.append({})
        frac_cells_exp.append({})
        for modname, modgenes in modules.items():
            # Mean of log counts back-transformed to linear scale; the +0.1
            # presumably compensates the log pseudocount — TODO confirm.
            ex = 10**(dsv.counts.loc[modgenes, sn].values.mean() + 0.1)
            exp[-1][modname] = ex
            fc = (dsv.counts.loc[modgenes, sn].values > 0.1).mean()
            frac_cells_exp[-1][modname] = fc
    exp = pd.DataFrame(
        exp,
        index=pd.Index(virus_center, name='virus_reads_per_million'),
        )
    exp.columns.name = 'module'
    frac_cells_exp = pd.DataFrame(
        frac_cells_exp,
        index=pd.Index(virus_center, name='virus_reads_per_million'),
        )
    frac_cells_exp.columns.name = 'module'

    fig, axs = plt.subplots(2, 1, figsize=(4, 6), sharex=True)
    colors = sns.color_palette('Set1', n_colors=exp.shape[1])
    # Top panel: mean module expression vs virus load.
    ax = axs[0]
    x = exp.index.values
    for modname, color in zip(exp.columns, colors):
        y = exp.loc[:, modname].values
        ax.plot(x, y, lw=2, color=color, label=modname)
    #ax.set_xlabel('Virus reads per million (time?)')
    ax.set_ylabel('Mean expression')
    ax.grid(True)
    ax.legend(loc='upper left', title='Module:')
    ax.set_xlim(xmin=1)
    ax.set_xscale('log')
    ax.set_ylim(ymin=0.1)
    ax.set_yscale('log')
    # Bottom panel: fraction of cells expressing each module.
    ax = axs[1]
    x = exp.index.values
    for modname, color in zip(exp.columns, colors):
        y = frac_cells_exp.loc[:, modname].values
        ax.plot(x, y, lw=2, color=color, label=modname)
    ax.set_xlabel('Virus reads per million (time?)')
    ax.set_ylabel('Fraction of cells expressing')
    ax.grid(True)
    ax.set_xlim(xmin=1)
    ax.set_xscale('log')
    plt.tight_layout()

    plt.ion()
    plt.show()

    # NOTE(review): sys.exit() makes everything below unreachable dead code;
    # presumably left in while iterating on the analysis — confirm before
    # removing either the exit or the code below.
    sys.exit()

    # --- Host gene vs virus gene correlations (currently unreachable) ------
    print('Correlate number of virus reads with gene expression')
    corr = ds.correlation.correlate_features_features(
        features=features_host,
        features2=features_virus,
        method='spearman')
    corr_virvir = ds.correlation.correlate_features_features(
        features=features_virus,
        features2=features_virus,
        method='spearman')

    print('Plot top correlates')
    # Partial sort of the correlations: take the 16 most positive and the
    # 16 most negative (row, column) index pairs.
    tmpi = []
    tmpj = []
    # Positive correlations
    tmp = (corr).values.ravel()
    tmparg = tmp.argpartition(-16)[-16:][::-1]
    tmpj.extend(list(tmparg % corr.shape[1]))
    tmpi.extend(list(tmparg // corr.shape[1]))
    # Negative correlations
    tmp = (-corr).values.ravel()
    tmparg = tmp.argpartition(-16)[-16:][::-1]
    tmpj.extend(list(tmparg % corr.shape[1]))
    tmpi.extend(list(tmparg // corr.shape[1]))

    # Detection floor: one read in counts-per-million units, in log10 space.
    baseline = 1e6 / ds.samplesheet['n_reads']
    baseline_avg = np.log10(0.1 + baseline).mean()

    fig, axs = plt.subplots(4, 8, figsize=(23, 13), sharex=True, sharey=True)
    axs = axs.ravel()
    for ax, ih, iv in zip(axs, tmpi, tmpj):
        geneh = corr.index[ih]
        genev = corr.columns[iv]
        rho = corr.loc[geneh, genev]
        x = ds.counts.loc[genev].values
        y = ds.counts.loc[geneh].values
        avg = ds.featuresheet.loc[geneh, 'expression_geometric_mean']
        ax.scatter(x, y, s=15, alpha=0.05, label='$exp = {:.1f}$\n$\\rho = {:.2f}$'.format(avg, rho))
        sns.kdeplot(data=x, data2=y, legend=False, cmap='viridis', ax=ax)
        # Mark the detection floor in both dimensions.
        ax.axhline(baseline_avg, lw=2, color='darkred', zorder=5)
        ax.axvline(baseline_avg, lw=2, color='darkred', zorder=5)
        ax.grid(True)
        ax.set_xlabel(genev)
        ax.set_ylabel(geneh)
        ax.set_xlim(-1.1, 4.1)
        ax.set_xticks(np.arange(-1, 5))
        ax.set_xticklabels([
            '$0$',
            '$1$',
            '$10$',
            '$10^2$',
            '$10^3$',
            '$10^4$',
            ])
        ax.set_ylim(-1.1, 5.1)
        ax.set_yticks(np.arange(-1, 6))
        ax.set_yticklabels([
            '$0$',
            '$1$',
            '$10$',
            '$10^2$',
            '$10^3$',
            '$10^4$',
            '$10^5$',
            ])
        ax.legend(loc='best', framealpha=1)
    fig.text(0.01, 0.25, '$\\rho \ll 0$', rotation=90, va='center')
    fig.text(0.01, 0.75, '$\\rho \gg 0$', rotation=90, va='center')
    plt.tight_layout(rect=(0.02, 0, 1, 1))
    plt.ion()
    plt.show()
| [
"seaborn.kdeplot",
"argparse.ArgumentParser",
"numpy.maximum",
"mCMV.filenames.get_count_filenames",
"pandas.read_csv",
"numpy.logspace",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"sys.path.append",
"numpy.linspace",
"collections.Counter",
"numpy.log10",
"matplotlib.pyplot.subplots",... | [((309, 366), 'sys.path.append', 'sys.path.append', (['"""/home/fabio/university/postdoc/singlet"""'], {}), "('/home/fabio/university/postdoc/singlet')\n", (324, 366), False, 'import sys\n'), ((556, 617), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (579, 617), False, 'import argparse\n'), ((881, 938), 'mCMV.filenames.get_count_filenames', 'get_count_filenames', (['expname', 'samplename'], {'fmt': '"""dataframe"""'}), "(expname, samplename, fmt='dataframe')\n", (900, 938), False, 'from mCMV.filenames import get_count_filenames\n'), ((1591, 1676), 'singlet.dataset.Dataset', 'Dataset', ([], {'counts_table': 'counts', 'featuresheet': 'featuresheet', 'samplesheet': 'samplesheet'}), '(counts_table=counts, featuresheet=featuresheet, samplesheet=samplesheet\n )\n', (1598, 1676), False, 'from singlet.dataset import Dataset, CountsTable, SampleSheet, FeatureSheet\n'), ((1931, 1988), 'numpy.log10', 'np.log10', (["(0.1 + ds.samplesheet['virus_reads_per_million'])"], {}), "(0.1 + ds.samplesheet['virus_reads_per_million'])\n", (1939, 1988), True, 'import numpy as np\n'), ((2378, 2421), 'collections.Counter', 'Counter', (["ds.featuresheet['GeneName'].values"], {}), "(ds.featuresheet['GeneName'].values)\n", (2385, 2421), False, 'from collections import Counter\n'), ((3097, 3127), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 3.5)'}), '(figsize=(5, 3.5))\n', (3109, 3127), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3655), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3653, 3655), True, 'import matplotlib.pyplot as plt\n'), ((4945, 4989), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.03)', 'right': '(0.93)'}), '(bottom=0.03, right=0.93)\n', (4964, 4989), True, 'import matplotlib.pyplot as plt\n'), ((6876, 6923), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(2)', '(1)'], {'figsize': '(4, 6)', 'sharex': '(True)'}), '(2, 1, figsize=(4, 6), sharex=True)\n', (6888, 6923), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6985), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {'n_colors': 'exp.shape[1]'}), "('Set1', n_colors=exp.shape[1])\n", (6954, 6985), True, 'import seaborn as sns\n'), ((7808, 7826), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7824, 7826), True, 'import matplotlib.pyplot as plt\n'), ((7832, 7841), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (7839, 7841), True, 'import matplotlib.pyplot as plt\n'), ((7846, 7856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7854, 7856), True, 'import matplotlib.pyplot as plt\n'), ((7862, 7872), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7870, 7872), False, 'import sys\n'), ((8886, 8948), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(8)'], {'figsize': '(23, 13)', 'sharex': '(True)', 'sharey': '(True)'}), '(4, 8, figsize=(23, 13), sharex=True, sharey=True)\n', (8898, 8948), True, 'import matplotlib.pyplot as plt\n'), ((10399, 10437), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '(0.02, 0, 1, 1)'}), '(rect=(0.02, 0, 1, 1))\n', (10415, 10437), True, 'import matplotlib.pyplot as plt\n'), ((10443, 10452), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (10450, 10452), True, 'import matplotlib.pyplot as plt\n'), ((10457, 10467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10465, 10467), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1292), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'sep': '"""\t"""', 'index_col': '(0)', 'dtype': '{(0): str}'}), "(fn, sep='\\t', index_col=0, dtype={(0): str})\n", (1247, 1292), True, 'import pandas as pd\n'), ((1455, 1511), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'sep': '"""\t"""', 'index_col': '(0)', 'dtype': '{(0): str}'}), "(fn, sep='\\t', index_col=0, dtype={(0): str})\n", (1466, 1511), True, 
'import pandas as pd\n'), ((3495, 3511), 'numpy.arange', 'np.arange', (['(-1)', '(3)'], {}), '(-1, 3)\n', (3504, 3511), True, 'import numpy as np\n'), ((5638, 5650), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5647, 5650), False, 'import json\n'), ((9376, 9441), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'x', 'data2': 'y', 'legend': '(False)', 'cmap': '"""viridis"""', 'ax': 'ax'}), "(data=x, data2=y, legend=False, cmap='viridis', ax=ax)\n", (9387, 9441), True, 'import seaborn as sns\n'), ((970, 1026), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'sep': '"""\t"""', 'index_col': '(0)', 'dtype': '{(0): str}'}), "(fn, sep='\\t', index_col=0, dtype={(0): str})\n", (981, 1026), True, 'import pandas as pd\n'), ((3243, 3265), 'numpy.linspace', 'np.linspace', (['(-1)', '(2)', '(20)'], {}), '(-1, 2, 20)\n', (3254, 3265), True, 'import numpy as np\n'), ((5788, 5826), 'numpy.maximum', 'np.maximum', (['(10 ** 0.5)', 'virus_bins[:-1]'], {}), '(10 ** 0.5, virus_bins[:-1])\n', (5798, 5826), True, 'import numpy as np\n'), ((6564, 6618), 'pandas.Index', 'pd.Index', (['virus_center'], {'name': '"""virus_reads_per_million"""'}), "(virus_center, name='virus_reads_per_million')\n", (6572, 6618), True, 'import pandas as pd\n'), ((6747, 6801), 'pandas.Index', 'pd.Index', (['virus_center'], {'name': '"""virus_reads_per_million"""'}), "(virus_center, name='virus_reads_per_million')\n", (6755, 6801), True, 'import pandas as pd\n'), ((8839, 8863), 'numpy.log10', 'np.log10', (['(0.1 + baseline)'], {}), '(0.1 + baseline)\n', (8847, 8863), True, 'import numpy as np\n'), ((9707, 9723), 'numpy.arange', 'np.arange', (['(-1)', '(5)'], {}), '(-1, 5)\n', (9716, 9723), True, 'import numpy as np\n'), ((9974, 9990), 'numpy.arange', 'np.arange', (['(-1)', '(6)'], {}), '(-1, 6)\n', (9983, 9990), True, 'import numpy as np\n'), ((5734, 5758), 'numpy.logspace', 'np.logspace', (['(1.5)', '(5.5)', '(7)'], {}), '(1.5, 5.5, 7)\n', (5745, 5758), True, 'import numpy as np\n')] |
'''
Descripttion:
version:
Author: zpliu
Date: 2021-07-02 21:57:39
LastEditors: zpliu
LastEditTime: 2021-07-02 23:27:38
@param:
'''
import logging
import pandas as pd
import numpy as np
import pybedtools
import re
import pysam
import sys
# Root logger used for progress reporting while streaming the mapping file.
logger=logging.getLogger()
logger.setLevel(logging.INFO)
logger.info("loading module...")
##########################################
# Get homolog paired regions.
#! Use the bnMapper output to pair up the regions that lastz aligned
#! accurately, filtering out mappings that fall outside homoeolog gene
#! intervals.
#! The gap regions in between are likewise paired up using muscle.
#! Note that muscle cannot distinguish plus/minus strands when aligning.
##########################################
def merge_filterInterval(IntervalList):
    '''
    Collapse a position-sorted list of single-interval BedTool objects
    into one covering region.
    args:
        - IntervalList: list of BedTool objects, each holding one interval
    return:
        - [chrom, start_of_first, end_of_last]
    '''
    fields = np.array([interval[0][:] for interval in IntervalList])
    return [fields[0][0], fields[0][1], fields[-1][2]]
# Homoeolog table: columns 0-4 describe the At copy, 5-9 the Dt copy; the
# last column of each half holds the strand.
homologGeneID_stand=pd.read_csv("./gene_region/homoeolog_promoter_geneBody.bed",header=None,index_col=None,sep="\t")
##################################################
# Filter the mapper intervals
##################################################
out=[]
with open(sys.argv[1],'r') as File:
    count=1
    for line in File:
        # Progress heartbeat every 10k lines.
        if count%10000==0:
            logger.info(count)
        count+=1
        line=line.strip("\n").split("\t")
        if line[4]=="None":
            #! without mapper region
            continue
        else:
            # Row of the homoeolog table matching this gene ID (strip() drops
            # any of the characters * [ + - ] from both ends of the ID).
            homoeologRegionData=homologGeneID_stand.loc[(homologGeneID_stand[3]==line[3].strip("*[+-]"))|(homologGeneID_stand[8]==line[3].strip("*[+-]"))]
            At_geneRegion="\t".join(homoeologRegionData.iloc[0,0:4].astype(str))+"*"+homoeologRegionData.iloc[0,4]
            Dt_geneRegion="\t".join(homoeologRegionData.iloc[0,5:-1].astype(str))+"*"+homoeologRegionData.iloc[0,-1]
            At_geneInterval=pybedtools.BedTool(At_geneRegion,from_string=True)
            Dt_geneInterval=pybedtools.BedTool(Dt_geneRegion,from_string=True)
            #! retain the interval intersect with homoeolog region
            #! get Mapper interval from string
            #! example 'Ghir_D10:38086-38275'
            getInterval=lambda x: pybedtools.BedTool(re.sub(r'[:-]',"\t",x),from_string=True)
            if re.match(r'^Ghir_A',line[0]):
                #! At request and mapper to Dt, filter the mapper
                #! a request may have more than one mapping
                #! todo(1): the mapper may have a large interval between them
                mapper=Dt_geneRegion.split("\t")[-1]
                filterInterval=[getInterval(i) for i in line[4:] if getInterval(i).intersect(Dt_geneInterval)]
            if re.match(r'^Ghir_D',line[0]):
                mapper=At_geneRegion.split("\t")[-1]
                #! Dt request and mapper to At, filter the mapper
                # NOTE(review): the comment says "mapper to At" but this filter
                # intersects Dt_geneInterval, same as the Ghir_A branch above —
                # possibly a copy-paste bug; confirm whether At_geneInterval
                # was intended here.
                filterInterval=[getInterval(i) for i in line[4:] if getInterval(i).intersect(Dt_geneInterval)]
            # NOTE(review): if line[0] matches neither ^Ghir_A nor ^Ghir_D,
            # `mapper` and `filterInterval` are undefined here (NameError on
            # first such line) — confirm the input can only contain A/D ids.
            ###########################################
            # filter interval in the mapper gene region
            ###########################################
            if filterInterval and re.match("Ghir_D",mapper):
                #change order to At-Dt
                out.append("\t".join(line[0:4])+"\t"+"\t".join(merge_filterInterval(filterInterval))+"\t"+mapper+"\n")
            elif filterInterval and re.match("Ghir_A",mapper):
                #! change order to At-Dt
                # NOTE(review): unlike the branch above, there is no "\t"
                # between `mapper` and the joined line fields, so mapper and
                # line[0] are fused in the output — confirm intended.
                out.append("\t".join(merge_filterInterval(filterInterval))+"\t"+mapper+"\t".join(line[0:4])+"\n")
            else:
                #! the mapper out of 2k+gene body
                continue
        # break
with open(sys.argv[2],'w') as File:
    for line in out:
        File.write(line)
| [
"pandas.read_csv",
"re.match",
"pybedtools.BedTool",
"numpy.array",
"re.sub",
"logging.getLogger"
] | [((251, 270), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (268, 270), False, 'import logging\n'), ((838, 941), 'pandas.read_csv', 'pd.read_csv', (['"""./gene_region/homoeolog_promoter_geneBody.bed"""'], {'header': 'None', 'index_col': 'None', 'sep': '"""\t"""'}), "('./gene_region/homoeolog_promoter_geneBody.bed', header=None,\n index_col=None, sep='\\t')\n", (849, 941), True, 'import pandas as pd\n'), ((735, 776), 'numpy.array', 'np.array', (['[i[0][:] for i in IntervalList]'], {}), '([i[0][:] for i in IntervalList])\n', (743, 776), True, 'import numpy as np\n'), ((1775, 1826), 'pybedtools.BedTool', 'pybedtools.BedTool', (['At_geneRegion'], {'from_string': '(True)'}), '(At_geneRegion, from_string=True)\n', (1793, 1826), False, 'import pybedtools\n'), ((1854, 1905), 'pybedtools.BedTool', 'pybedtools.BedTool', (['Dt_geneRegion'], {'from_string': '(True)'}), '(Dt_geneRegion, from_string=True)\n', (1872, 1905), False, 'import pybedtools\n'), ((2176, 2204), 're.match', 're.match', (['"""^Ghir_A"""', 'line[0]'], {}), "('^Ghir_A', line[0])\n", (2184, 2204), False, 'import re\n'), ((2591, 2619), 're.match', 're.match', (['"""^Ghir_D"""', 'line[0]'], {}), "('^Ghir_D', line[0])\n", (2599, 2619), False, 'import re\n'), ((3054, 3080), 're.match', 're.match', (['"""Ghir_D"""', 'mapper'], {}), "('Ghir_D', mapper)\n", (3062, 3080), False, 'import re\n'), ((2120, 2143), 're.sub', 're.sub', (['"""[:-]"""', '"""\t"""', 'x'], {}), "('[:-]', '\\t', x)\n", (2126, 2143), False, 'import re\n'), ((3275, 3301), 're.match', 're.match', (['"""Ghir_A"""', 'mapper'], {}), "('Ghir_A', mapper)\n", (3283, 3301), False, 'import re\n')] |
import gin
import h5py
import numpy as np
import pandas as pd
import time
import models
import utils
from dqn import DQN
from circle import CircleEnv
# Load configuration for DQN and model
gin.parse_config_file('configs/influence/influence.gin')
def influence(state, training_data, test_data, oracle_init_model, oracle_init_target_model, file_prefix):
    """Calculate the influence of a state with respect to the training data.

    For each occurrence of `state` in `training_data`, two DQN agents are
    retrained from the oracle's initial weights on two filtered traces (one
    truncating the episode strictly before the occurrence, one truncating
    just after it), and each agent's action consistency on `test_data` is
    recorded. Returns a DataFrame with one row per occurrence; the frame is
    also pickled to disk when `file_prefix` is non-empty.
    """
    # We drop duplicates, because a specific state at a specific episode and step can be reused several times
    state_occurences = training_data[(training_data['state_x'] == state['state_x']) &
                                     (training_data['state_y'] == state['state_y'])].drop_duplicates()
    # Array to hold our state/influence pairs (one 7-column row per occurrence).
    state_influences = np.empty((len(state_occurences), 7))
    count = 0
    for _, state_occurence in state_occurences.iterrows():
        start_time = time.time()
        episode, step = state_occurence['episode'], state_occurence['step']
        # How many times this exact (state, episode, step) row was replayed.
        occurences = len(training_data[(training_data.state_x == state_occurence.state_x) &
                                       (training_data.state_y == state_occurence.state_y) &
                                       (training_data.episode == episode) &
                                       (training_data.step == step)])
        # Every state except those that occur on or after the above step during the above episode.
        # theta_X/E
        full_trace = training_data[(training_data['episode'] != episode) |
                                   (training_data['step'] < step)]
        # Every state except those that occur after the above step during the above episode.
        # theta_X/F  (note: despite the names, "full_trace" excludes the
        # occurrence itself while "partial_trace" includes it — the two
        # differ only by the < vs <= cutoff.)
        partial_trace = training_data[(training_data['episode'] != episode) |
                                      (training_data['step'] <= step)]
        # Setup our two agents to train on each of the filtered training sets above.
        # Both start from the same oracle initialization so any divergence is
        # attributable to the one-row difference between the traces.
        ft_agent = DQN()
        ft_agent.model.load_weights(oracle_init_model)
        ft_agent.target_model.load_weights(oracle_init_target_model)
        pt_agent = DQN()
        pt_agent.model.load_weights(oracle_init_model)
        pt_agent.target_model.load_weights(oracle_init_target_model)
        # Train our agents, get their optimal actions on testing data, and get consistencies.
        utils.train_agent_offline(ft_agent, full_trace.to_numpy())
        utils.train_agent_offline(pt_agent, partial_trace.to_numpy())
        ft_q_values = utils.get_q_values(ft_agent.model, training_data[['state_x', 'state_y']].drop_duplicates().to_numpy())
        pt_q_values = utils.get_q_values(pt_agent.model, training_data[['state_x', 'state_y']].drop_duplicates().to_numpy())
        # Greedy action per unique training state.
        ft_agent_actions = np.argmax(ft_q_values, axis=1)
        pt_agent_actions = np.argmax(pt_q_values, axis=1)
        ft_agent_acc = utils.agent_consistency(ft_agent_actions, test_data['action'].to_numpy())
        pt_agent_acc = utils.agent_consistency(pt_agent_actions, test_data['action'].to_numpy())
        # TODO: Carefully consider what we wish to have saved and how we name our save files...
        # Idea: state_x, state_y, episode, step, pt_agent_acc, ft_agent_acc,
        state_influences[count] = np.array((state_occurence['state_x'], state_occurence['state_y'], episode, step, pt_agent_acc, ft_agent_acc, occurences), dtype=np.float64)
        count += 1
        print("Time elapsed for one loop iteration: {}".format(time.time()-start_time))
    data = pd.DataFrame(state_influences, columns=['state_x', 'state_y', 'episode', 'step', 'pt_agent_cons', 'ft_agent_cons', 'occurences'])
    if file_prefix:
        data.to_pickle('data/circle/experiments/influences_v1/infl_'+file_prefix+'.pkl')
    return data
def influence2(state, training_data, test_data, oracle_init_model, oracle_init_target_model, file_prefix):
    """Calculate the influence of a state with respect to the training data.

    Variant of `influence`: instead of retraining per occurrence, ALL
    occurrences of `state` are removed from the traces cumulatively and each
    agent is retrained exactly once. Results (q-values, remaining trace
    indices) are written to an HDF5 file and the retrained models are saved;
    nothing is returned (NOTE(review): the inherited docstring's claim of a
    return value does not match the code — confirm).

    NOTE(review): `test_data` and the per-occurrence `occurences`/`start_time`
    values are computed but never used here — possibly leftovers from
    `influence`; confirm before relying on them.
    """
    # We drop duplicates, because a specific state at a specific episode and step can be reused several times
    state_occurences = training_data[(training_data['state_x'] == state['state_x']) &
                                     (training_data['state_y'] == state['state_y'])].drop_duplicates()
    full_trace = training_data
    partial_trace = training_data
    for _, state_occurence in state_occurences.iterrows():
        start_time = time.time()
        episode, step = state_occurence['episode'], state_occurence['step']
        occurences = len(training_data[(training_data.state_x == state_occurence.state_x) &
                                       (training_data.state_y == state_occurence.state_y) &
                                       (training_data.episode == episode) &
                                       (training_data.step == step)])
        # Every state except those that occur on or after the above step during the above episode.
        # theta_X/E
        full_trace = full_trace[(full_trace['episode'] != episode) |
                                (full_trace['step'] < step)]
        # Every state except those that occur after the above step during the above episode.
        # theta_X/F
        partial_trace = partial_trace[(partial_trace['episode'] != episode) |
                                      (partial_trace['step'] <= step)]
    print('Traces removed.')
    # Setup our two agents to train on each of the filtered training sets above.
    ft_agent = DQN()
    ft_agent.model.load_weights(oracle_init_model)
    ft_agent.target_model.load_weights(oracle_init_target_model)
    pt_agent = DQN()
    pt_agent.model.load_weights(oracle_init_model)
    pt_agent.target_model.load_weights(oracle_init_target_model)
    # Train our agents and get their q values for all the unique states in the training data
    print('Retraining agents.')
    training_start = time.time()
    utils.train_agent_offline(ft_agent, full_trace.to_numpy())
    print('Trained first agent in {} seconds.'.format(time.time() - training_start))
    training_start = time.time()
    utils.train_agent_offline(pt_agent, partial_trace.to_numpy())
    print('Trained second agent in {} seconds.'.format(time.time() - training_start))
    pt_q_values = utils.get_q_values(pt_agent.model, training_data[['state_x', 'state_y']].drop_duplicates().to_numpy())
    ft_q_values = utils.get_q_values(ft_agent.model, training_data[['state_x', 'state_y']].drop_duplicates().to_numpy())
    # Persist the retrained models and the per-state q-values plus the row
    # indices of the surviving traces for offline analysis.
    ft_agent.model.save('data/circle/experiments/models/'+file_prefix+'_ft_model.h5')
    pt_agent.model.save('data/circle/experiments/models/'+file_prefix+'_pt_model.h5')
    with h5py.File('data/circle/experiments/influences/infl_'+file_prefix+'.hdf5', 'w') as f:
        ptqv = f.create_dataset('pt_q_values', data=pt_q_values)
        ftqv = f.create_dataset('ft_q_values', data=ft_q_values)
        pt = f.create_dataset('pt', data=partial_trace.index.to_numpy())
        ft = f.create_dataset('ft', data=full_trace.index.to_numpy())
"pandas.DataFrame",
"h5py.File",
"numpy.argmax",
"dqn.DQN",
"time.time",
"numpy.array",
"gin.parse_config_file"
] | [((192, 248), 'gin.parse_config_file', 'gin.parse_config_file', (['"""configs/influence/influence.gin"""'], {}), "('configs/influence/influence.gin')\n", (213, 248), False, 'import gin\n'), ((3590, 3723), 'pandas.DataFrame', 'pd.DataFrame', (['state_influences'], {'columns': "['state_x', 'state_y', 'episode', 'step', 'pt_agent_cons', 'ft_agent_cons',\n 'occurences']"}), "(state_influences, columns=['state_x', 'state_y', 'episode',\n 'step', 'pt_agent_cons', 'ft_agent_cons', 'occurences'])\n", (3602, 3723), True, 'import pandas as pd\n'), ((5586, 5591), 'dqn.DQN', 'DQN', ([], {}), '()\n', (5589, 5591), False, 'from dqn import DQN\n'), ((5728, 5733), 'dqn.DQN', 'DQN', ([], {}), '()\n', (5731, 5733), False, 'from dqn import DQN\n'), ((6001, 6012), 'time.time', 'time.time', ([], {}), '()\n', (6010, 6012), False, 'import time\n'), ((6182, 6193), 'time.time', 'time.time', ([], {}), '()\n', (6191, 6193), False, 'import time\n'), ((988, 999), 'time.time', 'time.time', ([], {}), '()\n', (997, 999), False, 'import time\n'), ((2034, 2039), 'dqn.DQN', 'DQN', ([], {}), '()\n', (2037, 2039), False, 'from dqn import DQN\n'), ((2188, 2193), 'dqn.DQN', 'DQN', ([], {}), '()\n', (2191, 2193), False, 'from dqn import DQN\n'), ((2827, 2857), 'numpy.argmax', 'np.argmax', (['ft_q_values'], {'axis': '(1)'}), '(ft_q_values, axis=1)\n', (2836, 2857), True, 'import numpy as np\n'), ((2885, 2915), 'numpy.argmax', 'np.argmax', (['pt_q_values'], {'axis': '(1)'}), '(pt_q_values, axis=1)\n', (2894, 2915), True, 'import numpy as np\n'), ((3327, 3470), 'numpy.array', 'np.array', (["(state_occurence['state_x'], state_occurence['state_y'], episode, step,\n pt_agent_acc, ft_agent_acc, occurences)"], {'dtype': 'np.float64'}), "((state_occurence['state_x'], state_occurence['state_y'], episode,\n step, pt_agent_acc, ft_agent_acc, occurences), dtype=np.float64)\n", (3335, 3470), True, 'import numpy as np\n'), ((4529, 4540), 'time.time', 'time.time', ([], {}), '()\n', (4538, 4540), False, 'import 
time\n'), ((6784, 6870), 'h5py.File', 'h5py.File', (["('data/circle/experiments/influences/infl_' + file_prefix + '.hdf5')", '"""w"""'], {}), "('data/circle/experiments/influences/infl_' + file_prefix +\n '.hdf5', 'w')\n", (6793, 6870), False, 'import h5py\n'), ((6130, 6141), 'time.time', 'time.time', ([], {}), '()\n', (6139, 6141), False, 'import time\n'), ((6315, 6326), 'time.time', 'time.time', ([], {}), '()\n', (6324, 6326), False, 'import time\n'), ((3549, 3560), 'time.time', 'time.time', ([], {}), '()\n', (3558, 3560), False, 'import time\n')] |
import base64
import json
import os
import random
import re
import sys
import threading
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import zmq
import dataHandler as dh
import symbolDNNClassifier as DNNC
# Model attributes for prediction
classes = 15
tags = ['button', 'input', 'textarea', 'alert', 'table', 'footer', 'link',
        'sidebar', 'status', 'paragraph', 'br', 'timeline', 'item', 'header', 'undefined']


def oneHotToCategorical(onehot):
    """Map one-hot prediction rows to their tag names.

    Input: one-hot 2-dim list (e.g. [[0 1 0 ...], [1 0 0 ...], [0 0 ... 1]]).
    Returns: categorical list matching `tags` (e.g. ['button', 'input', ...]).
    """
    hot_positions = [pos for row in onehot
                     for pos, value in enumerate(row) if value == 1.]
    return [tags[int(pos)] for pos in hot_positions]
def handleProbabilities(decProbs):
    """Reduce each row of class probabilities to a confidence percentage.

    Input: probabilities 2-dim list, one row per prediction
           (e.g. [[0.92 0.01 ...], [0.02 0.21 ...]]).
    Returns: list with each row's highest probability scaled to percent
             (e.g. [76 94 87 ...]).
    """
    # max((0, *row)) mirrors the original accumulator that started at 0,
    # so an empty or all-non-positive row still yields 0.
    return [max((0, *row)) * 100 for row in decProbs]
def getNewFreePort():
    """Probe a random port in the range 8000-9000 for availability.

    Returns:
        The probed port number if it could be bound, or 0 if it was
        already in use.
    """
    sck = context.socket(zmq.REP)
    try:
        prt = random.randint(8000, 9000)
        try:
            sck.bind("tcp://*:%s" % prt)
            return prt
        except zmq.ZMQError:
            print("Port already in use")
            return 0
    finally:
        # Bug fix: explicitly close (and thereby unbind) the probe socket so
        # the port is actually free for the worker thread that binds it next.
        # Previously the release relied on CPython ref-counting happening to
        # collect `sck` before the worker's bind raced it.
        sck.close()
def _bytes_feature(value):
    """Wrap a list of byte strings as a TF Feature (requires a list)."""
    bytes_list = tf.train.BytesList(value=value)
    return tf.train.Feature(bytes_list=bytes_list)


def _float_feature(value):
    """Wrap a single numeric value as a float TF Feature."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)


def _int_feature(value):
    """Wrap a single numeric value as an int64 TF Feature."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
'''
Input: Name of the client making the request (str), Objects to be infer (list of dicts)
Returns: Path where the tfrecord of objects has been created
- Parse objects as TensorFlow features and write a tfrecord to feed the estimator
'''
def createDataset(client, objs):
    # Normalize raw request objects into the flat dict layout read below.
    objs = dh.handleInferences(objs)
    # Sanitize the client id and the timestamp so they are filesystem-safe.
    client = dh._replaceMultiple(client, [':', '.'], '_')
    filename = 'predictions/' + client + '_'
    filename += dh._replaceMultiple(str(datetime.now()
                                            ).split('.')[0], [' ', ':', '-'], '_')
    filename += '.tfrecord'
    with open(filename, 'w') as text_file:
        # NOTE(review): TFRecordWriter below recreates this file, so this
        # pre-write of an empty line looks redundant -- confirm before removing.
        print('', file=text_file)
    writer = tf.python_io.TFRecordWriter(filename)
    for index, _ in enumerate(objs):
        # One tf.train.Example per object. The *Bool companions flag whether
        # the corresponding string field is present (b'none' marks missing).
        feature = {
            'labels': _int_feature(0),
            'insName': _bytes_feature(objs[index]['name']),
            'insSymID': _bytes_feature(objs[index]['symbolID']),
            'insSymIDBool': _int_feature(0 if b'none' in objs[index]['symbolID'] else 1),
            'insColor': _bytes_feature(objs[index]['color']),
            'insColorBool': _int_feature(0 if b'none' in objs[index]['color'] else 1),
            'insText': _bytes_feature(objs[index]['text']),
            'insFrameClass': _bytes_feature(objs[index]['frame']['class']),
            'insFrameHeight': _float_feature(float(objs[index]['frame']['height'])),
            'insFrameWidth': _float_feature(float(objs[index]['frame']['width'])),
            'prevIns': _bytes_feature(objs[index]['previous']),
            'prevInsBool': _int_feature(0 if b'none' in objs[index]['previous'] else 1),
            'nxtIns': _bytes_feature(objs[index]['next']),
            'nxtInsBool': _int_feature(0 if b'none' in objs[index]['next'] else 1),
            'parent': _bytes_feature(objs[index]['parent']),
            'parentBool': _int_feature(0 if b'none' in objs[index]['parent'] else 1),
        }
        # Up to five related objects (obj1..obj5) with the same presence flag.
        for i in range(1, 6):
            obj = 'obj' + str(i)
            feature[obj+'Name'] = _bytes_feature(objs[index][obj]['name'])
            feature[obj+'Class'] = _bytes_feature(objs[index][obj]['class'])
            feature[obj+'Bool'] = _int_feature(
                0 if b'none' in objs[index][obj]['name'] else 1)
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()
    return filename
'''
- Worker thread function
Input: Port to be binded and DNNClassifier to predict
- Worker binds to newPort and waits to receive message from server:
    If the message is inference, the worker makes the prediction and return it
    If the message is ping, the worker pings back
    If the message is close, the worker stops
Note: If any error happen in the socket the worker signal the error and die
'''
def inference(newPort, classifier):
    sock = context.socket(zmq.REP)
    sock.RCVTIMEO = 5000  # receive timeout (ms); recv raises ZMQError when it expires
    try:
        sock.bind("tcp://*:%s" % newPort)
        print("Worker started at port ", newPort)
        while True:
            message = sock.recv().decode('utf-8')
            print("Worker received request")
            if 'inference' in message.lower():
                try:
                    # Message layout: 'inference from client <id>> <objects>'.
                    objs = message.split('> ', 1)[1]
                    # SECURITY(review): eval() on a network-supplied payload
                    # executes arbitrary code; prefer json.loads or
                    # ast.literal_eval.
                    objs = eval(objs)
                    client = message.split('> ', 1)[0].split('client ', 1)[1]
                    client = dh._replaceMultiple(client, [':', '.'], '_')
                    print(client)
                    # Write a tfrecord for this request and run the estimator.
                    dataset_path = createDataset(client, objs)
                    predict_fn = DNNC.create_input_feat_fn([dataset_path],
                                                           len(objs))
                    predictions = list(classifier.predict(input_fn=predict_fn))
                    probabilities = np.array(
                        [item['probabilities'] for item in predictions])
                    pred_class_id = np.array(
                        [item['class_ids'][0] for item in predictions])
                    pred_one_hot = tf.keras.utils.to_categorical(
                        pred_class_id, num_classes=classes)
                    # Pair each predicted tag name with its confidence %.
                    categor = oneHotToCategorical(pred_one_hot)
                    probs = handleProbabilities(probabilities)
                    result = list(zip(categor, probs))
                    print('Worker sending inference results at port', newPort)
                    sock.send((threading.currentThread().getName() +
                               " inference: " + str(result)).encode('utf-8'))
                except Exception as e:
                    # REP sockets must answer every request, even on failure.
                    print(str(e))
                    sock.send(('Exception <'+str(e)+'> during inference').encode('utf-8'))
            elif 'ping' in message.lower():
                print('Worker sending ping at port', newPort)
                sock.send((threading.currentThread().getName() +
                           ' ping').encode('utf-8'))
            elif 'close' in message.lower():
                break
        print('Worker killed at port ', newPort)
    except zmq.ZMQError:
        # Bind failure or recv timeout: report and let the thread die.
        print("Error & worked killed at port ", newPort)
'''
- Models Serving main:
    Check and substitue command arguments, if any. Non-defined arguments are ignored
    Train the classifier
    Start a socket to listen node requests:
        If infer request is received, a new port is obtained and a new worker binded to this port and requester is created to serve him
        If metrics request is received, the server returns the metrics of the last training period
'''
if __name__ == '__main__':
    args = sys.argv[1:]
    # Timestamp truncated to seconds, separators replaced so it is path-safe.
    now = str(datetime.now()).split('.')[0].replace(
        ' ', '_').replace(':', '_').replace('-', '_')
    if '--help' in args or 'help' in args:
        print('Arguments format -> --arg1=val1 --arg2=val2 ...')
        print('--learning-rate=X ; defaults to 0.000862')
        print('--batch-size=X ; defaults to 45')
        print('--steps=X ; defaults to 2000')
        print('-hidden-layers=X ; defaults to 5 hidden layers with 10 node each')
        print('--periods=X ; defaults to 15')
        print('--port=X ; defaults to 7999')
        # sys.exit(msg) prints this final help line to stderr and exits.
        sys.exit('--directory=X ; defaults to runs/')
    # NOTE(review): the help text above claims 0.000862 and 15 periods, but the
    # actual defaults below are 0.0002 and 30 -- one of the two is stale.
    learning_rate =0.0002
    batch_size = 45
    steps = 2000
    hidden_layers = [
        10,
        10,
        10,
        10,
        10
    ]
    periods = 30
    directory = 'runs/' + now
    port = 7999
    # Override defaults from command-line arguments; unknown flags are ignored.
    for arg in args:
        if '--learning-rate' in arg:
            learning_rate = float(arg.split('=')[1])
        if '--batch-size' in arg:
            batch_size = int(arg.split('=')[1])
        if '--steps' in arg:
            steps = int(arg.split('=')[1])
        if '--hidden-layers' in arg:
            hidden_layers = arg.split('=')[1]
            hidden_layers = hidden_layers.split(',')
        if '--periods' in arg:
            periods = int(arg.split('=')[1])
        if '--directory' in arg:
            directory = arg.split('=')[1] + now
        if '--port' in arg:
            port = arg.split('=')[1]
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError as e:
        # Fall back to the default run directory if creation failed.
        print(str(e))
        directory = 'runs/' + now
        pass
    print('Training parameters: ')
    print(' - learning_rate: ', learning_rate)
    print(' - steps: ', steps)
    print(' - mini_batch_size: ', batch_size)
    print(' - periods: ', periods)
    print(' - hidden_layers', hidden_layers)
    print(' - Optimizer: Nesterov Momentum with lambda = 0.9 and clipping = 3')
    classifier, metrics_dir = DNNC.train_and_evaluate(
        learning_rate, steps, batch_size, periods, hidden_layers, directory + '/')
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)
    print("Model Serving started")
    # Serve forever: 'infer' spawns a worker thread on a fresh port and replies
    # with that port; 'metrics' replies with the last training report.
    while True:
        message = socket.recv().decode('utf-8')
        print("Server Received request")
        if 'infer' in message:
            newPort = 0
            while True:
                newPort = getNewFreePort()
                if newPort != 0:
                    break
            th = threading.Thread(name='Port %d' % newPort,
                             target=inference, args=(newPort, classifier))
            th.start()
            print('Sending new port back to ', message.split('from')[1])
            socket.send(("Request assigned port: " +
                         str(newPort)).encode('utf-8'))
        elif 'metrics' in message:
            print('Sending last metrics to ', message.split('from')[1])
            with open(metrics_dir+'_metrics.txt', 'r') as text_file:
                metrics = text_file.read()
            with open(metrics_dir+'_cm.txt', 'r') as text_file:
                cm = text_file.read()
            with open(metrics_dir+'_confusion_matrix.png', 'rb') as image_file:
                cm_pic = base64.b64encode(image_file.read())
            with open(metrics_dir+'_log_error.png', 'rb') as image_file:
                log_error_pic = base64.b64encode(image_file.read())
            ret = {
                'metrics': metrics,
                'cm': cm,
                'cm_pic': cm_pic.decode('utf-8'),
                'log_error_pic': log_error_pic.decode('utf-8')
            }
            socket.send(json.dumps(ret).encode('utf-8'))
| [
"dataHandler._replaceMultiple",
"tensorflow.train.Int64List",
"json.dumps",
"sys.stdout.flush",
"tensorflow.train.FloatList",
"threading.currentThread",
"zmq.Context",
"random.randint",
"os.path.exists",
"datetime.datetime.now",
"tensorflow.train.BytesList",
"threading.Thread",
"tensorflow.k... | [((1473, 1499), 'random.randint', 'random.randint', (['(8000)', '(9000)'], {}), '(8000, 9000)\n', (1487, 1499), False, 'import random\n'), ((2381, 2406), 'dataHandler.handleInferences', 'dh.handleInferences', (['objs'], {}), '(objs)\n', (2400, 2406), True, 'import dataHandler as dh\n'), ((2421, 2465), 'dataHandler._replaceMultiple', 'dh._replaceMultiple', (['client', "[':', '.']", '"""_"""'], {}), "(client, [':', '.'], '_')\n", (2440, 2465), True, 'import dataHandler as dh\n'), ((2766, 2803), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (2793, 2803), True, 'import tensorflow as tf\n'), ((4581, 4599), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4597, 4599), False, 'import sys\n'), ((9821, 9923), 'symbolDNNClassifier.train_and_evaluate', 'DNNC.train_and_evaluate', (['learning_rate', 'steps', 'batch_size', 'periods', 'hidden_layers', "(directory + '/')"], {}), "(learning_rate, steps, batch_size, periods,\n hidden_layers, directory + '/')\n", (9844, 9923), True, 'import symbolDNNClassifier as DNNC\n'), ((9944, 9957), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (9955, 9957), False, 'import zmq\n'), ((8383, 8428), 'sys.exit', 'sys.exit', (['"""--directory=X ; defaults to runs/"""'], {}), "('--directory=X ; defaults to runs/')\n", (8391, 8428), False, 'import sys\n'), ((1856, 1887), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': 'value'}), '(value=value)\n', (1874, 1887), True, 'import tensorflow as tf\n'), ((1957, 1990), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[value]'}), '(value=[value])\n', (1975, 1990), True, 'import tensorflow as tf\n'), ((2058, 2091), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (2076, 2091), True, 'import tensorflow as tf\n'), ((9311, 9336), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (9325, 9336), False, 'import 
os\n'), ((9350, 9372), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9361, 9372), False, 'import os\n'), ((10376, 10468), 'threading.Thread', 'threading.Thread', ([], {'name': "('Port %d' % newPort)", 'target': 'inference', 'args': '(newPort, classifier)'}), "(name='Port %d' % newPort, target=inference, args=(newPort,\n classifier))\n", (10392, 10468), False, 'import threading\n'), ((4471, 4505), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (4488, 4505), True, 'import tensorflow as tf\n'), ((5614, 5658), 'dataHandler._replaceMultiple', 'dh._replaceMultiple', (['client', "[':', '.']", '"""_"""'], {}), "(client, [':', '.'], '_')\n", (5633, 5658), True, 'import dataHandler as dh\n'), ((6016, 6073), 'numpy.array', 'np.array', (["[item['probabilities'] for item in predictions]"], {}), "([item['probabilities'] for item in predictions])\n", (6024, 6073), True, 'import numpy as np\n'), ((6135, 6191), 'numpy.array', 'np.array', (["[item['class_ids'][0] for item in predictions]"], {}), "([item['class_ids'][0] for item in predictions])\n", (6143, 6191), True, 'import numpy as np\n'), ((6252, 6317), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['pred_class_id'], {'num_classes': 'classes'}), '(pred_class_id, num_classes=classes)\n', (6281, 6317), True, 'import tensorflow as tf\n'), ((2552, 2566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2564, 2566), False, 'from datetime import datetime\n'), ((11548, 11563), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (11558, 11563), False, 'import json\n'), ((7065, 7090), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (7088, 7090), False, 'import threading\n'), ((7847, 7861), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7859, 7861), False, 'from datetime import datetime\n'), ((6638, 6663), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (6661, 
6663), False, 'import threading\n')] |
# Copyright (C)
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import itertools
import numpy as np
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from .lcx.openGLWidget import openGLWidget
class XSensVisualizer(QWidget):
    """Qt widget that renders an XSens motion-capture skeleton via OpenGL.

    Keeps per-segment position/quaternion state in ``skeleton_data``,
    refreshes it from a pandas-style data row, and forwards it to an
    ``openGLWidget`` for drawing.
    """

    WINDOW_WIDTH = 500
    WINDOW_HEIGHT = 500
    VISUALIZATION_FRAME_RATE = 10

    def __init__(self):
        super().__init__(parent=None)
        # Column-name suffixes used to build the dataframe column names.
        self.xyz = ['_x', '_y', '_z']
        self.quaternions = ['_q1', '_qi', '_qj', '_qk']
        #self.xyz = ['_0', '_1', '_2']
        #self.quaternions = ['_0', '_1', '_2', '_3']
        segment_names = ['Pelvis', 'L5', 'T8', 'Neck', 'Head',
                         'RightUpperLeg', 'RightLowerLeg', 'RightFoot',
                         'LeftUpperLeg', 'LeftLowerLeg', 'LeftFoot',
                         'RightUpperArm', 'RightForeArm', 'RightHand',
                         'LeftUpperArm', 'LeftForeArm', 'LeftHand']
        # Per-segment state; quaternion suffixes are _q1,_qi,_qj,_qk --
        # presumably scalar-first ordering, confirm against the data schema.
        self.skeleton_data = {
            name: {'position': [0, 0, 0], 'quaternion': [0, 0, 0, 0]}
            for name in segment_names
        }
        self.resize(XSensVisualizer.WINDOW_WIDTH, XSensVisualizer.WINDOW_HEIGHT)
        # The GL widget additionally draws a floor plane and coordinate axes.
        self.mapped_segment_names = segment_names + ['Floor', 'Coordinate']
        self.gl_widget = openGLWidget(self, XSensVisualizer.WINDOW_WIDTH,
                                      XSensVisualizer.WINDOW_HEIGHT - 100,
                                      self.mapped_segment_names)
        self.init_layout()

    def init_layout(self):
        """Place the GL widget in a simple vertical layout."""
        root_layout = QVBoxLayout()
        root_layout.addWidget(self.gl_widget)
        self.setLayout(root_layout)

    def draw_model(self):
        """Push every segment's current state to the GL widget and redraw."""
        for idx, key in enumerate(self.skeleton_data.keys()):
            self.gl_widget.updateData(idx, key, self.skeleton_data[key]['position'], self.skeleton_data[key]['quaternion'])
        self.gl_widget.display()

    def set_skeleton_segment_from_data_row(self, segment_name, data_row, center_offset=None):
        """Read 'position_<segment>_{x,y,z}' from data_row, optionally recentering."""
        column_names = [v[0] + v[1] for v in itertools.product(['position_%s' % segment_name], self.xyz)]
        self.skeleton_data[segment_name]['position'] = data_row[column_names]
        if center_offset is not None:
            self.skeleton_data[segment_name]['position'] -= center_offset

    def set_skeleton_quaternion_from_data_row(self, segment_name, data_row):
        """Read 'orientation_<segment>_{q1,qi,qj,qk}' from data_row."""
        column_names = [v[0] + v[1] for v in itertools.product(['orientation_%s' % segment_name], self.quaternions)]
        self.skeleton_data[segment_name]['quaternion'] = data_row[column_names]

    def update_model(self, data_row):
        """Refresh all segments from data_row, recentered on the feet midpoint."""
        self.set_skeleton_segment_from_data_row('RightFoot', data_row)
        self.set_skeleton_quaternion_from_data_row('RightFoot', data_row)
        self.set_skeleton_segment_from_data_row('LeftFoot', data_row)
        self.set_skeleton_quaternion_from_data_row('LeftFoot', data_row)
        # Horizontal midpoint between the feet; z stays untouched so height
        # is preserved.
        center_pos_offset = np.array(
            [(self.skeleton_data['RightFoot']['position'][0] + self.skeleton_data['LeftFoot']['position'][0]) * 0.5,
             (self.skeleton_data['RightFoot']['position'][1] + self.skeleton_data['LeftFoot']['position'][1]) * 0.5, 0.0])
        self.skeleton_data['RightFoot']['position'] -= center_pos_offset
        self.skeleton_data['LeftFoot']['position'] -= center_pos_offset
        for key in self.skeleton_data:
            # Bug fix: the keys are CamelCase ('RightFoot'), so the previous
            # lowercase comparison ("rightfoot"/"leftfoot") never matched and
            # both feet were redundantly re-read and re-offset every frame.
            if key not in ('RightFoot', 'LeftFoot'):
                self.set_skeleton_segment_from_data_row(key, data_row, center_pos_offset)
                self.set_skeleton_quaternion_from_data_row(key, data_row)
        self.draw_model()
| [
"PyQt5.QtWidgets.QVBoxLayout",
"numpy.array",
"itertools.product"
] | [((2730, 2743), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (2741, 2743), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout\n'), ((4095, 4333), 'numpy.array', 'np.array', (["[(self.skeleton_data['RightFoot']['position'][0] + self.skeleton_data[\n 'LeftFoot']['position'][0]) * 0.5, (self.skeleton_data['RightFoot'][\n 'position'][1] + self.skeleton_data['LeftFoot']['position'][1]) * 0.5, 0.0]"], {}), "([(self.skeleton_data['RightFoot']['position'][0] + self.\n skeleton_data['LeftFoot']['position'][0]) * 0.5, (self.skeleton_data[\n 'RightFoot']['position'][1] + self.skeleton_data['LeftFoot']['position'\n ][1]) * 0.5, 0.0])\n", (4103, 4333), True, 'import numpy as np\n'), ((3212, 3271), 'itertools.product', 'itertools.product', (["['position_%s' % segment_name]", 'self.xyz'], {}), "(['position_%s' % segment_name], self.xyz)\n", (3229, 3271), False, 'import itertools\n'), ((3586, 3656), 'itertools.product', 'itertools.product', (["['orientation_%s' % segment_name]", 'self.quaternions'], {}), "(['orientation_%s' % segment_name], self.quaternions)\n", (3603, 3656), False, 'import itertools\n')] |
# This is a modified version of IRLS implementation in 'sattsmodels'
# Copyright (C) 2006, <NAME>
# All rights reserved.
#
# Copyright (c) 2006-2008 Scipy Developers.
# All rights reserved.
#
# Copyright (c) 2009-2018 statsmodels Developers.
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of statsmodels nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL STATSMODELS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import numpy as np
from scipy.stats import norm as Gaussian
import warnings
def mad(a, c=Gaussian.ppf(3/4.), axis=0):
# c \approx .6745
"""
The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately .6745.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c`
"""
# a = array_like(a, 'a', ndim=None)
# c = float_like(c, 'c')
return np.median(np.abs(a) / c, axis=axis)
def _estimate_scale(residual):
return mad(residual)
class HuberT(object):
    """
    Huber's T norm for M estimation.

    Quadratic for small residuals (|z| <= t) and linear beyond, which
    bounds the influence of outlying observations.

    Parameters
    ----------
    t : float, optional
        The tuning constant for Huber's t function. The default value is
        1.345.

    See Also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, t=1.345):
        self.t = t

    def _subset(self, z):
        """Boolean mask, True where |z| <= t (the quadratic region)."""
        return np.abs(np.asarray(z)) <= self.t

    def rho(self, z):
        r"""
        The robust criterion function for Huber's t.

        rho(z) = .5*z**2            for \|z\| <= t
        rho(z) = \|z\|*t - .5*t**2  for \|z\| > t

        Parameters
        ----------
        z : array_like
            1d array

        Returns
        -------
        rho : array
        """
        z = np.asarray(z)
        inlier = self._subset(z)
        quadratic = 0.5 * z ** 2
        linear = self.t * np.abs(z) - 0.5 * self.t ** 2
        return np.where(inlier, quadratic, linear)

    def psi(self, z):
        r"""
        The psi function for Huber's t estimator (analytic derivative of
        rho): z clipped to the interval [-t, t].

        psi(z) = z          for \|z\| <= t
        psi(z) = sign(z)*t  for \|z\| > t
        """
        z = np.asarray(z)
        inlier = self._subset(z)
        return np.where(inlier, z, self.t * np.sign(z))

    def weights(self, z):
        r"""
        Huber's t weighting function for the IRLS algorithm (psi scaled
        by z).

        weights(z) = 1        for \|z\| <= t
        weights(z) = t/\|z\|  for \|z\| > t
        """
        z = np.asarray(z)
        inlier = self._subset(z)
        # Replace inlier magnitudes by 1 before dividing so np.where never
        # evaluates t/0 (z == 0 is always an inlier).
        absz = np.where(inlier, 1.0, np.abs(z))
        return np.where(inlier, 1.0, self.t / absz)

    def psi_deriv(self, z):
        """
        The derivative of Huber's t psi function: 1 inside the threshold,
        0 outside.

        Notes
        -----
        Used to estimate the robust covariance matrix.
        """
        return self._subset(z)

    def __call__(self, z):
        """Evaluate rho at z."""
        return self.rho(z)
class Residual(object):
    """Holds a design matrix X and target y and computes fit residuals."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def compute(self, params):
        """Return the residual vector y - X.dot(params)."""
        fitted = self.X.dot(params)
        return self.y - fitted
def least_squares(X, y):
    """Ordinary least-squares solution of X.dot(params) ~= y."""
    # lstsq returns (solution, residuals, rank, singular values); only the
    # solution is needed.  rcond=-1 keeps the legacy cutoff behaviour.
    return np.linalg.lstsq(X, y, rcond=-1)[0]
def weighted_least_squares(X, y, weights):
    """Weighted least squares via the sqrt-weight transformation.

    Scaling each row of X and each entry of y by sqrt(weight) turns the
    weighted problem into an ordinary least-squares one.
    """
    w = np.sqrt(weights)
    solution, _, _, _ = np.linalg.lstsq(w[:, None] * X,
                                        w * y,
                                        rcond=-1)
    return solution
def fit(X, y, max_iter=100, M=HuberT()):
    """Robust regression via iteratively reweighted least squares (IRLS).

    Starts from the OLS solution, then repeatedly rescales the residuals
    with a robust (MAD) scale estimate and re-solves a weighted
    least-squares problem with weights supplied by the norm `M`, until
    `max_iter` sweeps are done or the scale collapses to zero.

    The shared default `M` instance is safe because HuberT is stateless.
    """
    residual = Residual(X, y)
    params = least_squares(X, y)
    resid = residual.compute(params)
    scale = _estimate_scale(resid)
    for _ in range(max_iter):
        if scale == 0.0:
            # Perfect fit: all residuals vanish, nothing left to reweight.
            break
        params = weighted_least_squares(X, y, weights=M.weights(resid / scale))
        resid = residual.compute(params)
        scale = _estimate_scale(resid)
        # TODO
        # if _check_convergence():
        #     break
    return params
| [
"scipy.stats.norm.ppf",
"numpy.abs",
"numpy.linalg.lstsq",
"numpy.asarray",
"numpy.sign",
"numpy.sqrt"
] | [((1847, 1868), 'scipy.stats.norm.ppf', 'Gaussian.ppf', (['(3 / 4.0)'], {}), '(3 / 4.0)\n', (1859, 1868), True, 'from scipy.stats import norm as Gaussian\n'), ((5091, 5122), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X', 'y'], {'rcond': '(-1)'}), '(X, y, rcond=-1)\n', (5106, 5122), True, 'import numpy as np\n'), ((5205, 5221), 'numpy.sqrt', 'np.sqrt', (['weights'], {}), '(weights)\n', (5212, 5221), True, 'import numpy as np\n'), ((5244, 5314), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(sqrt_weights[:, None] * X)', '(sqrt_weights * y)'], {'rcond': '(-1)'}), '(sqrt_weights[:, None] * X, sqrt_weights * y, rcond=-1)\n', (5259, 5314), True, 'import numpy as np\n'), ((2904, 2917), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (2914, 2917), True, 'import numpy as np\n'), ((3327, 3340), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (3337, 3340), True, 'import numpy as np\n'), ((3851, 3864), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (3861, 3864), True, 'import numpy as np\n'), ((4361, 4374), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (4371, 4374), True, 'import numpy as np\n'), ((4421, 4430), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (4427, 4430), True, 'import numpy as np\n'), ((2362, 2371), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2368, 2371), True, 'import numpy as np\n'), ((2947, 2956), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (2953, 2956), True, 'import numpy as np\n'), ((4720, 4729), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (4726, 4729), True, 'import numpy as np\n'), ((3944, 3954), 'numpy.sign', 'np.sign', (['z'], {}), '(z)\n', (3951, 3954), True, 'import numpy as np\n'), ((3438, 3447), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (3444, 3447), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Author: <NAME>
================================
"""
from typing import List
import numpy as np
def absorption_coefficient(zc: List[complex], kc: List[complex],
                           thickness: float, z_air: float):
    """
    Sound absorption coefficient of a hard-backed material layer.

    NOTE 1: 'zc' and 'kc' are assumed to be already divided by the porosity.
    NOTE 2: Only the normal incidence angle is considered.
    NOTE 3: Despite the List annotations, 'kc' must support elementwise
    scaling (kc * thickness), i.e. a numpy array in practice.

    Args:
        zc: Material characteristic impedance.
        kc: Material wave number.
        thickness: Material thickness.
        z_air: Air characteristic impedance.

    Returns:
        np.ndarray: Sound absorption coefficient [no units].
    """
    # Surface impedance of a rigid-backed layer.
    surface_impedance = -1j * (zc / np.tan(kc * thickness))
    # Pressure reflection coefficient at the air/material interface.
    reflection = (surface_impedance - z_air) / (surface_impedance + z_air)
    return 1 - np.abs(reflection) ** 2
| [
"numpy.tan",
"numpy.abs"
] | [((797, 819), 'numpy.tan', 'np.tan', (['(kc * thickness)'], {}), '(kc * thickness)\n', (803, 819), True, 'import numpy as np\n'), ((930, 940), 'numpy.abs', 'np.abs', (['vp'], {}), '(vp)\n', (936, 940), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
class TestArgMax(OnnxRuntimeLayerTest):
    """Checks conversion of ONNX ArgMax into an IR TopK (+ Squeeze/Reshape)."""

    def create_net(self, shape, axis, keepdims, ir_version):
        """
            ONNX net                   IR net

            Input->ArgMax->Output   =>    Input->TopK

        Builds the ONNX model with onnx.helper and, for IR version >= 10,
        the reference IR graph expected after conversion.
        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        # ArgMax keeps a size-1 dim on `axis`; the squeezed variant drops it.
        output_shape = shape.copy()
        output_shape[axis if axis is not None else 0] = 1
        output_shape_squeeze = output_shape.copy()
        if keepdims == 0:
            output_shape_squeeze.remove(1)

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output = helper.make_tensor_value_info('output', TensorProto.INT64, output_shape_squeeze)

        const = np.random.randint(-10, 10, output_shape_squeeze).astype(np.int64)

        # Omit attributes that are None so ONNX defaults apply.
        args = dict()
        if axis is not None:
            args['axis'] = axis
        else:
            axis = 0
        if keepdims is not None:
            args['keepdims'] = keepdims
        node_def = onnx.helper.make_node(
            'ArgMax',
            inputs=['input'],
            outputs=['argmax' if keepdims is None or keepdims == 1 else 'output'],
            **args
        )
        edges = [node_def]

        # With keepdims, append a Flatten so both branches end in 'output'.
        if keepdims is None or keepdims == 1:
            node_flatten_def = onnx.helper.make_node(
                'Flatten',
                inputs=['argmax'],
                outputs=['output']
            )
            edges.append(node_flatten_def)

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            edges,
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #

        ref_net = None

        if check_ir_version(10, None, ir_version):
            # ArgMax converts to TopK(k=1); result1 consumes the values,
            # result2 the (squeezed or reshaped) indices.
            nodes_attributes = {
                'input': {'kind': 'op', 'type': 'Parameter'},
                'input_data': {'shape': shape, 'kind': 'data'},
                'const_indata': {'shape': [1], 'kind': 'data'},
                'const': {'kind': 'op', 'type': 'Const'},
                'const_data': {'shape': [], 'kind': 'data'},  # TODO shape [] or [1] ??
                'node': {'kind': 'op', 'type': 'TopK'},
                'node_data': {'shape': output_shape, 'kind': 'data'},
                'indices_data': {'shape': output_shape, 'kind': 'data'},
                'result1': {'kind': 'op', 'type': 'Result'},
                'result2': {'kind': 'op', 'type': 'Result'}
            }
            edges = [('input', 'input_data'),
                     ('const_indata', 'const'),
                     ('const', 'const_data'),
                     ('input_data', 'node'),
                     ('const_data', 'node'),
                     ('node', 'node_data'),
                     ('node', 'indices_data'),
                     ('node_data', 'result1')]
            if keepdims == 0:
                nodes_attributes.update({'squeeze_const_indata': {'shape': [1], 'kind': 'data'},
                                         'squeeze_const': {'kind': 'op', 'type': 'Const'},
                                         'squeeze_const_data': {'shape': [1], 'kind': 'data'},
                                         'squeeze': {'kind': 'op', 'type': 'Squeeze'},
                                         'squeeze_data': {'shape': output_shape_squeeze,
                                                          'kind': 'data'}
                                         })
                edges.extend([('squeeze_const_indata', 'squeeze_const'),
                              ('squeeze_const', 'squeeze_const_data'),
                              ('indices_data', 'squeeze'),
                              ('squeeze_const_data', 'squeeze'),
                              ('squeeze', 'squeeze_data'),
                              ('squeeze_data', 'result2')])
            else:
                # ONNX Flatten maps to an IR Reshape to [N, prod(rest)].
                nodes_attributes.update(
                    {'flatten_const_indata': {'kind': 'data', 'value': [0, -1]},
                     'flatten_const': {'kind': 'op', 'type': 'Const'},
                     'flatten_const_data': {'shape': [2], 'kind': 'data'},
                     'flatten': {'kind': 'op', 'type': 'Reshape'},
                     'flatten_data': {
                         'shape': [output_shape_squeeze[0], np.prod(output_shape_squeeze[1:])],
                         'kind': 'data'}
                     })
                edges.extend([('indices_data', 'flatten'),
                              ('flatten_const_indata', 'flatten_const'),
                              ('flatten_const', 'flatten_const_data'),
                              ('flatten_const_data', 'flatten'),
                              ('flatten', 'flatten_data'),
                              ('flatten_data', 'result2')])

            ref_net = build_graph(nodes_attributes, edges)

        return onnx_net, ref_net

    # Shapes of rank 2..5, with every valid axis plus the default (None).
    test_data = [
        dict(shape=[10, 12], axis=None),
        dict(shape=[10, 12], axis=1),
        dict(shape=[8, 10, 12], axis=None),
        dict(shape=[8, 10, 12], axis=1),
        dict(shape=[8, 10, 12], axis=2),
        dict(shape=[6, 8, 10, 12], axis=None),
        dict(shape=[6, 8, 10, 12], axis=1),
        dict(shape=[6, 8, 10, 12], axis=2),
        dict(shape=[6, 8, 10, 12], axis=3),
        dict(shape=[4, 6, 8, 10, 12], axis=None),
        dict(shape=[4, 6, 8, 10, 12], axis=1),
        dict(shape=[4, 6, 8, 10, 12], axis=2),
        dict(shape=[4, 6, 8, 10, 12], axis=3),
        dict(shape=[4, 6, 8, 10, 12], axis=4)]

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.parametrize("keepdims", [None, 0])
    @pytest.mark.nightly
    def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_net(**params, ir_version=ir_version, keepdims=keepdims),
                   ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
| [
"onnx.helper.make_node",
"onnx.helper.make_model",
"unit_tests.utils.graph.build_graph",
"onnx.helper.make_tensor_value_info",
"common.layer_test_class.check_ir_version",
"numpy.random.randint",
"pytest.mark.parametrize",
"onnx.helper.make_graph",
"numpy.prod"
] | [((5982, 6026), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data'], {}), "('params', test_data)\n", (6005, 6026), False, 'import pytest\n'), ((6032, 6078), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keepdims"""', '[None, 0]'], {}), "('keepdims', [None, 0])\n", (6055, 6078), False, 'import pytest\n'), ((883, 947), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input"""', 'TensorProto.FLOAT', 'shape'], {}), "('input', TensorProto.FLOAT, shape)\n", (912, 947), False, 'from onnx import helper\n'), ((965, 1050), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.INT64', 'output_shape_squeeze'], {}), "('output', TensorProto.INT64, output_shape_squeeze\n )\n", (994, 1050), False, 'from onnx import helper\n'), ((1340, 1473), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ArgMax"""'], {'inputs': "['input']", 'outputs': "['argmax' if keepdims is None or keepdims == 1 else 'output']"}), "('ArgMax', inputs=['input'], outputs=['argmax' if \n keepdims is None or keepdims == 1 else 'output'], **args)\n", (1361, 1473), False, 'import onnx\n'), ((1870, 1927), 'onnx.helper.make_graph', 'helper.make_graph', (['edges', '"""test_model"""', '[input]', '[output]'], {}), "(edges, 'test_model', [input], [output])\n", (1887, 1927), False, 'from onnx import helper\n'), ((2047, 2103), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""test_model"""'}), "(graph_def, producer_name='test_model')\n", (2064, 2103), False, 'from onnx import helper\n'), ((2196, 2234), 'common.layer_test_class.check_ir_version', 'check_ir_version', (['(10)', 'None', 'ir_version'], {}), '(10, None, ir_version)\n', (2212, 2234), False, 'from common.layer_test_class import check_ir_version\n'), ((1632, 1703), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Flatten"""'], {'inputs': "['argmax']", 'outputs': "['output']"}), "('Flatten', 
inputs=['argmax'], outputs=['output'])\n", (1653, 1703), False, 'import onnx\n'), ((5264, 5300), 'unit_tests.utils.graph.build_graph', 'build_graph', (['nodes_attributes', 'edges'], {}), '(nodes_attributes, edges)\n', (5275, 5300), False, 'from unit_tests.utils.graph import build_graph\n'), ((1063, 1111), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)', 'output_shape_squeeze'], {}), '(-10, 10, output_shape_squeeze)\n', (1080, 1111), True, 'import numpy as np\n'), ((4753, 4786), 'numpy.prod', 'np.prod', (['output_shape_squeeze[1:]'], {}), '(output_shape_squeeze[1:])\n', (4760, 4786), True, 'import numpy as np\n')] |
#import libraries
import re
import string
import tensorflow as tf
from numpy import array
from pickle import dump
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Activation
from tensorflow.keras.callbacks import EarlyStopping
#load document
def load_doc(filename):
    """Read and return the entire contents of a UTF-8 text file.

    Args:
        filename: Path to the text file to read.

    Returns:
        The file contents as a single string.
    """
    # 'with' guarantees the handle is closed even if read() raises,
    # unlike the manual open/read/close sequence.
    with open(filename, 'r', encoding="utf-8") as file:
        return file.read()
# Load the raw training corpus from disk.
# NOTE(review): assumes 'news.txt' sits next to this script — confirm.
in_filename = 'news.txt'
doc = load_doc(in_filename)
#clean text
def clean_doc(doc):
    """Tokenize a raw document into clean, lower-case word tokens.

    Hyphens are stripped first (so hyphenated words fuse into one token),
    punctuation is removed from each token, and any token that still
    contains a non-alphabetic character (digits, etc.) is discarded.

    Args:
        doc: Raw document text.

    Returns:
        A list of lower-case, purely alphabetic word tokens.
    """
    doc = doc.replace('-', '')
    tokens = doc.split()
    # Build the punctuation-deletion table once instead of compiling and
    # running a regex substitution for every token (same result, one
    # C-level pass per token).
    table = str.maketrans('', '', string.punctuation)
    tokens = [w.translate(table) for w in tokens]
    # Drop tokens with residual non-alphabetic characters.
    tokens = [word for word in tokens if word.isalpha()]
    return [word.lower() for word in tokens]
# Tokenize the corpus, then slice it into overlapping 51-token windows
# (50 context words + 1 target word), each joined back into a single
# space-separated line.
tokens = clean_doc(doc)
length = 50 + 1
sequences = [
    ' '.join(tokens[start - length:start])
    for start in range(length, len(tokens))
]
#save tokens to file
def save_doc(lines, filename):
    """Write a sequence of strings to a UTF-8 text file, one per line.

    Args:
        lines: Sequence of strings to persist.
        filename: Destination path (overwritten if it already exists).
    """
    # 'with' guarantees the handle is flushed and closed even if the
    # write fails, unlike the manual open/write/close sequence.
    with open(filename, 'w', encoding="utf-8") as file:
        file.write('\n'.join(lines))
# Persist the windowed sequences so training can restart from this file
# without repeating the cleaning/windowing pass.
out_filename = 'news_sequences.txt'
save_doc(sequences, out_filename)
#train a model
def load_doc(filename):
    """Read and return the entire contents of a UTF-8 text file.

    NOTE(review): this redefines the identical ``load_doc`` declared
    earlier in the script; consider keeping a single definition.

    Args:
        filename: Path to the text file to read.

    Returns:
        The file contents as a single string.
    """
    # 'with' guarantees the handle is closed even if read() raises.
    with open(filename, 'r', encoding="utf-8") as file:
        return file.read()
# load
# --- Train the next-word language model ---------------------------------
# Reload the windowed sequences written earlier and fit a 2-layer LSTM.
in_filename = 'news_sequences.txt'
doc = load_doc(in_filename)
lines = doc.split('\n')

# Integer-encode the words; Keras reserves index 0, hence vocab size + 1.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
sequences = tokenizer.texts_to_sequences(lines)
vocab_size = len(tokenizer.word_index) + 1

# Each row holds 51 ids: the first 50 are the input, the last the target.
sequences = array(sequences)
X, y = sequences[:, :-1], sequences[:, -1]
y = to_categorical(y, num_classes=vocab_size)
seq_length = X.shape[1]

model = Sequential()
model.add(Embedding(vocab_size, seq_length, input_length=seq_length))
model.add(LSTM(seq_length * 2, return_sequences=True))
model.add(LSTM(50))
model.add(Dense(50, activation='relu'))
model.add(Dense(vocab_size, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Stop once training loss has not improved for 5 consecutive epochs.
earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=5, verbose=0, mode='auto')
model.fit(X, y, epochs=50, verbose=2, callbacks=[earlystop])

# Persist the trained model and the tokenizer for later generation.
model.save('model.h5')
# BUG FIX: the original passed an unclosed open() handle straight to
# dump(); a context manager guarantees the pickle file is flushed and
# closed.
with open('tokenizer.pkl', 'wb') as handle:
    dump(tokenizer, handle)
| [
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Dense",
"re.escape",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.callbacks.EarlyStopping"... | [((2241, 2252), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (2250, 2252), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2392, 2408), 'numpy.array', 'array', (['sequences'], {}), '(sequences)\n', (2397, 2408), False, 'from numpy import array\n'), ((2456, 2497), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y'], {'num_classes': 'vocab_size'}), '(y, num_classes=vocab_size)\n', (2470, 2497), False, 'from tensorflow.keras.utils import to_categorical\n'), ((2534, 2546), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2544, 2546), False, 'from tensorflow.keras.models import Sequential\n'), ((2891, 2969), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='loss', min_delta=0, patience=5, verbose=0, mode='auto')\n", (2904, 2969), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((2558, 2616), 'tensorflow.keras.layers.Embedding', 'Embedding', (['vocab_size', 'seq_length'], {'input_length': 'seq_length'}), '(vocab_size, seq_length, input_length=seq_length)\n', (2567, 2616), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional\n'), ((2629, 2672), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(seq_length * 2)'], {'return_sequences': '(True)'}), '(seq_length * 2, return_sequences=True)\n', (2633, 2672), False, 'from tensorflow.keras.layers import LSTM\n'), ((2685, 2693), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(50)'], {}), '(50)\n', (2689, 2693), False, 'from tensorflow.keras.layers import LSTM\n'), ((2706, 2734), 'tensorflow.keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (2711, 2734), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional\n'), ((2747, 2786), 
'tensorflow.keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, activation='softmax')\n", (2752, 2786), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional\n'), ((986, 1015), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (995, 1015), False, 'import re\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.