code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Authors: <NAME>, <NAME> and <NAME>
All rights reserved, 2017.
"""
__all__ = ['filter_bank']
import torch
import numpy as np
import scipy.fftpack as fft
def filter_bank_real(M, N, J, L=8):
    """
    Build the Morlet filter bank (in Fourier) used by the scattering
    transform.

    Each wavelet filter is stored as a dictionary carrying its scale 'j',
    its orientation index 'theta', and one real/imag tensor per available
    resolution.  A single low-pass (Gabor) filter is stored under 'phi'.

    Parameters
    ----------
    M, N : int
        spatial support of the input
    J : int
        logscale of the scattering
    L : int, optional
        number of angles used for the wavelet transform

    Returns
    -------
    filters : dict
        {'psi': [wavelet dicts], 'phi': low-pass dict}

    Notes
    -----
    The design of the filters is optimized for the value L = 8.
    """
    bank = {'psi': [], 'phi': {}}
    pad_offset = 0
    for scale in range(J):
        for angle in range(L):
            wavelet = {'j': scale, 'theta': angle}
            spatial = morlet_2d(
                M, N, 0.8 * 2 ** scale,
                (int(L - L / 2 - 1) - angle) * np.pi / L,
                3.0 / 4.0 * np.pi / 2 ** scale,
                4.0 / L, offset=pad_offset)
            spectrum = fft.fft2(spatial)
            for res in range(scale + 1):
                periodized = periodize_filter_fft(spectrum, res)
                wavelet[res] = torch.FloatTensor(
                    np.stack((np.real(periodized), np.imag(periodized)), axis=2))
                # Normalization to avoid doing it with the FFT!
                wavelet[res].div_(M * N // 2 ** (2 * scale))
            bank['psi'].append(wavelet)
    lowpass = gabor_2d(M, N, 0.8 * 2 ** (J - 1), 0, 0, offset=pad_offset)
    lowpass_spectrum = fft.fft2(lowpass)
    bank['phi']['j'] = J
    for res in range(J):
        periodized = periodize_filter_fft(lowpass_spectrum, res)
        bank['phi'][res] = torch.FloatTensor(
            np.stack((np.real(periodized), np.imag(periodized)), axis=2))
        bank['phi'][res].div_(M * N // 2 ** (2 * J))
    return bank
def filter_bank(M, N, J, L=8, cache=False):
    """
    Builds in Fourier the Morlet filters used for the scattering transform,
    with optional on-disk caching of the computed filter bank.

    Parameters
    ----------
    M, N : int
        spatial support of the input
    J : int
        logscale of the scattering
    L : int, optional
        number of angles used for the wavelet transform
        N.B.: the design of the filters is optimized for the value L = 8
    cache : string or False
        If False, returns the same as filter_bank_real. Otherwise, it
        stores the computed filters in the file named by `cache`. This is
        particularly useful when the filters are large and avoids
        recomputing them at each call to the package.

    Returns
    -------
    filters : dict
        Dictionary containing respectively the low-pass ('phi') and
        wavelet ('psi') filters.

    Notes
    -----
    Bug fix: the uncached path previously called the undefined name
    `filters_bank_real` and raised NameError; it now calls
    `filter_bank_real`.
    """
    if not cache:
        return filter_bank_real(M, N, J, L)
    try:
        print('Attempting to load from ',cache,' ...')
        data = torch.load(cache)
        # A cached bank is only valid for identical parameters.
        assert M == data['M'], 'M mismatch'
        assert N == data['N'], 'N mismatch'
        assert J == data['J'], 'J mismatch'
        assert L == data['L'], 'L mismatch'
        filters = data['filters']
        print('Loaded.')
        return filters
    except Exception as e:
        print('Load Error: ',e)
    # Cache miss or parameter mismatch: recompute and refresh the cache.
    print('(Re-)computing filters.')
    filters = filter_bank_real(M, N, J, L)
    print('Attempting to save to ',cache,' ...')
    try:
        with open(cache, 'wb') as fp:
            data = {'M':M, 'N':N, 'J':J, 'L':L, 'filters':filters}
            torch.save(data, cache)
            print('Saved.')
    except Exception as f:
        # Best-effort: a failed save still returns the computed filters.
        print('Save Error: ',f)
    return filters
def periodize_filter_fft(x, res):
    """
    Crop a filter in Fourier down to resolution `res` by periodization.

    Parameters
    ----------
    x : numpy array
        signal to periodize in Fourier
    res : int
        resolution to which the signal is cropped

    Returns
    -------
    crop : numpy array, complex64, shape (M // 2**res, N // 2**res)
        A cropped version of the filter, assuming that the convolutions
        will be done via compactly supported signals.

    Notes
    -----
    Improvement: the original four nested Python loops performed O(M*N)
    scalar additions per call; they are replaced by a single vectorised
    reshape-and-sum that computes the identical aliased sum
    crop[k, l] = sum_{i, j} x[k + i*(M//2**res), l + j*(N//2**res)].
    """
    M = x.shape[0]
    N = x.shape[1]
    # Zero out the high-frequency band that would alias ambiguously.
    mask = np.ones(x.shape, np.float32)
    len_x = int(M * (1 - 2 ** (-res)))
    start_x = int(M * 2 ** (-res - 1))
    len_y = int(N * (1 - 2 ** (-res)))
    start_y = int(N * 2 ** (-res - 1))
    mask[start_x:start_x + len_x, :] = 0
    mask[:, start_y:start_y + len_y] = 0
    x = np.multiply(x, mask)
    # Fold the factor x factor grid of blocks onto a single block.
    mm = int(M / 2 ** res)
    nn = int(N / 2 ** res)
    factor = 2 ** res
    blocks = x[:mm * factor, :nn * factor].reshape(factor, mm, factor, nn)
    crop = blocks.sum(axis=(0, 2)).astype(np.complex64)
    return crop
def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):
    """
    Compute a 2D Morlet filter.

    A Morlet filter is a Gabor filter minus a scaled low-pass envelope so
    that the result has exactly zero mean in the spatial domain:
        psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)
    where g_{sigma} is a Gaussian envelope, xi a frequency, and beta the
    cancelling parameter.

    Parameters
    ----------
    M, N : int
        spatial sizes
    sigma : float
        bandwidth parameter
    theta : float
        angle in [0, pi]
    xi : float
        central frequency (in [0, 1])
    slant : float, optional
        parameter which guides the elipsoidal shape of the morlet
    offset : int, optional
        offset by which the signal starts
    fft_shift : boolean
        if true, shift the signal in a numpy style

    Returns
    -------
    ndarray of size (M, N)
    """
    gabor = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)
    envelope = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)
    # beta makes the wavelet integrate to exactly zero.
    beta = np.sum(gabor) / np.sum(envelope)
    return gabor - beta * envelope
def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):
    """
    Compute a 2D Gabor filter:
        psi(u) = g_{sigma}(u) e^(i xi^T u)
    where g_{sigma} is a (rotated, possibly elliptical) Gaussian envelope
    and xi is a frequency.  The envelope is periodized by summing over a
    5x5 grid of spatial shifts.

    Parameters
    ----------
    M, N : int
        spatial sizes
    sigma : float
        bandwidth parameter
    theta : float
        angle in [0, pi]
    xi : float
        central frequency (in [0, 1])
    slant : float, optional
        parameter which guides the elipsoidal shape of the morlet
    offset : int, optional
        offset by which the signal starts
    fft_shift : boolean
        if true, shift the signal in a numpy style

    Returns
    -------
    ndarray of size (M, N)
    """
    gab = np.zeros((M, N), np.complex64)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]], np.float32)
    rotation_inv = np.array([[cos_t, sin_t], [-sin_t, cos_t]], np.float32)
    diag = np.array([[1, 0], [0, slant * slant]])
    # Curvature matrix of the rotated elliptical Gaussian.
    curv = np.dot(rotation, np.dot(diag, rotation_inv)) / (2 * sigma * sigma)
    for shift_x in [-2, -1, 0, 1, 2]:
        for shift_y in [-2, -1, 0, 1, 2]:
            [xx, yy] = np.mgrid[offset + shift_x * M:offset + M + shift_x * M,
                                offset + shift_y * N:offset + N + shift_y * N]
            arg = -(curv[0, 0] * np.multiply(xx, xx)
                    + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy)
                    + curv[1, 1] * np.multiply(yy, yy)) \
                + 1.j * (xx * xi * cos_t + yy * xi * sin_t)
            gab = gab + np.exp(arg)
    gab = gab / (2 * 3.1415 * sigma * sigma / slant)
    if fft_shift:
        gab = np.fft.fftshift(gab, axes=(0, 1))
    return gab
| [
"numpy.multiply",
"numpy.ones",
"torch.load",
"scipy.fftpack.fft2",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.dot",
"numpy.cos",
"torch.save",
"numpy.exp",
"numpy.sin",
"numpy.fft.fftshift",
"numpy.real",
"numpy.imag"
] | [((1842, 1862), 'scipy.fftpack.fft2', 'fft.fft2', (['phi_signal'], {}), '(phi_signal)\n', (1850, 1862), True, 'import scipy.fftpack as fft\n'), ((4651, 4705), 'numpy.zeros', 'np.zeros', (['(M // 2 ** res, N // 2 ** res)', 'np.complex64'], {}), '((M // 2 ** res, N // 2 ** res), np.complex64)\n', (4659, 4705), True, 'import numpy as np\n'), ((4718, 4746), 'numpy.ones', 'np.ones', (['x.shape', 'np.float32'], {}), '(x.shape, np.float32)\n', (4725, 4746), True, 'import numpy as np\n'), ((4992, 5012), 'numpy.multiply', 'np.multiply', (['x', 'mask'], {}), '(x, mask)\n', (5003, 5012), True, 'import numpy as np\n'), ((7574, 7604), 'numpy.zeros', 'np.zeros', (['(M, N)', 'np.complex64'], {}), '((M, N), np.complex64)\n', (7582, 7604), True, 'import numpy as np\n'), ((7809, 7847), 'numpy.array', 'np.array', (['[[1, 0], [0, slant * slant]]'], {}), '([[1, 0], [0, slant * slant]])\n', (7817, 7847), True, 'import numpy as np\n'), ((3416, 3433), 'torch.load', 'torch.load', (['cache'], {}), '(cache)\n', (3426, 3433), False, 'import torch\n'), ((6561, 6571), 'numpy.sum', 'np.sum', (['wv'], {}), '(wv)\n', (6567, 6571), True, 'import numpy as np\n'), ((6574, 6592), 'numpy.sum', 'np.sum', (['wv_modulus'], {}), '(wv_modulus)\n', (6580, 6592), True, 'import numpy as np\n'), ((8458, 8491), 'numpy.fft.fftshift', 'np.fft.fftshift', (['gab'], {'axes': '(0, 1)'}), '(gab, axes=(0, 1))\n', (8473, 8491), True, 'import numpy as np\n'), ((1294, 1314), 'scipy.fftpack.fft2', 'fft.fft2', (['psi_signal'], {}), '(psi_signal)\n', (1302, 1314), True, 'import scipy.fftpack as fft\n'), ((7869, 7885), 'numpy.dot', 'np.dot', (['D', 'R_inv'], {}), '(D, R_inv)\n', (7875, 7885), True, 'import numpy as np\n'), ((4030, 4053), 'torch.save', 'torch.save', (['data', 'cache'], {}), '(data, cache)\n', (4040, 4053), False, 'import torch\n'), ((7624, 7637), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7630, 7637), True, 'import numpy as np\n'), ((7657, 7670), 'numpy.sin', 'np.sin', (['theta'], {}), 
'(theta)\n', (7663, 7670), True, 'import numpy as np\n'), ((7672, 7685), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7678, 7685), True, 'import numpy as np\n'), ((7724, 7737), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7730, 7737), True, 'import numpy as np\n'), ((7739, 7752), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7745, 7752), True, 'import numpy as np\n'), ((7772, 7785), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7778, 7785), True, 'import numpy as np\n'), ((8327, 8338), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (8333, 8338), True, 'import numpy as np\n'), ((2051, 2082), 'numpy.real', 'np.real', (['phi_signal_fourier_res'], {}), '(phi_signal_fourier_res)\n', (2058, 2082), True, 'import numpy as np\n'), ((2084, 2115), 'numpy.imag', 'np.imag', (['phi_signal_fourier_res'], {}), '(phi_signal_fourier_res)\n', (2091, 2115), True, 'import numpy as np\n'), ((7640, 7653), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7646, 7653), True, 'import numpy as np\n'), ((7757, 7770), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7763, 7770), True, 'import numpy as np\n'), ((1492, 1523), 'numpy.real', 'np.real', (['psi_signal_fourier_res'], {}), '(psi_signal_fourier_res)\n', (1499, 1523), True, 'import numpy as np\n'), ((1525, 1556), 'numpy.imag', 'np.imag', (['psi_signal_fourier_res'], {}), '(psi_signal_fourier_res)\n', (1532, 1556), True, 'import numpy as np\n'), ((8222, 8241), 'numpy.multiply', 'np.multiply', (['yy', 'yy'], {}), '(yy, yy)\n', (8233, 8241), True, 'import numpy as np\n'), ((8262, 8275), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (8268, 8275), True, 'import numpy as np\n'), ((8288, 8301), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (8294, 8301), True, 'import numpy as np\n'), ((8120, 8139), 'numpy.multiply', 'np.multiply', (['xx', 'xx'], {}), '(xx, xx)\n', (8131, 8139), True, 'import numpy as np\n'), ((8170, 8189), 'numpy.multiply', 'np.multiply', (['xx', 'yy'], {}), '(xx, 
yy)\n', (8181, 8189), True, 'import numpy as np\n')] |
import os.path
import numpy as np
from lyapunov_reachability.speculation_tabular.base import QBase
import cplex
from cplex.exceptions import CplexSolverError
class LyapunovQAgent(QBase):
    """Tabular Q-learning agent with Lyapunov-constrained policy improvement.

    Besides the reachability Q-function inherited from QBase, the agent
    keeps an "operative" Q-function (reachability under the *current*
    policy), a time-to-termination Q-function, and a Lyapunov Q-function
    derived from both.  Policy improvement solves a small linear program
    per state with CPLEX: minimise the expected Lyapunov value subject to
    the probabilities summing to one and a Lyapunov decrease constraint.
    """
    def __init__(
            self, env, confidence, nb_states, nb_actions, initial_policy, terminal_states, seed=None, strict_done=True,
            safe_init=False, baseline_dir=None, baseline_step=None, save_dir='../../spec-tb-lyapunov'):
        # Reachability of the unsafe set under the current policy:
        # pessimistic (=1) everywhere except the terminal states.
        self.operative_q = np.ones((nb_states, nb_actions))
        self.operative_q[terminal_states] = 0.
        # Expected remaining episode length per state-action pair.
        self.time_q = np.zeros((nb_states, nb_actions))
        # Lyapunov Q-function; starts as a copy of operative_q.
        self.lyapunov_q = self.operative_q * 1.
        self.auxiliary_cost = 0.
        super(LyapunovQAgent, self).__init__(
            env, confidence, nb_states, nb_actions, initial_policy, terminal_states, seed=seed, strict_done=strict_done,
            safe_init=safe_init, baseline_dir=baseline_dir, baseline_step=baseline_step, save_dir=save_dir)
    def load_baseline(self, baseline):
        """Load reachability/update tables (and optionally a policy) from a .npz file."""
        data = np.load(baseline)
        self.reachability_q[:] = data['reachability_q']
        self.updates_q[:] = data['updates_q']
        if 'policy' in data.keys():
            self.policy = data['policy']
        else:
            # No stored policy: spread probability uniformly over the
            # safest action(s) of each state.
            safest_reachability = np.min(self.reachability_q, axis=-1, keepdims=True)
            self.policy[:] = (self.reachability_q - safest_reachability == 0.) * 1.
            self.policy[:] = self.policy / np.sum(self.policy, axis=-1, keepdims=True)
        self.operative_q[:] = data['reachability_q']
    def save_model(self, path):
        """Serialise the learned tables and counters to `path + '.npz'`."""
        info = dict()
        info['policy'] = self.policy
        info['steps'] = self.steps
        # Q learning-specific properties
        info['reachability_q'] = self.reachability_q
        info['updates_q'] = self.updates_q
        info['operative_q'] = self.operative_q
        info['time_q'] = self.time_q
        info['lyapunov_q'] = self.lyapunov_q
        # Other values...
        info['auxiliary_cost'] = self.auxiliary_cost
        np.savez(path + '.npz', **info)
    def load_model(self, load_dir):
        """Restore the agent from `<load_dir>/<self.steps>.npz` (see save_model)."""
        data = np.load(os.path.join(load_dir, '{}.npz'.format(self.steps)))
        self.reachability_q = data['reachability_q']
        self.updates_q = data['updates_q']
        self.operative_q = data['operative_q']
        self.time_q = data['time_q']
        self.lyapunov_q = data['lyapunov_q']
        self.auxiliary_cost = data['auxiliary_cost']
    def step(self, state, **kwargs):
        """Sample an action for `state` from the stochastic policy.

        Epsilon-greedy exploration (kwargs['epsilon']) may replace the
        sampled action with a random one; if no action keeps reachability
        below 1 - confidence, the least-unsafe action is forced instead.
        """
        try:
            action = np.random.choice(self.nb_actions, 1, p=self.policy[state, :])[0]
            if kwargs.get('epsilon', 0.) > np.random.rand():
                action = self.env.action_space.sample()
            if np.min(self.reachability_q[state, :]) > 1. - self.confidence:
                # No sufficiently safe action exists: take the safest one.
                action = np.argmin(self.reachability_q[state, :])
            return action
        except ValueError:
            # NOTE(review): an invalid probability vector only prints here
            # and the method implicitly returns None -- confirm callers cope.
            print("Error: stochastic policy is not feasible. Policy=\t" + str(self.policy[state, :]))
    def extra_setup(self, steps, episode_length, improve_interval, log_interval, save_interval, **kwargs):
        # Initialise the time-to-termination estimate with the horizon.
        self.time_q[:] = episode_length
    def _log_auxiliary(self, **kwargs):
        # No agent-specific logging.
        return
    def _iteration(self, t, state, action, next_state, safety, done, **kwargs):
        """One TD update of the reachability/operative/time Q-tables, plus a
        periodic Lyapunov policy-improvement sweep every `improve_interval` steps."""
        improve_interval = kwargs.get('improve_interval', 1)
        lr = kwargs.get('learning_rate', 1.)
        gamma = kwargs.get('gamma', .99)
        criterion = kwargs.get('criterion', 1e2)
        # Approximate the Q-functions ---------------------------------------------------------------------------------
        self.updates_q[state, action] += 1.
        # Per-pair decaying learning rate.
        _lr = lr / (0.99 + 0.01 * self.updates_q[state, action])
        if safety == 0.:
            # An unsafe transition was observed: the unsafe set is reached.
            self.reachability_q[state, :] = 1.
            self.operative_q[state, :] = 1.
        else:
            self.reachability_q[state, action] =\
                (1. - _lr) * self.reachability_q[state, action] +\
                _lr * gamma * np.min(self.reachability_q[next_state, :]) * (1. - done)
            self.operative_q[state, action] =\
                (1. - _lr) * self.operative_q[state, action] +\
                _lr * gamma * np.sum(self.operative_q[next_state, :] * self.policy[next_state, :]) * (1. - done)
        self.time_q[state, action] =\
            (1. - _lr) * self.time_q[state, action] +\
            _lr * (1. + np.sum(self.time_q[next_state, :] * self.policy[next_state, :])) * (1. - done)
        # Improve the policy ------------------------------------------------------------------------------------------
        if t % improve_interval == 0:
            # A state counts as converged once every action was tried more
            # than `criterion` times; its counters are then reset.
            convergence_mask = np.min(self.updates_q, -1) > criterion
            self.updates_q[convergence_mask] *= 0.
            self._policy_improvement(1. * convergence_mask)
        return
    def _policy_improvement(self, convergence_mask):
        """Recompute the policy by solving one small LP per state with CPLEX."""
        converged_states = np.where(convergence_mask > 0.)[0]
        _operative_v = np.sum(self.operative_q * self.policy, -1)
        _operative_t = np.sum(self.time_q * self.policy, -1)
        try:
            _max_reachability = np.max(_operative_v[_operative_v <= 1. - self.confidence])
        except ValueError:
            # No state satisfies the confidence bound yet.
            _max_reachability = 1. - self.confidence
        # Slack added to the Lyapunov function per expected remaining step.
        epsilon = ((1. - self.confidence) - _max_reachability) / np.max(_operative_t)
        _lyapunov_q = self.operative_q + self.time_q * epsilon
        invalid_indices = np.isnan(_lyapunov_q)
        valid_indices = ~invalid_indices
        self.lyapunov_q[valid_indices] = _lyapunov_q[valid_indices]
        self.lyapunov_q[invalid_indices] = self.operative_q[invalid_indices]
        c = cplex.Cplex()
        c.set_log_stream(None)
        c.set_error_stream(None)
        c.set_warning_stream(None)
        c.set_results_stream(None)
        # NOTE(review): converged_states is computed but unused -- the loop
        # below visits every state (the restricted loop is commented out);
        # confirm which is intended.
        # for state in converged_states:
        for state in range(self.nb_states):
            c.variables.delete()
            c.linear_constraints.delete()
            # Objective: Minimize pi(*|x) * Q_L(x,*) for each x. *Get the safest*
            # Bounds: pi(a|x) >= 0 for all a (same as default setting)
            obj = self.lyapunov_q[state, :] - np.min(self.lyapunov_q[state, :])
            lb = [0.0] * self.nb_actions
            indices = list(c.variables.add(obj=list(obj), lb=lb))
            # Subject to: (1) sum(pi(*|x)) == 1, (2) pi(*|x) * Q_L(x,*) <= L(x)
            # (2) is inequality, (1) is equality constraint. ("L")
            A = [cplex.SparsePair(indices[:], [1.] * self.nb_actions)]
            b = [1.]
            senses = ["E"]
            # (2) only applies when the state is safe.
            A.append(cplex.SparsePair(indices[:], list(self.lyapunov_q[state, :])))
            b.append(np.sum(self.lyapunov_q[state, :] * self.policy[state, :]) + epsilon)
            senses.append("L")
            c.linear_constraints.add(lin_expr=A, senses=senses, rhs=b)
            try:
                c.solve()
                _answer = np.array(c.solution.get_values())
                # Accept the LP solution only if it is a valid probability vector.
                if np.sum(_answer) == 1. and np.sum(_answer > 1.) == 0 and np.sum(_answer < 0.) == 0:
                    self.policy[state, :] = _answer
            except CplexSolverError:
                print("Error: unable to find feasible policy at [state ID: %d]." % state)
        return
| [
"numpy.savez",
"numpy.ones",
"numpy.random.rand",
"numpy.where",
"numpy.random.choice",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"cplex.Cplex",
"numpy.isnan",
"numpy.min",
"numpy.argmin",
"numpy.load",
"cplex.SparsePair"
] | [((473, 505), 'numpy.ones', 'np.ones', (['(nb_states, nb_actions)'], {}), '((nb_states, nb_actions))\n', (480, 505), True, 'import numpy as np\n'), ((577, 610), 'numpy.zeros', 'np.zeros', (['(nb_states, nb_actions)'], {}), '((nb_states, nb_actions))\n', (585, 610), True, 'import numpy as np\n'), ((1034, 1051), 'numpy.load', 'np.load', (['baseline'], {}), '(baseline)\n', (1041, 1051), True, 'import numpy as np\n'), ((2076, 2107), 'numpy.savez', 'np.savez', (["(path + '.npz')"], {}), "(path + '.npz', **info)\n", (2084, 2107), True, 'import numpy as np\n'), ((5088, 5130), 'numpy.sum', 'np.sum', (['(self.operative_q * self.policy)', '(-1)'], {}), '(self.operative_q * self.policy, -1)\n', (5094, 5130), True, 'import numpy as np\n'), ((5155, 5192), 'numpy.sum', 'np.sum', (['(self.time_q * self.policy)', '(-1)'], {}), '(self.time_q * self.policy, -1)\n', (5161, 5192), True, 'import numpy as np\n'), ((5561, 5582), 'numpy.isnan', 'np.isnan', (['_lyapunov_q'], {}), '(_lyapunov_q)\n', (5569, 5582), True, 'import numpy as np\n'), ((5795, 5808), 'cplex.Cplex', 'cplex.Cplex', ([], {}), '()\n', (5806, 5808), False, 'import cplex\n'), ((1285, 1336), 'numpy.min', 'np.min', (['self.reachability_q'], {'axis': '(-1)', 'keepdims': '(True)'}), '(self.reachability_q, axis=-1, keepdims=True)\n', (1291, 1336), True, 'import numpy as np\n'), ((5029, 5061), 'numpy.where', 'np.where', (['(convergence_mask > 0.0)'], {}), '(convergence_mask > 0.0)\n', (5037, 5061), True, 'import numpy as np\n'), ((5240, 5299), 'numpy.max', 'np.max', (['_operative_v[_operative_v <= 1.0 - self.confidence]'], {}), '(_operative_v[_operative_v <= 1.0 - self.confidence])\n', (5246, 5299), True, 'import numpy as np\n'), ((5447, 5467), 'numpy.max', 'np.max', (['_operative_t'], {}), '(_operative_t)\n', (5453, 5467), True, 'import numpy as np\n'), ((1466, 1509), 'numpy.sum', 'np.sum', (['self.policy'], {'axis': '(-1)', 'keepdims': '(True)'}), '(self.policy, axis=-1, keepdims=True)\n', (1472, 1509), True, 'import 
numpy as np\n'), ((2584, 2645), 'numpy.random.choice', 'np.random.choice', (['self.nb_actions', '(1)'], {'p': 'self.policy[state, :]'}), '(self.nb_actions, 1, p=self.policy[state, :])\n', (2600, 2645), True, 'import numpy as np\n'), ((2693, 2709), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2707, 2709), True, 'import numpy as np\n'), ((2784, 2821), 'numpy.min', 'np.min', (['self.reachability_q[state, :]'], {}), '(self.reachability_q[state, :])\n', (2790, 2821), True, 'import numpy as np\n'), ((2872, 2912), 'numpy.argmin', 'np.argmin', (['self.reachability_q[state, :]'], {}), '(self.reachability_q[state, :])\n', (2881, 2912), True, 'import numpy as np\n'), ((4777, 4803), 'numpy.min', 'np.min', (['self.updates_q', '(-1)'], {}), '(self.updates_q, -1)\n', (4783, 4803), True, 'import numpy as np\n'), ((6317, 6350), 'numpy.min', 'np.min', (['self.lyapunov_q[state, :]'], {}), '(self.lyapunov_q[state, :])\n', (6323, 6350), True, 'import numpy as np\n'), ((6629, 6682), 'cplex.SparsePair', 'cplex.SparsePair', (['indices[:]', '([1.0] * self.nb_actions)'], {}), '(indices[:], [1.0] * self.nb_actions)\n', (6645, 6682), False, 'import cplex\n'), ((6896, 6953), 'numpy.sum', 'np.sum', (['(self.lyapunov_q[state, :] * self.policy[state, :])'], {}), '(self.lyapunov_q[state, :] * self.policy[state, :])\n', (6902, 6953), True, 'import numpy as np\n'), ((4084, 4126), 'numpy.min', 'np.min', (['self.reachability_q[next_state, :]'], {}), '(self.reachability_q[next_state, :])\n', (4090, 4126), True, 'import numpy as np\n'), ((4285, 4353), 'numpy.sum', 'np.sum', (['(self.operative_q[next_state, :] * self.policy[next_state, :])'], {}), '(self.operative_q[next_state, :] * self.policy[next_state, :])\n', (4291, 4353), True, 'import numpy as np\n'), ((4488, 4551), 'numpy.sum', 'np.sum', (['(self.time_q[next_state, :] * self.policy[next_state, :])'], {}), '(self.time_q[next_state, :] * self.policy[next_state, :])\n', (4494, 4551), True, 'import numpy as np\n'), ((7195, 7210), 
'numpy.sum', 'np.sum', (['_answer'], {}), '(_answer)\n', (7201, 7210), True, 'import numpy as np\n'), ((7221, 7242), 'numpy.sum', 'np.sum', (['(_answer > 1.0)'], {}), '(_answer > 1.0)\n', (7227, 7242), True, 'import numpy as np\n'), ((7251, 7272), 'numpy.sum', 'np.sum', (['(_answer < 0.0)'], {}), '(_answer < 0.0)\n', (7257, 7272), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# author: peilun
# 特征融合
# 15
import numpy as np
import os
input_dir = "../aic19-track1-mtmc/train"
def load_ft_file(feature_file):
    """Parse a deep-feature file into a {image_key: feature_vector} dict.

    Each line has the form "<image_key> f1 f2 ... fk" (whitespace
    separated); the features become a 1-D float numpy array.

    Parameters
    ----------
    feature_file : str
        Path to the feature file.

    Returns
    -------
    dict mapping str -> np.ndarray

    Fixes: the file handle was never closed, an unused line counter was
    kept, and a blank line inside the file crashed on `words[0]`; the
    parser now uses a with-block, iterates lines directly, and skips
    empty lines.
    """
    img2deepft_dict = {}
    with open(feature_file, 'r') as fh:
        for line in fh:
            words = line.split()
            if not words:
                continue  # tolerate blank lines
            img2deepft_dict[words[0]] = np.array([float(w) for w in words[1:]])
    return img2deepft_dict
def load_gps_ft_file(gps_ft_file):
    """Parse a GPS-feature file into a {key: csv_tail} dict.

    Each line is comma separated; the first field is the key and the
    value is the remaining fields re-joined with ',' including a trailing
    comma (the downstream writer appends the deep features after it).

    Fixes: the file handle returned by open(...).readlines() was never
    closed; reading now happens inside a with-block.
    """
    img2gpsft_dict = {}
    with open(gps_ft_file) as fh:
        for line in fh:
            words = line.strip('\n').split(',')
            # Re-serialise everything after the key, keeping the trailing comma.
            img2gpsft_dict[words[0]] = ''.join(w + ',' for w in words[1:])
    return img2gpsft_dict
def main():
    """Fuse per-detection GPS features with deep re-id features.

    Walks every scene directory under `input_dir`, and inside each scene
    every camera directory (names starting with 'c0').  For each camera it
    merges `det_gps_feature.txt` with `deep_features.txt` (keyed by image
    name) and writes one CSV line per detection to
    `det_reid_features.txt`.
    """
    scene_dirs = [os.path.join(input_dir, name) for name in os.listdir(input_dir)]
    for scene_dir in scene_dirs:
        camera_dirs = [os.path.join(scene_dir, name)
                       for name in os.listdir(scene_dir)
                       if name.startswith('c0')]
        for camera_dir in camera_dirs:
            print(camera_dir)
            gps_path = camera_dir + '/det_gps_feature.txt'
            deep_path = camera_dir + '/deep_features.txt'
            out_path = camera_dir + '/det_reid_features.txt'
            img2gpsft_dict = load_gps_ft_file(gps_path)
            print('loading deep feature file...')
            img2deepft_dict = load_ft_file(deep_path)
            print('load done.')
            with open(out_path, 'w') as out:
                for key in img2gpsft_dict:
                    # GPS tail already ends with ',' -- append the deep
                    # feature values comma-separated, then a newline.
                    fts = img2deepft_dict[key]
                    parts = [img2gpsft_dict[key], str(fts[0])]
                    for idx in range(1, fts.size):
                        parts.append(',' + str(fts[idx]))
                    parts.append('\n')
                    out.write(''.join(parts))
# Script entry point: run the feature fusion over all scenes and cameras.
if __name__ == '__main__':
    main()
"numpy.zeros",
"os.listdir",
"os.path.join"
] | [((1024, 1045), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (1034, 1045), False, 'import os\n'), ((1210, 1231), 'os.listdir', 'os.listdir', (['scene_dir'], {}), '(scene_dir)\n', (1220, 1231), False, 'import os\n'), ((440, 451), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (448, 451), True, 'import numpy as np\n'), ((1103, 1136), 'os.path.join', 'os.path.join', (['input_dir', 'scene_fd'], {}), '(input_dir, scene_fd)\n', (1115, 1136), False, 'import os\n'), ((1326, 1353), 'os.path.join', 'os.path.join', (['scene_dir', 'fd'], {}), '(scene_dir, fd)\n', (1338, 1353), False, 'import os\n')] |
import numpy as np
from numpy.testing import assert_equal
from terrapin.flow_direction import aread8, convert_d8_directions
# Each entry is [d8_encoding_format, flow_direction_grid,
# expected_flow_accumulation_grid]; the grids are hand-verified examples
# taken from the cited references.
test_sets = [
    # source:
    # http://resources.arcgis.com/en/help/main/10.1/index.html#//009z00000051000000
    # lower right corner of flow accumulation array is 2 in url but it should be 1
    # confirmed in example in url -> http://www.nws.noaa.gov/ohd/hrl/gis/data.html
    ['esri',
     np.array([
         [  2,   2,   2,   4,   4,   8],
         [  2,   2,   2,   4,   4,   8],
         [  1,   1,   2,   4,   8,   4],
         [128, 128,   1,   2,   4,   8],
         [  2,   2,   1,   4,   4,   4],
         [  1,   1,   1,   1,   4,  16],
     ]),
     np.array([
         [0, 0, 0, 0, 0, 0],
         [0, 1, 1, 2, 2, 0],
         [0, 3, 7, 5, 4, 0],
         [0, 0, 0, 20, 0, 1],
         [0, 0, 0, 1, 24, 0],
         [0, 2, 4, 7, 35, 1]  # lower-right corrected from 2 to 1, see note above
     ])
    ],
    # source: http://www.geo.uzh.ch/microsite/geo372/PDF/GEO372_W7_Hydrology_2013.pdf
    ['esri',
     np.array([
         [32, 16, 16, 16, 16, 16],
         [64, 32, 16, 32, 16, 16],
         [64, 64, 32, 64, 64, 32],
         [64, 32, 32, 32, 32, 32],
         [64, 32, 16, 32, 32, 32],
         [64, 16, 32, 32, 32, 16],
     ]),
     np.array([
         [35, 13, 12,  2,  1,  0],
         [10,  9,  0,  8,  4,  0],
         [ 9,  4,  2,  2,  1,  0],
         [ 7,  0,  3,  1,  1,  0],
         [ 2,  3,  1,  2,  0,  0],
         [ 1,  0,  0,  0,  1,  0],
     ])
    ],
    # source: http://www.geospatialworld.net/paper/application/ArticleView.aspx?aid=1356
    ['esri',
     np.array([
         [ 1, 64,  1, 64, 16, 16],
         [ 4, 64, 32, 64, 32,  4],
         [16, 16, 16,  1,  1,  1],
         [64, 32,  2,  4,  8,  4],
         [ 4,  4,  1,  4, 16,  4],
         [ 4,  4,  1,  4, 16, 16],
     ]),
     np.array([
         [0, 3, 0, 5, 1, 0],  # the 5 is a 9 in the paper which is wrong.
         [0, 0, 0, 0, 0, 0],
         [5, 1, 0, 0, 1, 3],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 5, 0, 1],
         [1, 1, 0, 11, 3, 2],  # the 11 is a 9 in the paper which is wrong.
     ])
    ],
]
def test_flow_accumulation():
    """Check aread8 flow accumulation against the hand-verified grids."""
    for direction_format, directions, expected in test_sets:
        decoded = convert_d8_directions(directions, direction_format, inverse=True)
        accumulator = aread8(decoded)
        accumulator.accumulate()
        assert_equal(expected, accumulator.accumulation)
| [
"numpy.array",
"terrapin.flow_direction.convert_d8_directions",
"numpy.testing.assert_equal",
"terrapin.flow_direction.aread8"
] | [((425, 564), 'numpy.array', 'np.array', (['[[2, 2, 2, 4, 4, 8], [2, 2, 2, 4, 4, 8], [1, 1, 2, 4, 8, 4], [128, 128, 1, \n 2, 4, 8], [2, 2, 1, 4, 4, 4], [1, 1, 1, 1, 4, 16]]'], {}), '([[2, 2, 2, 4, 4, 8], [2, 2, 2, 4, 4, 8], [1, 1, 2, 4, 8, 4], [128,\n 128, 1, 2, 4, 8], [2, 2, 1, 4, 4, 4], [1, 1, 1, 1, 4, 16]])\n', (433, 564), True, 'import numpy as np\n'), ((692, 829), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0], [0, 1, 1, 2, 2, 0], [0, 3, 7, 5, 4, 0], [0, 0, 0, 20, \n 0, 1], [0, 0, 0, 1, 24, 0], [0, 2, 4, 7, 35, 1]]'], {}), '([[0, 0, 0, 0, 0, 0], [0, 1, 1, 2, 2, 0], [0, 3, 7, 5, 4, 0], [0, 0,\n 0, 20, 0, 1], [0, 0, 0, 1, 24, 0], [0, 2, 4, 7, 35, 1]])\n', (700, 829), True, 'import numpy as np\n'), ((1025, 1201), 'numpy.array', 'np.array', (['[[32, 16, 16, 16, 16, 16], [64, 32, 16, 32, 16, 16], [64, 64, 32, 64, 64, \n 32], [64, 32, 32, 32, 32, 32], [64, 32, 16, 32, 32, 32], [64, 16, 32, \n 32, 32, 16]]'], {}), '([[32, 16, 16, 16, 16, 16], [64, 32, 16, 32, 16, 16], [64, 64, 32, \n 64, 64, 32], [64, 32, 32, 32, 32, 32], [64, 32, 16, 32, 32, 32], [64, \n 16, 32, 32, 32, 16]])\n', (1033, 1201), True, 'import numpy as np\n'), ((1260, 1399), 'numpy.array', 'np.array', (['[[35, 13, 12, 2, 1, 0], [10, 9, 0, 8, 4, 0], [9, 4, 2, 2, 1, 0], [7, 0, 3, \n 1, 1, 0], [2, 3, 1, 2, 0, 0], [1, 0, 0, 0, 1, 0]]'], {}), '([[35, 13, 12, 2, 1, 0], [10, 9, 0, 8, 4, 0], [9, 4, 2, 2, 1, 0], [\n 7, 0, 3, 1, 1, 0], [2, 3, 1, 2, 0, 0], [1, 0, 0, 0, 1, 0]])\n', (1268, 1399), True, 'import numpy as np\n'), ((1604, 1754), 'numpy.array', 'np.array', (['[[1, 64, 1, 64, 16, 16], [4, 64, 32, 64, 32, 4], [16, 16, 16, 1, 1, 1], [64,\n 32, 2, 4, 8, 4], [4, 4, 1, 4, 16, 4], [4, 4, 1, 4, 16, 16]]'], {}), '([[1, 64, 1, 64, 16, 16], [4, 64, 32, 64, 32, 4], [16, 16, 16, 1, 1,\n 1], [64, 32, 2, 4, 8, 4], [4, 4, 1, 4, 16, 4], [4, 4, 1, 4, 16, 16]])\n', (1612, 1754), True, 'import numpy as np\n'), ((1839, 1974), 'numpy.array', 'np.array', (['[[0, 3, 0, 5, 1, 0], [0, 0, 0, 0, 0, 0], [5, 1, 0, 0, 1, 3], 
[0, 0, 0, 0, 0,\n 0], [0, 0, 0, 5, 0, 1], [1, 1, 0, 11, 3, 2]]'], {}), '([[0, 3, 0, 5, 1, 0], [0, 0, 0, 0, 0, 0], [5, 1, 0, 0, 1, 3], [0, 0,\n 0, 0, 0, 0], [0, 0, 0, 5, 0, 1], [1, 1, 0, 11, 3, 2]])\n', (1847, 1974), True, 'import numpy as np\n'), ((2211, 2255), 'terrapin.flow_direction.convert_d8_directions', 'convert_d8_directions', (['d8', 'fmt'], {'inverse': '(True)'}), '(d8, fmt, inverse=True)\n', (2232, 2255), False, 'from terrapin.flow_direction import aread8, convert_d8_directions\n'), ((2268, 2278), 'terrapin.flow_direction.aread8', 'aread8', (['d8'], {}), '(d8)\n', (2274, 2278), False, 'from terrapin.flow_direction import aread8, convert_d8_directions\n'), ((2310, 2344), 'numpy.testing.assert_equal', 'assert_equal', (['area', 'a.accumulation'], {}), '(area, a.accumulation)\n', (2322, 2344), False, 'from numpy.testing import assert_equal\n')] |
import random
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
# --- Interactive set-up -------------------------------------------------
# Choose the agent count, the epsilon/proximity bounds and the initial
# opinion distribution; tutorial mode fixes sensible defaults instead.
print("Welcome to Polarization Model Simulator")
tutorialMode = input("Do you want to use in tutorial mode? (y/n): ")
while tutorialMode != 'y' and tutorialMode != 'n':
    tutorialMode = input("Please input y/n: ")
if tutorialMode == 'y':
    # Tutorial defaults: 100 agents, opinions spread uniformly on [0, 1].
    print("A uniform distribution will be used for agents' inital opinions")
    print("We will use 100 agents")
    SIZE_OF_AGENTS = 100
    agentOpinions = np.linspace(0,1,SIZE_OF_AGENTS)
    epsilon = 0.15
    proximity = 5
    NUMBER_OF_TRIALS = SIZE_OF_AGENTS
    selfConfidence = np.random.rand(SIZE_OF_AGENTS)
else:
    SIZE_OF_AGENTS = int(input("Enter size of agents and trial (same value): "))
    NUMBER_OF_TRIALS = SIZE_OF_AGENTS
    # NUMBER_OF_TRIALS = int(input("Enter number of trials: "))
    epsilon = float(input("Enter the epsilon value: "))
    proximity = int(input("Enter the proximity value: "))
    selfConfidence = np.random.rand(SIZE_OF_AGENTS)
    print("Enter your option for agent opinion distribution")
    print("1 = Normal distribution")
    print("2 = Uniform distribution")
    print("3 = Bimodal distribution")
    print("4 = Random distribution")
    distributionMode = int(input("Enter choice of distribution: "))
    if (distributionMode == 1) :
        mu = float(input("Enter mean value (btwn 0-1): "))
        # sigma = float(input("Enter s.d. value (btwn 0-1): "))
        sigma = 0.2
        agentOpinions = np.random.normal(mu, sigma, SIZE_OF_AGENTS)
        # Clamp the samples to the opinion interval [0, 1].
        for i in range(len(agentOpinions)):
            if agentOpinions[i] > 1:
                agentOpinions[i] = 1
            elif agentOpinions[i] < 0:
                agentOpinions[i] = 0
    elif (distributionMode == 2) :
        agentOpinions = np.linspace(0,1,SIZE_OF_AGENTS)
    # NOTE(review): modes 3 and 4 are separate `if` statements rather than
    # branches of the chain above; behaviour is the same here because the
    # chain has no else, but confirm this is intentional.
    if (distributionMode == 3) :
        # Bimodal: concatenate two normals, one per half of the agents.
        mu1 = float(input("Enter first mean value (btwn 0-1): "))
        mu2 = float(input("Enter second mean value (btwn 0-1): "))
        sig1 = 0.2
        sig2 = 0.2
        normOpinions1 = np.random.normal(mu1, sig1, ((SIZE_OF_AGENTS//2) +
                                                      (SIZE_OF_AGENTS%2)))
        normOpinions2 = np.random.normal(mu2, sig2, (SIZE_OF_AGENTS//2))
        agentOpinions = np.concatenate((normOpinions1, normOpinions2),
                                       axis=None)
        # Clamp the samples to the opinion interval [0, 1].
        for i in range(len(agentOpinions)):
            if agentOpinions[i] > 1:
                agentOpinions[i] = 1
            elif agentOpinions[i] < 0:
                agentOpinions[i] = 0
    if (distributionMode == 4) :
        agentOpinions = np.random.rand(SIZE_OF_AGENTS)
def generateSimplisticMatrix(agentOpinions):
    """Return one fully-connected confidence row per agent.

    Each row is an independent Dirichlet sample over all agents, so every
    weight is non-negative and each row sums to one.
    """
    size = len(agentOpinions)
    return [np.random.dirichlet(np.ones(size), 1)[0] for _ in range(size)]
def generateSusceptibilityMatrix(agentOpinions, selfConfidence):
    """Return confidence rows with per-agent self-weight.

    Agent i keeps weight selfConfidence[i] on its own opinion and spreads
    the remaining 1 - selfConfidence[i] over all other agents according
    to a Dirichlet sample, so each row still sums to one.
    """
    size = len(agentOpinions)
    matrix = []
    for i in range(size):
        spread = np.random.dirichlet(np.ones(size - 1), 1)[0].tolist()
        own = selfConfidence[i]
        row = [weight * (1 - own) for weight in spread]
        row.insert(i, own)
        matrix.append(row)
    return matrix
def generateBoundedMatrix(agentOpinions, epsilon):
confidenceMatrix = []
for i in range (len(agentOpinions)):
boundedAgents = np.zeros(len(agentOpinions)) #an array that will be 0 for xi-xj > epsilon
validAgents = 0
for j in range (len(agentOpinions)):
if (np.abs(agentOpinions[i] - agentOpinions[j]) <= epsilon):
boundedAgents[j] = 1
validAgents += 1
distribution = np.random.dirichlet(np.ones(validAgents), 1)
iterator = 0
for n in range (len(boundedAgents)):
if (iterator == validAgents):
break
elif (boundedAgents[n] == 1):
boundedAgents[n] = distribution[0][iterator]
iterator += 1
confidenceMatrix.append(boundedAgents)
return confidenceMatrix
# Neighbour restriction: bounded confidence limited to a ring of neighbours.
def generateBoundedProxMatrix(agentOpinions, epsilon, proximityLimit):
    """Bounded-confidence matrix restricted to nearby agents on a ring.

    Agent i only listens to agents at most ``proximityLimit`` positions away
    on either side (the agent list is treated as a ring, so the window wraps
    at both ends) AND whose opinion differs by at most ``epsilon``.  The
    weights over the admissible agents come from a flat Dirichlet draw, so
    every row sums to 1.

    Bug fix: the original end-of-list branch recentred the window on the
    wrong agent (e.g. with 10 agents and proximityLimit 2, agent 8's window
    covered agents 9,0,1,2,3 instead of 6,7,8,9,0), and its ``>=`` trigger
    also clamped windows that still fitted.  The window is now always
    centred on i; when it runs past the end of the list it is expressed
    with negative indices so NumPy's wrap-around indexing closes the ring.
    """
    numAgents = len(agentOpinions)
    windowSize = (proximityLimit * 2) + 1
    confidenceMatrix = []
    for i in range(numAgents):
        boundedAgents = np.zeros(numAgents)
        # Centre the window on agent i.  If it would run past the end of the
        # list, shift the whole range into negative-index territory so both
        # ends wrap around (ring topology).  A start below zero already
        # wraps naturally via negative indexing.
        startIndex = i - proximityLimit
        if i + proximityLimit + 1 > numAgents:
            startIndex -= numAgents
        endIndex = startIndex + windowSize
        validAgents = 0
        for x in range(startIndex, endIndex):
            if np.abs(agentOpinions[x] - agentOpinions[i]) <= epsilon:
                boundedAgents[x] = 1
                validAgents += 1
        # Replace the 1-flags with Dirichlet weights, in ascending index order.
        distribution = (np.random.dirichlet(np.ones(validAgents), 1))[0]
        iterator = 0
        for n in range(len(boundedAgents)):
            if iterator == validAgents:
                break
            elif boundedAgents[n] == 1:
                boundedAgents[n] = distribution[iterator]
                iterator += 1
        confidenceMatrix.append(boundedAgents)
    return confidenceMatrix
def modifyAgents(weightMatrix, agentOpinions):
    """Advance every agent's opinion one time step, in place.

    Each opinion is replaced by the weighted average given by that agent's
    row of ``weightMatrix``.  Updates happen sequentially, so agents later
    in the list already see the refreshed opinions of earlier agents.
    """
    for agentIndex in range(len(agentOpinions)):
        agentOpinions[agentIndex] = getNextOpinion(weightMatrix[agentIndex], agentOpinions)
def getNextOpinion(weight, agentOpinions):
    """Return the weighted average of all opinions under ``weight``."""
    return sum(weight[k] * agentOpinions[k] for k in range(len(agentOpinions)))
# agentOpinions = np.linspace(0,1,SIZE_OF_AGENTS)
# --- Simulation driver ---
# Records every agent's opinion at every trial: row = agent, column = time step.
# NOTE(review): NUMBER_OF_TRIALS, epsilon, proximity and tutorialMode are
# defined earlier in the file (not visible in this chunk).
aOpinions_time = np.empty((SIZE_OF_AGENTS,NUMBER_OF_TRIALS))
plt.clf()
print("1 = Simplistic Model")
print("2 = Social Susceptibility Model")
print("3 = Bounded Confidence Model")
print("4 = Bounded Confidence and Proximity Model")
modelChoice = int(input("Enter choice of model: "))
while (modelChoice != 1 and modelChoice != 2 and modelChoice != 3 and modelChoice != 4):
    modelChoice = int(input("Please input 1, 2, 3, or 4: "))
for n in range(NUMBER_OF_TRIALS) :
    # Snapshot current opinions into column n before updating them.
    for j in range(SIZE_OF_AGENTS) :
        aOpinions_time[j,n] = agentOpinions[j]
    # Rebuild the confidence matrix for the chosen model every time step.
    if modelChoice == 1:
        matrix = generateSimplisticMatrix(agentOpinions)
    if modelChoice == 2:
        matrix = generateSusceptibilityMatrix(agentOpinions, selfConfidence)
    if modelChoice == 3:
        matrix = generateBoundedMatrix(agentOpinions, epsilon)
    if modelChoice == 4:
        matrix = generateBoundedProxMatrix(agentOpinions, epsilon, proximity)
    modifyAgents(matrix, agentOpinions)
    # NOTE(review): this plots agent n's history truncated to the first n
    # steps, once per trial -- it looks like the intent was to plot every
    # agent's full trajectory after the loop; confirm before changing.
    plt.plot(np.arange(n), aOpinions_time[n][:n])
#print(aOpinions_time)
plt.xlabel("Time Step")
plt.ylabel("Opinion Value")
if tutorialMode == 'y':
    plt.xlim([0,50])
else:
    plt.xlim([0,10])
plt.ylim([0,1])
plt.show()
| [
"numpy.random.normal",
"numpy.abs",
"numpy.random.rand",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"numpy.linspace",
"numpy.empty",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplotlib.pypl... | [((6322, 6366), 'numpy.empty', 'np.empty', (['(SIZE_OF_AGENTS, NUMBER_OF_TRIALS)'], {}), '((SIZE_OF_AGENTS, NUMBER_OF_TRIALS))\n', (6330, 6366), True, 'import numpy as np\n'), ((6368, 6377), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6375, 6377), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step"""'], {}), "('Time Step')\n", (7394, 7407), True, 'import matplotlib.pyplot as plt\n'), ((7408, 7435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Opinion Value"""'], {}), "('Opinion Value')\n", (7418, 7435), True, 'import matplotlib.pyplot as plt\n'), ((7508, 7524), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (7516, 7524), True, 'import matplotlib.pyplot as plt\n'), ((7524, 7534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7532, 7534), True, 'import matplotlib.pyplot as plt\n'), ((516, 549), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'SIZE_OF_AGENTS'], {}), '(0, 1, SIZE_OF_AGENTS)\n', (527, 549), True, 'import numpy as np\n'), ((644, 674), 'numpy.random.rand', 'np.random.rand', (['SIZE_OF_AGENTS'], {}), '(SIZE_OF_AGENTS)\n', (658, 674), True, 'import numpy as np\n'), ((1005, 1035), 'numpy.random.rand', 'np.random.rand', (['SIZE_OF_AGENTS'], {}), '(SIZE_OF_AGENTS)\n', (1019, 1035), True, 'import numpy as np\n'), ((7464, 7481), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 50]'], {}), '([0, 50])\n', (7472, 7481), True, 'import matplotlib.pyplot as plt\n'), ((7491, 7508), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 10]'], {}), '([0, 10])\n', (7499, 7508), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1568), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'SIZE_OF_AGENTS'], {}), '(mu, sigma, SIZE_OF_AGENTS)\n', (1541, 1568), True, 'import numpy as np\n'), ((2127, 2196), 'numpy.random.normal', 'np.random.normal', (['mu1', 'sig1', '(SIZE_OF_AGENTS // 2 + SIZE_OF_AGENTS % 2)'], {}), '(mu1, sig1, SIZE_OF_AGENTS // 
2 + SIZE_OF_AGENTS % 2)\n', (2143, 2196), True, 'import numpy as np\n'), ((2265, 2313), 'numpy.random.normal', 'np.random.normal', (['mu2', 'sig2', '(SIZE_OF_AGENTS // 2)'], {}), '(mu2, sig2, SIZE_OF_AGENTS // 2)\n', (2281, 2313), True, 'import numpy as np\n'), ((2347, 2404), 'numpy.concatenate', 'np.concatenate', (['(normOpinions1, normOpinions2)'], {'axis': 'None'}), '((normOpinions1, normOpinions2), axis=None)\n', (2361, 2404), True, 'import numpy as np\n'), ((2710, 2740), 'numpy.random.rand', 'np.random.rand', (['SIZE_OF_AGENTS'], {}), '(SIZE_OF_AGENTS)\n', (2724, 2740), True, 'import numpy as np\n'), ((7322, 7334), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7331, 7334), True, 'import numpy as np\n'), ((1840, 1873), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'SIZE_OF_AGENTS'], {}), '(0, 1, SIZE_OF_AGENTS)\n', (1851, 1873), True, 'import numpy as np\n'), ((4159, 4179), 'numpy.ones', 'np.ones', (['validAgents'], {}), '(validAgents)\n', (4166, 4179), True, 'import numpy as np\n'), ((3980, 4023), 'numpy.abs', 'np.abs', (['(agentOpinions[i] - agentOpinions[j])'], {}), '(agentOpinions[i] - agentOpinions[j])\n', (3986, 4023), True, 'import numpy as np\n'), ((5242, 5285), 'numpy.abs', 'np.abs', (['(agentOpinions[x] - agentOpinions[i])'], {}), '(agentOpinions[x] - agentOpinions[i])\n', (5248, 5285), True, 'import numpy as np\n'), ((5430, 5450), 'numpy.ones', 'np.ones', (['validAgents'], {}), '(validAgents)\n', (5437, 5450), True, 'import numpy as np\n')] |
from typing import Optional, Union, Callable
import numpy as np
from caput import memh5
from cora.util.cosmology import Cosmology
from cora.util import units, cubicspline as cs
from draco.core import containers
from ..util.nputil import FloatArrayLike
class InterpolatedFunction(memh5.BasicCont):
    """Persistable container of interpolated 1D functions.

    Functions that are expensive to generate can be sampled once, stored in
    this container (and hence on disk), and later evaluated anywhere via
    interpolation.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `_finish_setup` also runs when an object is restored via
        # `from_file`; call it here too so directly constructed objects get
        # their cache.
        self._finish_setup()

    def _finish_setup(self):
        # Lazily populated mapping of function name -> interpolater object.
        self._function_cache = {}

    def get_function(self, name: str) -> Callable[[FloatArrayLike], FloatArrayLike]:
        """Get the named function.

        Parameters
        ----------
        name
            The name of the function to return.

        Returns
        -------
        function
            A callable evaluating the interpolated function.

        Raises
        ------
        ValueError
            If no function called `name` has been stored.
        RuntimeError
            If the stored dataset is not one dimensional, or its
            interpolation type is not recognised.
        """
        # Serve from the cache when possible.
        if name in self._function_cache:
            return self._function_cache[name]

        # Check the underlying data is actually present.
        if name not in self:
            raise ValueError(f"Function {name} unknown.")

        dset = self[name]
        if len(dset.attrs["axis"]) != 1:
            raise RuntimeError("Can only return a single value.")

        # Assemble (x, f) sample pairs from the axis index map and dataset.
        abscissa = self.index_map[dset.attrs["axis"][0]]
        samples = np.dstack([abscissa, dset[:]])[0]

        interpolation_type = dset.attrs["type"]
        if interpolation_type == "linear":
            interpolater = cs.Interpolater(samples)
        elif interpolation_type == "log":
            interpolater = cs.LogInterpolater(samples)
        elif interpolation_type == "sinh":
            interpolater = cs.SinhInterpolater(
                samples, dset.attrs["x_t"], dset.attrs["f_t"]
            )
        else:  # Unrecognized interpolation type
            raise RuntimeError(
                f"Unrecognized interpolation type {interpolation_type}"
            )

        self._function_cache[name] = interpolater
        return interpolater

    def add_function(
        self, name: str, x: np.ndarray, f: np.ndarray, type: str = "linear", **kwargs
    ):
        """Add a function to the container.

        Parameters
        ----------
        name
            The name of the function to add. This is used to retrieve it
            later using `get_function`.
        x
            The abscissa.
        f
            The ordinate.
        type
            The type of the interpolation: "linear", "log" or "sinh". For
            "sinh", kwargs accepts additional `x_t` and `f_t` parameters
            (see `sinh_interpolate`). Defaults to "linear".

        Raises
        ------
        ValueError
            If a function called `name` was already added.
        """
        if name in self:
            raise ValueError(f"Function {name} already exists.")

        axis_name = f"x_{name}"
        self.create_index_map(axis_name, x)

        dset = self.create_dataset(name, data=f)
        dset.attrs["axis"] = [axis_name]
        dset.attrs["type"] = type

        # Extra keyword arguments carry interpolation parameters (e.g. x_t, f_t).
        for attr_key, attr_val in kwargs.items():
            dset.attrs[attr_key] = attr_val
class CosmologyContainer(containers.ContainerBase):
    """Base class for containers tied to a background Cosmology.

    Parameters
    ----------
    cosmology
        An explicit cosmology instance or dict representation. If not set,
        the cosmology *must* come in via `attrs_from`.
    """

    def __init__(self, cosmology: Union[Cosmology, dict, None] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Store the dict representation in attrs so it round-trips to disk.
        self.attrs["cosmology"] = self._resolve_args(cosmology, **kwargs)

    @staticmethod
    def _resolve_args(
        cosmology: Union[Cosmology, dict, None] = None,
        attrs_from: Optional[containers.ContainerBase] = None,
        **kwargs,
    ):
        """Extract a Cosmology dict representation from the parameters.

        Useful as subclasses sometimes need access *before* the full class
        is set up.
        """
        if cosmology is None:
            # Fall back to the cosmology recorded on the source container.
            if attrs_from is None or "cosmology" not in attrs_from.attrs:
                raise ValueError("A cosmology must be supplied.")
            cosmology = attrs_from.attrs["cosmology"]
        elif not isinstance(cosmology, (Cosmology, dict)):
            raise TypeError("cosmology argument must be a Cosmology instance.")

        # Normalise to the dict representation.
        return cosmology.to_dict() if isinstance(cosmology, Cosmology) else cosmology

    _cosmology_instance = None

    @property
    def cosmology(self):
        """The background cosmology."""
        # Instantiate lazily from the stored dict and memoise.
        if self._cosmology_instance is None:
            self._cosmology_instance = Cosmology(**self.attrs["cosmology"])
        return self._cosmology_instance
class FZXContainer(CosmologyContainer):
    """Container with a comoving radial axis.

    This can be specified either directly with a grid in comoving distance, or in
    redshift, or 21 cm line frequency. One of these will be considered as defining
    the primary axis, and implicitly defines the others. `freq` is the highest
    priority, followed by `redshift` and finally the comoving distance `chi`.

    Parameters
    ----------
    freq
        The radial axis given as 21cm line frequencies.
    redshift
        The radial axis given as a redshift.
    chi
        The radial axis given as a comoving distance in Mpc/h.
    """
    # Chi is the only required axis, and must be used for any datasets
    _axes = ("chi",)
    def __init__(
        self,
        freq: Optional[np.ndarray] = None,
        redshift: Optional[np.ndarray] = None,
        *args,
        **kwargs,
    ):
        # Insert the Cosmological parameters
        # NOTE: resolved from kwargs (cosmology / attrs_from) *before* calling
        # super().__init__, because `comoving_distance` is needed below to
        # derive the chi axis that the superclass requires.
        cosmology = Cosmology(**CosmologyContainer._resolve_args(**kwargs))
        # If none of the high priority radial axes are set directly, see if any exist
        # in an axes_from object
        if freq is None and redshift is None and "axes_from" in kwargs:
            if "freq" in kwargs["axes_from"].index_map:
                freq = kwargs["axes_from"].index_map["freq"]
            elif "redshift" in kwargs["axes_from"].index_map:
                redshift = kwargs["axes_from"].index_map["redshift"]
        # Go through the high priority axes, and if present generate the lower priority
        # ones
        if freq is not None:
            # Convert 21cm line frequency to redshift (nu21 is the line's
            # rest frequency per its use here -- defined in cora.util.units).
            redshift = units.nu21 / freq - 1.0
        if redshift is not None:
            # Inject the derived chi axis into kwargs so the superclass
            # creates it as the required "chi" index map.
            kwargs["chi"] = cosmology.comoving_distance(redshift)
        super().__init__(*args, **kwargs)
        # Create the additional radial axes (if present) and determine the primary
        # radial axis.
        # NOTE: this must be done *after* the call to `super().__init__(...)` such that
        # the container internals are defined
        radial_axis = "chi"
        if redshift is not None:
            self.create_index_map("redshift", redshift)
            radial_axis = "redshift"
        if freq is not None:
            self.create_index_map("freq", freq)
            radial_axis = "freq"
        # Set the cosmology and radial axis attributes
        self.attrs["primary_radial_axis"] = radial_axis
    @property
    def chi(self):
        """The comoving distance to each radial slice in Mpc / h."""
        return self.index_map["chi"]
    @property
    def redshift(self):
        """The redshift for each radial slice.

        Raises
        ------
        RuntimeError
            If the container was constructed without a redshift (or freq) axis.
        """
        # TODO: derive this one
        if "redshift" not in self.index_map:
            raise RuntimeError("Container does not have a redshift axis.")
        return self.index_map["redshift"]
    @property
    def freq(self):
        """The 21cm line frequency for each radial slice.

        Raises
        ------
        RuntimeError
            If the container was constructed without a frequency axis.
        """
        # TODO: maybe derive this one
        if "freq" not in self.index_map:
            raise RuntimeError("Container does not have a 21cm frequency axis.")
        return self.index_map["freq"]
class MatterPowerSpectrum(CosmologyContainer, InterpolatedFunction):
    """Container holding a log-interpolated matter power spectrum.

    The stored samples can be evaluated at arbitrary wavenumbers (in h/Mpc)
    and rescaled to any redshift via the linear growth factor.

    Parameters
    ----------
    k
        Wavenumbers the power spectrum samples are at (in h / Mpc).
    ps
        Power spectrum samples.
    ps_redshift
        The redshift the samples are calculated at. Default is z=0.
    """

    def __init__(
        self,
        k: FloatArrayLike,
        ps: FloatArrayLike,
        *args,
        ps_redshift: float = 0.0,
        **kwargs,
    ):
        # Initialise the base classes (which sets the cosmology etc)
        super().__init__(*args, **kwargs)

        # Workaround for a `draco` bug: ContainerBase does not call its
        # superconstructor correctly, so trigger the deferred setup by hand.
        self._finish_setup()

        # Register the samples for log-interpolated lookup.
        self.add_function("powerspectrum", k, ps, type="log")
        self.attrs["ps_redshift"] = ps_redshift

    def powerspectrum(
        self, k: FloatArrayLike, z: FloatArrayLike = 0.0
    ) -> FloatArrayLike:
        """Calculate the power spectrum at given wavenumber and redshift.

        Parameters
        ----------
        k
            The wavenumber (in h / Mpc) to get the power spectrum at.
        z : optional
            The redshift to calculate the power spectrum at (default z=0).

        Returns
        -------
        ps
            The power spectrum.
        """
        cosmo = self.cosmology
        # The linear power spectrum scales with the growth factor squared.
        growth_ratio = cosmo.growth_factor(z) / cosmo.growth_factor(self._ps_redshift)
        return self.get_function("powerspectrum")(k) * growth_ratio ** 2

    def powerspectrum_at_z(
        self, z: FloatArrayLike
    ) -> Callable[[FloatArrayLike], FloatArrayLike]:
        """Return a function giving the power spectrum at a fixed redshift.

        Parameters
        ----------
        z
            The redshift to fix the power spectrum at.

        Returns
        -------
        psfunc
            A function which calculates the power spectrum at given wavenumbers.
        """

        def _fixed_z(k):
            return self.powerspectrum(k, z)

        return _fixed_z

    @property
    def _ps_redshift(self):
        # Redshift at which the stored samples were computed.
        return self.attrs["ps_redshift"]
class CorrelationFunction(CosmologyContainer, InterpolatedFunction):
    """A container to store correlation functions."""
    # TODO: at the moment this has no special functionality, but should eventually
    # provide specific access to the correlation functions as well as redshift scaling
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # This whole constructor shouldn't be necessary, but due to a bug in `draco`
        # where ContainerBase does not correctly call its superconstructor we need to do
        # this explicitly
        self._finish_setup()
class InitialLSS(FZXContainer, containers.HealpixContainer):
    """Container for holding initial LSS fields used for simulation.

    These fields are all implicitly the linear fields at redshift z=0.
    """
    # Both fields are [chi, pixel] Healpix maps, distributed over the radial
    # (chi) axis.
    _dataset_spec = {
        "delta": {
            "axes": ["chi", "pixel"],
            "dtype": np.float64,
            "initialise": True,
            "distributed": True,
            "distributed_axis": "chi",
        },
        "phi": {
            "axes": ["chi", "pixel"],
            "dtype": np.float64,
            "initialise": True,
            "distributed": True,
            "distributed_axis": "chi",
        },
    }
    @property
    def delta(self):
        """The linear density field at the initial redshift."""
        return self.datasets["delta"]
    @property
    def phi(self):
        r"""The potential field at the initial redshift.

        This is not the actual gravitational potential, but the Lagrangian potential
        defined by:

        .. math:: \nabla^2 \phi = - \delta

        This is related to the linear gravitational potential :math:`\phi_G` by:

        .. math:: \phi = \frac{3}{3 \mathcal{H}^2} \phi_G

        NOTE(review): the prefactor :math:`3/(3\mathcal{H}^2)` looks like a
        typo (possibly :math:`2/(3\mathcal{H}^2)`) -- confirm against the
        derivation before relying on this formula.
        """
        return self.datasets["phi"]
class BiasedLSS(FZXContainer, containers.HealpixContainer):
    """A biased large scale structure field.

    Parameters
    ----------
    lightcone : bool, optional
        Whether the field is evaluated on the lightcone. Defaults to True.
    fixed_redshift : float, optional
        If not on the lightcone, the fixed redshift of the field.
    *args, **kwargs
        Passed through to the superclasses.
    """

    # Single [chi, pixel] Healpix map, distributed over the radial axis.
    _dataset_spec = {
        "delta": {
            "axes": ["chi", "pixel"],
            "dtype": np.float64,
            "initialise": True,
            "distributed": True,
            "distributed_axis": "chi",
        }
    }

    def __init__(
        self,
        *args,
        lightcone: Optional[bool] = None,
        fixed_redshift: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        # An explicit `lightcone` argument always wins; otherwise keep any
        # value copied in through `attrs_from`, defaulting to True.
        if lightcone is None:
            if "lightcone" not in self.attrs:
                self.attrs["lightcone"] = True
        else:
            self.attrs["lightcone"] = lightcone

        if fixed_redshift is not None:
            self.attrs["fixed_redshift"] = fixed_redshift

    @property
    def lightcone(self) -> bool:
        """Is the field on the lightcone or at fixed redshift."""
        return bool(self.attrs["lightcone"])

    @property
    def fixed_redshift(self) -> Optional[float]:
        """The fixed redshift of the field, or None if not set."""
        if "fixed_redshift" not in self.attrs:
            return None
        return float(self.attrs["fixed_redshift"])

    @property
    def delta(self) -> np.ndarray:
        r"""The biased field.

        As standard this is interpreted as a density contrast, i.e. the field is

        .. math:: \delta = (\rho - \bar{\rho}) / \bar{\rho}

        Returns
        -------
        delta
            The biased field as a [redshift, pixel] array.
        """
        return self.datasets["delta"]
| [
"numpy.dstack",
"cora.util.cubicspline.LogInterpolater",
"cora.util.cubicspline.SinhInterpolater",
"cora.util.cosmology.Cosmology",
"cora.util.cubicspline.Interpolater"
] | [((5205, 5241), 'cora.util.cosmology.Cosmology', 'Cosmology', ([], {}), "(**self.attrs['cosmology'])\n", (5214, 5241), False, 'from cora.util.cosmology import Cosmology\n'), ((1720, 1737), 'numpy.dstack', 'np.dstack', (['[x, f]'], {}), '([x, f])\n', (1729, 1737), True, 'import numpy as np\n'), ((1834, 1855), 'cora.util.cubicspline.Interpolater', 'cs.Interpolater', (['data'], {}), '(data)\n', (1849, 1855), True, 'from cora.util import units, cubicspline as cs\n'), ((1947, 1971), 'cora.util.cubicspline.LogInterpolater', 'cs.LogInterpolater', (['data'], {}), '(data)\n', (1965, 1971), True, 'from cora.util import units, cubicspline as cs\n'), ((2146, 2181), 'cora.util.cubicspline.SinhInterpolater', 'cs.SinhInterpolater', (['data', 'x_t', 'f_t'], {}), '(data, x_t, f_t)\n', (2165, 2181), True, 'from cora.util import units, cubicspline as cs\n')] |
import os
import sys
import math
import xml.etree.ElementTree as ET
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import utils.quaternion as quat
def chamfer_dist(pc1, pc2):
    """Bidirectional (squared) chamfer distance between two point clouds.

    Args:
        pc1 - Tensor (B x N x D): First point cloud.
        pc2 - Tensor (B x M x D): Second point cloud.

    Returns:
        dist1 - Tensor (B x N): Squared distance from each pc1 point to its
            nearest neighbour in pc2.
        idx1 - Tensor (B x N): Index in pc2 of that nearest neighbour.
        dist2, idx2: The same quantities in the pc2 -> pc1 direction.
        pc_diff - Tensor (B x N x M x D): All pairwise difference vectors.
    """
    # Broadcasting builds the same N x M difference table the repeat-based
    # version would, without materialising the expanded copies first.
    diff = pc1.unsqueeze(2) - pc2.unsqueeze(1)
    sq_dist = diff.pow(2).sum(-1)
    dist1, idx1 = sq_dist.min(2)
    dist2, idx2 = sq_dist.min(1)
    return dist1, idx1, dist2, idx2, diff
def chamfer_dist_mask(pc1, pc2, mask, val=10.0):
    """Masked chamfer distance between two point clouds.

    The mask selects which point pairs may correspond: entries where
    ``mask`` is 0 have their (euclidean) distance overwritten with the large
    value ``val`` before the minima are taken, ruling them out.

    Args:
        pc1 - Tensor (B x N x D), pc2 - Tensor (B x M x D): Point clouds.
        mask - Tensor broadcastable to (B x N x M): 1 for allowed pairs.
        val - float: Distance substituted for disallowed pairs.

    Returns:
        Same five values as `chamfer_dist`, but with euclidean (not squared)
        distances.
    """
    diff = pc1.unsqueeze(2) - pc2.unsqueeze(1)
    dist = torch.sqrt(diff.pow(2).sum(-1))
    dist[mask == 0] = val
    dist1, idx1 = dist.min(2)
    dist2, idx2 = dist.min(1)
    return dist1, idx1, dist2, idx2, diff
def create_barycentric_transform(A):
    """Build the 3x3 transform used to compute barycentric coordinates.

    The columns of T are the tetrahedron edge vectors r1-r4, r2-r4, r3-r4,
    where the rows of ``A`` are the vertices.

    Args:
        A - Tensor (4 x 3) or batched Tensor (B x 4 x 3): Vertices.

    Returns:
        T - Tensor (3 x 3) or (B x 3 x 3).
    """
    if len(A.shape) == 2:
        # T[r, c] = A[c, r] - A[3, r]: transpose of the edge-vector rows.
        T = (A[:3] - A[3]).t().contiguous()
    if len(A.shape) == 3:
        # Same construction, batched over the leading dimension.
        T = (A[:, :3] - A[:, 3:4]).transpose(1, 2).contiguous()
    return T
def get_barycentric_coordinates(r, T, r4):
    """Barycentric coordinates of ``r`` given transform ``T`` and vertex ``r4``.

    Solves lambda = T^{-1} (r - r4); batched inputs are inverted with the
    batch helper `b_inv`.
    """
    T_inv = torch.inverse(T) if T.shape[0] == 1 else b_inv(T)
    return T_inv @ (r - r4)
def b_inv(b_mat):
    """PyTorch batch matrix inverse.

    Computes the inverse of every matrix in the batch by solving
    ``b_mat @ X = I``.  `torch.gesv` was deprecated and later removed from
    PyTorch, so prefer `torch.linalg.solve` when available and only fall
    back to `torch.gesv` on old installations.

    Args:
        b_mat - Tensor (B x N x N): Batch of square matrices.

    Returns:
        Tensor (B x N x N): Batch of inverses.

    https://stackoverflow.com/questions/46595157/how-to-apply-the-torch-inverse-function-of-pytorch-to-every-sample-in-the-batc
    """
    eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
    if hasattr(torch, "linalg") and hasattr(torch.linalg, "solve"):
        return torch.linalg.solve(b_mat, eye)
    inv, _ = torch.gesv(eye, b_mat)
    return inv
def load_skeleton(skeleton_path):
    """Loads an Ogre skeletal model from XML.

    Args:
        skeleton_path (string): Path to skeleton XML file.

    Returns:
        rotations (num_bones x 4): Per-bone rotation relative to the parent
            bone, as a quaternion.
        positions (num_bones x 3): Per-bone position relative to the parent.
        parent_map (list): Mapping from bone index to parent bone index
            (-1 for the root).
    """
    root = ET.parse(skeleton_path).getroot()

    # root[0] is the <bones> section; one entry per bone.
    bones = root[0]
    num_bones = len(bones)

    bone_names = []
    rotations = np.zeros((num_bones, 4))
    positions = np.zeros((num_bones, 3))
    for idx, bone in enumerate(bones):
        bone_names.append(bone.attrib['name'])
        position, rotation = bone[0], bone[1]
        axis = rotation[0]
        positions[idx] = np.array([float(position.attrib['x']),
                                  float(position.attrib['y']),
                                  float(position.attrib['z'])])
        # Axis-angle from the XML, converted to a quaternion.
        rotations[idx] = quat.axisangle_to_q([
            float(axis.attrib['x']),
            float(axis.attrib['y']),
            float(axis.attrib['z'])
        ], float(rotation.attrib['angle']))

    # root[1] is the <bonehierarchy> section.  The root bone has no parent;
    # every other entry names its parent bone.
    parent_map = [-1]
    for link in root[1]:
        parent_map.append(bone_names.index(link.attrib['parent']))

    return rotations, positions, parent_map
def load_mesh_data(mesh_path):
    """Loads mesh vertices, normals, bone assignments, and triangle IDs.

    Args:
        mesh_path - string: Path to the OGRE XML mesh data.

    Returns:
        mesh_vertices - array (N_v x 3): Mesh vertices, where N_v is the
            number of vertices.
        normals - array (N_v x 3): Per-vertex normals.
        bone_weights - array (N_b x N_v): Bone weights, where N_b is the bone
            count and N_v is the number of vertices.
        triangles - IntTensor (N_f x 3): Triangle IDs, where N_f is the number
            of triangle faces in the mesh.
    """
    tree = ET.parse(mesh_path)
    root = tree.getroot()
    # Store all bone assignments
    # NOTE(review): root[4] is assumed to be the <boneassignments> section of
    # the OGRE XML layout -- confirm against the exporter used.
    bone_assignment_dict = {}
    bone_weight_dict = {}
    num_bones = 0
    for child in root[4]:
        key = 'vertex_' + str(child.attrib['vertexindex'])
        bone_index = int(child.attrib['boneindex'])
        # Track the largest bone index seen; the +1 below turns it into a count.
        if bone_index > num_bones:
            num_bones = bone_index
        if key in bone_assignment_dict:
            bone_weight_dict[key] = np.append(bone_weight_dict[key], np.array([float(child.attrib['weight'])]))
            bone_assignment_dict[key] = np.append(bone_assignment_dict[key], np.array([bone_index]))
        else:
            bone_weight_dict[key] = np.array([float(child.attrib['weight'])])
            bone_assignment_dict[key] = np.array([bone_index])
    num_bones += 1 # because num_bones is only as large as the biggest index.
    # Store the vertices
    # root[0] holds the shared geometry; root[0][0] is its vertex buffer, in
    # which child[0] is the position element and child[1] the normal element.
    mesh_vertices = np.zeros((int(root[0].attrib['vertexcount']), 3))
    normals = np.zeros((int(root[0].attrib['vertexcount']), 3))
    i = 0
    for child in root[0][0]:
        mesh_vertices[i, 0] = child[0].attrib['x']
        mesh_vertices[i, 1] = child[0].attrib['y']
        mesh_vertices[i, 2] = child[0].attrib['z']
        normals[i, 0] = child[1].attrib['x']
        normals[i, 1] = child[1].attrib['y']
        normals[i, 2] = child[1].attrib['z']
        i += 1
    # Build the bone_weights matrix
    # TODO: Testing needed
    # NOTE(review): column i corresponds to the i-th entry of
    # bone_assignment_dict in insertion order.  This matches the vertex index
    # only if vertices appear in index order in the bone assignment section
    # (and relies on dicts preserving insertion order, Python 3.7+) -- verify.
    bone_weights = np.zeros((num_bones, len(mesh_vertices)))
    i = 0
    for key, value in bone_assignment_dict.items():
        bone_assignments = value
        bone_weight = bone_weight_dict[key]
        bone_weights[bone_assignments, i] = bone_weight
        i += 1
    # Collect triangle vertex indices from each submesh's <faces> section.
    # vertex_map reorders the XML attribute order into column positions
    # (presumably to fix the winding order) -- confirm against the renderer.
    triangles_idxs = None
    vertex_map = [1, 2, 0]
    i = 0
    for submesh in root[1]:
        for faces in submesh:
            num_faces = int(faces.attrib['count'])
            if triangles_idxs is None:
                triangles_idxs = np.zeros((num_faces, 3), dtype=int)
            else:
                triangles_idxs = np.append(triangles_idxs, np.zeros((num_faces, 3), dtype=int), axis=0)
            for face in faces:
                j = 0
                for _, value in face.attrib.items():
                    triangles_idxs[i, vertex_map[j]] = int(value)
                    j += 1
                i += 1
    triangles = torch.from_numpy(triangles_idxs.astype(np.int32))
    return mesh_vertices, normals, bone_weights, triangles
def crop_and_resize(image, centers, crop_size, scale, mode='nearest'):
    """Crops and resizes the image using `torch.nn.functional.interpolate`.

    Args:
        image - Tensor (B x C x H x W): The input image.
        centers - Tensor (B x 2): Centers of the bounding boxes corresponding
            to each image, given as (y, x) pixel coordinates.
        crop_size - int: The desired size in which to resize the result.
        scale - Tensor (B x 1): Scale factor for each image.
        mode - string: Currently unused; `F.interpolate` runs with its
            default mode.

    Returns:
        cropped_images - Tensor (B x C x crop_size x crop_size): The resulting
            cropped and resized images.

    TODO: Only works on single images for now.
    """
    s = image.shape
    assert len(s) == 4, "Image needs to be of shape (B x C x H x W)"
    crop_location = centers.to(torch.float32)
    # Size of the source window before resampling (grows as scale shrinks).
    crop_size_scaled = math.ceil(float(crop_size) / scale)
    # `boxes` holds the destination slice [y1, x1, y2, x2] inside the padded
    # buffer.  When the source window sticks out past the image's far edge,
    # the overhang is stored as a NEGATIVE end index; Python's negative
    # indexing then resolves it to `crop_size_scaled - overhang`, keeping the
    # source and destination region sizes consistent.  Subtle -- do not
    # "simplify" without re-deriving the index arithmetic.
    y1 = int(crop_location[:, 0] - crop_size_scaled // 2)
    y2 = int(y1 + crop_size_scaled)
    boxes = torch.tensor([0, 0, crop_size_scaled, crop_size_scaled], dtype=torch.int32)
    offset_y = 0
    if y1 < 0:
        offset_y = -y1
        boxes[0] = int(offset_y)
        y1 += offset_y
    if y2 > s[2]:
        offset_y = s[2] - y2
        boxes[2] = int(offset_y)
        y2 += offset_y
    x1 = int(crop_location[:, 1] - crop_size_scaled // 2)
    x2 = int(x1 + crop_size_scaled)
    offset_x = 0
    if x1 < 0:
        offset_x = -x1
        boxes[1] = int(offset_x)
        x1 += offset_x
    if x2 > s[3]:
        offset_x = s[3] - x2
        boxes[3] = int(offset_x)
        x2 += offset_x
    # Copy the in-bounds part of the window into a zero-padded buffer, then
    # resample the whole buffer to the requested output size.
    cropped_images = torch.zeros(s[0], s[1], crop_size_scaled, crop_size_scaled)
    cropped_images[:, :, boxes[0]:boxes[2], boxes[1]:boxes[3]] = image[:, :, y1:y2, x1:x2]
    cropped_images = F.interpolate(cropped_images, size=crop_size)
    return cropped_images
def calculate_padding(input_size, kernel_size, stride):
    """Return (start, end) padding following TensorFlow's SAME strategy.

    The total padding depends on how the input divides by the stride; when
    it is odd, the extra pixel goes on the end.
    """
    remainder = input_size % stride
    if remainder == 0:
        total = max(kernel_size - stride, 0)
    else:
        total = max(kernel_size - remainder, 0)
    # Split the total: integer halves, any odd pixel lands at the end.
    start = total // 2
    end = total - start
    return (start, end)
def plot_acc_curve(joint_errors):
    """Plot the fraction of samples under a moving error threshold.

    The empirical curve from ``joint_errors`` (in mm) is drawn next to the
    accuracy curve reported in the paper, whose anchor points were read off
    the published figure and smoothed with a cubic spline.
    """
    num_samples = joint_errors.shape[0]

    # Reported accuracy (measured from paper), spline-interpolated.
    x_rep = np.array([0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0])
    y_rep = np.array([0.0, 0.0, 0.04, 0.216, 0.43, 0.612, 0.75, 0.836, 0.866])
    x_vals = np.linspace(0, 75.0, num=1000)
    y_int = interp1d(x_rep, y_rep, kind='cubic')(x_vals)

    # Empirical curve: fraction of samples with error below each threshold.
    thresholds = np.linspace(0.0, 75.0, num=1000)
    fractions = np.zeros(len(thresholds))
    for idx, limit in enumerate(thresholds):
        fractions[idx] = float((joint_errors < limit).sum()) / num_samples

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(thresholds, fractions, c='r', label='This model')
    ax.plot(x_vals, y_int, c='b', label='Result from paper')
    ax.set_xlabel('Maximum allowed distance to GT (mm)')
    ax.set_ylabel('Fraction of frames within distance')
    ax.grid(True)
    ax.legend()
    plt.show()
| [
"xml.etree.ElementTree.parse",
"torch.gesv",
"torch.sqrt",
"scipy.interpolate.interp1d",
"torch.tensor",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"torch.nn.functional.interpolate",
"torch.zeros",
"torch.inverse",
"matplotlib.pyplot.show"
] | [((1288, 1307), 'torch.sqrt', 'torch.sqrt', (['pc_dist'], {}), '(pc_dist)\n', (1298, 1307), False, 'import torch\n'), ((2998, 3020), 'torch.gesv', 'torch.gesv', (['eye', 'b_mat'], {}), '(eye, b_mat)\n', (3008, 3020), False, 'import torch\n'), ((3578, 3601), 'xml.etree.ElementTree.parse', 'ET.parse', (['skeleton_path'], {}), '(skeleton_path)\n', (3586, 3601), True, 'import xml.etree.ElementTree as ET\n'), ((3734, 3758), 'numpy.zeros', 'np.zeros', (['(num_bones, 4)'], {}), '((num_bones, 4))\n', (3742, 3758), True, 'import numpy as np\n'), ((3775, 3799), 'numpy.zeros', 'np.zeros', (['(num_bones, 3)'], {}), '((num_bones, 3))\n', (3783, 3799), True, 'import numpy as np\n'), ((5285, 5304), 'xml.etree.ElementTree.parse', 'ET.parse', (['mesh_path'], {}), '(mesh_path)\n', (5293, 5304), True, 'import xml.etree.ElementTree as ET\n'), ((8671, 8746), 'torch.tensor', 'torch.tensor', (['[0, 0, crop_size_scaled, crop_size_scaled]'], {'dtype': 'torch.int32'}), '([0, 0, crop_size_scaled, crop_size_scaled], dtype=torch.int32)\n', (8683, 8746), False, 'import torch\n'), ((9293, 9352), 'torch.zeros', 'torch.zeros', (['s[0]', 's[1]', 'crop_size_scaled', 'crop_size_scaled'], {}), '(s[0], s[1], crop_size_scaled, crop_size_scaled)\n', (9304, 9352), False, 'import torch\n'), ((9465, 9510), 'torch.nn.functional.interpolate', 'F.interpolate', (['cropped_images'], {'size': 'crop_size'}), '(cropped_images, size=crop_size)\n', (9478, 9510), True, 'import torch.nn.functional as F\n'), ((10279, 10340), 'numpy.array', 'np.array', (['[0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0]'], {}), '([0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0])\n', (10287, 10340), True, 'import numpy as np\n'), ((10353, 10419), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.04, 0.216, 0.43, 0.612, 0.75, 0.836, 0.866]'], {}), '([0.0, 0.0, 0.04, 0.216, 0.43, 0.612, 0.75, 0.836, 0.866])\n', (10361, 10419), True, 'import numpy as np\n'), ((10433, 10463), 'numpy.linspace', 'np.linspace', (['(0)', '(75.0)'], {'num': 
'(1000)'}), '(0, 75.0, num=1000)\n', (10444, 10463), True, 'import numpy as np\n'), ((10518, 10554), 'scipy.interpolate.interp1d', 'interp1d', (['x_rep', 'y_rep'], {'kind': '"""cubic"""'}), "(x_rep, y_rep, kind='cubic')\n", (10526, 10554), False, 'from scipy.interpolate import interp1d\n'), ((10585, 10617), 'numpy.linspace', 'np.linspace', (['(0.0)', '(75.0)'], {'num': '(1000)'}), '(0.0, 75.0, num=1000)\n', (10596, 10617), True, 'import numpy as np\n'), ((10747, 10759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10757, 10759), True, 'import matplotlib.pyplot as plt\n'), ((11047, 11057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11055, 11057), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1862), 'torch.tensor', 'torch.tensor', (['[[A[0, 0] - A[3, 0], A[1, 0] - A[3, 0], A[2, 0] - A[3, 0]], [A[0, 1] - A[3,\n 1], A[1, 1] - A[3, 1], A[2, 1] - A[3, 1]], [A[0, 2] - A[3, 2], A[1, 2] -\n A[3, 2], A[2, 2] - A[3, 2]]]'], {'dtype': 'A.dtype', 'device': 'A.device'}), '([[A[0, 0] - A[3, 0], A[1, 0] - A[3, 0], A[2, 0] - A[3, 0]], [A\n [0, 1] - A[3, 1], A[1, 1] - A[3, 1], A[2, 1] - A[3, 1]], [A[0, 2] - A[3,\n 2], A[1, 2] - A[3, 2], A[2, 2] - A[3, 2]]], dtype=A.dtype, device=A.device)\n', (1642, 1862), False, 'import torch\n'), ((1968, 2029), 'torch.zeros', 'torch.zeros', (['A.shape[0]', '(3)', '(3)'], {'dtype': 'A.dtype', 'device': 'A.device'}), '(A.shape[0], 3, 3, dtype=A.dtype, device=A.device)\n', (1979, 2029), False, 'import torch\n'), ((2623, 2639), 'torch.inverse', 'torch.inverse', (['T'], {}), '(T)\n', (2636, 2639), False, 'import torch\n'), ((6032, 6054), 'numpy.array', 'np.array', (['[bone_index]'], {}), '([bone_index])\n', (6040, 6054), True, 'import numpy as np\n'), ((5876, 5898), 'numpy.array', 'np.array', (['[bone_index]'], {}), '([bone_index])\n', (5884, 5898), True, 'import numpy as np\n'), ((7217, 7252), 'numpy.zeros', 'np.zeros', (['(num_faces, 3)'], {'dtype': 'int'}), '((num_faces, 3), dtype=int)\n', (7225, 7252), True, 
'import numpy as np\n'), ((7330, 7365), 'numpy.zeros', 'np.zeros', (['(num_faces, 3)'], {'dtype': 'int'}), '((num_faces, 3), dtype=int)\n', (7338, 7365), True, 'import numpy as np\n')] |
__author__ = 'Prateek'
import numpy as np
from preprocessing import label_encoder
def convert_to_1D(array):
    '''
    Converts a numpy array (or array-like) into an array of 1 dimension.
    :param array: input numpy array
    :return: 1D array
    '''
    return np.asarray(array).reshape(-1)
def calAccuracy(true, pred):
    '''
    :param true: vector containing all the true classes
    :param pred: vector containing all the predicted classes
    :return: accuracy of classification (fraction of matching entries)

    Works for numeric as well as string class labels: the previous
    implementation counted mismatches via ``np.subtract``, which raises a
    TypeError for non-numeric label dtypes. Elementwise equality gives the
    same count for numeric labels and also supports strings.
    '''
    true = np.ravel(true)
    pred = np.ravel(pred)
    assert (true.shape == pred.shape), "true and pred dimensions do not match."
    # Count matches directly; identical to the old subtract/count_nonzero
    # formula for numeric data, but dtype-agnostic.
    return np.count_nonzero(true == pred) / true.shape[0]
def confusionMatrix(true, pred):
    '''
    Build the confusion matrix of a classification run.
    Rows indicate the true class, columns the predicted class.
    :param true: numpy array containing all the true classes.
    :param pred: numpy array containing all the predicted classes.
    :return : confusion matrix as a (numclass, numclass) numpy array
    '''
    true = convert_to_1D(true)
    pred = convert_to_1D(pred)
    assert (true.shape == pred.shape), "true and pred dimensions do not match."
    numclass = len(np.unique(true))
    # Encode the class labels as the integers 0 .. numclass-1.
    encoder = label_encoder()
    encoder.fit(true)
    encoded_true = encoder.transform(true)
    encoded_pred = encoder.transform(pred)
    # Accumulate counts: row = actual class, column = predicted class.
    cm = np.zeros((numclass, numclass))
    for actual, predicted in zip(encoded_true, encoded_pred):
        cm[actual][predicted] += 1
    return cm
def F_Score(ytest, pred):
    '''
    Compute the binary F1 score from the confusion matrix.
    :param ytest: test labels
    :param pred: predicted labels
    :return: F-score
    '''
    cm = confusionMatrix(ytest, pred)
    if cm.shape[0] != 2:
        raise ValueError('can not handle multi-class problem as of now')
    tp = cm[1][1]
    fp = cm[0][1]
    fn = cm[1][0]
    # F1 = 2 * TP / (2 * TP + FP + FN)
    return (2 * tp) / ((2 * tp + fp + fn) * 1.0)
def printSummary(confusionMatrix, numData):
    '''
    Print a classification summary derived from a confusion matrix.
    :param confusionMatrix: square matrix, rows = actual, columns = predicted
    :param numData: total number of classified instances
    '''
    # The diagonal holds the correctly classified counts.
    correctlyClassified = 0
    for i in range(len(confusionMatrix)):
        correctlyClassified += confusionMatrix[i][i]
    incorrectlyClassified = numData - correctlyClassified
    accuracy = correctlyClassified / numData
    print("\n=== Summary ===")
    print("Correctly Classified Instances = ", correctlyClassified)
    print("Incorrectly Classified Instances = ", incorrectlyClassified)
    print("Total Number of Instances = ", numData)
    print("Accuracy =", round((accuracy * 100), 2), "%")
def printconfusionMatrix(confusionMatrix):
    '''
    Pretty-print a confusion matrix, one row per line.
    :param confusionMatrix: square matrix, rows = actual, columns = predicted
    '''
    print("\n=== Confusion Matrix ===")
    # Fixed user-facing typo: "Predicetd" -> "Predicted".
    print("Columns indicate the Predicted values")
    print("Rows indicate the actual values")
    for rows in confusionMatrix:
        print(rows)
| [
"preprocessing.label_encoder",
"numpy.unique",
"numpy.subtract",
"numpy.zeros",
"numpy.ravel"
] | [((253, 268), 'numpy.ravel', 'np.ravel', (['array'], {}), '(array)\n', (261, 268), True, 'import numpy as np\n'), ((1182, 1197), 'preprocessing.label_encoder', 'label_encoder', ([], {}), '()\n', (1195, 1197), False, 'from preprocessing import label_encoder\n'), ((1076, 1091), 'numpy.unique', 'np.unique', (['true'], {}), '(true)\n', (1085, 1091), True, 'import numpy as np\n'), ((1432, 1450), 'numpy.zeros', 'np.zeros', (['numclass'], {}), '(numclass)\n', (1440, 1450), True, 'import numpy as np\n'), ((660, 683), 'numpy.subtract', 'np.subtract', (['true', 'pred'], {}), '(true, pred)\n', (671, 683), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import os
import matplotlib.pylab as plt
plt.close('all')
def save_obj(obj, name):
    """Pickle *obj* to the file ``name + '.pkl'`` (highest protocol)."""
    with open(name + '.pkl', 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return the object pickled at ``name + '.pkl'``."""
    with open(name + '.pkl', 'rb') as fh:
        return pickle.load(fh)
# Post-process the tensile-test simulation: compute the strand axial strain
# from the Dirichlet displacement and compare the axial load against the
# Costello analytical slope.
d = load_obj('./Tensile/infoSimu')
fileExt = r".txt"
fileDir = './Tensile/'
epsilonTarget = 0.01
# All text result files written by the simulation.
L = [_ for _ in os.listdir(fileDir) if _.endswith(fileExt)]
posDirichlet = np.loadtxt(fileDir+'CentralBeamDisplacementEnd_x.txt')
Freac = np.loadtxt(fileDir + "ReactionForces.txt")
posDirichlet = posDirichlet[:,1:4] # the first column stores the time
assert posDirichlet[0].shape == (3,)
t = np.loadtxt(fileDir+L[0])[:,0]
RFz = Freac[1:,2]
uDirichlet = posDirichlet - posDirichlet[0]
# length of the strand
h = d['lengthStrand']
# compute the strand axial strain
epsilon = uDirichlet[:,2] / h
# BUG FIX: the original condition fired when the final strain WAS within 1%
# of the target; warn only when the target has NOT been reached.
if not (0.99 < epsilon[-1] / epsilonTarget < 1.01):
    print("The target strain has not been reached")
# Renamed from `filter` to avoid shadowing the builtin.
mask = t <= 1
plt.figure()
plt.plot(t[mask], epsilon[mask])
ax = plt.gca()
ax.set_xlabel('time')
ax.set_ylabel(r'$\epsilon$ strand $\frac{l}{L}$')
plt.savefig(fileDir+ 'timeVSepsilon')
plt.figure()
plt.plot(epsilon[mask], RFz[mask], label='Sofa model with BFE')
# Curve from Costello theory. Seen in the paper of Jiang, 1999:
# slope taken from 2 points read off the curve of Fig. 5.
slopeCostello = (150E3)/0.011
plt.plot(epsilon[mask], slopeCostello * epsilon[mask], label='Costello', linestyle='--')
ax = plt.gca()
ax.set_xlabel(r'$\epsilon$ strand $\frac{l}{L}$')
ax.set_ylabel(r'$F_{z}$')
ax.legend()
plt.savefig(fileDir+ 'epsilonVSAxialLoad')
plt.pause(0.1)
"matplotlib.pylab.gca",
"matplotlib.pylab.savefig",
"pickle.dump",
"os.listdir",
"matplotlib.pylab.figure",
"matplotlib.pylab.pause",
"pickle.load",
"numpy.zeros",
"matplotlib.pylab.plot",
"numpy.loadtxt",
"matplotlib.pylab.close"
] | [((74, 90), 'matplotlib.pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (83, 90), True, 'import matplotlib.pylab as plt\n'), ((514, 534), 'numpy.zeros', 'np.zeros', (['(nstep, 3)'], {}), '((nstep, 3))\n', (522, 534), True, 'import numpy as np\n'), ((706, 762), 'numpy.loadtxt', 'np.loadtxt', (["(fileDir + 'CentralBeamDisplacementEnd_x.txt')"], {}), "(fileDir + 'CentralBeamDisplacementEnd_x.txt')\n", (716, 762), True, 'import numpy as np\n'), ((769, 811), 'numpy.loadtxt', 'np.loadtxt', (["(fileDir + 'ReactionForces.txt')"], {}), "(fileDir + 'ReactionForces.txt')\n", (779, 811), True, 'import numpy as np\n'), ((1241, 1253), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (1251, 1253), True, 'import matplotlib.pylab as plt\n'), ((1254, 1290), 'matplotlib.pylab.plot', 'plt.plot', (['t[filter]', 'epsilon[filter]'], {}), '(t[filter], epsilon[filter])\n', (1262, 1290), True, 'import matplotlib.pylab as plt\n'), ((1296, 1305), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (1303, 1305), True, 'import matplotlib.pylab as plt\n'), ((1378, 1416), 'matplotlib.pylab.savefig', 'plt.savefig', (["(fileDir + 'timeVSepsilon')"], {}), "(fileDir + 'timeVSepsilon')\n", (1389, 1416), True, 'import matplotlib.pylab as plt\n'), ((1417, 1429), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (1427, 1429), True, 'import matplotlib.pylab as plt\n'), ((1430, 1497), 'matplotlib.pylab.plot', 'plt.plot', (['epsilon[filter]', 'RFz[filter]'], {'label': '"""Sofa model with BFE"""'}), "(epsilon[filter], RFz[filter], label='Sofa model with BFE')\n", (1438, 1497), True, 'import matplotlib.pylab as plt\n'), ((1640, 1736), 'matplotlib.pylab.plot', 'plt.plot', (['epsilon[filter]', '(slopeCostello * epsilon[filter])'], {'label': '"""Costello"""', 'linestyle': '"""--"""'}), "(epsilon[filter], slopeCostello * epsilon[filter], label='Costello',\n linestyle='--')\n", (1648, 1736), True, 'import matplotlib.pylab as plt\n'), ((1738, 1747), 'matplotlib.pylab.gca', 
'plt.gca', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pylab as plt\n'), ((1836, 1879), 'matplotlib.pylab.savefig', 'plt.savefig', (["(fileDir + 'epsilonVSAxialLoad')"], {}), "(fileDir + 'epsilonVSAxialLoad')\n", (1847, 1879), True, 'import matplotlib.pylab as plt\n'), ((1880, 1894), 'matplotlib.pylab.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1889, 1894), True, 'import matplotlib.pylab as plt\n'), ((925, 951), 'numpy.loadtxt', 'np.loadtxt', (['(fileDir + L[0])'], {}), '(fileDir + L[0])\n', (935, 951), True, 'import numpy as np\n'), ((167, 211), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (178, 211), False, 'import pickle\n'), ((290, 304), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (301, 304), False, 'import pickle\n'), ((420, 439), 'os.listdir', 'os.listdir', (['fileDir'], {}), '(fileDir)\n', (430, 439), False, 'import os\n'), ((473, 499), 'numpy.loadtxt', 'np.loadtxt', (['(fileDir + L[0])'], {}), '(fileDir + L[0])\n', (483, 499), True, 'import numpy as np\n')] |
import importlib
import time
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
from ..gw.source import PARAMETER_SETS
class ProposalCycle(object):
    """Weighted, cyclic schedule over a list of proposals.

    A long list of proposals is pre-drawn according to the normalized
    weights; :meth:`get_proposal` then walks that list cyclically.
    """

    def __init__(self, proposal_list):
        self.proposal_list = proposal_list
        self.weights = [prop.weight for prop in self.proposal_list]
        total = sum(self.weights)
        self.normalized_weights = [w / total for w in self.weights]
        # Pre-draw enough proposals that even the rarest appears many times.
        ndraws = 10 * int(1 / min(self.normalized_weights))
        self.weighted_proposal_list = []
        for _ in range(ndraws):
            self.weighted_proposal_list.append(
                np.random.choice(self.proposal_list, p=self.normalized_weights)
            )
        self.nproposals = len(self.weighted_proposal_list)
        self._position = 0

    @property
    def position(self):
        return self._position

    @position.setter
    def position(self, position):
        # Wrap so the cycle repeats indefinitely.
        self._position = np.mod(position, self.nproposals)

    def get_proposal(self):
        """Return the next proposal in the cycle and advance the pointer."""
        prop = self.weighted_proposal_list[self._position]
        self.position += 1
        return prop

    def __str__(self):
        lines = ["ProposalCycle:"]
        for prop in self.proposal_list:
            lines.append(f"  {prop}")
        return "\n".join(lines) + "\n"
class BaseProposal(object):
    """Abstract base class for bilby-MCMC proposals.

    Subclasses implement :meth:`propose`; this base class provides the
    acceptance bookkeeping, prior-boundary handling (periodic/reflective
    wrapping) and pretty-printing shared by all proposals.
    """

    # Class-level defaults for the acceptance counters.
    _accepted = 0
    _rejected = 0
    __metaclass__ = ABCMeta

    def __init__(self, priors, weight=1, subset=None):
        """
        Parameters
        ----------
        priors: bilby.core.prior.PriorDict
            The set of priors; only the non-fixed keys are proposed.
        weight: float
            Relative weight of this proposal within a ProposalCycle.
        subset: list, optional
            If given, restrict the proposal to these parameter keys.
        """
        self._str_attrs = ["acceptance_ratio", "n"]
        self.parameters = priors.non_fixed_keys
        self.weight = weight
        self.subset = subset
        # Restrict to a subset
        if self.subset is not None:
            self.parameters = [p for p in self.parameters if p in subset]
            self._str_attrs.append("parameters")
        self.ndim = len(self.parameters)
        self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
        # NOTE(review): np.max over `minimum` and np.min over `maximum` look
        # swapped at first glance; presumably this collapses array-valued
        # bounds to the tightest scalar interval -- confirm against PriorDict.
        self.prior_minimum_dict = {key: np.max(priors[key].minimum) for key in priors}
        self.prior_maximum_dict = {key: np.min(priors[key].maximum) for key in priors}
        self.prior_width_dict = {key: np.max(priors[key].width) for key in priors}

    @property
    def accepted(self):
        # Number of accepted proposals so far.
        return self._accepted

    @accepted.setter
    def accepted(self, accepted):
        self._accepted = accepted

    @property
    def rejected(self):
        # Number of rejected proposals so far.
        return self._rejected

    @rejected.setter
    def rejected(self, rejected):
        self._rejected = rejected

    @property
    def acceptance_ratio(self):
        """Fraction of proposals accepted; NaN before any proposal is made."""
        if self.n == 0:
            return np.nan
        else:
            return self.accepted / self.n

    @property
    def n(self):
        """Total number of proposals made (accepted + rejected)."""
        return self.accepted + self.rejected

    def __str__(self):
        msg = [f"{type(self).__name__}("]
        for attr in self._str_attrs:
            val = getattr(self, attr, "N/A")
            if isinstance(val, (float, int)):
                val = f"{val:1.2g}"
            msg.append(f"{attr}:{val},")
        return "".join(msg) + ")"

    def apply_boundaries(self, point):
        """Wrap or reflect the proposed point back into the prior support."""
        for key in self.parameters:
            boundary = self.prior_boundary_dict[key]
            if boundary is None:
                continue
            elif boundary == "periodic":
                point[key] = self.apply_periodic_boundary(key, point[key])
            elif boundary == "reflective":
                point[key] = self.apply_reflective_boundary(key, point[key])
            else:
                raise SamplerError(f"Boundary {boundary} not implemented")
        return point

    def apply_periodic_boundary(self, key, val):
        """Map val into [minimum, minimum + width) by periodic wrapping."""
        minimum = self.prior_minimum_dict[key]
        width = self.prior_width_dict[key]
        return minimum + np.mod(val - minimum, width)

    def apply_reflective_boundary(self, key, val):
        """Map val into the prior range by reflection at the edges."""
        minimum = self.prior_minimum_dict[key]
        width = self.prior_width_dict[key]
        # Work in normalised coordinates so `reflect` acts on [0, 1].
        val_normalised = (val - minimum) / width
        val_normalised_reflected = reflect(np.array(val_normalised))
        return minimum + width * val_normalised_reflected

    def __call__(self, chain):
        # Draw a proposal, then enforce the prior boundaries.
        sample, log_factor = self.propose(chain)
        sample = self.apply_boundaries(sample)
        return sample, log_factor

    @abstractmethod
    def propose(self, chain):
        """Propose a new point
        This method must be overwritten by implemented proposals. The propose
        method is called by __call__, then boundaries applied, before returning
        the proposed point.
        Parameters
        ----------
        chain: bilby.core.sampler.bilby_mcmc.chain.Chain
            The chain to use for the proposal
        Returns
        -------
        proposal: bilby.core.sampler.bilby_mcmc.Sample
            The proposed point
        log_factor: float
            The natural-log of the additional factor entering the acceptance
            probability to ensure detailed balance. For symmetric proposals,
            a value of 0 should be returned.
        """
        pass

    @staticmethod
    def check_dependencies(warn=True):
        """Check the dependencies required to use the proposal
        Parameters
        ----------
        warn: bool
            If true, print a warning
        Returns
        -------
        check: bool
            If true, dependencies exist
        """
        return True
class FixedGaussianProposal(BaseProposal):
    """Uncorrelated Gaussian proposal with a fixed, per-parameter width.

    Parameters
    ----------
    priors: bilby.core.prior.PriorDict
        The set of priors
    weight: float
        Weighting factor
    subset: list
        Keys to restrict the proposal to (other parameters stay fixed)
    sigma: float or dict
        Step size as a fraction of the prior width (scalar applies to all
        parameters; a dict gives a per-key value)
    """

    def __init__(self, priors, weight=1, subset=None, sigma=0.01):
        super(FixedGaussianProposal, self).__init__(priors, weight, subset)
        self.sigmas = {}
        for key in self.parameters:
            # Unbounded priors get a unit reference width.
            if np.isinf(self.prior_width_dict[key]):
                self.prior_width_dict[key] = 1
            if isinstance(sigma, float):
                self.sigmas[key] = sigma
            elif isinstance(sigma, dict):
                self.sigmas[key] = sigma[key]
            else:
                raise SamplerError("FixedGaussianProposal sigma not understood")

    def propose(self, chain):
        sample = chain.current_sample
        for key in self.parameters:
            # Step size is a fixed fraction of the prior width.
            step = self.prior_width_dict[key] * self.sigmas[key]
            sample[key] += step * np.random.randn()
        # Symmetric proposal: no detailed-balance correction needed.
        return sample, 0
class AdaptiveGaussianProposal(BaseProposal):
    """Gaussian proposal whose global scale adapts towards a target acceptance.

    The per-parameter step is ``sigma * prior_width * scale`` where ``scale``
    is tuned on the fly (see :meth:`update_scale`) until ``stop`` proposals
    have been made.
    """

    def __init__(
        self,
        priors,
        weight=1,
        subset=None,
        sigma=1,
        scale_init=1e0,
        stop=1e5,
        target_facc=0.234,
    ):
        super(AdaptiveGaussianProposal, self).__init__(priors, weight, subset)
        self.sigmas = {}
        for key in self.parameters:
            # Unbounded priors get a unit reference width.
            if np.isinf(self.prior_width_dict[key]):
                self.prior_width_dict[key] = 1
            if isinstance(sigma, (float, int)):
                self.sigmas[key] = sigma
            elif isinstance(sigma, dict):
                self.sigmas[key] = sigma[key]
            else:
                raise SamplerError("AdaptiveGaussianProposal sigma not understood")
        self.target_facc = target_facc
        self.scale = scale_init
        self.stop = stop
        self._str_attrs.append("scale")
        self._last_accepted = 0

    def propose(self, chain):
        sample = chain.current_sample
        self.update_scale(chain)
        # Occasionally take a much larger jump to aid mode exploration.
        # NOTE(review): the elif evaluates a *fresh* random draw, so the 1e-4
        # branch is tested against a different number than the 1e-3 branch --
        # confirm this double draw is intended.
        if np.random.random() < 1e-3:
            factor = 1e1
        elif np.random.random() < 1e-4:
            factor = 1e2
        else:
            factor = 1
        for key in self.parameters:
            sigma = factor * self.scale * self.prior_width_dict[key] * self.sigmas[key]
            sample[key] += sigma * np.random.randn()
        # Symmetric proposal: no detailed-balance correction needed.
        log_factor = 0
        return sample, log_factor

    def update_scale(self, chain):
        """
        The adaptation of the scale follows (35)/(36) of https://arxiv.org/abs/1409.7215
        """
        if 0 < self.n < self.stop:
            # Adaptation strength decays as the chain grows.
            s_gamma = (self.stop / self.n) ** 0.2 - 1
            # Grow the scale after an acceptance, shrink it otherwise.
            if self.accepted > self._last_accepted:
                self.scale += s_gamma * (1 - self.target_facc) / 100
            else:
                self.scale -= s_gamma * self.target_facc / 100
            self._last_accepted = self.accepted
            # Keep the scale strictly positive.
            self.scale = max(self.scale, 1 / self.stop)
class DifferentialEvolutionProposal(BaseProposal):
    """Differential-evolution proposal.

    Jumps along the difference vector between two randomly drawn chain
    samples, either by the full difference or by a randomly scaled fraction.

    Parameters
    ----------
    priors: bilby.core.prior.PriorDict
        The set of priors
    weight: float
        Weighting factor
    subset: list
        Keys to restrict the proposal to (other parameters stay fixed)
    mode_hopping_frac: float
        The fraction of proposals which use 'mode hopping'
    """

    def __init__(self, priors, weight=1, subset=None, mode_hopping_frac=0.5):
        super(DifferentialEvolutionProposal, self).__init__(priors, weight, subset)
        self.mode_hopping_frac = mode_hopping_frac

    def propose(self, chain):
        current = chain.current_sample
        first = chain.random_sample
        second = chain.random_sample
        if np.random.rand() > self.mode_hopping_frac:
            # Use the full difference vector.
            gamma = 1
        else:
            # Base jump size
            gamma = np.random.normal(0, 2.38 / np.sqrt(2 * self.ndim))
            # Scale uniformly in log between 0.1 and 10 times
            gamma *= np.exp(np.log(0.1) + np.log(100.0) * np.random.rand())
        for key in self.parameters:
            current[key] += gamma * (second[key] - first[key])
        # Symmetric proposal: no detailed-balance correction needed.
        return current, 0
class UniformProposal(BaseProposal):
    """Propose each parameter uniformly over its prior support.

    Parameters
    ----------
    priors: bilby.core.prior.PriorDict
        The set of priors
    weight: float
        Weighting factor
    subset: list
        Keys to restrict the proposal to (other parameters stay fixed)
    """

    def __init__(self, priors, weight=1, subset=None):
        super(UniformProposal, self).__init__(priors, weight, subset)

    def propose(self, chain):
        sample = chain.current_sample
        for key in self.parameters:
            low = self.prior_minimum_dict[key]
            high = self.prior_maximum_dict[key]
            sample[key] = np.random.uniform(low, high)
        # Independent of the current point: symmetric, so log_factor is 0.
        return sample, 0
class PriorProposal(BaseProposal):
    """Propose by drawing fresh samples from the prior distribution.

    Note: for priors which use interpolation, this proposal can be
    problematic as the proposal gets pickled in multiprocessing. Either use
    serial processing (npool=1) or fall back to a UniformProposal.

    Parameters
    ----------
    priors: bilby.core.prior.PriorDict
        The set of priors
    weight: float
        Weighting factor
    subset: list
        Keys to restrict the proposal to (other parameters stay fixed)
    """

    def __init__(self, priors, weight=1, subset=None):
        super(PriorProposal, self).__init__(priors, weight, subset)
        # Keep a reduced PriorDict over just the proposed parameters.
        self.priors = PriorDict({key: priors[key] for key in self.parameters})

    def propose(self, chain):
        sample = chain.current_sample
        # Prior log-probability of the current point.
        ln_p_old = self.priors.ln_prob(sample.as_dict(self.parameters))
        draw = self.priors.sample()
        for key in self.parameters:
            sample[key] = draw[key]
        ln_p_new = self.priors.ln_prob(sample.as_dict(self.parameters))
        # Q(theta|theta') / Q(theta'|theta) for detailed balance.
        return sample, ln_p_old - ln_p_new
_density_estimate_doc = """ A proposal using draws from a {estimator} fit to the chain
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
first_fit: int
The number of steps to take before first fitting the KDE
fit_multiplier: int
The multiplier for the next fit
nsamples_for_density: int
The number of samples to use when fitting the KDE
fallback: bilby.core.sampler.bilby_mcmc.proposal.BaseProposal
A proposal to use before first training
scale_fits: int
A scaling factor for both the initial and subsequent updates
"""
class DensityEstimateProposal(BaseProposal):
    def __init__(
        self,
        priors,
        weight=1,
        subset=None,
        first_fit=1000,
        fit_multiplier=10,
        nsamples_for_density=1000,
        fallback=AdaptiveGaussianProposal,
        scale_fits=1,
    ):
        super(DensityEstimateProposal, self).__init__(priors, weight, subset)
        self.nsamples_for_density = nsamples_for_density
        # Used until the first density fit has been trained and verified.
        self.fallback = fallback(priors, weight, subset)
        self.fit_multiplier = fit_multiplier * scale_fits
        # Counters
        self.steps_since_refit = 0
        self.next_refit_time = first_fit * scale_fits
        self.density = None
        self.trained = False
        self._str_attrs.append("trained")

    # Subclasses set this to a human-readable estimator name.
    density_name = None
    __doc__ = _density_estimate_doc.format(estimator=density_name)

    def _fit(self, dataset):
        # Fit the density estimator; `dataset` has shape (ndim, nsamples).
        raise NotImplementedError

    def _evaluate(self, point):
        # Return the log-density of a single point.
        raise NotImplementedError

    def _sample(self, nsamples=None):
        # Draw nsamples points from the fitted density.
        raise NotImplementedError

    def refit(self, chain):
        """Re-fit the density to the chain, keeping the old fit on failure."""
        current_density = self.density
        start = time.time()
        # Draw two (possibly overlapping) data sets for training and verification
        dataset = []
        verification_dataset = []
        nsamples_for_density = min(chain.position, self.nsamples_for_density)
        for _ in range(nsamples_for_density):
            s = chain.random_sample
            dataset.append([s[key] for key in self.parameters])
            s = chain.random_sample
            verification_dataset.append([s[key] for key in self.parameters])
        # Fit the density
        self.density = self._fit(np.array(dataset).T)
        # Print a log message
        took = time.time() - start
        logger.info(
            f"{self.density_name} construction at {self.steps_since_refit} finished"
            f" for length {chain.position} chain, took {took:0.2f}s."
            f" Current accept-ratio={self.acceptance_ratio:0.2f}"
        )
        # Reset counters for next training
        self.steps_since_refit = 0
        self.next_refit_time *= self.fit_multiplier
        # Verify training hasn't overconstrained
        new_draws = np.atleast_2d(self._sample(1000))
        verification_dataset = np.array(verification_dataset)
        fail_parameters = []
        for ii, key in enumerate(self.parameters):
            std_draws = np.std(new_draws[:, ii])
            std_verification = np.std(verification_dataset[:, ii])
            # A collapsed (overconstrained) dimension fails verification.
            if std_draws < 0.1 * std_verification:
                fail_parameters.append(key)
        if len(fail_parameters) > 0:
            logger.info(
                f"{self.density_name} construction failed verification and is discarded"
            )
            self.density = current_density
        else:
            self.trained = True

    def propose(self, chain):
        """Propose from the fitted density (or the fallback before training)."""
        self.steps_since_refit += 1
        # Check if we refit
        testA = self.steps_since_refit >= self.next_refit_time
        if testA:
            self.refit(chain)
        # If KDE is yet to be fitted, use the fallback
        if self.trained is False:
            return self.fallback.propose(chain)
        # Grab the current sample and it's probability under the KDE
        theta = chain.current_sample
        ln_p_theta = self._evaluate(list(theta.as_dict(self.parameters).values()))
        # Sample and update theta
        new_sample = self._sample(1)
        for key, val in zip(self.parameters, new_sample):
            theta[key] = val
        # Calculate the probability of the new sample and the KDE
        ln_p_thetaprime = self._evaluate(list(theta.as_dict(self.parameters).values()))
        # Calculate Q(theta|theta') / Q(theta'|theta)
        log_factor = ln_p_theta - ln_p_thetaprime
        return theta, log_factor
class KDEProposal(DensityEstimateProposal):
    density_name = "Gaussian KDE"
    __doc__ = _density_estimate_doc.format(estimator=density_name)

    def _fit(self, dataset):
        # scipy's gaussian_kde expects data with shape (ndim, nsamples).
        return gaussian_kde(dataset)

    def _evaluate(self, point):
        # logpdf returns an array; a single point yields a length-1 array.
        return self.density.logpdf(point)[0]

    def _sample(self, nsamples=None):
        draws = self.density.resample(nsamples)
        return np.atleast_1d(np.squeeze(draws))
class GMMProposal(DensityEstimateProposal):
    density_name = "Gaussian Mixture Model"
    __doc__ = _density_estimate_doc.format(estimator=density_name)

    def _fit(self, dataset):
        """Fit a 10-component GMM; `dataset` arrives as (ndim, nsamples)."""
        from sklearn.mixture import GaussianMixture
        density = GaussianMixture(n_components=10)
        # sklearn expects samples as rows, so transpose back.
        density.fit(dataset.T)
        return density

    def _evaluate(self, point):
        """Return the log-probability of a single point under the GMM."""
        return np.squeeze(self.density.score_samples(np.atleast_2d(point)))

    def _sample(self, nsamples=None):
        """Draw ``nsamples`` points from the fitted GMM."""
        return np.squeeze(self.density.sample(n_samples=nsamples)[0])

    @staticmethod
    def check_dependencies(warn=True):
        """Return True if scikit-learn is importable.

        Declared as a @staticmethod for consistency with
        ``BaseProposal.check_dependencies``; without the decorator an
        instance call would have passed ``self`` as ``warn``. Class-level
        calls (``GMMProposal.check_dependencies(...)``) behave identically.
        """
        if importlib.util.find_spec("sklearn") is None:
            if warn:
                logger.warning(
                    "Unable to utilise GMMProposal as sklearn is not installed"
                )
            return False
        else:
            return True
class NormalizingFlowProposal(DensityEstimateProposal):
    density_name = "Normalizing Flow"
    __doc__ = _density_estimate_doc.format(estimator=density_name) + (
        """
    js_factor: float
        The factor to use in determining the max-JS factor to terminate
        training.
    max_training_epochs: int
        The maximum bumber of traning steps to take
    """
    )

    def __init__(
        self,
        priors,
        weight=1,
        subset=None,
        first_fit=1000,
        fit_multiplier=10,
        max_training_epochs=1000,
        scale_fits=1,
        nsamples_for_density=1000,
        js_factor=10,
        fallback=AdaptiveGaussianProposal,
    ):
        super(NormalizingFlowProposal, self).__init__(
            priors=priors,
            weight=weight,
            subset=subset,
            first_fit=first_fit,
            fit_multiplier=fit_multiplier,
            nsamples_for_density=nsamples_for_density,
            fallback=fallback,
            scale_fits=scale_fits,
        )
        self.setup_flow()
        self.setup_optimizer()
        self.max_training_epochs = max_training_epochs
        self.js_factor = js_factor

    def setup_flow(self):
        # Very low-dimensional problems use the simpler flow architecture.
        if self.ndim < 3:
            self.setup_basic_flow()
        else:
            self.setup_NVP_flow()

    def setup_NVP_flow(self):
        from .flows import NVPFlow

        self.flow = NVPFlow(
            features=self.ndim,
            hidden_features=self.ndim * 2,
            num_layers=2,
            num_blocks_per_layer=2,
            batch_norm_between_layers=True,
            batch_norm_within_layers=True,
        )

    def setup_basic_flow(self):
        from .flows import BasicFlow

        self.flow = BasicFlow(features=self.ndim)

    def setup_optimizer(self):
        from torch import optim

        self.optimizer = optim.Adam(self.flow.parameters())

    def get_training_data(self, chain):
        """Draw a set of training points (one row per sample) from the chain."""
        training_data = []
        nsamples_for_density = min(chain.position, self.nsamples_for_density)
        for _ in range(nsamples_for_density):
            s = chain.random_sample
            training_data.append([s[key] for key in self.parameters])
        return training_data

    def _calculate_js(self, validation_samples, training_samples_draw):
        """Return the largest squared JS divergence over the dimensions."""
        # Calculate the maximum JS between the validation and draw
        max_js = 0
        for i in range(self.ndim):
            A = validation_samples[:, i]
            B = training_samples_draw[:, i]
            xmin = np.min([np.min(A), np.min(B)])
            # NOTE(review): xmax is the *smaller* of the two maxima (support
            # overlap rather than union) -- confirm this is intended.
            xmax = np.min([np.max(A), np.max(B)])
            xval = np.linspace(xmin, xmax, 100)
            Apdf = gaussian_kde(A)(xval)
            Bpdf = gaussian_kde(B)(xval)
            js = jensenshannon(Apdf, Bpdf)
            max_js = max(max_js, js)
        return np.power(max_js, 2)

    def train(self, chain):
        """Train the flow on chain samples until the JS criterion is met."""
        logger.info("Starting NF training")
        import torch

        start = time.time()
        training_samples = np.array(self.get_training_data(chain))
        validation_samples = np.array(self.get_training_data(chain))
        training_tensor = torch.tensor(training_samples, dtype=torch.float32)
        max_js_threshold = self.js_factor / self.nsamples_for_density
        for epoch in range(1, self.max_training_epochs + 1):
            self.optimizer.zero_grad()
            loss = -self.flow.log_prob(inputs=training_tensor).mean()
            loss.backward()
            self.optimizer.step()
            # Draw from the current flow
            self.flow.eval()
            training_samples_draw = (
                self.flow.sample(self.nsamples_for_density).detach().numpy()
            )
            self.flow.train()
            # Test convergence only every 10 epochs (the JS check is costly).
            if np.mod(epoch, 10) == 0:
                max_js_bits = self._calculate_js(
                    validation_samples, training_samples_draw
                )
                if max_js_bits < max_js_threshold:
                    logger.info(
                        f"Training complete after {epoch} steps, "
                        f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
                    )
                    break
        took = time.time() - start
        logger.info(
            f"Flow training step ({self.steps_since_refit}) finished"
            f" for length {chain.position} chain, took {took:0.2f}s."
            f" Current accept-ratio={self.acceptance_ratio:0.2f}"
        )
        self.steps_since_refit = 0
        self.next_refit_time *= self.fit_multiplier
        self.trained = True

    def propose(self, chain):
        import torch

        self.steps_since_refit += 1
        theta = chain.current_sample
        # Check if we retrain the NF
        testA = self.steps_since_refit >= self.next_refit_time
        if testA:
            self.train(chain)
        if self.trained is False:
            return self.fallback.propose(chain)
        self.flow.eval()
        theta_prime_T = self.flow.sample(1)
        logp_theta_prime = self.flow.log_prob(theta_prime_T).detach().numpy()[0]
        theta_T = torch.tensor(
            np.atleast_2d([theta[key] for key in self.parameters]), dtype=torch.float32
        )
        logp_theta = self.flow.log_prob(theta_T).detach().numpy()[0]
        # Q(theta|theta') / Q(theta'|theta) for detailed balance.
        log_factor = logp_theta - logp_theta_prime
        flow_sample_values = np.atleast_1d(np.squeeze(theta_prime_T.detach().numpy()))
        for key, val in zip(self.parameters, flow_sample_values):
            theta[key] = val
        return theta, float(log_factor)

    # NOTE(review): unlike BaseProposal.check_dependencies this is not a
    # @staticmethod, so it must be called on the class, not an instance.
    def check_dependencies(warn=True):
        if importlib.util.find_spec("nflows") is None:
            if warn:
                logger.warning(
                    "Unable to utilise NormalizingFlowProposal as nflows is not installed"
                )
            return False
        else:
            return True
class FixedJumpProposal(BaseProposal):
    """Jump each parameter by a fixed amount with random sign, plus fuzz.

    Parameters
    ----------
    priors: bilby.core.prior.PriorDict
        The set of priors
    jumps: float or dict
        Jump magnitude (scalar applies to all parameters)
    subset: list
        Keys to restrict the proposal to
    weight: float
        Weighting factor
    scale: float
        Width of the Gaussian fuzz relative to the prior width
    """

    def __init__(self, priors, jumps=1, subset=None, weight=1, scale=1e-4):
        super(FixedJumpProposal, self).__init__(priors, weight, subset)
        self.scale = scale
        if isinstance(jumps, (int, float)):
            # A scalar applies the same jump magnitude to every parameter.
            self.jumps = {key: jumps for key in self.parameters}
        elif isinstance(jumps, dict):
            self.jumps = jumps
        else:
            raise SamplerError("jumps not understood")

    def propose(self, chain):
        sample = chain.current_sample
        for key, jump in self.jumps.items():
            # Random sign (+1 or -1), fixed magnitude, plus Gaussian fuzz.
            sign = np.random.randint(2) * 2 - 1
            sample[key] += sign * jump + self.epsilon * self.prior_width_dict[key]
        # Symmetric proposal: no detailed-balance correction needed.
        return sample, 0

    @property
    def epsilon(self):
        # Fresh Gaussian fuzz for each access.
        return self.scale * np.random.normal()
class BaseGravitationalWaveTransientProposal(BaseProposal):
    """Base class for proposals needing phase/polarisation bookkeeping.

    Handles the two phase parameterisations present in the priors
    ("phase" or "delta_phase") and the conversion between them.
    """

    def __init__(self, priors, weight=1):
        super(BaseGravitationalWaveTransientProposal, self).__init__(
            priors, weight=weight
        )
        # Record which phase parameterisation the priors use (if any).
        if "phase" in priors:
            self.phase_key = "phase"
        elif "delta_phase" in priors:
            self.phase_key = "delta_phase"
        else:
            self.phase_key = None

    def get_cos_theta_jn(self, sample):
        """Return cos(theta_jn) from either inclination parameterisation."""
        if "cos_theta_jn" in sample.parameter_keys:
            cos_theta_jn = sample["cos_theta_jn"]
        elif "theta_jn" in sample.parameter_keys:
            cos_theta_jn = np.cos(sample["theta_jn"])
        else:
            raise SamplerError()
        return cos_theta_jn

    def get_phase(self, sample):
        """Return the phase, converting from delta_phase if necessary."""
        if "phase" in sample.parameter_keys:
            return sample["phase"]
        elif "delta_phase" in sample.parameter_keys:
            # Invert delta_phase = phase + sign(cos_theta_jn) * psi
            cos_theta_jn = self.get_cos_theta_jn(sample)
            delta_phase = sample["delta_phase"]
            psi = sample["psi"]
            phase = np.mod(delta_phase - np.sign(cos_theta_jn) * psi, 2 * np.pi)
        else:
            raise SamplerError()
        return phase

    def get_delta_phase(self, phase, sample):
        """Return delta_phase = phase + sign(cos_theta_jn) * psi."""
        cos_theta_jn = self.get_cos_theta_jn(sample)
        psi = sample["psi"]
        delta_phase = phase + np.sign(cos_theta_jn) * psi
        return delta_phase
class CorrelatedPolarisationPhaseJump(BaseGravitationalWaveTransientProposal):
    """Jointly jump psi and phase along their correlated combinations.

    The combinations alpha = psi + phase and beta = psi - phase are each
    redrawn (one per proposal, chosen at random) before transforming back.
    """

    def __init__(self, priors, weight=1):
        super(CorrelatedPolarisationPhaseJump, self).__init__(priors, weight=weight)

    def propose(self, chain):
        sample = chain.current_sample
        phase = self.get_phase(sample)
        alpha = sample["psi"] + phase
        beta = sample["psi"] - phase
        # Redraw exactly one of the two combinations.
        # NOTE(review): the 3*pi range (and the -2*pi offset on beta) is
        # presumably chosen to cover the combined psi/phase support --
        # confirm against the reference implementation.
        draw = np.random.random()
        if draw < 0.5:
            alpha = 3.0 * np.pi * np.random.random()
        else:
            beta = 3.0 * np.pi * np.random.random() - 2 * np.pi
        # Update
        sample["psi"] = (alpha + beta) * 0.5
        phase = (alpha - beta) * 0.5
        if self.phase_key == "delta_phase":
            sample["delta_phase"] = self.get_delta_phase(phase, sample)
        else:
            sample["phase"] = phase
        log_factor = 0
        return sample, log_factor
class PhaseReversalProposal(BaseGravitationalWaveTransientProposal):
    """Propose phase -> phase + pi (mod 2*pi), optionally with Gaussian fuzz."""

    def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-1):
        super(PhaseReversalProposal, self).__init__(priors, weight)
        self.fuzz = fuzz
        self.fuzz_sigma = fuzz_sigma
        if self.phase_key is None:
            raise SamplerError(
                f"{type(self).__name__} initialised without a phase prior"
            )

    def propose(self, chain):
        sample = chain.current_sample
        shifted = sample[self.phase_key] + np.pi + self.epsilon
        sample[self.phase_key] = np.mod(shifted, 2 * np.pi)
        # Symmetric proposal: no detailed-balance correction needed.
        return sample, 0

    @property
    def epsilon(self):
        # Small Gaussian perturbation on top of the pi shift.
        if self.fuzz:
            return np.random.normal(0, self.fuzz_sigma)
        return 0
class PolarisationReversalProposal(PhaseReversalProposal):
    """Propose psi -> psi + pi/2 (mod pi), optionally with Gaussian fuzz."""

    def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-3):
        super(PolarisationReversalProposal, self).__init__(
            priors, weight, fuzz, fuzz_sigma
        )
        self.fuzz = fuzz

    def propose(self, chain):
        sample = chain.current_sample
        shifted = sample["psi"] + np.pi / 2 + self.epsilon
        sample["psi"] = np.mod(shifted, np.pi)
        # Symmetric proposal: no detailed-balance correction needed.
        return sample, 0
class PhasePolarisationReversalProposal(PhaseReversalProposal):
    """Simultaneously reverse phase (by pi) and psi (by pi/2), with fuzz."""

    def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-1):
        super(PhasePolarisationReversalProposal, self).__init__(
            priors, weight, fuzz, fuzz_sigma
        )
        self.fuzz = fuzz

    def propose(self, chain):
        sample = chain.current_sample
        # Note: self.epsilon draws a fresh fuzz value for each parameter.
        new_phase = sample[self.phase_key] + np.pi + self.epsilon
        sample[self.phase_key] = np.mod(new_phase, 2 * np.pi)
        new_psi = sample["psi"] + np.pi / 2 + self.epsilon
        sample["psi"] = np.mod(new_psi, np.pi)
        # Symmetric proposal: no detailed-balance correction needed.
        return sample, 0
class StretchProposal(BaseProposal):
    """The Goodman & Weare (2010) Stretch proposal for an MCMC chain

    Implementation of the Stretch proposal using a sample drawn from the chain.
    We assume the form of g(z) from Equation (9) of [1].

    References
    ----------
    [1] Goodman & Weare (2010)
        https://ui.adsabs.harvard.edu/abs/2010CAMCS...5...65G/abstract
    """

    def __init__(self, priors, weight=1, subset=None, scale=2):
        super(StretchProposal, self).__init__(priors, weight, subset)
        self.scale = scale

    def propose(self, chain):
        current = chain.current_sample
        # Stretch relative to a randomly chosen sample from the same chain.
        partner = chain.random_sample
        return _stretch_move(current, partner, self.scale, self.ndim, self.parameters)
def _stretch_move(sample, complement, scale, ndim, parameters):
# Draw z
u = np.random.rand()
z = (u * (scale - 1) + 1) ** 2 / scale
log_factor = (ndim - 1) * np.log(z)
for key in parameters:
sample[key] = complement[key] + (sample[key] - complement[key]) * z
return sample, log_factor
class EnsembleProposal(BaseProposal):
    """Base class for ensemble-based swap proposals."""

    def __init__(self, priors, weight=1):
        super(EnsembleProposal, self).__init__(priors, weight)

    def __call__(self, chain, chain_complement):
        """Propose using the complement ensemble, then fold the result
        back inside the prior boundaries."""
        proposed, log_factor = self.propose(chain, chain_complement)
        return self.apply_boundaries(proposed), log_factor
class EnsembleStretch(EnsembleProposal):
    """The Goodman & Weare (2010) stretch proposal for an ensemble.

    The stretch move uses a sample drawn from the complement ensemble and
    assumes the form of g(z) from Equation (9) of [1].

    References
    ----------
    [1] Goodman & Weare (2010)
        https://ui.adsabs.harvard.edu/abs/2010CAMCS...5...65G/abstract
    """

    def __init__(self, priors, weight=1, scale=2):
        super(EnsembleStretch, self).__init__(priors, weight)
        self.scale = scale

    def propose(self, chain, chain_complement):
        """Stretch towards a randomly chosen walker from the complement."""
        idx = np.random.randint(len(chain_complement))
        partner = chain_complement[idx].current_sample
        return _stretch_move(
            chain.current_sample, partner, self.scale, self.ndim, self.parameters
        )
def get_default_ensemble_proposal_cycle(priors):
    """Return a ProposalCycle containing only the ensemble stretch move."""
    stretch = EnsembleStretch(priors)
    return ProposalCycle([stretch])
def get_proposal_cycle(string, priors, L1steps=1, warn=True):
    """Build the ProposalCycle used by the sampler.

    Parameters
    ----------
    string: str
        Configuration string. If it contains "gwA", a GW-specific cycle is
        assembled from parameter subsets; "no<CODE>" markers (e.g. "noDE")
        remove the corresponding proposal classes at the end.
    priors: PriorDict-like
        Priors; attributes such as ``intrinsic``/``extrinsic``/``mass``/...
        select which subset proposals are added.
    L1steps: int
        Passed to learning proposals as ``scale_fits`` in the default branch.
    warn: bool
        Forwarded to ``check_dependencies`` of the learning proposals.

    Returns
    -------
    ProposalCycle
    """
    # Relative weights used to assemble the cycle below
    big_weight = 10
    small_weight = 5
    tiny_weight = 0.1
    if "gwA" in string:
        # Parameters for learning proposals
        learning_kwargs = dict(
            first_fit=1000, nsamples_for_density=10000, fit_multiplier=2
        )
        plist = [
            AdaptiveGaussianProposal(priors, weight=small_weight),
            DifferentialEvolutionProposal(priors, weight=small_weight),
        ]
        # The gwA cycle relies on the GMM-based learning proposal
        if GMMProposal.check_dependencies(warn=warn) is False:
            raise SamplerError(
                "the gwA proposal_cycle required the GMMProposal dependencies"
            )
        # One group of subset proposals per available parameter set
        if priors.intrinsic:
            intrinsic = PARAMETER_SETS["intrinsic"]
            plist += [
                AdaptiveGaussianProposal(priors, weight=big_weight, subset=intrinsic),
                DifferentialEvolutionProposal(
                    priors, weight=big_weight, subset=intrinsic
                ),
                KDEProposal(
                    priors, weight=big_weight, subset=intrinsic, **learning_kwargs
                ),
                GMMProposal(
                    priors, weight=big_weight, subset=intrinsic, **learning_kwargs
                ),
            ]
        if priors.extrinsic:
            extrinsic = PARAMETER_SETS["extrinsic"]
            plist += [
                AdaptiveGaussianProposal(priors, weight=small_weight, subset=extrinsic),
                DifferentialEvolutionProposal(
                    priors, weight=big_weight, subset=extrinsic
                ),
                KDEProposal(
                    priors, weight=big_weight, subset=extrinsic, **learning_kwargs
                ),
                GMMProposal(
                    priors, weight=big_weight, subset=extrinsic, **learning_kwargs
                ),
            ]
        if priors.mass:
            mass = PARAMETER_SETS["mass"]
            plist += [
                DifferentialEvolutionProposal(priors, weight=small_weight, subset=mass),
                GMMProposal(
                    priors, weight=small_weight, subset=mass, **learning_kwargs
                ),
            ]
        if priors.spin:
            spin = PARAMETER_SETS["spin"]
            plist += [
                DifferentialEvolutionProposal(priors, weight=small_weight, subset=spin),
                GMMProposal(
                    priors, weight=small_weight, subset=spin, **learning_kwargs
                ),
            ]
        if priors.precession:
            # Spin magnitudes/in-plane components that are measurable when
            # the system precesses
            measured_spin = ["chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"]
            plist += [
                AdaptiveGaussianProposal(
                    priors, weight=small_weight, subset=measured_spin
                ),
            ]
        if priors.mass and priors.spin:
            primary_spin_and_q = PARAMETER_SETS["primary_spin_and_q"]
            plist += [
                DifferentialEvolutionProposal(
                    priors, weight=small_weight, subset=primary_spin_and_q
                ),
            ]
        if getattr(priors, "tidal", False):
            tidal = PARAMETER_SETS["tidal"]
            plist += [
                DifferentialEvolutionProposal(
                    priors, weight=small_weight, subset=tidal
                ),
                PriorProposal(priors, weight=small_weight, subset=tidal),
            ]
        # Discrete symmetry jumps for phase/polarisation
        if priors.phase:
            plist += [
                PhaseReversalProposal(priors, weight=tiny_weight),
            ]
        if priors.phase and "psi" in priors.non_fixed_keys:
            plist += [
                CorrelatedPolarisationPhaseJump(priors, weight=tiny_weight),
                PhasePolarisationReversalProposal(priors, weight=tiny_weight),
            ]
        # Low-weight prior draws for weakly-constrained single parameters
        for key in ["time_jitter", "psi", "phi_12", "tilt_2", "lambda_1", "lambda_2"]:
            if key in priors.non_fixed_keys:
                plist.append(PriorProposal(priors, subset=[key], weight=tiny_weight))
        if "chi_1_in_plane" in priors and "chi_2_in_plane" in priors:
            in_plane = ["chi_1_in_plane", "chi_2_in_plane", "phi_12"]
            plist.append(UniformProposal(priors, subset=in_plane, weight=tiny_weight))
        # Calibration parameters are proposed from the prior as one block
        if any("recalib_" in key for key in priors):
            calibration = [key for key in priors if "recalib_" in key]
            plist.append(PriorProposal(priors, subset=calibration, weight=small_weight))
    else:
        # Generic (non-GW) default cycle; learning proposals are added only
        # when their optional dependencies are available
        plist = [
            AdaptiveGaussianProposal(priors, weight=big_weight),
            DifferentialEvolutionProposal(priors, weight=big_weight),
            UniformProposal(priors, weight=tiny_weight),
            KDEProposal(priors, weight=big_weight, scale_fits=L1steps),
        ]
        if GMMProposal.check_dependencies(warn=warn):
            plist.append(GMMProposal(priors, weight=big_weight, scale_fits=L1steps))
        if NormalizingFlowProposal.check_dependencies(warn=warn):
            plist.append(
                NormalizingFlowProposal(priors, weight=big_weight, scale_fits=L1steps)
            )
    plist = remove_proposals_using_string(plist, string)
    return ProposalCycle(plist)
def remove_proposals_using_string(plist, string):
    """Drop proposal instances disabled via "no<CODE>" markers in ``string``.

    For example, "gwAnoDEnoGM" removes all DifferentialEvolutionProposal
    and GMMProposal instances from ``plist``. Unknown codes are ignored.
    """
    mapping = dict(
        DE=DifferentialEvolutionProposal,
        AG=AdaptiveGaussianProposal,
        ST=StretchProposal,
        FG=FixedGaussianProposal,
        NF=NormalizingFlowProposal,
        KD=KDEProposal,
        GM=GMMProposal,
        PR=PriorProposal,
        UN=UniformProposal,
    )
    for code in string.split("no")[1:]:
        cls = mapping.get(code)
        if cls is not None:
            plist = [prop for prop in plist if not isinstance(prop, cls)]
    return plist
| [
"numpy.sqrt",
"numpy.random.rand",
"importlib.util.find_spec",
"numpy.log",
"numpy.array",
"numpy.mod",
"scipy.spatial.distance.jensenshannon",
"numpy.atleast_2d",
"scipy.stats.gaussian_kde",
"numpy.random.random",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.isinf",
"numpy.random.... | [((29652, 29668), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (29666, 29668), True, 'import numpy as np\n'), ((1038, 1071), 'numpy.mod', 'np.mod', (['position', 'self.nproposals'], {}), '(position, self.nproposals)\n', (1044, 1071), True, 'import numpy as np\n'), ((13838, 13849), 'time.time', 'time.time', ([], {}), '()\n', (13847, 13849), False, 'import time\n'), ((14990, 15020), 'numpy.array', 'np.array', (['verification_dataset'], {}), '(verification_dataset)\n', (14998, 15020), True, 'import numpy as np\n'), ((16747, 16768), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['dataset'], {}), '(dataset)\n', (16759, 16768), False, 'from scipy.stats import gaussian_kde\n'), ((17218, 17250), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(10)'}), '(n_components=10)\n', (17233, 17250), False, 'from sklearn.mixture import GaussianMixture\n'), ((20679, 20698), 'numpy.power', 'np.power', (['max_js', '(2)'], {}), '(max_js, 2)\n', (20687, 20698), True, 'import numpy as np\n'), ((20811, 20822), 'time.time', 'time.time', ([], {}), '()\n', (20820, 20822), False, 'import time\n'), ((20987, 21038), 'torch.tensor', 'torch.tensor', (['training_samples'], {'dtype': 'torch.float32'}), '(training_samples, dtype=torch.float32)\n', (20999, 21038), False, 'import torch\n'), ((26371, 26389), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (26387, 26389), True, 'import numpy as np\n'), ((27437, 27484), 'numpy.mod', 'np.mod', (['(phase + np.pi + self.epsilon)', '(2 * np.pi)'], {}), '(phase + np.pi + self.epsilon, 2 * np.pi)\n', (27443, 27484), True, 'import numpy as np\n'), ((28085, 28130), 'numpy.mod', 'np.mod', (['(psi + np.pi / 2 + self.epsilon)', 'np.pi'], {}), '(psi + np.pi / 2 + self.epsilon, np.pi)\n', (28091, 28130), True, 'import numpy as np\n'), ((28571, 28635), 'numpy.mod', 'np.mod', (['(sample[self.phase_key] + np.pi + self.epsilon)', '(2 * np.pi)'], {}), '(sample[self.phase_key] + np.pi + self.epsilon, 2 
* np.pi)\n', (28577, 28635), True, 'import numpy as np\n'), ((28682, 28737), 'numpy.mod', 'np.mod', (["(sample['psi'] + np.pi / 2 + self.epsilon)", 'np.pi'], {}), "(sample['psi'] + np.pi / 2 + self.epsilon, np.pi)\n", (28688, 28737), True, 'import numpy as np\n'), ((29743, 29752), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (29749, 29752), True, 'import numpy as np\n'), ((657, 720), 'numpy.random.choice', 'np.random.choice', (['self.proposal_list'], {'p': 'self.normalized_weights'}), '(self.proposal_list, p=self.normalized_weights)\n', (673, 720), True, 'import numpy as np\n'), ((2029, 2056), 'numpy.max', 'np.max', (['priors[key].minimum'], {}), '(priors[key].minimum)\n', (2035, 2056), True, 'import numpy as np\n'), ((2116, 2143), 'numpy.min', 'np.min', (['priors[key].maximum'], {}), '(priors[key].maximum)\n', (2122, 2143), True, 'import numpy as np\n'), ((2201, 2226), 'numpy.max', 'np.max', (['priors[key].width'], {}), '(priors[key].width)\n', (2207, 2226), True, 'import numpy as np\n'), ((3801, 3829), 'numpy.mod', 'np.mod', (['(val - minimum)', 'width'], {}), '(val - minimum, width)\n', (3807, 3829), True, 'import numpy as np\n'), ((4064, 4088), 'numpy.array', 'np.array', (['val_normalised'], {}), '(val_normalised)\n', (4072, 4088), True, 'import numpy as np\n'), ((6094, 6130), 'numpy.isinf', 'np.isinf', (['self.prior_width_dict[key]'], {}), '(self.prior_width_dict[key])\n', (6102, 6130), True, 'import numpy as np\n'), ((7112, 7148), 'numpy.isinf', 'np.isinf', (['self.prior_width_dict[key]'], {}), '(self.prior_width_dict[key])\n', (7120, 7148), True, 'import numpy as np\n'), ((7758, 7776), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7774, 7776), True, 'import numpy as np\n'), ((9516, 9532), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9530, 9532), True, 'import numpy as np\n'), ((10617, 10694), 'numpy.random.uniform', 'np.random.uniform', (['self.prior_minimum_dict[key]', 'self.prior_maximum_dict[key]'], {}), 
'(self.prior_minimum_dict[key], self.prior_maximum_dict[key])\n', (10634, 10694), True, 'import numpy as np\n'), ((14452, 14463), 'time.time', 'time.time', ([], {}), '()\n', (14461, 14463), False, 'import time\n'), ((15125, 15149), 'numpy.std', 'np.std', (['new_draws[:, ii]'], {}), '(new_draws[:, ii])\n', (15131, 15149), True, 'import numpy as np\n'), ((15181, 15216), 'numpy.std', 'np.std', (['verification_dataset[:, ii]'], {}), '(verification_dataset[:, ii])\n', (15187, 15216), True, 'import numpy as np\n'), ((17574, 17609), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""sklearn"""'], {}), "('sklearn')\n", (17598, 17609), False, 'import importlib\n'), ((20473, 20501), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(100)'], {}), '(xmin, xmax, 100)\n', (20484, 20501), True, 'import numpy as np\n'), ((20601, 20626), 'scipy.spatial.distance.jensenshannon', 'jensenshannon', (['Apdf', 'Bpdf'], {}), '(Apdf, Bpdf)\n', (20614, 20626), False, 'from scipy.spatial.distance import jensenshannon\n'), ((22035, 22046), 'time.time', 'time.time', ([], {}), '()\n', (22044, 22046), False, 'import time\n'), ((22961, 23015), 'numpy.atleast_2d', 'np.atleast_2d', (['[theta[key] for key in self.parameters]'], {}), '([theta[key] for key in self.parameters])\n', (22974, 23015), True, 'import numpy as np\n'), ((23442, 23476), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""nflows"""'], {}), "('nflows')\n", (23466, 23476), False, 'import importlib\n'), ((24542, 24560), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (24558, 24560), True, 'import numpy as np\n'), ((27621, 27657), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.fuzz_sigma'], {}), '(0, self.fuzz_sigma)\n', (27637, 27657), True, 'import numpy as np\n'), ((6654, 6671), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6669, 6671), True, 'import numpy as np\n'), ((7823, 7841), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7839, 7841), True, 'import 
numpy as np\n'), ((8071, 8088), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (8086, 8088), True, 'import numpy as np\n'), ((14385, 14402), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (14393, 14402), True, 'import numpy as np\n'), ((17391, 17411), 'numpy.atleast_2d', 'np.atleast_2d', (['point'], {}), '(point)\n', (17404, 17411), True, 'import numpy as np\n'), ((20521, 20536), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['A'], {}), '(A)\n', (20533, 20536), False, 'from scipy.stats import gaussian_kde\n'), ((20562, 20577), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['B'], {}), '(B)\n', (20574, 20577), False, 'from scipy.stats import gaussian_kde\n'), ((21589, 21606), 'numpy.mod', 'np.mod', (['epoch', '(10)'], {}), '(epoch, 10)\n', (21595, 21606), True, 'import numpy as np\n'), ((25195, 25221), 'numpy.cos', 'np.cos', (["sample['theta_jn']"], {}), "(sample['theta_jn'])\n", (25201, 25221), True, 'import numpy as np\n'), ((25908, 25929), 'numpy.sign', 'np.sign', (['cos_theta_jn'], {}), '(cos_theta_jn)\n', (25915, 25929), True, 'import numpy as np\n'), ((26447, 26465), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (26463, 26465), True, 'import numpy as np\n'), ((9671, 9693), 'numpy.sqrt', 'np.sqrt', (['(2 * self.ndim)'], {}), '(2 * self.ndim)\n', (9678, 9693), True, 'import numpy as np\n'), ((9785, 9796), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (9791, 9796), True, 'import numpy as np\n'), ((20381, 20390), 'numpy.min', 'np.min', (['A'], {}), '(A)\n', (20387, 20390), True, 'import numpy as np\n'), ((20392, 20401), 'numpy.min', 'np.min', (['B'], {}), '(B)\n', (20398, 20401), True, 'import numpy as np\n'), ((20431, 20440), 'numpy.max', 'np.max', (['A'], {}), '(A)\n', (20437, 20440), True, 'import numpy as np\n'), ((20442, 20451), 'numpy.max', 'np.max', (['B'], {}), '(B)\n', (20448, 20451), True, 'import numpy as np\n'), ((24307, 24327), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (24324, 
24327), True, 'import numpy as np\n'), ((26513, 26531), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (26529, 26531), True, 'import numpy as np\n'), ((9799, 9812), 'numpy.log', 'np.log', (['(100.0)'], {}), '(100.0)\n', (9805, 9812), True, 'import numpy as np\n'), ((9815, 9831), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9829, 9831), True, 'import numpy as np\n'), ((25642, 25663), 'numpy.sign', 'np.sign', (['cos_theta_jn'], {}), '(cos_theta_jn)\n', (25649, 25663), True, 'import numpy as np\n')] |
import math
import os
import numpy as np
import cv2
import skimage.transform
from scipy.io import loadmat
import scipy.spatial as spatial
import matplotlib.pyplot as plt
import torchvision
from math import cos, sin, atan2, asin
import scipy.misc
def get_vertices(pos):
    """Extract the face-region vertices from a UV position map.

    Relies on the module-level ``resolution`` and ``face_ind`` globals.
    """
    flattened = np.reshape(pos, [resolution**2, -1])
    return flattened[face_ind, :]
def get_landmarks(pos):
    """Look up the 68 landmark positions from a UV position map using the
    module-level ``uv_kpt_ind`` index table (row 1 = v, row 0 = u)."""
    rows = uv_kpt_ind[1, :].astype(np.int32)
    cols = uv_kpt_ind[0, :].astype(np.int32)
    return pos[rows, cols, :]
#region RENDER
def isPointInTri(point, tri_points):
    ''' Test whether a 2D point lies inside a triangle.

    Uses the barycentric-coordinate method described at
    http://blackpawn.com/texts/pointinpoly/

    Args:
        point: (2,). [u, v] or [x, y]
        tri_points: (3, 2). the three 2D vertices of the triangle.
    Returns:
        bool: True when the point is inside the triangle.
    '''
    # Edge vectors relative to vertex 0, plus the query offset
    e0 = tri_points[2, :] - tri_points[0, :]
    e1 = tri_points[1, :] - tri_points[0, :]
    q = point - tri_points[0, :]
    d00 = np.dot(e0.T, e0)
    d01 = np.dot(e0.T, e1)
    d0q = np.dot(e0.T, q)
    d11 = np.dot(e1.T, e1)
    d1q = np.dot(e1.T, q)
    # Guard against degenerate (zero-area) triangles
    denom = d00 * d11 - d01 * d01
    inv = 0 if denom == 0 else 1 / denom
    u = (d11 * d0q - d01 * d1q) * inv
    v = (d00 * d1q - d01 * d0q) * inv
    return (u >= 0) & (v >= 0) & (u + v < 1)
def get_point_weight(point, tri_points):
    ''' Barycentric weights of a 2D point with respect to a triangle.

    Follows the fast method from "Real-Time Collision Detection"; see also
    https://gamedev.stackexchange.com/questions/23743/

    Args:
        point: (2,). [u, v] or [x, y]
        tri_points: (3, 2). the three 2D vertices of the triangle.
    Returns:
        (w0, w1, w2): weights of vertices 0, 1, 2 (sum to 1 inside).
    '''
    e0 = tri_points[2, :] - tri_points[0, :]
    e1 = tri_points[1, :] - tri_points[0, :]
    q = point - tri_points[0, :]
    d00 = np.dot(e0.T, e0)
    d01 = np.dot(e0.T, e1)
    d0q = np.dot(e0.T, q)
    d11 = np.dot(e1.T, e1)
    d1q = np.dot(e1.T, q)
    # Degenerate triangles produce zero weights for v1/v2
    denom = d00 * d11 - d01 * d01
    inv = 0 if denom == 0 else 1 / denom
    u = (d11 * d0q - d01 * d1q) * inv
    v = (d00 * d1q - d01 * d0q) * inv
    return 1 - u - v, v, u
def rasterize_triangles(vertices, triangles, h, w):
    '''Rasterize a mesh into per-pixel depth / triangle-id / barycentric buffers.

    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        h: height
        w: width
    Returns:
        depth_buffer: [h, w] saves the depth; here the bigger the z, the fronter the point.
        triangle_buffer: [h, w] saves the tri id (-1 for no triangle).
        barycentric_weight: [h, w, 3] saves corresponding barycentric weight.

    # Each triangle has 3 vertices & each vertex has 3 coordinates x, y, z.
    # h, w is the size of rendering
    '''
    # Initialise: depth to a very far plane, triangle ids to "none" (-1)
    depth_buffer = np.zeros([h, w]) - 999999. #+ np.min(vertices[2,:]) - 999999. # set the initial z to the farest position
    triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1  # if tri id = -1, the pixel has no triangle correspondance
    barycentric_weight = np.zeros([h, w, 3], dtype = np.float32)  #
    for i in range(triangles.shape[0]):
        tri = triangles[i, :]  # 3 vertex indices
        # the inner bounding box, clamped to the image
        umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
        umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
        vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
        vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
        if umax<umin or vmax<vmin:
            continue
        # Scan every pixel of the bounding box; keep the closest triangle
        # (z-buffer test) together with its barycentric weights
        for u in range(umin, umax+1):
            for v in range(vmin, vmax+1):
                if not isPointInTri([u,v], vertices[tri, :2]):
                    continue
                w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2]) # barycentric weight
                point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
                if point_depth > depth_buffer[v, u]:
                    depth_buffer[v, u] = point_depth
                    triangle_buffer[v, u] = i
                    barycentric_weight[v, u, :] = np.array([w0, w1, w2])
    return depth_buffer, triangle_buffer, barycentric_weight
def render_colors_ras(vertices, triangles, colors, h, w, c = 3):
    ''' Render a mesh with per-vertex colors (rasterize triangles first).

    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        colors: [nver, c] per-vertex colours
        h: height
        w: width
        c: number of colour channels
    Returns:
        image: [h, w, c]. rendering.
    '''
    assert vertices.shape[0] == colors.shape[0]
    depth_buffer, triangle_buffer, barycentric_weight = rasterize_triangles(vertices, triangles, h, w)
    triangle_buffer_flat = np.reshape(triangle_buffer, [-1])  # [h*w]
    # BUGFIX: barycentric weights always have 3 components (one per triangle
    # vertex) regardless of the number of colour channels; the original
    # reshaped with `c`, which broke every c != 3 case.
    barycentric_weight_flat = np.reshape(barycentric_weight, [-1, 3])  # [h*w, 3]
    weight = barycentric_weight_flat[:, :, np.newaxis]  # [h*w, 3(ver in tri), 1]
    # Gather the three vertex colours for each pixel's triangle and blend.
    # NOTE(review): pixels without a triangle carry id -1 and therefore
    # sample the *last* triangle's colours; the masking below was left
    # disabled in the original — confirm whether it should be re-enabled.
    colors_flat = colors[triangles[triangle_buffer_flat, :], :]  # [h*w, 3(ver in tri), c]
    colors_flat = weight*colors_flat  # [h*w, 3, c]
    colors_flat = np.sum(colors_flat, 1)  # [h*w, c]. add tri.
    image = np.reshape(colors_flat, [h, w, c])
    # mask = (triangle_buffer[:,:] > -1).astype(np.float32)
    # image = image*mask[:,:,np.newaxis]
    return image
def render_colors(vertices, triangles, colors, h, w, c = 3):
    ''' Render a mesh with per-vertex colors by direct per-pixel scanning.

    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        colors: [nver, c] per-vertex colours
        h: height
        w: width
        c: number of colour channels
    Returns:
        image: [h, w, c].
    '''
    assert vertices.shape[0] == colors.shape[0]
    # Initialise the frame buffer and the z-buffer (very far plane)
    image = np.zeros((h, w, c))
    depth_buffer = np.zeros([h, w]) - 999999.
    for i in range(triangles.shape[0]):
        tri = triangles[i, :]  # 3 vertex indices
        # the inner bounding box, clamped to the image
        umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
        umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
        vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
        vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
        if umax<umin or vmax<vmin:
            continue
        # Scan the bounding box; for the closest triangle at each pixel,
        # blend the three vertex colours with its barycentric weights
        for u in range(umin, umax+1):
            for v in range(vmin, vmax+1):
                if not isPointInTri([u,v], vertices[tri, :2]):
                    continue
                w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2])
                point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
                if point_depth > depth_buffer[v, u]:
                    depth_buffer[v, u] = point_depth
                    image[v, u, :] = w0*colors[tri[0], :] + w1*colors[tri[1], :] + w2*colors[tri[2], :]
    return image
#endregion
#region POSE
def isRotationMatrix(R):
    ''' Check whether a 3x3 matrix is a valid rotation matrix, i.e. whether
    R^T R is (numerically) the identity. '''
    residual = np.identity(3, dtype=R.dtype) - np.dot(np.transpose(R), R)
    return np.linalg.norm(residual) < 1e-6
def matrix2angle(R):
    ''' Compute three Euler angles from a rotation matrix.
    Ref: http://www.gregslabaugh.net/publications/euler.pdf

    Args:
        R: (3,3). rotation matrix
    Returns:
        x: yaw
        y: pitch
        z: roll
    '''
    # BUGFIX: the original condition used `or`, i.e.
    # `R[2,0] != 1 or R[2,0] != -1`, which is a tautology (always True),
    # so the gimbal-lock branch below was unreachable and R[2,0] == +/-1
    # produced garbage angles (division by cos(x) ~ 0).
    if R[2, 0] != 1 and R[2, 0] != -1:
        x = asin(R[2, 0])
        y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
        z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
    else:  # Gimbal lock: cos(x) == 0, only y +/- z is determined
        z = 0  # can be anything; fix z = 0
        if R[2, 0] == -1:
            x = np.pi / 2
            y = z + atan2(R[0, 1], R[0, 2])
        else:
            x = -np.pi / 2
            y = -z + atan2(-R[0, 1], -R[0, 2])
    return x, y, z
def angle2matrix(angles):
    ''' Build a rotation matrix from three rotation angles (radians),
    matching the 3DDFA convention.

    Args:
        angles: [3,]. NOTE: angles[0] is applied as the y rotation and
            angles[1] as the x rotation, as in the original code.
    Returns:
        R: 3x3 float32 rotation matrix (Rz @ Ry @ Rx).
    '''
    y, x, z = angles[0], angles[1], angles[2]
    cx, sx = cos(x), sin(x)
    cy, sy = cos(y), sin(y)
    cz, sz = cos(z), sin(z)
    # Elementary rotations about each axis
    Rx = np.array([[1, 0, 0],
                   [0, cx, -sx],
                   [0, sx, cx]])
    Ry = np.array([[cy, 0, sy],
                   [0, 1, 0],
                   [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0],
                   [sz, cz, 0],
                   [0, 0, 1]])
    return Rz.dot(Ry).dot(Rx).astype(np.float32)
def P2sRt(P):
    ''' Decompose an affine camera matrix P = [sR | t].

    Args:
        P: (3, 4). Affine camera matrix.
    Returns:
        s: scale factor.
        R: (3, 3). rotation matrix.
        t2d: (2,). 2d translation.
    '''
    t2d = P[:2, 3]
    row1 = P[0:1, :3]
    row2 = P[1:2, :3]
    norm1 = np.linalg.norm(row1)
    norm2 = np.linalg.norm(row2)
    # Scale is the mean row norm; rows are normalised to unit vectors
    s = (norm1 + norm2) / 2.0
    r1 = row1 / norm1
    r2 = row2 / norm2
    # Third row completes the right-handed orthonormal frame
    R = np.concatenate((r1, r2, np.cross(r1, r2)), 0)
    return s, R, t2d
def compute_similarity_transform(points_static, points_to_transform):
    """Estimate the similarity transform P = [sR | t] relating two point sets.

    Procrustes-style: both sets are centred, the rotation is taken from the
    SVD of the cross-covariance (with a reflection fix), and the scale from
    the ratio of RMS radii.

    Args:
        points_static: (N, 3) target points.
        points_to_transform: (N, 3) source points.
    Returns:
        P: (3, 4) affine matrix [s * R | t].
    """
    p0 = np.copy(points_static).T
    p1 = np.copy(points_to_transform).T
    t0 = -np.mean(p0, axis=1).reshape(3, 1)
    t1 = -np.mean(p1, axis=1).reshape(3, 1)
    t_final = t1 - t0
    p0c = p0 + t0
    p1c = p1 + t1
    covariance_matrix = p0c.dot(p1c.T)  # 3x3 cross-covariance
    U, S, V = np.linalg.svd(covariance_matrix)
    R = U.dot(V)
    # Guard against reflections (det == -1)
    if np.linalg.det(R) < 0:
        R[:, 2] *= -1
    rms_d0 = np.sqrt(np.mean(np.linalg.norm(p0c, axis=0) ** 2))
    rms_d1 = np.sqrt(np.mean(np.linalg.norm(p1c, axis=0) ** 2))
    s = rms_d0 / rms_d1
    # Build P once (the original computed it twice and discarded one copy)
    return np.c_[s * np.eye(3).dot(R), t_final]
def estimate_pose(vertices):
    """Estimate the pose of ``vertices`` relative to the module-level
    ``canonical_vertices`` mesh.

    Returns:
        P: (3, 4) affine matrix, pose: (x, y, z) Euler angles,
        (s, R, t): the decomposed scale / rotation / 2d translation.
    """
    P = compute_similarity_transform(vertices, canonical_vertices)
    s, R, t = P2sRt(P)
    return P, matrix2angle(R), (s, R, t)
def transform_vertices(R, vts):
    """Rotate vertices by R^-1 about their centroid.

    Args:
        R: (3, 3) rotation matrix.
        vts: (N, 3) vertices.
    Returns:
        (N, 3) transformed vertices.
    """
    pts = np.copy(vts).T
    centroid = np.mean(pts, axis=1).reshape(3, 1)
    rotated = np.linalg.inv(R).dot(pts - centroid) + centroid
    return rotated.T
#endregion
#region WARP
def GetBilinearPixel(imArr, posX, posY, out):
    """Bilinearly sample ``imArr`` at (posX, posY), writing the rounded
    integer value of each channel into ``out``.

    The caller must guarantee posX/posY leave room for the +1 neighbours.
    Returns None (helps with profiling view).
    """
    # Integer and fractional parts of the sample position
    x0 = int(posX)
    y0 = int(posY)
    fx = posX - x0
    fy = posY - y0
    for chan in range(imArr.shape[2]):
        # The four surrounding pixels
        bl = imArr[y0, x0, chan]
        br = imArr[y0, x0 + 1, chan]
        tl = imArr[y0 + 1, x0, chan]
        tr = imArr[y0 + 1, x0 + 1, chan]
        # Interpolate horizontally, then vertically; +0.5 for fast rounding
        bottom = fx * br + (1. - fx) * bl
        top = fx * tr + (1. - fx) * tl
        out[chan] = int(fy * top + (1. - fy) * bottom + 0.5)
    return None
def WarpProcessing(inArr, outArr, inTriangle, triAffines, shape):
    """Fill ``outArr`` by pulling pixels from ``inArr`` through per-triangle
    affine maps.

    Args:
        inArr: (H, W, C) source image (3D array).
        outArr: (H', W', C) destination image, written in place.
        inTriangle: per-(x, y) triangle index (-1 where no triangle).
        triAffines: list of 3x3 affine matrices, one per triangle, mapping
            homogeneous destination coords to source coords.
        shape: (N, 2) destination points; only used for the bounding box.
    Returns None (helps with profiling view).
    """
    # Per-pixel channel scratch buffer and reusable homogeneous coordinate
    px = np.empty((inArr.shape[2],), dtype=np.int32)
    homogCoord = np.ones((3,), dtype=np.float32)
    # Calculate ROI (bounding box of the destination points)
    xmin = shape[:,0].min()
    xmax = shape[:,0].max()
    ymin = shape[:,1].min()
    ymax = shape[:,1].max()
    xmini = int(xmin)
    xmaxi = int(xmax)
    ymini = int(ymin)
    ymaxi = int(ymax)
    #print xmin, xmax, ymin, ymax
    # Synthesise the shape-normalised image pixel by pixel
    for i in range(xmini, xmaxi):
        for j in range(ymini, ymaxi):
            homogCoord[0] = i
            homogCoord[1] = j
            # Skip pixels outside the destination image
            if i < 0 or i >= outArr.shape[1]: continue
            if j < 0 or j >= outArr.shape[0]: continue
            # Determine which triangle the destination pixel occupies
            tri = inTriangle[i,j]
            if tri == -1:
                continue
            # Map the destination pixel into the source image
            affine = triAffines[tri]
            outImgCoord = np.dot(affine, homogCoord)
            # Zero the output where the source coordinate falls off-image
            if outImgCoord[0] < 0 or outImgCoord[0] >= inArr.shape[1]:
                for chan in range(px.shape[0]): outArr[j,i,chan] = 0
                continue
            if outImgCoord[1] < 0 or outImgCoord[1] >= inArr.shape[0]:
                for chan in range(px.shape[0]): outArr[j,i,chan] = 0
                continue
            # Copy pixel from source to destination by bilinear sampling
            # (nearest-neighbour variant kept in the original as a comment)
            GetBilinearPixel(inArr, outImgCoord[0], outImgCoord[1], px)
            for chan in range(px.shape[0]):
                outArr[j,i,chan] = px[chan]
    return None
def PiecewiseAffineTransform(srcIm, srcPoints, dstPoints):
    """Warp ``srcIm`` by a piecewise-affine map from ``srcPoints`` to
    ``dstPoints`` over a Delaunay triangulation of the destination points.

    Args:
        srcIm: (H, W, C) source image.
        srcPoints: (N, 2) source control points.
        dstPoints: (N, 2) destination control points.
    Returns:
        Warped image; collapsed to 2D if it has a single channel.
    """
    # Split the destination shape into a triangle mesh
    tess = spatial.Delaunay(dstPoints)
    # ROI in the target image
    xmin, xmax = dstPoints[:,0].min(), dstPoints[:,0].max()
    ymin, ymax = dstPoints[:,1].min(), dstPoints[:,1].max()
    # Which tessellation triangle contains each pixel (indexed [x, y]).
    # BUGFIX: `np.int` was removed in NumPy >= 1.24; the builtin int is the
    # exact type np.int used to alias.
    inTessTriangle = np.ones((srcIm.shape[0],srcIm.shape[1]), dtype=int) * -1
    for i in range(int(xmin), int(xmax+1.)):
        for j in range(int(ymin), int(ymax+1.)):
            if i < 0 or i >= inTessTriangle.shape[0]:
                continue
            if j < 0 or j >= inTessTriangle.shape[1]:
                continue
            normSpaceCoord = (float(i),float(j))
            simp = tess.find_simplex([normSpaceCoord])
            inTessTriangle[i,j] = simp
    # Affine mapping (dst -> src) for each triangle.
    # BUGFIX: Delaunay.vertices was a deprecated alias of .simplices and
    # has been removed from modern SciPy.
    triAffines = []
    for i, tri in enumerate(tess.simplices):
        meanVertPos = np.hstack((srcPoints[tri], np.ones((3,1)))).transpose()
        shapeVertPos = np.hstack((dstPoints[tri,:], np.ones((3,1)))).transpose()
        affine = np.dot(meanVertPos, np.linalg.inv(shapeVertPos))
        triAffines.append(affine)
    # Prepare arrays, check they are 3D
    targetArr = np.copy(srcIm)
    srcIm = srcIm.reshape(srcIm.shape[0], srcIm.shape[1], srcIm.shape[2])
    targetArr = targetArr.reshape(targetArr.shape[0], targetArr.shape[1], srcIm.shape[2])
    # Calculate pixel colours
    WarpProcessing(srcIm, targetArr, inTessTriangle, triAffines, dstPoints)
    # Convert single-channel images back to 2D
    if targetArr.shape[2] == 1:
        targetArr = targetArr.reshape((targetArr.shape[0],targetArr.shape[1]))
    return targetArr
#endregion
def warpPerspective(image, rect):
    """Warp the quadrilateral ``rect`` of ``image`` onto an axis-aligned
    (resolution x resolution) image via a homography solved from the four
    point correspondences.

    Args:
        image: source image.
        rect: (tl, tr, br, bl) corner points of the quadrilateral.
    Returns:
        The warped image (uses the module-level ``resolution``).
    """
    (tl, tr, br, bl) = rect
    # Output extent: the longest opposite-edge pair of the quadrilateral
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = int(widthA) if int(widthA) > int(widthB) else int(widthB)
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = int(heightA) if int(heightA) > int(heightB) else int(heightB)
    dst = np.array([
        [ 0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [ 0, maxHeight - 1]], dtype = "float32")
    # Linear system for the 8 homography parameters (last row pins h33 = 1)
    P = np.array([
        [ -tl[0], -tl[1], -1,      0,      0,  0, tl[0]*dst[0][0], tl[1]*dst[0][0], dst[0][0]],
        [      0,      0,  0, -tl[0], -tl[1], -1, tl[0]*dst[0][1], tl[1]*dst[0][1], dst[0][1]],
        [ -tr[0], -tr[1], -1,      0,      0,  0, tr[0]*dst[1][0], tr[1]*dst[1][0], dst[1][0]],
        [      0,      0,  0, -tr[0], -tr[1], -1, tr[0]*dst[1][1], tr[1]*dst[1][1], dst[1][1]],
        [ -br[0], -br[1], -1,      0,      0,  0, br[0]*dst[2][0], br[1]*dst[2][0], dst[2][0]],
        [      0,      0,  0, -br[0], -br[1], -1, br[0]*dst[2][1], br[1]*dst[2][1], dst[2][1]],
        [ -bl[0], -bl[1], -1,      0,      0,  0, bl[0]*dst[3][0], bl[1]*dst[3][0], dst[3][0]],
        [      0,      0,  0, -bl[0], -bl[1], -1, bl[0]*dst[3][1], bl[1]*dst[3][1], dst[3][1]],
        [      0,      0,  0,      0,      0,  0,               0,               0,         1]], dtype = "float32")
    arr_01 = np.array([
        [0],
        [0],
        [0],
        [0],
        [0],
        [0],
        [0],
        [0],
        [1]], dtype = "float32")
    P_1 = np.linalg.inv(P)
    H = P_1.dot(arr_01)  # homography coefficients, shape (9, 1)
    # BUGFIX: build the 3x3 matrix from scalars. The original built M from
    # the (1,)-shaped rows of H — a (3, 3, 1) array — and passed that to
    # cv2.warpPerspective, while the correctly shaped M_imp went unused.
    M = np.array([
        [ H[0][0], H[1][0], H[2][0]],
        [ H[3][0], H[4][0], H[5][0]],
        [ H[6][0], H[7][0], H[8][0]]], dtype = "float32")
    warped = cv2.warpPerspective(image, M, (resolution, resolution))
    return warped
def flip_texture(tex, isPoseLeft=True):
    """Mirror one half of a UV texture onto the other about the vertical
    mid-line.

    Args:
        tex: (X, Y, ...) texture; columns are mirrored along axis 1.
        isPoseLeft: when True the right half is mirrored onto the left,
            otherwise the left half is mirrored onto the right.
    Returns:
        A new texture array; the input is not modified.

    BUGFIX: the original started from ``np.empty_like`` and only wrote the
    two mirrored halves, so for an odd column count the middle column was
    left uninitialized (garbage memory). Starting from a copy of ``tex``
    keeps even-width behaviour identical and defines the middle column.
    """
    Y = tex.shape[1]
    half = Y // 2
    new_tex = tex.copy()
    mirrored = np.flip(tex, axis=1)  # column y holds tex[:, Y-1-y]
    if isPoseLeft:
        # Replace the left half with the mirror of the right half
        new_tex[:, :half] = mirrored[:, :half]
    else:
        # Replace the right half with the mirror of the left half
        new_tex[:, Y - half:] = mirrored[:, Y - half:]
    return new_tex
def create_scatter(img, vts, kpt, isMesh=False):
    """Draw landmarks (and optionally all mesh vertices) on a 256x256
    resized copy of ``img``.

    Keypoints listed in the module-level ``face_contour_ind`` are drawn in
    blue-channel colour (255, 0, 0); the rest in white.
    """
    canvas = cv2.resize(img, (256, 256))
    if isMesh:
        xs, ys, zs = vts.transpose()
        for i in range(xs.shape[0]):
            canvas = cv2.circle(canvas, (int(xs[i]), int(ys[i])), 1, (255, 0, 0), -1)
    xs, ys, zs = kpt.transpose().astype(np.int32)
    for i in range(xs.shape[0]):
        color = (255, 0, 0) if i in face_contour_ind else (255, 255, 255)
        canvas = cv2.circle(canvas, (int(xs[i]), int(ys[i])), 5, color, -1)
    return canvas
def show_result(*img, columns, rows):
    """Lay out the given images in a ``rows`` x ``columns`` grid, save the
    figure to 'FaceRotation_Demo.png', then display it.

    Exactly ``columns * rows`` images are consumed from ``img``.
    """
    fig = plt.figure(figsize=(8, 8))
    for cell in range(1, columns * rows + 1):
        fig.add_subplot(rows, columns, cell)
        plt.imshow(img[cell - 1])
    plt.savefig('FaceRotation_Demo.png')
    plt.show()
# --- Set profiling angle (target pose deltas, in radians) ---
phi_delta = 20 / 180 * math.pi
gamma_delta = 10 / 180 * math.pi
theta_delta = 0
# --- Load sample image and its UV position map, then sample the texture ---
working_folder = str(os.path.abspath(os.getcwd()))
img = cv2.imread(os.path.join(working_folder, "result/store", "image00013.jpg"))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # work in RGB from here on
pos = np.load(os.path.join(working_folder, "result/store", "image00013.npy")).astype(np.float32)
# pos[:, :, :2] holds per-UV-pixel image coordinates; remap pulls the face texture.
tex = cv2.remap(img, pos[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
# --- Load model data (3DMM contours, mouth triangles, keypoints) ---
M_face_contour = loadmat(os.path.join(working_folder, 'test.synface/Model_face_contour_trimed.mat'))
M_fullmod_contour = loadmat(os.path.join(working_folder, 'test.synface/Model_fullmod_contour.mat'))
M_tri_mouth = loadmat(os.path.join(working_folder, 'test.synface/Model_tri_mouth.mat'))
M_keypoints = loadmat(os.path.join(working_folder, 'test.synface/Model_keypoints.mat'))
tri = np.loadtxt(os.path.join(working_folder, 'test.synface/triangles.txt')).astype(np.int32)
tri_plus = np.concatenate((tri, np.asarray(M_tri_mouth["tri_mouth"], dtype=np.int32).T))
layer_width = [0.1, 0.15, 0.2, 0.25, 0.3]
FLAGS = {
    "model"             : os.path.join(working_folder, "train_log/_checkpoint_epoch_80.pth.tar"),
    "data_path"         : os.path.join(working_folder, "data"),
    "uv_kpt_ind_path"   : os.path.join(working_folder, "data/processing/Data/UV/uv_kpt_ind.txt"),
    "face_ind_path"     : os.path.join(working_folder, "data/processing/Data/UV/face_ind.txt"),
    "triangles_path"    : os.path.join(working_folder, "data/processing/Data/UV/triangles.txt"),
    "canonical_vts_path": os.path.join(working_folder, "data/processing/Data/UV/canonical_vertices.npy"),
    "result_path"       : os.path.join(working_folder, "result/usr"),
    "uv_kpt_path"       : os.path.join(working_folder, "data/processing/Data/UV/uv_kpt_ind.txt"),
    "device"            : "cuda",
    "devices_id"        : [0],
    "batch_size"        : 16,
    "workers"           : 8
}
uv_kpt_ind = np.loadtxt(FLAGS["uv_kpt_ind_path"]).astype(np.int32)
# NOTE: np.int was removed in NumPy 1.24; use an explicit dtype instead.
kpt_ind = np.array([8444, 8529, 8702, 8763, 9168, 9203, 9246, 9281, 10877, 11016, 13407, 13611, 13694, 13866, 13931, 14857, 14908, 15325, 15424, 15589, 15652, 15826, 15907, 16851, 20049, 22396, 22509, 22621, 26188, 26209, 26682, 26693, 27175, 27792, 28003, 30014, 30021, 30250, 30926, 30949, 31618, 31838, 31849, 33074, 33158, 33277, 33375, 33395, 33412, 33413, 33608, 33617, 34680, 34699, 35110, 35115, 35119, 38077, 38268, 41382, 41547, 42101, 42132, 42234, 42307, 42506, 42627, 42986], dtype=np.int64)
face_ind = np.loadtxt(FLAGS["face_ind_path"]).astype(np.int32)
triangles = np.loadtxt(FLAGS["triangles_path"]).astype(np.int32)
canonical_vertices = np.load(FLAGS["canonical_vts_path"])
resolution = 256
# --- Construct and render the 3D face, then rotate it to the new pose ---
vts = get_vertices(pos)
clr = get_vertices(tex)
kpt = get_landmarks(pos)
face_contour_ind = list(range(0, 28))
sct_img = create_scatter(img, vts, kpt)
ren_img = render_colors(vts, triangles, clr, resolution, resolution).astype(np.uint8)
P, pose, (s, R, t) = estimate_pose(vts)
### example pose: (0.336254458754315, -0.032371088523203216, -0.3484050027460824)
new_pose = (phi_delta, gamma_delta, theta_delta)
rot_vts = transform_vertices(angle2matrix(new_pose), vts)
rot_pos = transform_vertices(angle2matrix(new_pose), np.reshape(pos, [resolution**2, -1]))
rot_pos = np.reshape(rot_pos, [resolution, resolution, -1])
rot_img = render_colors(rot_vts, triangles, clr, resolution, resolution).astype(np.uint8)
# Image-plane anchor points (the four corners) used for background warping.
brg_points = np.array([
    [0, 0, 1], [resolution - 1, 0, 1],
    [resolution - 1, resolution - 1, 1], [0, resolution - 1, 1]
], dtype=np.float32)
background = np.zeros_like(img)  # TODO: warp the background to follow the face
# Composite: keep rendered pixels, fill the rest with the background.
rot_img = np.where(np.sum(rot_img, axis=2, keepdims=True) > 0, rot_img, background)
# --- Sample textures from the clean and rotated renders and save them ---
clean_tex = cv2.remap(ren_img, pos[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
miss_tex = cv2.remap(rot_img, rot_pos[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
# scipy.misc.imsave was removed in SciPy >= 1.3; write via OpenCV instead.
# Convert back to BGR so the saved file keeps the original colors.
out_dir = "/home/viet/Projects/Pycharm/SPRNet/result/usr"  # TODO: use FLAGS["result_path"]
cv2.imwrite(os.path.join(out_dir, "image1_texture.jpg"), cv2.cvtColor(clean_tex, cv2.COLOR_RGB2BGR))
cv2.imwrite(os.path.join(out_dir, "image1_misstexture.jpg"), cv2.cvtColor(miss_tex, cv2.COLOR_RGB2BGR))
# Rotating and Anchor Adjustment
# Get Rotating Result
| [
"numpy.sqrt",
"math.cos",
"numpy.array",
"cv2.warpPerspective",
"numpy.linalg.norm",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.reshape",
"numpy.cross",
"numpy.asarray",
"math.sin",
"numpy.max",
"numpy.dot",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"numpy.identity",
"... | [((19286, 19322), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (19298, 19322), False, 'import cv2\n'), ((21309, 21831), 'numpy.array', 'np.array', (['[8444, 8529, 8702, 8763, 9168, 9203, 9246, 9281, 10877, 11016, 13407, 13611,\n 13694, 13866, 13931, 14857, 14908, 15325, 15424, 15589, 15652, 15826, \n 15907, 16851, 20049, 22396, 22509, 22621, 26188, 26209, 26682, 26693, \n 27175, 27792, 28003, 30014, 30021, 30250, 30926, 30949, 31618, 31838, \n 31849, 33074, 33158, 33277, 33375, 33395, 33412, 33413, 33608, 33617, \n 34680, 34699, 35110, 35115, 35119, 38077, 38268, 41382, 41547, 42101, \n 42132, 42234, 42307, 42506, 42627, 42986]'], {'dtype': 'np.int'}), '([8444, 8529, 8702, 8763, 9168, 9203, 9246, 9281, 10877, 11016, \n 13407, 13611, 13694, 13866, 13931, 14857, 14908, 15325, 15424, 15589, \n 15652, 15826, 15907, 16851, 20049, 22396, 22509, 22621, 26188, 26209, \n 26682, 26693, 27175, 27792, 28003, 30014, 30021, 30250, 30926, 30949, \n 31618, 31838, 31849, 33074, 33158, 33277, 33375, 33395, 33412, 33413, \n 33608, 33617, 34680, 34699, 35110, 35115, 35119, 38077, 38268, 41382, \n 41547, 42101, 42132, 42234, 42307, 42506, 42627, 42986], dtype=np.int)\n', (21317, 21831), True, 'import numpy as np\n'), ((21973, 22009), 'numpy.load', 'np.load', (["FLAGS['canonical_vts_path']"], {}), "(FLAGS['canonical_vts_path'])\n", (21980, 22009), True, 'import numpy as np\n'), ((22753, 22802), 'numpy.reshape', 'np.reshape', (['rot_pos', '[resolution, resolution, -1]'], {}), '(rot_pos, [resolution, resolution, -1])\n', (22763, 22802), True, 'import numpy as np\n'), ((23489, 23618), 'numpy.array', 'np.array', (['[[0, 0, 1], [resolution - 1, 0, 1], [resolution - 1, resolution - 1, 1], [0,\n resolution - 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 1], [resolution - 1, 0, 1], [resolution - 1, resolution - \n 1, 1], [0, resolution - 1, 1]], dtype=np.float32)\n', (23497, 23618), True, 'import numpy as np\n'), ((23813, 23831), 
'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (23826, 23831), True, 'import numpy as np\n'), ((289, 327), 'numpy.reshape', 'np.reshape', (['pos', '[resolution ** 2, -1]'], {}), '(pos, [resolution ** 2, -1])\n', (299, 327), True, 'import numpy as np\n'), ((1013, 1029), 'numpy.dot', 'np.dot', (['v0.T', 'v0'], {}), '(v0.T, v0)\n', (1019, 1029), True, 'import numpy as np\n'), ((1042, 1058), 'numpy.dot', 'np.dot', (['v0.T', 'v1'], {}), '(v0.T, v1)\n', (1048, 1058), True, 'import numpy as np\n'), ((1071, 1087), 'numpy.dot', 'np.dot', (['v0.T', 'v2'], {}), '(v0.T, v2)\n', (1077, 1087), True, 'import numpy as np\n'), ((1100, 1116), 'numpy.dot', 'np.dot', (['v1.T', 'v1'], {}), '(v1.T, v1)\n', (1106, 1116), True, 'import numpy as np\n'), ((1129, 1145), 'numpy.dot', 'np.dot', (['v1.T', 'v2'], {}), '(v1.T, v2)\n', (1135, 1145), True, 'import numpy as np\n'), ((2223, 2239), 'numpy.dot', 'np.dot', (['v0.T', 'v0'], {}), '(v0.T, v0)\n', (2229, 2239), True, 'import numpy as np\n'), ((2252, 2268), 'numpy.dot', 'np.dot', (['v0.T', 'v1'], {}), '(v0.T, v1)\n', (2258, 2268), True, 'import numpy as np\n'), ((2281, 2297), 'numpy.dot', 'np.dot', (['v0.T', 'v2'], {}), '(v0.T, v2)\n', (2287, 2297), True, 'import numpy as np\n'), ((2310, 2326), 'numpy.dot', 'np.dot', (['v1.T', 'v1'], {}), '(v1.T, v1)\n', (2316, 2326), True, 'import numpy as np\n'), ((2339, 2355), 'numpy.dot', 'np.dot', (['v1.T', 'v2'], {}), '(v1.T, v2)\n', (2345, 2355), True, 'import numpy as np\n'), ((3491, 3528), 'numpy.zeros', 'np.zeros', (['[h, w, 3]'], {'dtype': 'np.float32'}), '([h, w, 3], dtype=np.float32)\n', (3499, 3528), True, 'import numpy as np\n'), ((5147, 5180), 'numpy.reshape', 'np.reshape', (['triangle_buffer', '[-1]'], {}), '(triangle_buffer, [-1])\n', (5157, 5180), True, 'import numpy as np\n'), ((5219, 5258), 'numpy.reshape', 'np.reshape', (['barycentric_weight', '[-1, c]'], {}), '(barycentric_weight, [-1, c])\n', (5229, 5258), True, 'import numpy as np\n'), ((5541, 5563), 'numpy.sum', 
'np.sum', (['colors_flat', '(1)'], {}), '(colors_flat, 1)\n', (5547, 5563), True, 'import numpy as np\n'), ((5597, 5631), 'numpy.reshape', 'np.reshape', (['colors_flat', '[h, w, c]'], {}), '(colors_flat, [h, w, c])\n', (5607, 5631), True, 'import numpy as np\n'), ((6105, 6124), 'numpy.zeros', 'np.zeros', (['(h, w, c)'], {}), '((h, w, c))\n', (6113, 6124), True, 'import numpy as np\n'), ((7334, 7349), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (7346, 7349), True, 'import numpy as np\n'), ((7373, 7386), 'numpy.dot', 'np.dot', (['Rt', 'R'], {}), '(Rt, R)\n', (7379, 7386), True, 'import numpy as np\n'), ((7395, 7424), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'R.dtype'}), '(3, dtype=R.dtype)\n', (7406, 7424), True, 'import numpy as np\n'), ((7433, 7469), 'numpy.linalg.norm', 'np.linalg.norm', (['(I - shouldBeIdentity)'], {}), '(I - shouldBeIdentity)\n', (7447, 7469), True, 'import numpy as np\n'), ((9646, 9662), 'numpy.cross', 'np.cross', (['r1', 'r2'], {}), '(r1, r2)\n', (9654, 9662), True, 'import numpy as np\n'), ((9672, 9703), 'numpy.concatenate', 'np.concatenate', (['(r1, r2, r3)', '(0)'], {}), '((r1, r2, r3), 0)\n', (9686, 9703), True, 'import numpy as np\n'), ((10077, 10109), 'numpy.linalg.svd', 'np.linalg.svd', (['covariance_matrix'], {}), '(covariance_matrix)\n', (10090, 10109), True, 'import numpy as np\n'), ((11584, 11627), 'numpy.empty', 'np.empty', (['(inArr.shape[2],)'], {'dtype': 'np.int32'}), '((inArr.shape[2],), dtype=np.int32)\n', (11592, 11627), True, 'import numpy as np\n'), ((11642, 11673), 'numpy.ones', 'np.ones', (['(3,)'], {'dtype': 'np.float32'}), '((3,), dtype=np.float32)\n', (11649, 11673), True, 'import numpy as np\n'), ((13288, 13315), 'scipy.spatial.Delaunay', 'spatial.Delaunay', (['dstPoints'], {}), '(dstPoints)\n', (13304, 13315), True, 'import scipy.spatial as spatial\n'), ((14498, 14512), 'numpy.copy', 'np.copy', (['srcIm'], {}), '(srcIm)\n', (14505, 14512), True, 'import numpy as np\n'), ((15046, 15098), 
'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (15053, 15098), True, 'import numpy as np\n'), ((15121, 15173), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (15128, 15173), True, 'import numpy as np\n'), ((15273, 15325), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (15280, 15325), True, 'import numpy as np\n'), ((15348, 15400), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (15355, 15400), True, 'import numpy as np\n'), ((15496, 15606), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (15504, 15606), True, 'import numpy as np\n'), ((15695, 16431), 'numpy.array', 'np.array', (['[[-tl[0], -tl[1], -1, 0, 0, 0, tl[0] * dst[0][0], tl[1] * dst[0][0], dst[0]\n [0]], [0, 0, 0, -tl[0], -tl[1], -1, tl[0] * dst[0][1], tl[1] * dst[0][1\n ], dst[0][1]], [-tr[0], -tr[1], -1, 0, 0, 0, tr[0] * dst[1][0], tr[1] *\n dst[1][0], dst[1][0]], [0, 0, 0, -tr[0], -tr[1], -1, tr[0] * dst[1][1],\n tr[1] * dst[1][1], dst[1][1]], [-br[0], -br[1], -1, 0, 0, 0, br[0] *\n dst[2][0], br[1] * dst[2][0], dst[2][0]], [0, 0, 0, -br[0], -br[1], -1,\n br[0] * dst[2][1], br[1] * dst[2][1], dst[2][1]], [-bl[0], -bl[1], -1, \n 0, 0, 0, bl[0] * dst[3][0], bl[1] * dst[3][0], dst[3][0]], [0, 0, 0, -\n bl[0], -bl[1], -1, bl[0] * dst[3][1], bl[1] * dst[3][1], dst[3][1]], [0,\n 0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""float32"""'}), "([[-tl[0], -tl[1], -1, 0, 0, 0, tl[0] * dst[0][0], tl[1] * dst[0][0\n ], dst[0][0]], [0, 0, 0, -tl[0], -tl[1], -1, tl[0] * dst[0][1], tl[1] *\n 
dst[0][1], dst[0][1]], [-tr[0], -tr[1], -1, 0, 0, 0, tr[0] * dst[1][0],\n tr[1] * dst[1][0], dst[1][0]], [0, 0, 0, -tr[0], -tr[1], -1, tr[0] *\n dst[1][1], tr[1] * dst[1][1], dst[1][1]], [-br[0], -br[1], -1, 0, 0, 0,\n br[0] * dst[2][0], br[1] * dst[2][0], dst[2][0]], [0, 0, 0, -br[0], -br\n [1], -1, br[0] * dst[2][1], br[1] * dst[2][1], dst[2][1]], [-bl[0], -bl\n [1], -1, 0, 0, 0, bl[0] * dst[3][0], bl[1] * dst[3][0], dst[3][0]], [0,\n 0, 0, -bl[0], -bl[1], -1, bl[0] * dst[3][1], bl[1] * dst[3][1], dst[3][\n 1]], [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='float32')\n", (15703, 16431), True, 'import numpy as np\n'), ((16899, 16971), 'numpy.array', 'np.array', (['[[0], [0], [0], [0], [0], [0], [0], [0], [1]]'], {'dtype': '"""float32"""'}), "([[0], [0], [0], [0], [0], [0], [0], [0], [1]], dtype='float32')\n", (16907, 16971), True, 'import numpy as np\n'), ((17057, 17073), 'numpy.linalg.inv', 'np.linalg.inv', (['P'], {}), '(P)\n', (17070, 17073), True, 'import numpy as np\n'), ((17106, 17197), 'numpy.array', 'np.array', (['[[H[0], H[1], H[2]], [H[3], H[4], H[5]], [H[6], H[7], H[8]]]'], {'dtype': '"""float32"""'}), "([[H[0], H[1], H[2]], [H[3], H[4], H[5]], [H[6], H[7], H[8]]],\n dtype='float32')\n", (17114, 17197), True, 'import numpy as np\n'), ((17338, 17457), 'numpy.array', 'np.array', (['[[H[0][0], H[1][0], H[2][0]], [H[3][0], H[4][0], H[5][0]], [H[6][0], H[7][0\n ], H[8][0]]]'], {'dtype': '"""float32"""'}), "([[H[0][0], H[1][0], H[2][0]], [H[3][0], H[4][0], H[5][0]], [H[6][0\n ], H[7][0], H[8][0]]], dtype='float32')\n", (17346, 17457), True, 'import numpy as np\n'), ((17598, 17653), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(resolution, resolution)'], {}), '(image, M, (resolution, resolution))\n', (17617, 17653), False, 'import cv2\n'), ((17769, 17787), 'numpy.empty_like', 'np.empty_like', (['tex'], {}), '(tex)\n', (17782, 17787), True, 'import numpy as np\n'), ((18226, 18253), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 
256))\n', (18236, 18253), False, 'import cv2\n'), ((18777, 18803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (18787, 18803), True, 'import matplotlib.pyplot as plt\n'), ((18949, 18985), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""FaceRotation_Demo.png"""'], {}), "('FaceRotation_Demo.png')\n", (18960, 18985), True, 'import matplotlib.pyplot as plt\n'), ((18990, 19000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18998, 19000), True, 'import matplotlib.pyplot as plt\n'), ((19200, 19262), 'os.path.join', 'os.path.join', (['working_folder', '"""result/store"""', '"""image00013.jpg"""'], {}), "(working_folder, 'result/store', 'image00013.jpg')\n", (19212, 19262), False, 'import os\n'), ((19633, 19707), 'os.path.join', 'os.path.join', (['working_folder', '"""test.synface/Model_face_contour_trimed.mat"""'], {}), "(working_folder, 'test.synface/Model_face_contour_trimed.mat')\n", (19645, 19707), False, 'import os\n'), ((19739, 19809), 'os.path.join', 'os.path.join', (['working_folder', '"""test.synface/Model_fullmod_contour.mat"""'], {}), "(working_folder, 'test.synface/Model_fullmod_contour.mat')\n", (19751, 19809), False, 'import os\n'), ((19841, 19905), 'os.path.join', 'os.path.join', (['working_folder', '"""test.synface/Model_tri_mouth.mat"""'], {}), "(working_folder, 'test.synface/Model_tri_mouth.mat')\n", (19853, 19905), False, 'import os\n'), ((19937, 20001), 'os.path.join', 'os.path.join', (['working_folder', '"""test.synface/Model_keypoints.mat"""'], {}), "(working_folder, 'test.synface/Model_keypoints.mat')\n", (19949, 20001), False, 'import os\n'), ((20311, 20381), 'os.path.join', 'os.path.join', (['working_folder', '"""train_log/_checkpoint_epoch_80.pth.tar"""'], {}), "(working_folder, 'train_log/_checkpoint_epoch_80.pth.tar')\n", (20323, 20381), False, 'import os\n'), ((20417, 20453), 'os.path.join', 'os.path.join', (['working_folder', '"""data"""'], {}), "(working_folder, 'data')\n", 
(20429, 20453), False, 'import os\n'), ((20480, 20550), 'os.path.join', 'os.path.join', (['working_folder', '"""data/processing/Data/UV/uv_kpt_ind.txt"""'], {}), "(working_folder, 'data/processing/Data/UV/uv_kpt_ind.txt')\n", (20492, 20550), False, 'import os\n'), ((20586, 20654), 'os.path.join', 'os.path.join', (['working_folder', '"""data/processing/Data/UV/face_ind.txt"""'], {}), "(working_folder, 'data/processing/Data/UV/face_ind.txt')\n", (20598, 20654), False, 'import os\n'), ((20690, 20759), 'os.path.join', 'os.path.join', (['working_folder', '"""data/processing/Data/UV/triangles.txt"""'], {}), "(working_folder, 'data/processing/Data/UV/triangles.txt')\n", (20702, 20759), False, 'import os\n'), ((20795, 20873), 'os.path.join', 'os.path.join', (['working_folder', '"""data/processing/Data/UV/canonical_vertices.npy"""'], {}), "(working_folder, 'data/processing/Data/UV/canonical_vertices.npy')\n", (20807, 20873), False, 'import os\n'), ((20900, 20942), 'os.path.join', 'os.path.join', (['working_folder', '"""result/usr"""'], {}), "(working_folder, 'result/usr')\n", (20912, 20942), False, 'import os\n'), ((20978, 21048), 'os.path.join', 'os.path.join', (['working_folder', '"""data/processing/Data/UV/uv_kpt_ind.txt"""'], {}), "(working_folder, 'data/processing/Data/UV/uv_kpt_ind.txt')\n", (20990, 21048), False, 'import os\n'), ((22689, 22727), 'numpy.reshape', 'np.reshape', (['pos', '[resolution ** 2, -1]'], {}), '(pos, [resolution ** 2, -1])\n', (22699, 22727), True, 'import numpy as np\n'), ((24662, 24749), 'os.path.join', 'os.path.join', (['"""/home/viet/Projects/Pycharm/SPRNet/result/usr"""', '"""image1_texture.jpg"""'], {}), "('/home/viet/Projects/Pycharm/SPRNet/result/usr',\n 'image1_texture.jpg')\n", (24674, 24749), False, 'import os\n'), ((24776, 24867), 'os.path.join', 'os.path.join', (['"""/home/viet/Projects/Pycharm/SPRNet/result/usr"""', '"""image1_misstexture.jpg"""'], {}), "('/home/viet/Projects/Pycharm/SPRNet/result/usr',\n 
'image1_misstexture.jpg')\n", (24788, 24867), False, 'import os\n'), ((3240, 3256), 'numpy.zeros', 'np.zeros', (['[h, w]'], {}), '([h, w])\n', (3248, 3256), True, 'import numpy as np\n'), ((3367, 3399), 'numpy.zeros', 'np.zeros', (['[h, w]'], {'dtype': 'np.int32'}), '([h, w], dtype=np.int32)\n', (3375, 3399), True, 'import numpy as np\n'), ((6144, 6160), 'numpy.zeros', 'np.zeros', (['[h, w]'], {}), '([h, w])\n', (6152, 6160), True, 'import numpy as np\n'), ((7825, 7838), 'math.asin', 'asin', (['R[2, 0]'], {}), '(R[2, 0])\n', (7829, 7838), False, 'from math import cos, sin, atan2, asin\n'), ((9585, 9603), 'numpy.linalg.norm', 'np.linalg.norm', (['R1'], {}), '(R1)\n', (9599, 9603), True, 'import numpy as np\n'), ((9618, 9636), 'numpy.linalg.norm', 'np.linalg.norm', (['R2'], {}), '(R2)\n', (9632, 9636), True, 'import numpy as np\n'), ((9805, 9827), 'numpy.copy', 'np.copy', (['points_static'], {}), '(points_static)\n', (9812, 9827), True, 'import numpy as np\n'), ((9839, 9867), 'numpy.copy', 'np.copy', (['points_to_transform'], {}), '(points_to_transform)\n', (9846, 9867), True, 'import numpy as np\n'), ((10158, 10174), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (10171, 10174), True, 'import numpy as np\n'), ((10696, 10708), 'numpy.copy', 'np.copy', (['vts'], {}), '(vts)\n', (10703, 10708), True, 'import numpy as np\n'), ((13608, 13663), 'numpy.ones', 'np.ones', (['(srcIm.shape[0], srcIm.shape[1])'], {'dtype': 'np.int'}), '((srcIm.shape[0], srcIm.shape[1]), dtype=np.int)\n', (13615, 13663), True, 'import numpy as np\n'), ((18924, 18944), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show_img'], {}), '(show_img)\n', (18934, 18944), True, 'import matplotlib.pyplot as plt\n'), ((19153, 19164), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19162, 19164), False, 'import os\n'), ((21233, 21269), 'numpy.loadtxt', 'np.loadtxt', (["FLAGS['uv_kpt_ind_path']"], {}), "(FLAGS['uv_kpt_ind_path'])\n", (21243, 21269), True, 'import numpy as np\n'), ((21824, 21858), 
'numpy.loadtxt', 'np.loadtxt', (["FLAGS['face_ind_path']"], {}), "(FLAGS['face_ind_path'])\n", (21834, 21858), True, 'import numpy as np\n'), ((21898, 21933), 'numpy.loadtxt', 'np.loadtxt', (["FLAGS['triangles_path']"], {}), "(FLAGS['triangles_path'])\n", (21908, 21933), True, 'import numpy as np\n'), ((23930, 23968), 'numpy.sum', 'np.sum', (['rot_img'], {'axis': '(2)', 'keepdims': '(True)'}), '(rot_img, axis=2, keepdims=True)\n', (23936, 23968), True, 'import numpy as np\n'), ((9524, 9542), 'numpy.linalg.norm', 'np.linalg.norm', (['R1'], {}), '(R1)\n', (9538, 9542), True, 'import numpy as np\n'), ((9545, 9563), 'numpy.linalg.norm', 'np.linalg.norm', (['R2'], {}), '(R2)\n', (9559, 9563), True, 'import numpy as np\n'), ((10414, 10423), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10420, 10423), True, 'import numpy as np\n'), ((10719, 10737), 'numpy.mean', 'np.mean', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (10726, 10737), True, 'import numpy as np\n'), ((10777, 10793), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (10790, 10793), True, 'import numpy as np\n'), ((12438, 12464), 'numpy.dot', 'np.dot', (['affine', 'homogCoord'], {}), '(affine, homogCoord)\n', (12444, 12464), True, 'import numpy as np\n'), ((14381, 14408), 'numpy.linalg.inv', 'np.linalg.inv', (['shapeVertPos'], {}), '(shapeVertPos)\n', (14394, 14408), True, 'import numpy as np\n'), ((19353, 19415), 'os.path.join', 'os.path.join', (['working_folder', '"""result/store"""', '"""image00013.npy"""'], {}), "(working_folder, 'result/store', 'image00013.npy')\n", (19365, 19415), False, 'import os\n'), ((20036, 20094), 'os.path.join', 'os.path.join', (['working_folder', '"""test.synface/triangles.txt"""'], {}), "(working_folder, 'test.synface/triangles.txt')\n", (20048, 20094), False, 'import os\n'), ((20156, 20208), 'numpy.asarray', 'np.asarray', (["M_tri_mouth['tri_mouth']"], {'dtype': 'np.int32'}), "(M_tri_mouth['tri_mouth'], dtype=np.int32)\n", (20166, 20208), True, 'import numpy as np\n'), 
((7867, 7873), 'math.cos', 'cos', (['x'], {}), '(x)\n', (7870, 7873), False, 'from math import cos, sin, atan2, asin\n'), ((7885, 7891), 'math.cos', 'cos', (['x'], {}), '(x)\n', (7888, 7891), False, 'from math import cos, sin, atan2, asin\n'), ((7921, 7927), 'math.cos', 'cos', (['x'], {}), '(x)\n', (7924, 7927), False, 'from math import cos, sin, atan2, asin\n'), ((7939, 7945), 'math.cos', 'cos', (['x'], {}), '(x)\n', (7942, 7945), False, 'from math import cos, sin, atan2, asin\n'), ((8078, 8101), 'math.atan2', 'atan2', (['R[0, 1]', 'R[0, 2]'], {}), '(R[0, 1], R[0, 2])\n', (8083, 8101), False, 'from math import cos, sin, atan2, asin\n'), ((8164, 8189), 'math.atan2', 'atan2', (['(-R[0, 1])', '(-R[0, 2])'], {}), '(-R[0, 1], -R[0, 2])\n', (8169, 8189), False, 'from math import cos, sin, atan2, asin\n'), ((8749, 8755), 'math.cos', 'cos', (['x'], {}), '(x)\n', (8752, 8755), False, 'from math import cos, sin, atan2, asin\n'), ((8792, 8798), 'math.sin', 'sin', (['x'], {}), '(x)\n', (8795, 8798), False, 'from math import cos, sin, atan2, asin\n'), ((8801, 8807), 'math.cos', 'cos', (['x'], {}), '(x)\n', (8804, 8807), False, 'from math import cos, sin, atan2, asin\n'), ((8879, 8885), 'math.cos', 'cos', (['y'], {}), '(y)\n', (8882, 8885), False, 'from math import cos, sin, atan2, asin\n'), ((8890, 8896), 'math.sin', 'sin', (['y'], {}), '(y)\n', (8893, 8896), False, 'from math import cos, sin, atan2, asin\n'), ((8974, 8980), 'math.cos', 'cos', (['y'], {}), '(y)\n', (8977, 8980), False, 'from math import cos, sin, atan2, asin\n'), ((9051, 9057), 'math.cos', 'cos', (['z'], {}), '(z)\n', (9054, 9057), False, 'from math import cos, sin, atan2, asin\n'), ((9093, 9099), 'math.sin', 'sin', (['z'], {}), '(z)\n', (9096, 9099), False, 'from math import cos, sin, atan2, asin\n'), ((9102, 9108), 'math.cos', 'cos', (['z'], {}), '(z)\n', (9105, 9108), False, 'from math import cos, sin, atan2, asin\n'), ((9881, 9900), 'numpy.mean', 'np.mean', (['p0'], {'axis': '(1)'}), '(p0, axis=1)\n', 
(9888, 9900), True, 'import numpy as np\n'), ((9925, 9944), 'numpy.mean', 'np.mean', (['p1'], {'axis': '(1)'}), '(p1, axis=1)\n', (9932, 9944), True, 'import numpy as np\n'), ((10232, 10259), 'numpy.linalg.norm', 'np.linalg.norm', (['p0c'], {'axis': '(0)'}), '(p0c, axis=0)\n', (10246, 10259), True, 'import numpy as np\n'), ((10296, 10323), 'numpy.linalg.norm', 'np.linalg.norm', (['p1c'], {'axis': '(0)'}), '(p1c, axis=0)\n', (10310, 10323), True, 'import numpy as np\n'), ((3694, 3718), 'numpy.min', 'np.min', (['vertices[tri, 0]'], {}), '(vertices[tri, 0])\n', (3700, 3718), True, 'import numpy as np\n'), ((3757, 3781), 'numpy.max', 'np.max', (['vertices[tri, 0]'], {}), '(vertices[tri, 0])\n', (3763, 3781), True, 'import numpy as np\n'), ((3822, 3846), 'numpy.min', 'np.min', (['vertices[tri, 1]'], {}), '(vertices[tri, 1])\n', (3828, 3846), True, 'import numpy as np\n'), ((3885, 3909), 'numpy.max', 'np.max', (['vertices[tri, 1]'], {}), '(vertices[tri, 1])\n', (3891, 3909), True, 'import numpy as np\n'), ((4548, 4570), 'numpy.array', 'np.array', (['[w0, w1, w2]'], {}), '([w0, w1, w2])\n', (4556, 4570), True, 'import numpy as np\n'), ((6326, 6350), 'numpy.min', 'np.min', (['vertices[tri, 0]'], {}), '(vertices[tri, 0])\n', (6332, 6350), True, 'import numpy as np\n'), ((6389, 6413), 'numpy.max', 'np.max', (['vertices[tri, 0]'], {}), '(vertices[tri, 0])\n', (6395, 6413), True, 'import numpy as np\n'), ((6454, 6478), 'numpy.min', 'np.min', (['vertices[tri, 1]'], {}), '(vertices[tri, 1])\n', (6460, 6478), True, 'import numpy as np\n'), ((6517, 6541), 'numpy.max', 'np.max', (['vertices[tri, 1]'], {}), '(vertices[tri, 1])\n', (6523, 6541), True, 'import numpy as np\n'), ((8759, 8765), 'math.sin', 'sin', (['x'], {}), '(x)\n', (8762, 8765), False, 'from math import cos, sin, atan2, asin\n'), ((8963, 8969), 'math.sin', 'sin', (['y'], {}), '(y)\n', (8966, 8969), False, 'from math import cos, sin, atan2, asin\n'), ((9060, 9066), 'math.sin', 'sin', (['z'], {}), '(z)\n', (9063, 9066), 
False, 'from math import cos, sin, atan2, asin\n'), ((10376, 10385), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10382, 10385), True, 'import numpy as np\n'), ((14234, 14249), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (14241, 14249), True, 'import numpy as np\n'), ((14315, 14330), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (14322, 14330), True, 'import numpy as np\n')] |
from flare.framework.algorithm import Algorithm
from flare.common import common_functions as comf
from torch.distributions import Categorical
import torch
import torch.optim as optim
import numpy as np
from copy import deepcopy
class SimpleAC(Algorithm):
    """
    A simple Actor-Critic that has a feedforward policy network and
    a single discrete action.

    learn() requires keywords: "action", "reward", "v_value"
    """

    def __init__(self,
                 model,
                 hyperparas=None,
                 gpu_id=-1,
                 discount_factor=0.99):
        """
        Args:
            model: network exposing value() and policy(); its parameters are
                optimized with RMSprop.
            hyperparas: dict of hyperparameters; must contain "lr".
                Defaults to dict(lr=1e-4).
            gpu_id: GPU index forwarded to the base Algorithm (-1 for CPU).
            discount_factor: reward discount gamma.
        """
        # A mutable dict default would be shared across all instances;
        # build the default per call instead.
        if hyperparas is None:
            hyperparas = dict(lr=1e-4)
        super(SimpleAC, self).__init__(model, hyperparas, gpu_id)
        self.optim = optim.RMSprop(model.parameters(), lr=hyperparas["lr"])
        self.discount_factor = discount_factor

    def learn(self, inputs, next_inputs, states, next_states, next_episode_end,
              actions, rewards):
        """Run one actor-critic gradient step and return the cost tensors."""
        self.optim.zero_grad()
        action = actions["action"]
        reward = rewards["reward"]

        values = self.model.value(inputs, states)
        value = values["v_value"]
        with torch.no_grad():
            # Bootstrapped target; next_episode_end zeroes the value at
            # episode boundaries so terminal states contribute no future reward.
            next_values = self.model.value(next_inputs, next_states)
            next_value = next_values["v_value"] * next_episode_end[
                "next_episode_end"]
        assert value.size() == next_value.size()

        critic_value = reward + self.discount_factor * next_value
        td_error = (critic_value - value).squeeze(-1)
        value_cost = td_error**2

        dist, _ = self.model.policy(inputs, states)
        dist = dist["action"]
        assert isinstance(dist, Categorical)
        pg_cost = -dist.log_prob(action.squeeze(-1))
        # Policy-gradient term weighted by the detached TD error (advantage);
        # detaching keeps critic gradients out of the actor update.
        cost = value_cost + pg_cost * td_error.detach()
        avg_cost = cost.mean(0)
        avg_cost.backward()
        self.optim.step()

        return dict(avg_cost=avg_cost, cost=cost)

    def predict(self, inputs, states):
        return self._rl_predict(self.model, inputs, states)
class SimpleQ(Algorithm):
    """
    A simple Q-learning that has a feedforward policy network and a single discrete action.

    learn() requires keywords: "action", "reward", "q_value"
    """

    def __init__(self,
                 model,
                 hyperparas=None,
                 gpu_id=-1,
                 discount_factor=0.99,
                 exploration_end_batches=0,
                 exploration_end_rate=0.1,
                 update_ref_interval=100):
        """
        Args:
            model: Q network exposing value() and policy().
            hyperparas: dict of hyperparameters; must contain "lr".
                Defaults to dict(lr=1e-4).
            gpu_id: GPU index forwarded to the base Algorithm (-1 for CPU).
            discount_factor: reward discount gamma.
            exploration_end_batches: number of learn() batches over which the
                epsilon-greedy rate is annealed linearly from 1.0; 0 disables
                exploration entirely.
            exploration_end_rate: final exploration rate after annealing.
            update_ref_interval: sync the target (reference) network every
                this many batches.
        """
        # A mutable dict default would be shared across all instances;
        # build the default per call instead.
        if hyperparas is None:
            hyperparas = dict(lr=1e-4)
        super(SimpleQ, self).__init__(model, hyperparas, gpu_id)
        self.discount_factor = discount_factor
        self.gpu_id = gpu_id
        assert update_ref_interval > 0
        self.update_ref_interval = update_ref_interval
        self.total_batches = 0
        ## create a reference (target) model for stable TD targets
        self.ref_model = deepcopy(model)
        ## setup exploration: anneal linearly from 1.0 down to exploration_end_rate
        if exploration_end_batches > 0:
            self.exploration_rate = 1.0
            self.exploration_end_rate = exploration_end_rate
            self.exploration_rate_delta \
                = (1 - exploration_end_rate) / exploration_end_batches
        else:
            self.exploration_rate = 0.0

        self.optim = optim.RMSprop(model.parameters(), lr=hyperparas["lr"])

    def predict(self, inputs, states):
        """
        Override the base predict() function to put the exploration rate in inputs
        """
        distributions, states = self.model.policy(inputs, states)
        actions = {}
        # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; items() is correct on both.
        for key, dist in distributions.items():
            assert isinstance(dist, Categorical)
            if np.random.uniform(0, 1) < self.exploration_rate:
                ## if to explore, we generate a uniform categorical distribution
                ## we don't have to normalize the probs because Categorical will do that inside
                dist = Categorical(torch.ones_like(dist.probs))
            actions[key] = dist.sample().unsqueeze(-1)
        return actions, states

    def learn(self, inputs, next_inputs, states, next_states, next_episode_end,
              actions, rewards):
        """Run one Q-learning gradient step and return the cost tensors."""
        self.optim.zero_grad()
        # Periodically sync the target network with the online network.
        if self.total_batches % self.update_ref_interval == 0:
            ## copy parameters from self.model to self.ref_model
            self.ref_model.load_state_dict(self.model.state_dict())
        self.total_batches += 1

        action = actions["action"]
        reward = rewards["reward"]

        values = self.model.value(inputs, states)
        q_value = values["q_value"]
        with torch.no_grad():
            # Target Q from the reference network; next_episode_end zeroes
            # the bootstrap value at episode boundaries.
            next_values = self.ref_model.value(next_inputs, next_states)
            next_q_value = next_values["q_value"] * next_episode_end[
                "next_episode_end"]
            next_value, _ = next_q_value.max(-1)
            next_value = next_value.unsqueeze(-1)
        assert q_value.size() == next_q_value.size()

        value = comf.idx_select(q_value, action)
        critic_value = reward + self.discount_factor * next_value
        td_error = (critic_value - value).squeeze(-1)
        cost = td_error**2

        avg_cost = cost.mean(0)
        avg_cost.backward()
        self.optim.step()

        if self.exploration_rate > 0:
            ## decrease the exploration rate by a small delta value
            self.exploration_rate = max(
                self.exploration_rate - self.exploration_rate_delta,
                self.exploration_end_rate)
        return dict(avg_cost=avg_cost, cost=cost)
| [
"torch.ones_like",
"copy.deepcopy",
"flare.common.common_functions.idx_select",
"numpy.random.uniform",
"torch.no_grad"
] | [((2781, 2796), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (2789, 2796), False, 'from copy import deepcopy\n'), ((4850, 4882), 'flare.common.common_functions.idx_select', 'comf.idx_select', (['q_value', 'action'], {}), '(q_value, action)\n', (4865, 4882), True, 'from flare.common import common_functions as comf\n'), ((1098, 1113), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1111, 1113), False, 'import torch\n'), ((4484, 4499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4497, 4499), False, 'import torch\n'), ((3561, 3584), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3578, 3584), True, 'import numpy as np\n'), ((3822, 3849), 'torch.ones_like', 'torch.ones_like', (['dist.probs'], {}), '(dist.probs)\n', (3837, 3849), False, 'import torch\n')] |
"""File import/export functions.
"""
import copy
import datetime
import math
import re
from typing import List, Optional, TextIO, Tuple, Union
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
import click
import numpy as np
import svgpathtools as svg
import svgwrite
from svgwrite.extensions import Inkscape
from .config import CONFIG_MANAGER, PaperConfig, PlotterConfig
from .model import LineCollection, VectorData, as_vector
from .utils import UNITS, convert_length
__all__ = ["read_svg", "read_multilayer_svg", "write_svg", "write_hpgl"]
_COLORS = [
"#00f",
"#080",
"#f00",
"#0cc",
"#0f0",
"#c0c",
"#cc0",
"black",
]
def _calculate_page_size(
    root: Element,
) -> Tuple[Optional[float], Optional[float], float, float, float, float]:
    """Derive the page size and user-to-pixel transform from a SVG root element.

    When a ``viewBox`` is present, the ``width``/``height`` attributes (falling
    back to the view box dimensions) define the physical size, and scale/offset
    coefficients map view-box coordinates to pixels. Without a ``viewBox`` the
    identity transform is returned and the size is unknown.

    Args:
        root: SVG's root element

    Returns:
        tuple of width, height, scale X, scale Y, offset X, offset Y
    """
    if "viewBox" not in root.attrib:
        # no view box: unknown size, identity transform
        return None, None, 1, 1, 0, 0

    # A view box is defined so we must correctly scale from user coordinates
    # https://css-tricks.com/scale-svg/
    # TODO: we should honor the `preserveAspectRatio` attribute
    vb_min_x, vb_min_y, vb_width, vb_height = (
        float(s) for s in root.attrib["viewBox"].split()
    )
    width = convert_length(root.attrib.get("width", vb_width))
    height = convert_length(root.attrib.get("height", vb_height))
    return width, height, width / vb_width, height / vb_height, -vb_min_x, -vb_min_y
def _convert_flattened_paths(
    paths: List,
    quantization: float,
    scale_x: float,
    scale_y: float,
    offset_x: float,
    offset_y: float,
    simplify: bool,
) -> "LineCollection":
    """Convert a list of FlattenedPaths to a :class:`LineCollection`.
    Args:
        paths: list of FlattenedPaths
        quantization: maximum length of linear elements to approximate curve paths
        scale_x, scale_y: scale factor to apply
        offset_x, offset_y: offset to apply
        simplify: should Shapely's simplify be run
    Returns:
        new :class:`LineCollection` instance containing the converted geometries
    """
    lc = LineCollection()
    for result in paths:
        # Here we load the sub-part of the path element. If such sub-parts are connected,
        # we merge them in a single line (e.g. line string, etc.). If there are disconnection
        # in the path (e.g. multiple "M" commands), we create several lines
        # Points are held as complex numbers (x + 1j*y), the svgpathtools convention.
        sub_paths: List[List[complex]] = []
        for elem in result:
            if isinstance(elem, svg.Line):
                # straight segment: keep only the two endpoints
                coords = [elem.start, elem.end]
            else:
                # This is a curved element that we approximate with small segments
                # (enough segments so that each is no longer than `quantization`)
                step = int(math.ceil(elem.length() / quantization))
                coords = [elem.start]
                coords.extend(elem.point((i + 1) / step) for i in range(step - 1))
                coords.append(elem.end)
            # merge to last sub path if first coordinates match
            if sub_paths:
                if sub_paths[-1][-1] == coords[0]:
                    sub_paths[-1].extend(coords[1:])
                else:
                    sub_paths.append(coords)
            else:
                sub_paths.append(coords)
        for sub_path in sub_paths:
            path = np.array(sub_path)
            # transform: translate first, then scale — this matches the viewBox
            # mapping scale * (coord - viewbox_min) computed in _calculate_page_size
            path += offset_x + 1j * offset_y
            path.real *= scale_x
            path.imag *= scale_y
            lc.append(path)
    if simplify:
        # simplification goes through a Shapely MultiLineString round-trip,
        # using the quantization length as tolerance
        mls = lc.as_mls()
        lc = LineCollection(mls.simplify(tolerance=quantization))
    return lc
def read_svg(
    filename: str, quantization: float, simplify: bool = False, return_size: bool = False
) -> Union["LineCollection", Tuple["LineCollection", float, float]]:
    """Read a SVG file and return its content as a :class:`LineCollection` instance.
    All curved geometries are chopped in segments no longer than the value of *quantization*.
    Optionally, the geometries are simplified using Shapely, using the value of *quantization*
    as tolerance.
    Args:
        filename: path of the SVG file
        quantization: maximum size of segment used to approximate curved geometries
        simplify: run Shapely's simplify on loaded geometry
        return_size: if True, return a size 3 Tuple containing the geometries and the SVG
            width and height
    Returns:
        imported geometries, and optionally width and height of the SVG
    """
    doc = svg.Document(filename)
    width, height, scale_x, scale_y, offset_x, offset_y = _calculate_page_size(doc.root)
    lc = _convert_flattened_paths(
        doc.paths(), quantization, scale_x, scale_y, offset_x, offset_y, simplify,
    )

    if return_size:
        if width is None or height is None:
            # BUG FIX: this previously read `lc.bounds() or 0, 0, 0, 0`, which Python
            # parses as the 4-tuple `(lc.bounds() or 0, 0, 0, 0)`, so width/height were
            # always 0. The fallback tuple must be parenthesized so that `or` applies
            # to the whole of it.
            _, _, width, height = lc.bounds() or (0, 0, 0, 0)
        return lc, width, height
    else:
        return lc
def read_multilayer_svg(
    filename: str, quantization: float, simplify: bool = False, return_size: bool = False
) -> Union["VectorData", Tuple["VectorData", float, float]]:
    """Read a multilayer SVG file and return its content as a :class:`VectorData` instance
    retaining the SVG's layer structure.
    Each top-level group is considered a layer. All non-group, top-level elements are imported
    in layer 1.
    Groups are matched to layer ID according their `inkscape:label` attribute, their `id`
    attribute or their appearing order, in that order of priority. Labels are stripped of
    non-numeric characters and the remaining is used as layer ID. Lacking numeric characters,
    the appearing order is used. If the label is 0, its changed to 1.
    All curved geometries are chopped in segments no longer than the value of *quantization*.
    Optionally, the geometries are simplified using Shapely, using the value of *quantization*
    as tolerance.
    Args:
        filename: path of the SVG file
        quantization: maximum size of segment used to approximate curved geometries
        simplify: run Shapely's simplify on loaded geometry
        return_size: if True, return a size 3 Tuple containing the geometries and the SVG
            width and height
    Returns:
        imported geometries, and optionally width and height of the SVG
    """
    doc = svg.Document(filename)

    width, height, scale_x, scale_y, offset_x, offset_y = _calculate_page_size(doc.root)

    vector_data = VectorData()

    # non-group top level elements are loaded in layer 1
    top_level_elements = doc.paths(group_filter=lambda x: x is doc.root)
    if top_level_elements:
        vector_data.add(
            _convert_flattened_paths(
                top_level_elements,
                quantization,
                scale_x,
                scale_y,
                offset_x,
                offset_y,
                simplify,
            ),
            1,
        )

    for i, g in enumerate(doc.root.iterfind("svg:g", svg.SVG_NAMESPACE)):
        # compute a decent layer ID: inkscape:label first, then id, then order
        lid_str = re.sub(
            "[^0-9]", "", g.get("{http://www.inkscape.org/namespaces/inkscape}label") or ""
        )
        if not lid_str:
            lid_str = re.sub("[^0-9]", "", g.get("id") or "")
        if lid_str:
            lid = int(lid_str)
            if lid == 0:
                # layer 0 is not allowed, remap to 1
                lid = 1
        else:
            lid = i + 1

        vector_data.add(
            _convert_flattened_paths(
                doc.paths_from_group(g, g),
                quantization,
                scale_x,
                scale_y,
                offset_x,
                offset_y,
                simplify,
            ),
            lid,
        )

    if return_size:
        if width is None or height is None:
            # BUG FIX: this previously read `vector_data.bounds() or 0, 0, 0, 0`, which
            # Python parses as the 4-tuple `(vector_data.bounds() or 0, 0, 0, 0)`, so
            # width/height were always 0. The fallback tuple must be parenthesized.
            _, _, width, height = vector_data.bounds() or (0, 0, 0, 0)
        return vector_data, width, height
    else:
        return vector_data
def _line_to_path(dwg: svgwrite.Drawing, lines: Union[np.ndarray, LineCollection]):
    """Convert line geometry into a single SVG path element.

    Accepts either a single line (ndarray of complex) or a
    :py:class:`LineCollection`; each line becomes one `M ... L ...` run in the
    resulting compound path, closed with `Z` when its endpoints coincide.

    Args:
        dwg: drawing used to create the path element
        lines: line(s) to convert to path

    Returns:
        (svgwrite element): path element
    """
    if isinstance(lines, np.ndarray):
        lines = [lines]

    def encode(line: np.ndarray) -> str:
        # a line is closed when it loops back on itself (and is not degenerate)
        closed = len(line) > 2 and line[0] == line[-1]
        pts = line[:-1] if closed else line
        body = " L".join(f"{x},{y}" for x, y in as_vector(pts))
        return "M" + body + (" Z" if closed else "")

    return dwg.path(" ".join(encode(line) for line in lines))
def write_svg(
    output: TextIO,
    vector_data: VectorData,
    page_format: Tuple[float, float] = (0.0, 0.0),
    center: bool = False,
    source_string: str = "",
    single_path: bool = False,
    layer_label_format: str = "%d",
    show_pen_up: bool = False,
    color_mode: str = "none",
) -> None:
    """Create a SVG from a :py:class:`VectorData` instance.
    If no page format is provided (or (0, 0) is passed), the SVG generated has bounds tightly
    fitted around the geometries. Otherwise the provided size (in pixel) is used.
    By default, no translation is applied on the geometry. If `center=True`, geometries are
    moved to the center of the page.
    No scaling or rotation is applied to geometries.
    Layers are named after `layer_label_format`, which may contain a C-style format specifier
    such as `%d` which will be replaced by the layer number.
    If `single_path=True`, a single compound path is written per layer. Otherwise, each path
    is exported individually.
    For previsualisation purposes, pen-up trajectories can be added to the SVG and path can
    be colored individually (``color_mode="path"``) or layer-by-layer (``color_mode="layer"``).
    Args:
        output: text-mode IO stream where SVG code will be written
        vector_data: geometries to be written
        page_format: page (width, height) tuple in pixel, or (0, 0) for tight fit
        center: center geometries on page before export
        source_string: value of the `source` metadata
        single_path: export geometries as a single compound path instead of multiple
            individual paths
        layer_label_format: format string for layer label naming
        show_pen_up: add paths for the pen-up trajectories
        color_mode: "none" (no formatting), "layer" (one color per layer), "path" (one color
            per path) (``color_mode="path"`` implies ``single_path=False``)
    """

    # compute bounds
    bounds = vector_data.bounds()
    if bounds is None:
        # empty geometry, we provide fake bounds
        bounds = (0, 0, 1, 1)

    # (0, 0) page format means tight fit around the geometries
    tight = page_format == (0.0, 0.0)
    if not tight:
        size = page_format
    else:
        size = (bounds[2] - bounds[0], bounds[3] - bounds[1])

    if center:
        # work on a copy so the caller's VectorData is left untouched
        corrected_vector_data = copy.deepcopy(vector_data)
        corrected_vector_data.translate(
            (size[0] - (bounds[2] - bounds[0])) / 2.0 - bounds[0],
            (size[1] - (bounds[3] - bounds[1])) / 2.0 - bounds[1],
        )
    elif tight:
        # tight fit: shift geometries so their bounding box starts at the origin
        corrected_vector_data = copy.deepcopy(vector_data)
        corrected_vector_data.translate(-bounds[0], -bounds[1])
    else:
        corrected_vector_data = vector_data

    # output SVG
    # physical size is expressed in cm while the viewBox stays in pixels
    size_cm = tuple(f"{round(s / UNITS['cm'], 8)}cm" for s in size)
    dwg = svgwrite.Drawing(size=size_cm, profile="tiny", debug=False)
    inkscape = Inkscape(dwg)
    dwg.attribs.update(
        {
            "viewBox": f"0 0 {size[0]} {size[1]}",
            "xmlns:dc": "http://purl.org/dc/elements/1.1/",
            "xmlns:cc": "http://creativecommons.org/ns#",
            "xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        }
    )

    # add metadata (Dublin Core format/source/date)
    metadata = ElementTree.Element("rdf:RDF")
    work = ElementTree.SubElement(metadata, "cc:Work")
    fmt = ElementTree.SubElement(work, "dc:format")
    fmt.text = "image/svg+xml"
    source = ElementTree.SubElement(work, "dc:source")
    source.text = source_string
    date = ElementTree.SubElement(work, "dc:date")
    date.text = datetime.datetime.now().isoformat()
    dwg.set_metadata(metadata)

    # color index shared across layers so "path" mode cycles through all layers
    color_idx = 0
    if show_pen_up:
        # pen-up trajectories go in a dedicated semi-transparent Inkscape layer
        group = inkscape.layer(label="% pen up trajectories")
        group.attribs["fill"] = "none"
        group.attribs["stroke"] = "black"
        group.attribs["style"] = "display:inline; stroke-opacity: 50%; stroke-width: 0.5"
        group.attribs["id"] = "pen_up_trajectories"

        for layer in corrected_vector_data.layers.values():
            group.add(_line_to_path(dwg, layer.pen_up_trajectories()))

        dwg.add(group)

    for layer_id in sorted(corrected_vector_data.layers.keys()):
        layer = corrected_vector_data.layers[layer_id]

        group = inkscape.layer(label=str(layer_label_format % layer_id))
        group.attribs["fill"] = "none"
        if color_mode == "layer":
            group.attribs["stroke"] = _COLORS[color_idx % len(_COLORS)]
            color_idx += 1
        else:
            group.attribs["stroke"] = "black"
        group.attribs["style"] = "display:inline"
        group.attribs["id"] = f"layer{layer_id}"

        if single_path and color_mode != "path":
            # one compound path for the whole layer
            group.add(_line_to_path(dwg, layer))
        else:
            # one path per line, individually colored in "path" mode
            for line in layer:
                path = _line_to_path(dwg, line)

                if color_mode == "path":
                    path.attribs["stroke"] = _COLORS[color_idx % len(_COLORS)]
                    color_idx += 1
                group.add(path)

        dwg.add(group)

    dwg.write(output, pretty=True)
def _get_hpgl_config(
    device: Optional[str], page_format: str
) -> Tuple[PlotterConfig, PaperConfig]:
    """Resolve the plotter and paper configurations for a device/page-format pair.

    Falls back to the configured default HPGL device when ``device`` is None.

    Raises:
        ValueError: when no configuration exists for the device or the page format.
    """
    if device is None:
        # no explicit device: try the configured default
        device = CONFIG_MANAGER.get_command_config("write").get("default_hpgl_device", None)

    plotter_config = CONFIG_MANAGER.get_plotter_config(str(device))
    if plotter_config is None:
        raise ValueError(f"no configuration available for plotter '{device}'")

    paper_config = plotter_config.paper_config(page_format)
    if paper_config is not None:
        return plotter_config, paper_config

    raise ValueError(
        f"no configuration available for paper size '{page_format}' with plotter "
        f"'{device}'"
    )
def write_hpgl(
    output: TextIO,
    vector_data: VectorData,
    page_format: str,
    landscape: bool,
    center: bool,
    device: Optional[str],
    velocity: Optional[float],
    quiet: bool = False,
) -> None:
    """Create a HPGL file from the :class:`VectorData` instance.
    The ``device``/``page_format`` combination must be defined in the built-in or user-provided
    config files or an exception will be raised.
    By default, no translation is applied on the geometry. If `center=True`, geometries are
    moved to the center of the page.
    No scaling or rotation is applied to geometries.
    Args:
        output: text-mode IO stream where SVG code will be written
        vector_data: geometries to be written
        page_format: page format string (it must be configured for the selected device)
        landscape: if True, the geometries are generated in landscape orientation
        center: center geometries on page before export
        device: name of the device to use (the corresponding config must exists). If not
            provided, a default device must be configured, which will be used.
        velocity: if provided, a VS command will be generated with the corresponding value
        quiet: if True, do not print the plotter/paper info strings
    """

    # empty HPGL is acceptable there are no geometries to plot
    if vector_data.is_empty():
        return

    plotter_config, paper_config = _get_hpgl_config(device, page_format)
    if not quiet:
        if plotter_config.info:
            # use of echo instead of print needed for testability
            # https://github.com/pallets/click/issues/1678
            click.echo(plotter_config.info, err=True)
        if paper_config.info:
            click.echo(paper_config.info, err=True)

    # are plotter coordinate placed in landscape or portrait orientation?
    coords_landscape = paper_config.paper_size[0] > paper_config.paper_size[1]

    # vector data preprocessing:
    # - make a copy (the caller's data must not be modified)
    # - deal with orientation mismatch
    # - optionally center on paper
    # - convert to plotter units
    # - crop to plotter limits
    vector_data = copy.deepcopy(vector_data)

    if landscape != coords_landscape:
        # rotate 90° and shift back into the positive quadrant
        vector_data.rotate(-math.pi / 2)
        vector_data.translate(0, paper_config.paper_size[1])

    if paper_config.rotate_180:
        # 180° rotation implemented as a flip about both axes plus a translation
        vector_data.scale(-1, -1)
        vector_data.translate(*paper_config.paper_size)

    if center:
        bounds = vector_data.bounds()
        if bounds is not None:
            vector_data.translate(
                (paper_config.paper_size[0] - (bounds[2] - bounds[0])) / 2.0 - bounds[0],
                (paper_config.paper_size[1] - (bounds[3] - bounds[1])) / 2.0 - bounds[1],
            )

    vector_data.translate(-paper_config.origin_location[0], -paper_config.origin_location[1])
    unit_per_pixel = 1 / plotter_config.plotter_unit_length
    vector_data.scale(
        unit_per_pixel, -unit_per_pixel if paper_config.y_axis_up else unit_per_pixel
    )
    vector_data.crop(
        paper_config.x_range[0],
        paper_config.y_range[0],
        paper_config.x_range[1],
        paper_config.y_range[1],
    )

    # output HPGL
    def complex_to_str(p: complex) -> str:
        # HPGL coordinates are integer plotter units
        return f"{int(round(p.real))},{int(round(p.imag))}"

    output.write("IN;DF;")
    if velocity is not None:
        output.write(f"VS{velocity};")
    if paper_config.set_ps is not None:
        output.write(f"PS{int(paper_config.set_ps)};")

    for layer_id in sorted(vector_data.layers.keys()):
        # cycle through the available pens
        pen_id = 1 + (layer_id - 1) % plotter_config.pen_count
        output.write(f"SP{pen_id};")

        for line in vector_data.layers[layer_id]:
            if len(line) < 2:
                continue
            output.write(f"PU{complex_to_str(line[0])};")
            # FIX: `f"PD"` was an f-string without placeholders (ruff F541);
            # a plain literal emits the identical bytes
            output.write("PD")
            output.write(",".join(complex_to_str(p) for p in line[1:]))
            output.write(";")

    output.write(
        f"PU{paper_config.final_pu_params if paper_config.final_pu_params else ''};"
    )
    output.write("SP0;IN;\n")
| [
"svgwrite.Drawing",
"xml.etree.ElementTree.Element",
"numpy.array",
"datetime.datetime.now",
"svgwrite.extensions.Inkscape",
"click.echo",
"copy.deepcopy",
"xml.etree.ElementTree.SubElement",
"svgpathtools.Document"
] | [((4909, 4931), 'svgpathtools.Document', 'svg.Document', (['filename'], {}), '(filename)\n', (4921, 4931), True, 'import svgpathtools as svg\n'), ((6725, 6747), 'svgpathtools.Document', 'svg.Document', (['filename'], {}), '(filename)\n', (6737, 6747), True, 'import svgpathtools as svg\n'), ((11882, 11941), 'svgwrite.Drawing', 'svgwrite.Drawing', ([], {'size': 'size_cm', 'profile': '"""tiny"""', 'debug': '(False)'}), "(size=size_cm, profile='tiny', debug=False)\n", (11898, 11941), False, 'import svgwrite\n'), ((11957, 11970), 'svgwrite.extensions.Inkscape', 'Inkscape', (['dwg'], {}), '(dwg)\n', (11965, 11970), False, 'from svgwrite.extensions import Inkscape\n'), ((12297, 12327), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""rdf:RDF"""'], {}), "('rdf:RDF')\n", (12316, 12327), False, 'from xml.etree import ElementTree\n'), ((12339, 12382), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['metadata', '"""cc:Work"""'], {}), "(metadata, 'cc:Work')\n", (12361, 12382), False, 'from xml.etree import ElementTree\n'), ((12393, 12434), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['work', '"""dc:format"""'], {}), "(work, 'dc:format')\n", (12415, 12434), False, 'from xml.etree import ElementTree\n'), ((12479, 12520), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['work', '"""dc:source"""'], {}), "(work, 'dc:source')\n", (12501, 12520), False, 'from xml.etree import ElementTree\n'), ((12564, 12603), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['work', '"""dc:date"""'], {}), "(work, 'dc:date')\n", (12586, 12603), False, 'from xml.etree import ElementTree\n'), ((16974, 17000), 'copy.deepcopy', 'copy.deepcopy', (['vector_data'], {}), '(vector_data)\n', (16987, 17000), False, 'import copy\n'), ((11381, 11407), 'copy.deepcopy', 'copy.deepcopy', (['vector_data'], {}), '(vector_data)\n', (11394, 11407), False, 'import copy\n'), ((3715, 3733), 'numpy.array', 'np.array', (['sub_path'], {}), 
'(sub_path)\n', (3723, 3733), True, 'import numpy as np\n'), ((11641, 11667), 'copy.deepcopy', 'copy.deepcopy', (['vector_data'], {}), '(vector_data)\n', (11654, 11667), False, 'import copy\n'), ((12620, 12643), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12641, 12643), False, 'import datetime\n'), ((16486, 16527), 'click.echo', 'click.echo', (['plotter_config.info'], {'err': '(True)'}), '(plotter_config.info, err=True)\n', (16496, 16527), False, 'import click\n'), ((16570, 16609), 'click.echo', 'click.echo', (['paper_config.info'], {'err': '(True)'}), '(paper_config.info, err=True)\n', (16580, 16609), False, 'import click\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import CCA
from sklearn.metrics import confusion_matrix
import functools
def find_correlation_cca_method1(signal, reference_signals, n_components=2):
    r"""
    Perform canonical correlation analysis (CCA)
    Reference: https://github.com/aaravindravi/Brain-computer-interfaces/blob/master/notebook_12_class_cca.ipynb
    Args:
        signal : ndarray, shape (channel,time)
            Input signal in time domain
        reference_signals : ndarray, shape (len(flick_freq),2*num_harmonics,time)
            Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
        n_components : int, default: 2
            number of components to keep (for sklearn.cross_decomposition.CCA)
    Returns:
        result : array, size: len(flick_freq)
            Probability for each reference signals
    Dependencies:
        CCA : sklearn.cross_decomposition.CCA
        np : numpy package
    """
    cca = CCA(n_components)
    n_refs = reference_signals.shape[0]
    result = np.zeros(n_refs)
    for k in range(n_refs):
        x = signal.T
        y = np.squeeze(reference_signals[k, :, :]).T
        cca.fit(x, y)
        u, v = cca.transform(x, y)
        # score each template by the best per-component correlation
        comp_corrs = [
            np.corrcoef(u[:, c], v[:, c])[0, 1] for c in range(n_components)
        ]
        result[k] = np.max(comp_corrs)
    return result
def calculate_cca(dat_x, dat_y, time_axis=-2):
r"""
Calculate the Canonical Correlation Analysis (CCA).
This method calculates the canonical correlation coefficient and
corresponding weights which maximize a correlation coefficient
between linear combinations of the two specified multivariable
signals.
Reference: https://github.com/venthur/wyrm/blob/master/wyrm/processing.py
Reference: http://en.wikipedia.org/wiki/Canonical_correlation
Args:
dat_x : continuous Data object
these data should have the same length on the time axis.
dat_y : continuous Data object
these data should have the same length on the time axis.
time_axis : int, optional
the index of the time axis in ``dat_x`` and ``dat_y``.
Returns:
rho : float
the canonical correlation coefficient.
w_x, w_y : 1d array
the weights for mapping from the specified multivariable signals
to canonical variables.
Raises:
AssertionError :
If:
* ``dat_x`` and ``dat_y`` is not continuous Data object
* the length of ``dat_x`` and ``dat_y`` is different on the
``time_axis``
Dependencies:
functools : functools package
np : numpy package
"""
assert (len(dat_x.data.shape) == len(dat_y.data.shape) == 2 and
dat_x.data.shape[time_axis] == dat_y.data.shape[time_axis])
if time_axis == 0 or time_axis == -2:
x = dat_x.copy()
y = dat_y.copy()
else:
x = dat_x.T.copy()
y = dat_y.T.copy()
# calculate covariances and it's inverses
x -= x.mean(axis=0)
y -= y.mean(axis=0)
n = x.shape[0]
c_xx = np.dot(x.T, x) / n
c_yy = np.dot(y.T, y) / n
c_xy = np.dot(x.T, y) / n
c_yx = np.dot(y.T, x) / n
ic_xx = np.linalg.pinv(c_xx)
ic_yy = np.linalg.pinv(c_yy)
# calculate w_x
w, v = np.linalg.eig(functools.reduce(np.dot, [ic_xx, c_xy, ic_yy, c_yx]))
w_x = v[:, np.argmax(w)].real
w_x = w_x / np.sqrt(functools.reduce(np.dot, [w_x.T, c_xx, w_x]))
# calculate w_y
w, v = np.linalg.eig(functools.reduce(np.dot, [ic_yy, c_yx, ic_xx, c_xy]))
w_y = v[:, np.argmax(w)].real
w_y = w_y / np.sqrt(functools.reduce(np.dot, [w_y.T, c_yy, w_y]))
# calculate rho
rho = abs(functools.reduce(np.dot, [w_x.T, c_xy, w_y]))
return rho, w_x, w_y
def find_correlation_cca_method2(signal, reference_signals):
    r"""
    Perform canonical correlation analysis (CCA)
    Args:
        signal : ndarray, shape (channel,time)
            Input signal in time domain
        reference_signals : ndarray, shape (len(flick_freq),2*num_harmonics,time)
            Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
    Returns:
        result : array, size: len(flick_freq)
            Probability for each reference signals
    Dependencies:
        np : numpy package
        calculate_cca : function
    """
    # first element of calculate_cca's return tuple is the correlation rho
    rhos = [
        calculate_cca(signal.T, np.squeeze(reference_signals[k, :, :]).T)[0]
        for k in range(reference_signals.shape[0])
    ]
    return np.array(rhos)
def perform_cca(signal, reference_frequencies, labels=None):
    r"""
    Perform canonical correlation analysis (CCA)
    Args:
        signal : ndarray, shape (trial,channel,time) or (trial,channel,segment,time)
            Input signal in time domain
        reference_frequencies : ndarray, shape (len(flick_freq),2*num_harmonics,time)
            Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
        labels : ndarray shape (classes,)
            True labels of `signal`. Index of the classes must be match the sequence of `reference_frequencies`
    Returns:
        predicted_class : ndarray, size: (classes,)
            Predicted classes according to reference_frequencies
        accuracy : double
            If `labels` are given, `accuracy` denote classification accuracy
    Dependencies:
        confusion_matrix : sklearn.metrics.confusion_matrix
        find_correlation_cca_method1 : function
        find_correlation_cca_method2 : function
    """
    assert (len(signal.shape) == 3 or len(signal.shape) == 4), "signal shape must be 3 or 4 dimension"
    actual_class = []
    predicted_class = []
    accuracy = None
    for trial in range(0, signal.shape[0]):
        # 3-d input: one classification per trial
        if len(signal.shape) == 3:
            if labels is not None:
                actual_class.append(labels[trial])
            tmp_signal = signal[trial, :, :]
            result = find_correlation_cca_method2(tmp_signal, reference_frequencies)
            # predicted class = index of the best-correlated reference template
            predicted_class.append(np.argmax(result))
        # 4-d input: one classification per (trial, segment) pair; the trial's
        # true label is repeated for every segment
        if len(signal.shape) == 4:
            for segment in range(0, signal.shape[2]):
                if labels is not None:
                    actual_class.append(labels[trial])
                tmp_signal = signal[trial, :, segment, :]
                result = find_correlation_cca_method2(tmp_signal, reference_frequencies)
                predicted_class.append(np.argmax(result))
    actual_class = np.array(actual_class)
    predicted_class = np.array(predicted_class)
    if labels is not None:
        # creating a confusion matrix of true versus predicted classification labels
        c_mat = confusion_matrix(actual_class, predicted_class)
        # computing the accuracy from the confusion matrix
        # (diagonal = correct predictions, total = all predictions)
        accuracy = np.divide(np.trace(c_mat), np.sum(np.sum(c_mat)))
    return predicted_class, accuracy
| [
"numpy.trace",
"numpy.linalg.pinv",
"sklearn.cross_decomposition.CCA",
"functools.reduce",
"numpy.corrcoef",
"numpy.argmax",
"numpy.max",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.sum",
"sklearn.metrics.confusion_matrix"
] | [((1041, 1058), 'sklearn.cross_decomposition.CCA', 'CCA', (['n_components'], {}), '(n_components)\n', (1044, 1058), False, 'from sklearn.cross_decomposition import CCA\n'), ((1070, 1092), 'numpy.zeros', 'np.zeros', (['n_components'], {}), '(n_components)\n', (1078, 1092), True, 'import numpy as np\n'), ((1106, 1142), 'numpy.zeros', 'np.zeros', (['reference_signals.shape[0]'], {}), '(reference_signals.shape[0])\n', (1114, 1142), True, 'import numpy as np\n'), ((3439, 3459), 'numpy.linalg.pinv', 'np.linalg.pinv', (['c_xx'], {}), '(c_xx)\n', (3453, 3459), True, 'import numpy as np\n'), ((3472, 3492), 'numpy.linalg.pinv', 'np.linalg.pinv', (['c_yy'], {}), '(c_yy)\n', (3486, 3492), True, 'import numpy as np\n'), ((4631, 4667), 'numpy.zeros', 'np.zeros', (['reference_signals.shape[0]'], {}), '(reference_signals.shape[0])\n', (4639, 4667), True, 'import numpy as np\n'), ((6857, 6879), 'numpy.array', 'np.array', (['actual_class'], {}), '(actual_class)\n', (6865, 6879), True, 'import numpy as np\n'), ((6902, 6927), 'numpy.array', 'np.array', (['predicted_class'], {}), '(predicted_class)\n', (6910, 6927), True, 'import numpy as np\n'), ((1513, 1525), 'numpy.max', 'np.max', (['corr'], {}), '(corr)\n', (1519, 1525), True, 'import numpy as np\n'), ((3318, 3332), 'numpy.dot', 'np.dot', (['x.T', 'x'], {}), '(x.T, x)\n', (3324, 3332), True, 'import numpy as np\n'), ((3348, 3362), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (3354, 3362), True, 'import numpy as np\n'), ((3378, 3392), 'numpy.dot', 'np.dot', (['x.T', 'y'], {}), '(x.T, y)\n', (3384, 3392), True, 'import numpy as np\n'), ((3408, 3422), 'numpy.dot', 'np.dot', (['y.T', 'x'], {}), '(y.T, x)\n', (3414, 3422), True, 'import numpy as np\n'), ((3538, 3590), 'functools.reduce', 'functools.reduce', (['np.dot', '[ic_xx, c_xy, ic_yy, c_yx]'], {}), '(np.dot, [ic_xx, c_xy, ic_yy, c_yx])\n', (3554, 3590), False, 'import functools\n'), ((3741, 3793), 'functools.reduce', 'functools.reduce', (['np.dot', '[ic_yy, c_yx, 
ic_xx, c_xy]'], {}), '(np.dot, [ic_yy, c_yx, ic_xx, c_xy])\n', (3757, 3793), False, 'import functools\n'), ((3933, 3977), 'functools.reduce', 'functools.reduce', (['np.dot', '[w_x.T, c_xy, w_y]'], {}), '(np.dot, [w_x.T, c_xy, w_y])\n', (3949, 3977), False, 'import functools\n'), ((7057, 7104), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual_class', 'predicted_class'], {}), '(actual_class, predicted_class)\n', (7073, 7104), False, 'from sklearn.metrics import confusion_matrix\n'), ((1242, 1287), 'numpy.squeeze', 'np.squeeze', (['reference_signals[freq_idx, :, :]'], {}), '(reference_signals[freq_idx, :, :])\n', (1252, 1287), True, 'import numpy as np\n'), ((3650, 3694), 'functools.reduce', 'functools.reduce', (['np.dot', '[w_x.T, c_xx, w_x]'], {}), '(np.dot, [w_x.T, c_xx, w_x])\n', (3666, 3694), False, 'import functools\n'), ((3853, 3897), 'functools.reduce', 'functools.reduce', (['np.dot', '[w_y.T, c_yy, w_y]'], {}), '(np.dot, [w_y.T, c_yy, w_y])\n', (3869, 3897), False, 'import functools\n'), ((4742, 4787), 'numpy.squeeze', 'np.squeeze', (['reference_signals[freq_idx, :, :]'], {}), '(reference_signals[freq_idx, :, :])\n', (4752, 4787), True, 'import numpy as np\n'), ((7193, 7208), 'numpy.trace', 'np.trace', (['c_mat'], {}), '(c_mat)\n', (7201, 7208), True, 'import numpy as np\n'), ((1438, 1479), 'numpy.corrcoef', 'np.corrcoef', (['a[:, ind_val]', 'b[:, ind_val]'], {}), '(a[:, ind_val], b[:, ind_val])\n', (1449, 1479), True, 'import numpy as np\n'), ((3607, 3619), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (3616, 3619), True, 'import numpy as np\n'), ((3810, 3822), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (3819, 3822), True, 'import numpy as np\n'), ((6427, 6444), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (6436, 6444), True, 'import numpy as np\n'), ((7217, 7230), 'numpy.sum', 'np.sum', (['c_mat'], {}), '(c_mat)\n', (7223, 7230), True, 'import numpy as np\n'), ((6818, 6835), 'numpy.argmax', 'np.argmax', 
(['result'], {}), '(result)\n', (6827, 6835), True, 'import numpy as np\n')] |
from base.base_test import BaseTest
from tqdm import tqdm
import numpy as np
from utils.utils_plotting import *
import cv2
import gc
def accuracy(a, b):
    """Return the fraction of positions where `a` and `b` agree.

    Args:
        a, b: array-likes of equal length (e.g. predicted vs. true labels).

    Returns:
        float in [0, 1]: element-wise agreement ratio.
    """
    # np.mean replaces the previous builtin sum()/len() pair: single
    # vectorized pass instead of a slow Python-level reduction over the array
    return float(np.mean(np.equal(a, b)))
def print_alphas_conv(alp):
    # Iterate over the second axis of a 5-d attention tensor, slicing one step
    # at a time.
    # NOTE(review): `curr_im` is computed but never used, plotted, or returned —
    # the body looks truncated or unfinished. Confirm the intended output
    # (presumably some visualization of the attention maps) before relying on
    # this function.
    for i in range(np.shape(alp)[1]):
        curr_im = alp[:,i,:,:,:]
def normalize_im(im):
    """Linearly rescale an image to the full uint8 range [0, 255].

    Args:
        im: array-like image with numeric values.

    Returns:
        uint8 ndarray of the same shape, min mapped to 0 and max to 255.
    """
    im_new = np.asarray(im).astype(np.float64)
    minimum = im_new.min()
    maximum = im_new.max()
    span = maximum - minimum
    if span == 0:
        # BUG FIX: a constant image previously divided by zero, yielding NaNs
        # that corrupt the uint8 cast; map flat images to all-zero instead
        return np.zeros(im_new.shape, dtype=np.uint8)
    im_norm = 255 * (im_new - minimum) / span
    return im_norm.astype(np.uint8)
class ExampleTesterPlotAttention(BaseTest):
    """Tester that evaluates a trained attention model on a test set and plots
    its attention maps.

    Runs the model over all test batches, fuses the FC and conv classification
    scores (by addition and by multiplication), logs losses/accuracies and a
    confusion matrix, and renders attention visualizations.
    """

    def __init__(self, sess, model, data_test, config, logger):
        """Store the test data source and derive the number of test iterations.

        Args:
            sess: TensorFlow session used to run the graph.
            model: model exposing the tensors read in `test_step`.
            data_test: test data provider with `next_batch()` and `len_lines`.
            config: configuration object (must provide `batch_size`).
            logger: summary/confusion-matrix logger.
        """
        super(ExampleTesterPlotAttention, self).__init__(sess, model, config, logger)
        self.data_test = data_test
        # calculate number of training and validation steps per epochs
        # (integer division: any trailing partial batch is dropped)
        self.num_iter_data = data_test.len_lines // self.config.batch_size

    def test(self):
        """Evaluate the whole test set and log aggregated metrics.

        Accumulates per-batch loss and both fusion accuracies, logs epoch-level
        summaries and a confusion matrix, then plots the conv attention maps of
        the last batch.
        """
        losses_val = []
        accs_add_val = []
        accs_mul_val = []
        predictions_add_val = []
        predictions_mul_val = []
        gt_classes_val = []
        loop_test = tqdm(range(self.num_iter_data))
        # iterate over steps (batches)
        for _ in loop_test:
            accu_add, accu_mul, loss, predictions_add, predictions_mul, gt_classes, a = self.test_step()
            losses_val.append(loss)
            accs_add_val.append(accu_add)
            accs_mul_val.append(accu_mul)
            # collect also the actual predictions to create confusion matrix
            predictions_add_val = np.append(predictions_add_val, predictions_add)
            predictions_mul_val = np.append(predictions_mul_val, predictions_mul)
            gt_classes_val = np.append(gt_classes_val, gt_classes)
        # epoch-level averages over all batches
        loss_val_epoch = np.mean(losses_val)
        accs_add_val_epoch = np.mean(accs_add_val)
        accs_mul_val_epoch = np.mean(accs_mul_val)
        cur_it = self.model.global_step_tensor.eval(self.sess)
        summaries_dict = {
            'loss_validation': loss_val_epoch,
            'accuracy_add_validation': accs_add_val_epoch,
            'accuracy_multiply_validation': accs_mul_val_epoch
        }
        self.logger.summarize(cur_it, summaries_dict=summaries_dict)
        # map numeric class ids back to human-readable label strings, sorted by id
        labels = sorted(self.data_test.label_dict, key=self.data_test.label_dict.get)
        self.logger.confusion_mat(cur_it, labels, [self.data_test.label_dict_inv[int(i)] for i in gt_classes_val],
                                  [self.data_test.label_dict_inv[int(i)] for i in predictions_add_val],
                                  [self.data_test.label_dict_inv[int(i)] for i in predictions_mul_val], 'test')
        # NOTE: `a` holds the conv attention tensor of the LAST batch only
        print_alphas_conv(a)

    def test_step(self):
        """Run one evaluation batch through the model.

        Returns:
            tuple of (accuracy of additive fusion, accuracy of multiplicative
            fusion, batch loss, additive-fusion predictions, multiplicative-
            fusion predictions, ground-truth class indices, conv attention
            tensor).
        """
        # dropout keep-probability of 1.0: no dropout at test time
        prob = 1.0
        batch_frames, batch_labels, _ = self.data_test.next_batch()
        feed_dict = {
            self.model.is_training: False,
            self.model.input_img: batch_frames,
            self.model.ys: batch_labels,
            self.model.prob: prob
        }
        fc_score, conv_score, loss, a, a_fc, im_outputs, temp_atten_out, w2, b2, conv_img_out, conv_img_drop = \
            self.sess.run([self.model.fc_pred,
                           self.model.conv_pred,
                           self.model.loss,
                           self.model.alphas,
                           self.model.alphas_fc,
                           self.model.im_outputs,
                           self.model.temp_attention,
                           self.model.weights2,
                           self.model.biases2,
                           self.model.conv_img_out,
                           self.model.conv_img_drop], feed_dict)
        # TODO: remove — debug plotting on every test step
        plot_input_images(batch_frames, 'images')
        plot_attention(temp_atten_out, 'atten')
        plot_h_abs(im_outputs)
        # calc accuracy of the batch
        fc_score = np.reshape(np.array(fc_score), (self.config.batch_size, self.config.n_classes))  # (batch_size, n_classes)
        conv_score = np.reshape(np.array(conv_score), (self.config.batch_size, self.config.n_classes))
        # ground-truth class index = position of the 1 in each one-hot row
        gt_classes = np.nonzero(batch_labels)[1]
        # fusion by addition
        fus_add = np.add(fc_score, conv_score)
        predictions_add = np.argmax(fus_add, axis=1)
        accu_add = accuracy(predictions_add, gt_classes)
        # fusion by multiplication
        fus_mul = np.multiply(fc_score, conv_score)
        predictions_mul = np.argmax(fus_mul, axis=1)
        accu_mul = accuracy(predictions_mul, gt_classes)
        return accu_add, accu_mul, loss, predictions_add, predictions_mul, gt_classes, a
| [
"numpy.mean",
"numpy.multiply",
"numpy.add",
"numpy.argmax",
"numpy.equal",
"numpy.append",
"numpy.array",
"numpy.nonzero",
"numpy.shape"
] | [((1821, 1840), 'numpy.mean', 'np.mean', (['losses_val'], {}), '(losses_val)\n', (1828, 1840), True, 'import numpy as np\n'), ((1870, 1891), 'numpy.mean', 'np.mean', (['accs_add_val'], {}), '(accs_add_val)\n', (1877, 1891), True, 'import numpy as np\n'), ((1921, 1942), 'numpy.mean', 'np.mean', (['accs_mul_val'], {}), '(accs_mul_val)\n', (1928, 1942), True, 'import numpy as np\n'), ((4868, 4896), 'numpy.add', 'np.add', (['fc_score', 'conv_score'], {}), '(fc_score, conv_score)\n', (4874, 4896), True, 'import numpy as np\n'), ((4923, 4949), 'numpy.argmax', 'np.argmax', (['fus_add'], {'axis': '(1)'}), '(fus_add, axis=1)\n', (4932, 4949), True, 'import numpy as np\n'), ((5061, 5094), 'numpy.multiply', 'np.multiply', (['fc_score', 'conv_score'], {}), '(fc_score, conv_score)\n', (5072, 5094), True, 'import numpy as np\n'), ((5121, 5147), 'numpy.argmax', 'np.argmax', (['fus_mul'], {'axis': '(1)'}), '(fus_mul, axis=1)\n', (5130, 5147), True, 'import numpy as np\n'), ((163, 177), 'numpy.equal', 'np.equal', (['a', 'b'], {}), '(a, b)\n', (171, 177), True, 'import numpy as np\n'), ((281, 294), 'numpy.shape', 'np.shape', (['alp'], {}), '(alp)\n', (289, 294), True, 'import numpy as np\n'), ((1598, 1645), 'numpy.append', 'np.append', (['predictions_add_val', 'predictions_add'], {}), '(predictions_add_val, predictions_add)\n', (1607, 1645), True, 'import numpy as np\n'), ((1680, 1727), 'numpy.append', 'np.append', (['predictions_mul_val', 'predictions_mul'], {}), '(predictions_mul_val, predictions_mul)\n', (1689, 1727), True, 'import numpy as np\n'), ((1757, 1794), 'numpy.append', 'np.append', (['gt_classes_val', 'gt_classes'], {}), '(gt_classes_val, gt_classes)\n', (1766, 1794), True, 'import numpy as np\n'), ((4571, 4589), 'numpy.array', 'np.array', (['fc_score'], {}), '(fc_score)\n', (4579, 4589), True, 'import numpy as np\n'), ((4699, 4719), 'numpy.array', 'np.array', (['conv_score'], {}), '(conv_score)\n', (4707, 4719), True, 'import numpy as np\n'), ((4792, 4816), 
'numpy.nonzero', 'np.nonzero', (['batch_labels'], {}), '(batch_labels)\n', (4802, 4816), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .base_representation import BaseRepresentation
from .pose_estimation_representation import PoseEstimationRepresentation
class PoseEstimation3dRepresentation(BaseRepresentation):
def __init__(self, identifier='', x_values=None, y_values=None, visibility=None, labels=None,
x_3d_values=None, y_3d_values=None, z_3d_values=None, fx=None):
super().__init__(identifier)
self.pose_2d = PoseEstimationRepresentation(identifier, x_values, y_values, visibility, labels)
self.x_3d_values = x_3d_values if np.size(x_3d_values) > 0 else np.array([])
self.y_3d_values = y_3d_values if np.size(y_3d_values) > 0 else np.array([])
self.z_3d_values = z_3d_values if np.size(z_3d_values) > 0 else np.array([])
self.fx = fx
@property
def bboxes(self):
if self.size == 0:
return []
x_mins, y_mins, x_maxs, y_maxs = [], [], [], []
for box_id in range(self.pose_2d.x_values.shape[0]):
x_mins.append(np.min(self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0]))
x_maxs.append(np.max(self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0]))
y_mins.append(np.min(self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0]))
y_maxs.append(np.max(self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0]))
return [[x_min, y_min, x_max, y_max] for x_min, y_min, x_max, y_max in zip(x_mins, y_mins, x_maxs, y_maxs)]
@property
def size(self):
return len(self.pose_2d.x_values)
class PoseEstimation3dAnnotation(PoseEstimation3dRepresentation):
pass
class PoseEstimation3dPrediction(PoseEstimation3dRepresentation):
def __init__(self, identifier='', x_values=None, y_values=None, visibility=None, scores=None,
x_3d_values=None, y_3d_values=None, z_3d_values=None, labels=None, translations=None):
super().__init__(identifier, x_values, y_values, visibility, labels, x_3d_values, y_3d_values, z_3d_values)
self.scores = scores if scores is not None and np.size(scores) else np.array([])
self.translations = translations if translations is not None and np.size(translations) else np.array([])
| [
"numpy.max",
"numpy.array",
"numpy.size",
"numpy.min"
] | [((1174, 1186), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1182, 1186), True, 'import numpy as np\n'), ((1259, 1271), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1267, 1271), True, 'import numpy as np\n'), ((1344, 1356), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1352, 1356), True, 'import numpy as np\n'), ((2721, 2733), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2729, 2733), True, 'import numpy as np\n'), ((2834, 2846), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2842, 2846), True, 'import numpy as np\n'), ((1144, 1164), 'numpy.size', 'np.size', (['x_3d_values'], {}), '(x_3d_values)\n', (1151, 1164), True, 'import numpy as np\n'), ((1229, 1249), 'numpy.size', 'np.size', (['y_3d_values'], {}), '(y_3d_values)\n', (1236, 1249), True, 'import numpy as np\n'), ((1314, 1334), 'numpy.size', 'np.size', (['z_3d_values'], {}), '(z_3d_values)\n', (1321, 1334), True, 'import numpy as np\n'), ((1607, 1681), 'numpy.min', 'np.min', (['self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0]'], {}), '(self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0])\n', (1613, 1681), True, 'import numpy as np\n'), ((1709, 1783), 'numpy.max', 'np.max', (['self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0]'], {}), '(self.pose_2d.x_values[box_id][self.pose_2d.visibility[box_id] > 0])\n', (1715, 1783), True, 'import numpy as np\n'), ((1811, 1885), 'numpy.min', 'np.min', (['self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0]'], {}), '(self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0])\n', (1817, 1885), True, 'import numpy as np\n'), ((1913, 1987), 'numpy.max', 'np.max', (['self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0]'], {}), '(self.pose_2d.y_values[box_id][self.pose_2d.visibility[box_id] > 0])\n', (1919, 1987), True, 'import numpy as np\n'), ((2700, 2715), 'numpy.size', 'np.size', (['scores'], {}), '(scores)\n', (2707, 2715), True, 'import numpy as np\n'), 
((2807, 2828), 'numpy.size', 'np.size', (['translations'], {}), '(translations)\n', (2814, 2828), True, 'import numpy as np\n')] |
from pathlib import Path
from astropy.io import fits
import numpy as np
import pickle
#calibration io
def save_image(data, imname):
hdu = fits.PrimaryHDU(data)
hdu.writeto(imname, overwrite = True)
return None
def load_calib_img(calib_dir, img_number, style = 'wirc', img_type = ''):
fname = get_img_name(calib_dir, img_number, style = style,
img_type = img_type)
with fits.open(fname) as hdul:
data = hdul[0].data
return data
def save_multicomponent_frame(mcf, dump_dir):
frame = pickle.dump(mcf, open(
dump_dir + 'multicomponent_frame.p', 'wb'))
return frame
def load_multicomponent_frame(dump_dir):
mcf = pickle.load(open(dump_dir + 'multicomponent_frame.p', 'rb'))
return mcf
def load_calib_files(flat, dark, bp, hp, nonlinearity_fname = None):
with fits.open(flat) as hdul:
flat = hdul[0].data
with fits.open(dark) as hdul:
dark = hdul[0].data
with fits.open(bp) as hdul:
bp = np.array(hdul[0].data, dtype = 'bool')
with fits.open(hp) as hdul:
hp = np.array(hdul[0].data, dtype = 'bool')
if nonlinearity_fname is not None:
with fits.open(nonlinearity_fname) as hdu:
nonlinearity_array = hdu[1].data
correct_nonlinearity = True
else:
nonlinearity_array = None
correct_nonlinearity = False
return flat, dark, bp, hp, nonlinearity_array, correct_nonlinearity
def get_science_img_list(science_ranges):
to_extract = np.array([])
for seq in science_ranges:
range_i = np.arange(seq[0], seq[1] + 1,
dtype = int)
to_extract = np.append(to_extract, range_i)
return np.array(to_extract, dtype = int)
def load_bkgs(dump_dir):
fname = dump_dir + 'bkgs.p'
return pickle.load(open(fname, 'rb'))
##directories
def init_phot_dirs(dump_dir, img_dir, rads):
dump_dir_phot = dump_dir + 'phot/'
Path(dump_dir_phot).mkdir(exist_ok = True)
for rad in rads:
dump_dir_temp = dump_dir + 'phot/' + str(rad) + '/'
Path(dump_dir_temp).mkdir(exist_ok = True)
return dump_dir_phot
def init_output_direcs(path, test_name):
"""Initializes all output directories"""
calib_dir = path + 'calibrated_' + test_name + '/'
dump_dir = path + 'dump_files_' + test_name + '/'
img_dir = path + 'image_files_' + test_name + '/'
Path(calib_dir).mkdir(exist_ok = True)
Path(dump_dir).mkdir(exist_ok = True)
Path(img_dir).mkdir(exist_ok = True)
print("OUTPUT DIRECTORIES INITIALIZED")
return calib_dir, dump_dir, img_dir
##filenames
def get_img_name(direc, number, style = 'wirc', img_type = ''):
"""Gets the image name in WIRC convention
This function will take an image directory and number and
gives the image name.
Args:
number (int): The image number.
direc (str): The directory in which the image is stored.
style (str): Either 'wirc' or 'image' to precede the number.
img_type (str): For instance 'master_dark', etc.
Returns:
str: The correct path to the required file.
"""
num = str(number)
num_zeros = 4 - len(num)
if img_type != '':
img_type = '_' + img_type
return direc + style + '0'*num_zeros + num + img_type + '.fits'
def get_bkg_file_name(direc, bkg_num, style = 'wirc'):
return get_img_name(direc, bkg_num, style = style,
img_type = 'calibrated_background')
def get_calib_file_names(direc, dark_num, flat_num, style = 'wirc'):
bp = get_img_name(direc, flat_num, style = style, img_type = 'bp_map')
hp = get_img_name(direc, dark_num, style = style, img_type = 'hp_map')
dark = get_img_name(direc, dark_num, style = style,
img_type = 'combined_dark')
flat = get_img_name(direc, flat_num, style = style,
img_type = 'combined_flat')
return bp, hp, dark, flat
def load_phot_data(dump_dir, aperture):
phot_dir = f'phot/{aperture}/'
x = pickle.load(open(dump_dir + 'bjd.p', 'rb'))
ys = pickle.load(open(dump_dir + phot_dir + 'raw_phot.p', 'rb'))
yerrs = pickle.load(open(dump_dir + phot_dir + 'errs.p', 'rb'))
yerrs /= ys
temp = ys.T/np.median(ys, axis = 1)
ys = temp.T
bkgs = pickle.load(open(dump_dir + 'bkgs.p', 'rb'))
centroid_x = pickle.load(open(dump_dir + phot_dir + 'xpos.p', 'rb'))
centroid_y = pickle.load(open(dump_dir + phot_dir + 'ypos.p', 'rb'))
airmass = pickle.load(open(dump_dir + 'AIRMASS.p', 'rb'))
widths = pickle.load(open(dump_dir + phot_dir + 'widths.p', 'rb'))
return x, ys, yerrs, bkgs, centroid_x, centroid_y, \
airmass, widths
def save_phot_data(dump_dir, xpos, ypos, widths, raw_phot, errs, tag = ''):
if tag != '':
tag = '_' + tag
save_files = ['xpos', 'ypos', 'widths', 'raw_phot', 'errs']
save_files = [dump_dir + fname + tag + '.p' for fname in
save_files]
pickle.dump(xpos, open(save_files[0], 'wb'))
pickle.dump(ypos, open(save_files[1], 'wb'))
pickle.dump(widths, open(save_files[2], 'wb'))
pickle.dump(raw_phot, open(save_files[3], 'wb'))
pickle.dump(errs, open(save_files[4], 'wb'))
return save_files
def save_covariates(dump_dir, covariate_dict):
for key in covariate_dict.keys():
fname = f'{dump_dir}{key}.p'
pickle.dump(np.array(covariate_dict[key]), open(fname, 'wb'))
return None
| [
"numpy.median",
"astropy.io.fits.PrimaryHDU",
"pathlib.Path",
"numpy.append",
"numpy.array",
"astropy.io.fits.open",
"numpy.arange"
] | [((141, 162), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['data'], {}), '(data)\n', (156, 162), False, 'from astropy.io import fits\n'), ((1382, 1394), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1390, 1394), True, 'import numpy as np\n'), ((1535, 1566), 'numpy.array', 'np.array', (['to_extract'], {'dtype': 'int'}), '(to_extract, dtype=int)\n', (1543, 1566), True, 'import numpy as np\n'), ((379, 395), 'astropy.io.fits.open', 'fits.open', (['fname'], {}), '(fname)\n', (388, 395), False, 'from astropy.io import fits\n'), ((791, 806), 'astropy.io.fits.open', 'fits.open', (['flat'], {}), '(flat)\n', (800, 806), False, 'from astropy.io import fits\n'), ((844, 859), 'astropy.io.fits.open', 'fits.open', (['dark'], {}), '(dark)\n', (853, 859), False, 'from astropy.io import fits\n'), ((897, 910), 'astropy.io.fits.open', 'fits.open', (['bp'], {}), '(bp)\n', (906, 910), False, 'from astropy.io import fits\n'), ((927, 963), 'numpy.array', 'np.array', (['hdul[0].data'], {'dtype': '"""bool"""'}), "(hdul[0].data, dtype='bool')\n", (935, 963), True, 'import numpy as np\n'), ((972, 985), 'astropy.io.fits.open', 'fits.open', (['hp'], {}), '(hp)\n', (981, 985), False, 'from astropy.io import fits\n'), ((1002, 1038), 'numpy.array', 'np.array', (['hdul[0].data'], {'dtype': '"""bool"""'}), "(hdul[0].data, dtype='bool')\n", (1010, 1038), True, 'import numpy as np\n'), ((1435, 1475), 'numpy.arange', 'np.arange', (['seq[0]', '(seq[1] + 1)'], {'dtype': 'int'}), '(seq[0], seq[1] + 1, dtype=int)\n', (1444, 1475), True, 'import numpy as np\n'), ((1496, 1526), 'numpy.append', 'np.append', (['to_extract', 'range_i'], {}), '(to_extract, range_i)\n', (1505, 1526), True, 'import numpy as np\n'), ((3848, 3869), 'numpy.median', 'np.median', (['ys'], {'axis': '(1)'}), '(ys, axis=1)\n', (3857, 3869), True, 'import numpy as np\n'), ((1085, 1114), 'astropy.io.fits.open', 'fits.open', (['nonlinearity_fname'], {}), '(nonlinearity_fname)\n', (1094, 1114), False, 'from astropy.io import 
fits\n'), ((1760, 1779), 'pathlib.Path', 'Path', (['dump_dir_phot'], {}), '(dump_dir_phot)\n', (1764, 1779), False, 'from pathlib import Path\n'), ((2184, 2199), 'pathlib.Path', 'Path', (['calib_dir'], {}), '(calib_dir)\n', (2188, 2199), False, 'from pathlib import Path\n'), ((2224, 2238), 'pathlib.Path', 'Path', (['dump_dir'], {}), '(dump_dir)\n', (2228, 2238), False, 'from pathlib import Path\n'), ((2263, 2276), 'pathlib.Path', 'Path', (['img_dir'], {}), '(img_dir)\n', (2267, 2276), False, 'from pathlib import Path\n'), ((4922, 4951), 'numpy.array', 'np.array', (['covariate_dict[key]'], {}), '(covariate_dict[key])\n', (4930, 4951), True, 'import numpy as np\n'), ((1877, 1896), 'pathlib.Path', 'Path', (['dump_dir_temp'], {}), '(dump_dir_temp)\n', (1881, 1896), False, 'from pathlib import Path\n')] |
import numpy as np
import random
random.seed(2301)
np.random.seed(795118)
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
class Agent():
"""
Class that models a reinforcement learning agent.
"""
def __init__(self, number_of_algorithms, epsilon=0.2, alpha=0.01, gamma=0.7):
self.n_actions = number_of_algorithms
# 20 algoritmi
self.time_portions = [0.01, 0.0127, 0.0162, 0.0206, 0.0263, 0.0335, 0.0428, 0.0545, 0.0695, 0.0885, 0.1128, 0.1438, 0.1832, 0.2335, 0.2976, 0.3792, 0.4, 0.4832, 0.5, 0.6158, 0.7, 0.7847, 0.85, 0.9, 0.97, 1.02]
self.time_portions = [self.time_portions[i]-0.01 for i in range(len(self.time_portions))]
self.epsilon = epsilon
self.alpha = alpha
self.gamma = gamma
#self.Q = np.random.rand(len(self.time_portions)-1, self.n_actions)
def reset(self, dataset_meta_features, algorithms_meta_features):
"""
Reset the agents' memory for a new dataset
Parameters
----------
dataset_meta_features : dict of {str : str}
The meta-features of the dataset at hand, including:
'usage' : name of the competition
'name' : name of the dataset
'task' : type of the task
'target_type' : target type
'feat_type' : feature type
'metric' : evaluatuon metric used
'time_budget' : time budget for training and testing
'feat_num' : number of features
'target_num' : number of targets
'label_num' : number of labels
'train_num' : number of training examples
'valid_num' : number of validation examples
'test_num' : number of test examples
'has_categorical' : presence or absence of categorical variables
'has_missing' : presence or absence of missing values
'is_sparse' : full matrices or sparse matrices
algorithms_meta_features : dict of dict of {str : str}
The meta_features of all algorithms
Examples
----------
>>> dataset_meta_features
{'usage': 'Meta-learningchallenge2022', 'name': 'Erik', 'task': 'regression',
'target_type': 'Binary', 'feat_type': 'Mixed', 'metric': 'f1_metric',
'time_budget': '600', 'feat_num': '9', 'target_num': '6', 'label_num': '10',
'train_num': '17', 'valid_num': '87', 'test_num': '72', 'has_categorical': '1',
'has_missing': '0', 'is_sparse': '1'}
>>> algorithms_meta_features
{'0': {'meta_feature_0': '0', 'meta_feature_1': '0.1'},
'1': {'meta_feature_0': '1', 'meta_feature_1': '0.2'},
'2': {'meta_feature_0': '0', 'meta_feature_1': '0.3'},
'3': {'meta_feature_0': '1', 'meta_feature_1': '0.4'},
...
'18': {'meta_feature_0': '1', 'meta_feature_1': '0.9'},
'19': {'meta_feature_0': '0', 'meta_feature_1': '1.0'},
}
"""
self.dataset_metadata = dataset_meta_features
self.algorithms_metadata = algorithms_meta_features
self.validation_last_scores = [0.0 for i in range(self.n_actions)]
self.validation_time_seen = [0.0 for i in range(self.n_actions)]
#self.Q = np.random.rand(len(self.time_portions) - 1, self.n_actions)
self.time_used = 1
self.time_budget = float(dataset_meta_features['time_budget'])
if self.time_budget not in self.time_budgets_state:
distances = [abs(self.time_budget-self.time_budgets_state[i]) for i in range(len(self.time_budgets_state))]
self.time_budget_position = np.argmin(distances)
else:
self.time_budget_position = self.time_budgets_state.index(float(self.time_budget))
ds_features = np.array(self._ds_to_vec(dataset_meta_features, self.ordered_features)).reshape(1, -1)
ds_features = self.scaler.transform(ds_features)
self.cluster_label = self.cluster.predict(ds_features)
def reset_for_train(self, dataset_meta_features, algorithms_meta_features):
"""
Reset the agents' memory for a new dataset
Parameters
----------
dataset_meta_features : dict of {str : str}
The meta-features of the dataset at hand, including:
'usage' : name of the competition
'name' : name of the dataset
'task' : type of the task
'target_type' : target type
'feat_type' : feature type
'metric' : evaluatuon metric used
'time_budget' : time budget for training and testing
'feat_num' : number of features
'target_num' : number of targets
'label_num' : number of labels
'train_num' : number of training examples
'valid_num' : number of validation examples
'test_num' : number of test examples
'has_categorical' : presence or absence of categorical variables
'has_missing' : presence or absence of missing values
'is_sparse' : full matrices or sparse matrices
algorithms_meta_features : dict of dict of {str : str}
The meta_features of all algorithms
Examples
----------
>>> dataset_meta_features
{'usage': 'Meta-learningchallenge2022', 'name': 'Erik', 'task': 'regression',
'target_type': 'Binary', 'feat_type': 'Mixed', 'metric': 'f1_metric',
'time_budget': '600', 'feat_num': '9', 'target_num': '6', 'label_num': '10',
'train_num': '17', 'valid_num': '87', 'test_num': '72', 'has_categorical': '1',
'has_missing': '0', 'is_sparse': '1'}
>>> algorithms_meta_features
{'0': {'meta_feature_0': '0', 'meta_feature_1': '0.1'},
'1': {'meta_feature_0': '1', 'meta_feature_1': '0.2'},
'2': {'meta_feature_0': '0', 'meta_feature_1': '0.3'},
'3': {'meta_feature_0': '1', 'meta_feature_1': '0.4'},
...
'18': {'meta_feature_0': '1', 'meta_feature_1': '0.9'},
'19': {'meta_feature_0': '0', 'meta_feature_1': '1.0'},
}
"""
self.dataset_metadata = dataset_meta_features
self.algorithms_metadata = algorithms_meta_features
self.validation_last_scores = [0.0 for i in range(self.n_actions)]
self.validation_time_seen = [0.0 for i in range(self.n_actions)]
#self.Q = np.random.rand(len(self.time_portions) - 1, self.n_actions)
self.time_used = 1
self.time_budget = float(dataset_meta_features['time_budget'])
if self.time_budget not in self.time_budgets_state:
distances = [abs(self.time_budget-self.time_budgets_state[i]) for i in range(len(self.time_budgets_state))]
self.time_budget_position = np.argmin(distances)
else:
self.time_budget_position = self.time_budgets_state.index(float(self.time_budget))
def meta_train(self, datasets_meta_features, algorithms_meta_features, validation_learning_curves,
test_learning_curves):
self.train_datasets_ids = [k for k in test_learning_curves][:25]
self.ordered_features = self._ds_ordered(datasets_meta_features[random.choice(self.train_datasets_ids)])
self.cluster = self.kmeans_clustering(datasets_meta_features)
self.cluster_labels = self.cluster.labels_
self.time_budgets_state = []
for ds in datasets_meta_features:
if float(datasets_meta_features[ds]['time_budget']) not in self.time_budgets_state:
self.time_budgets_state.append(float(datasets_meta_features[ds]['time_budget']))
self.Q = np.random.rand(12, len(self.time_portions) - 1, self.n_actions)
maxit = 5000
for iteration in range(maxit):
for idx, episode in enumerate(self.train_datasets_ids):
self.dataset_num = episode
self.counters = {i: 0.0 for i in range(self.n_actions)} # Counters keeping track of the time has been spent for each algorithm
dataset_meta_features = datasets_meta_features[episode]
self.cluster_label = self.cluster_labels[idx]
self.total_time_budget = float(dataset_meta_features['time_budget'])
self.remaining_time_budget = self.total_time_budget
self.list_algorithms = [k for k in test_learning_curves[episode].keys()]
self.reset_for_train(dataset_meta_features, algorithms_meta_features)
#print(
# "\n#===================== Start META-TRAINING on dataset: " + episode + " =====================#")
#print( "\n#---Dataset meta-features = " + str(datasets_meta_features[episode]))
#print( "\n#---Algorithms meta-features = " + str(algorithms_meta_features))
observation = None
for it in range(len(self.time_portions)-1):
# === Get the agent's suggestion
(best_algo, next_algo, delta_t) = self.suggest_for_train(observation)
action = (best_algo, next_algo, delta_t)
self.timestamps = test_learning_curves[episode][self.list_algorithms[next_algo]].timestamps
self.scores = test_learning_curves[episode][self.list_algorithms[next_algo]].scores
R_test_C_A, C_A = self.get_last_point_within_delta_t(delta_t, self.validation_time_seen[next_algo])
self.validation_time_seen[next_algo] = C_A
observation = (next_algo, C_A, R_test_C_A)
# print( "------------------")
# print( "A_star = " + str(action[0]))
# print( "A = " + str(action[1]))
# print( "delta_t = " + str(action[2]))
# print( "remaining_time_budget = " + str((1.01-self.time_portions[self.time_used-1])*self.time_budget))
# print( "observation = " + str(observation))
#print( "[+]Finished META-TRAINING phase")
# #self.reset(datasets_meta_features[episode], algorithms_meta_features)
def suggest_for_train(self, observation):
next_algo_to_reveal = self.get_action_eps_greedy(self.cluster_label, self.time_used-1)
delta_t = (self.time_portions[self.time_used]-self.time_portions[self.time_used-1])*self.time_budget
if observation == None:
best_algo_for_test = None
self.time_used += 1
self.old_score = 0
else:
A, C_A, R_validation_C_A = observation
self.validation_last_scores[A] = R_validation_C_A
self.validation_time_seen[A] = C_A
weight = ((1.01-self.time_portions[self.time_used])*self.time_budget)
reward = (np.max([R_validation_C_A, self.old_score])-self.old_score)
#self.time_used += 1
self.update_Q(old_state=[self.cluster_label, self.time_used - 2], action=A, reward = reward, new_state=[self.cluster_label, self.time_used - 1])
self.time_used += 1
best_algo_for_test = np.argmax(self.validation_last_scores)
self.old_score = np.max([R_validation_C_A, self.old_score])
action = (best_algo_for_test, next_algo_to_reveal, delta_t)
return action
def suggest(self, observation):
next_algo_to_reveal = self.get_action_greedy(self.cluster_label, self.time_used-1)
delta_t = (self.time_portions[self.time_used]-self.time_portions[self.time_used-1])*self.time_budget
if observation == None:
best_algo_for_test = None
self.time_used += 1
self.old_score = 0
else:
A, C_A, R_validation_C_A = observation
self.validation_last_scores[A] = R_validation_C_A
self.validation_time_seen[A] = C_A
weight = ((1.01-self.time_portions[self.time_used])*self.time_budget)
reward = (np.max([R_validation_C_A, self.old_score])-self.old_score)
#self.time_used += 1
#self.update_Q(old_state=[ self.cluster_label, self.time_used - 2], action=A, reward = reward, new_state=[self.cluster_label, self.time_used - 1])
self.time_used += 1
best_algo_for_test = np.argmax(self.validation_last_scores)
self.old_score = np.max([R_validation_C_A, self.old_score])
action = (best_algo_for_test, next_algo_to_reveal, delta_t)
return action
def get_action_eps_greedy(self, r, c):
"""
Epsilon-greedy sampling of next action given the current state.
Parameters
----------
r: int
Current `y` position in the labyrinth
c: int
Current `x` position in the labyrinth
Returns
-------
action: int
Action sampled according to epsilon-greedy policy.
"""
eps = random.random()
if eps < self.epsilon:
return random.randint(0, self.n_actions - 1)
else:
return self.Q[r, c].argmax()
def get_action_greedy(self, r, c):
"""
Greedy sampling of next action given the current state.
Parameters
----------
r: int
Current `y` position in the labyrinth
c: int
Current `x` position in the labyrinth
Returns
-------
action: int
Action sampled according to greedy policy.
"""
return self.Q[r, c].argmax()
def update_Q(self, old_state, action, reward, new_state):
"""
Update action-value function Q
Parameters
----------
old_state: tuple
Previous state of the Environment
action: int
Action performed to go from `old_state` to `new_state`
reward: int
Reward got after action `action`
new_state: tuple
Next state of the Environment
Returns
-------
None
"""
self.Q[old_state[0], old_state[1], action] = \
self.Q[old_state[0], old_state[1], action] + \
self.alpha * (reward + self.gamma * self.Q[new_state[0], new_state[1]].max() - \
self.Q[old_state[0], old_state[1], action])
def get_last_point_within_delta_t(self, delta_t, C):
"""
Return the last achievable point on the learning curve given the allocated time budget delta_t
Parameters
----------
delta_t : float
Allocated time budget given by the agent.
C : float
The timestamp of the last point on the learning curve (x-coordinate of current position on the learning curve)
Returns
----------
score : float
The last achievable score within delta_t
timestamp : float
The timestamp associated with the last achievable score
Examples
----------
>>> lc.get_last_point_within_delta_t(50, 151.73)
score = 0.5
timestamp = 151.73
"""
temp_time = C + delta_t
for i in range(len(self.timestamps)):
if temp_time < self.timestamps[i]:
if i == 0: # if delta_t is not enough to get the first point, the agent wasted it for nothing!
score, timestamp = 0.0, 0.0
else: # return the last achievable point
score, timestamp = self.scores[i - 1], self.timestamps[i - 1]
return score, timestamp
# If the last point on the learning curve is already reached, return it
score, timestamp = self.scores[-1], self.timestamps[-1]
return score, timestamp
def _ds_ordered(self, ds):
self.ordered_features = []
for k in ds.keys():
self.ordered_features.append(k)
return self.ordered_features
def kmeans_dist(self, ds_vec1, ds_vec2):
dist = 0
for i in range(len(ds_vec1)):
if ds_vec1[i] == ds_vec2[i]:
dist +=1
return dist
def _ds_to_vec(self, ds, ordered_features):
conver = {
"usage": None,
"name": None,
"task": {
"binary.classification": '0',
"multiclass.classification": '1',
"multilabel.classification": '2',
"regression": '3'
},
"target_type": {
"Binary": '0',
"Categorical": '1',
"Numerical": '2',
},
"feat_type": {
"Binary": '0',
"Categorical": '1',
"Numerical": '2',
"Mixed": '3',
},
"metric": {
"bac_metric": '0',
"auc_metric": '1',
"f1_metric": '2',
"pac_metric": '3',
"abs_metric": '4',
"r2_metric": '5',
'a_metric': '6',
},
}
dv = []
for keys in ordered_features:
for k, v in ds.items():
cd = conver.get(k, {})
if cd is None:
continue
item = cd.get(v, None)
if item is None:
item = ds[k]
if k == keys:
dv.append(item)
return dv
def _algo_to_vec(self, algo):
algov = []
for k, v in algo.items():
algov.append(algo[k])
return algov
def kmeans_clustering(self, datasets_meta_features):
ds_features = [self._ds_to_vec(datasets_meta_features[i], self.ordered_features) for i in self.train_datasets_ids]
ds_features = np.array(ds_features)
# ds_features = ds_features.astype('float64')
# self.features_mean = []
# for i in range(ds_features.shape[1]):
# mean = np.mean(ds_features[:, i])
# self.features_mean.append(mean)
# for j in range(ds_features.shape[0]):
# if mean!=0:
# ds_features[j, i] = ds_features[j, i]/(mean)
self.scaler = StandardScaler()
# transform data
ds_features = self.scaler.fit_transform(ds_features)
kmeans = KMeans(n_clusters = 12, random_state=0).fit(ds_features)
print('hello')
return kmeans | [
"sklearn.cluster.KMeans",
"random.choice",
"numpy.argmax",
"random.seed",
"numpy.max",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.random.seed",
"numpy.argmin",
"random.random",
"random.randint"
] | [((35, 52), 'random.seed', 'random.seed', (['(2301)'], {}), '(2301)\n', (46, 52), False, 'import random\n'), ((54, 76), 'numpy.random.seed', 'np.random.seed', (['(795118)'], {}), '(795118)\n', (68, 76), True, 'import numpy as np\n'), ((12488, 12503), 'random.random', 'random.random', ([], {}), '()\n', (12501, 12503), False, 'import random\n'), ((16798, 16819), 'numpy.array', 'np.array', (['ds_features'], {}), '(ds_features)\n', (16806, 16819), True, 'import numpy as np\n'), ((17178, 17194), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (17192, 17194), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3739, 3759), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (3748, 3759), True, 'import numpy as np\n'), ((6784, 6804), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (6793, 6804), True, 'import numpy as np\n'), ((10815, 10853), 'numpy.argmax', 'np.argmax', (['self.validation_last_scores'], {}), '(self.validation_last_scores)\n', (10824, 10853), True, 'import numpy as np\n'), ((10878, 10920), 'numpy.max', 'np.max', (['[R_validation_C_A, self.old_score]'], {}), '([R_validation_C_A, self.old_score])\n', (10884, 10920), True, 'import numpy as np\n'), ((11903, 11941), 'numpy.argmax', 'np.argmax', (['self.validation_last_scores'], {}), '(self.validation_last_scores)\n', (11912, 11941), True, 'import numpy as np\n'), ((11966, 12008), 'numpy.max', 'np.max', (['[R_validation_C_A, self.old_score]'], {}), '([R_validation_C_A, self.old_score])\n', (11972, 12008), True, 'import numpy as np\n'), ((12546, 12583), 'random.randint', 'random.randint', (['(0)', '(self.n_actions - 1)'], {}), '(0, self.n_actions - 1)\n', (12560, 12583), False, 'import random\n'), ((7200, 7238), 'random.choice', 'random.choice', (['self.train_datasets_ids'], {}), '(self.train_datasets_ids)\n', (7213, 7238), False, 'import random\n'), ((10521, 10563), 'numpy.max', 'np.max', (['[R_validation_C_A, self.old_score]'], {}), 
'([R_validation_C_A, self.old_score])\n', (10527, 10563), True, 'import numpy as np\n'), ((11607, 11649), 'numpy.max', 'np.max', (['[R_validation_C_A, self.old_score]'], {}), '([R_validation_C_A, self.old_score])\n', (11613, 11649), True, 'import numpy as np\n'), ((17291, 17328), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(12)', 'random_state': '(0)'}), '(n_clusters=12, random_state=0)\n', (17297, 17328), False, 'from sklearn.cluster import KMeans\n')] |
import scipy.linalg as spla
import numpy as np
M11 = 1.01
M12 = 1.00
M13 = 1.00
M21 = 1.00
M22 = 1.01
M23 = 1.00
M31 = 1.00
M32 = 1.00
M33 = 1.00
A = np.array([[M11, M12, M13],
[M21, M22, M23],
[M31, M32, M33]])
b = np.array([[4], [7.9999999999999999]])
def np_inv(A, b):
return np.linalg.inv(A)
def np_solve(A, b):
return np.linalg.solve(A, b)
def svd_inv(A, b):
#u, s, v = np.linalg.svd(A)
#Ainv = np.dot(v.transpose(), np.dot(np.diag(s**-1), u.transpose()))
Ainv = np.linalg.pinv(A)
return Ainv
def svd_solve(A, b):
U, S, VT = np.linalg.svd(A)
C = np.dot(U.T, b)
w = np.linalg.solve(np.diag(S), C)
x = VT.T @ w
return x
print("np_inv\n", np_inv(A, b))
print("svd_inv\n", svd_inv(A, b))
#print("np_solve\n", np_solve(A, b))
#print("SVD_solve\n", svd_solve(A, b)) | [
"numpy.linalg.solve",
"numpy.linalg.pinv",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.linalg.svd"
] | [((154, 215), 'numpy.array', 'np.array', (['[[M11, M12, M13], [M21, M22, M23], [M31, M32, M33]]'], {}), '([[M11, M12, M13], [M21, M22, M23], [M31, M32, M33]])\n', (162, 215), True, 'import numpy as np\n'), ((250, 272), 'numpy.array', 'np.array', (['[[4], [8.0]]'], {}), '([[4], [8.0]])\n', (258, 272), True, 'import numpy as np\n'), ((318, 334), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (331, 334), True, 'import numpy as np\n'), ((367, 388), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (382, 388), True, 'import numpy as np\n'), ((525, 542), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (539, 542), True, 'import numpy as np\n'), ((596, 612), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (609, 612), True, 'import numpy as np\n'), ((622, 636), 'numpy.dot', 'np.dot', (['U.T', 'b'], {}), '(U.T, b)\n', (628, 636), True, 'import numpy as np\n'), ((662, 672), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (669, 672), True, 'import numpy as np\n')] |
#Code for creating halton sampling in low n-dimensions
# references : - https://gist.github.com/tupui/cea0a91cc127ea3890ac0f002f887bae
# - https://www.w3resource.com/python-exercises/list/python-data-type-list-exercise-34.php
import numpy as np
def primes (n):
#Defining prime numbers for base using sieve of erasthostenes
not_prime = []
prime = []
for i in range(2, n+1):
if i not in not_prime:
prime.append(i)
for j in range(i*i, n+1, i):
not_prime.append(j)
return prime
def vandercorput(n_sample,base=2):
#generate sample using van der corput sequence per dimension
sequence=[]
for i in range(0,n_sample):
f=1. ; r=0.
while i > 0:
i, remainder = divmod(i, base)
f = f/base
r = r+f*remainder
sequence.append(r)
return sequence
def halton (dimension,n_sample):
# halton sequence general form of van der corput sequence in n-dimensions
big_number = 1000 # just an input for base, as long as dim <= len(base) the program won't error
base = primes(big_number)[:dimension]
#print("base = ",base) # for debugging
sample = [vandercorput(n_sample + 1, dim) for dim in base] # looping van der corput for each dimension
sample = np.stack(sample, axis= -1)[1:] #arrange the array
sample[1:n_sample,:] = sample[0:n_sample - 1,:]
sample[0,:] = 0
return sample
| [
"numpy.stack"
] | [((1327, 1352), 'numpy.stack', 'np.stack', (['sample'], {'axis': '(-1)'}), '(sample, axis=-1)\n', (1335, 1352), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_, assert_almost_equal
from astroML.time_series import search_frequencies
from astroML.utils import check_random_state
# TODO: add tests of lomb_scargle inputs & significance
# TODO: add tests of bootstrap
def test_search_frequencies():
rng = np.random.RandomState(0)
t = np.arange(0, 1E1, 0.01)
f = 1
w = 2 * np.pi * np.array(f)
y = np.sin(w*t)
dy = 0.01
y += dy * rng.randn(len(y))
omegas, power = search_frequencies(t, y, dy)
omax = omegas[power == max(power)]
assert_almost_equal(w, omax, decimal=3)
| [
"numpy.arange",
"astroML.time_series.search_frequencies",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.sin",
"numpy.random.RandomState"
] | [((303, 327), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (324, 327), True, 'import numpy as np\n'), ((337, 361), 'numpy.arange', 'np.arange', (['(0)', '(10.0)', '(0.01)'], {}), '(0, 10.0, 0.01)\n', (346, 361), True, 'import numpy as np\n'), ((411, 424), 'numpy.sin', 'np.sin', (['(w * t)'], {}), '(w * t)\n', (417, 424), True, 'import numpy as np\n'), ((491, 519), 'astroML.time_series.search_frequencies', 'search_frequencies', (['t', 'y', 'dy'], {}), '(t, y, dy)\n', (509, 519), False, 'from astroML.time_series import search_frequencies\n'), ((564, 603), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['w', 'omax'], {'decimal': '(3)'}), '(w, omax, decimal=3)\n', (583, 603), False, 'from numpy.testing import assert_, assert_almost_equal\n'), ((391, 402), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (399, 402), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from manimlib.imports import *
import numpy as np
class Grid(VGroup):
CONFIG = {
"height": 6.0,
"width": 6.0,
}
def __init__(self, rows, columns, **kwargs):
digest_config(self, kwargs, locals())
super().__init__(**kwargs)
x_step = self.width / self.columns
y_step = self.height / self.rows
for x in np.arange(0, self.width + x_step, x_step):
self.add(Line(
[x - self.width / 2., -self.height / 2., 0],
[x - self.width / 2., self.height / 2., 0],
))
for y in np.arange(0, self.height + y_step, y_step):
self.add(Line(
[-self.width / 2., y - self.height / 2., 0],
[self.width / 2., y - self.height / 2., 0]
))
class ScreenGrid(VGroup):
CONFIG = {
"rows": 8,
"columns": 14,
"height": FRAME_Y_RADIUS * 2,
"width": 14,
"grid_stroke": 0.5,
"grid_color": WHITE,
"axis_color": RED,
"axis_stroke": 2,
"labels_scale": 0.25,
"labels_buff": 0,
"number_decimals": 2
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
rows = self.rows
columns = self.columns
grid = Grid(width=self.width, height=self.height, rows=rows, columns=columns)
grid.set_stroke(self.grid_color, self.grid_stroke)
vector_ii = ORIGIN + np.array((- self.width / 2, - self.height / 2, 0))
vector_si = ORIGIN + np.array((- self.width / 2, self.height / 2, 0))
vector_sd = ORIGIN + np.array((self.width / 2, self.height / 2, 0))
axes_x = Line(LEFT * self.width / 2, RIGHT * self.width / 2)
axes_y = Line(DOWN * self.height / 2, UP * self.height / 2)
axes = VGroup(axes_x, axes_y).set_stroke(self.axis_color, self.axis_stroke)
divisions_x = self.width / columns
divisions_y = self.height / rows
directions_buff_x = [UP, DOWN]
directions_buff_y = [RIGHT, LEFT]
dd_buff = [directions_buff_x, directions_buff_y]
vectors_init_x = [vector_ii, vector_si]
vectors_init_y = [vector_si, vector_sd]
vectors_init = [vectors_init_x, vectors_init_y]
divisions = [divisions_x, divisions_y]
orientations = [RIGHT, DOWN]
labels = VGroup()
set_changes = zip([columns, rows], divisions, orientations, [0, 1], vectors_init, dd_buff)
for c_and_r, division, orientation, coord, vi_c, d_buff in set_changes:
for i in range(1, c_and_r):
for v_i, directions_buff in zip(vi_c, d_buff):
ubication = v_i + orientation * division * i
coord_point = round(ubication[coord], self.number_decimals)
label = Text(f"{coord_point}",font="Arial",stroke_width=0).scale(self.labels_scale)
label.next_to(ubication, directions_buff, buff=self.labels_buff)
labels.add(label)
self.add(grid, axes, labels)
class CoordScreen(Scene):
def construct(self):
screen_grid = ScreenGrid()
dot = Dot([1, 1, 0])
self.add(screen_grid)
self.play(FadeIn(dot))
self.wait()
class Scene_(Scene):
CONFIG = {"camera_config": {"background_color": "#ffffff"}}
class MySPWM(Scene):
def construct(self):
Vphase = 1
# Vphase = 1
'''line and word'''
lineP = Line(np.array([-5, Vphase, 0]), np.array([5, Vphase, 0]), color=PINK)
reference = DashedVMobject(Line(LEFT * 5, RIGHT * 5, color=GRAY))
lineN = Line(np.array([-5, -Vphase, 0]), np.array([5, -Vphase, 0]), color=PINK)
self.add(lineP)
self.add(lineN)
self.add(reference)
##########################################################################################
title = TextMobject("Vdc = %s* 相峰值" %(Vphase*2) )
title.to_corner(UP + LEFT)
# basel = TexMobject(
# "\\sum_{n=1}^\\infty "
# "\\frac{1}{n^2} = \\frac{\\pi^2}{6}"
# )
# VGroup(title, basel).arrange(DOWN)
self.play(
Write(title),
#FadeInFrom(basel, UP),
)
self.wait()
# transform_title = TexMobject("That was a transform")
# transform_title.to_corner(UP + LEFT)
# self.play(
# Transform(title, transform_title),
# LaggedStart(*map(FadeOutAndShiftDown, basel)),
# )
# self.wait()
'''arrow'''
arrowU = Vector(UP, color=YELLOW)
arrowV = Vector(UP, color=GREEN)
arrowW = Vector(UP, color=RED)
dot0 = Dot(color=PINK, fill_opacity=1.0)
arrowU.move_to([0, 0.5, 0])
arrowV.move_to([0, 0.5, 0])
arrowW.move_to([0, 0.5, 0])
arrowV.rotate(-TAU / 3, about_point=ORIGIN)
arrowW.rotate(TAU / 3, about_point=ORIGIN)
'''circle'''
circle = Circle(radius=1, color=GRAY)
circle.set_fill(BLUE, opacity=0)
roo = VGroup(arrowU, arrowV, arrowW, circle, dot0)
roo.move_to([-5, 0, 0])
self.play(GrowFromCenter(arrowU))
self.play(GrowFromCenter(arrowV))
self.play(GrowFromCenter(arrowW))
self.play(GrowFromCenter(dot0))
self.play(GrowFromCenter(circle))
self.wait(0)
self.play(FadeOut(circle))
dotU = Dot(point=[arrowU.get_start()[0], arrowU.get_end()[1], 0], color=YELLOW, fill_opacity=0.0)
dotV = Dot(point=[arrowU.get_start()[0], arrowV.get_end()[1], 0], color=GREEN, fill_opacity=0.0)
dotW = Dot(point=[arrowU.get_start()[0], arrowW.get_end()[1], 0], color=RED, fill_opacity=0.0)
roo = VGroup(arrowU, arrowV, arrowW, dot0)
roo.save_state()
dotU.save_state()
dotV.save_state()
dotW.save_state()
def update_rotate_move(mob, alpha):
roo.restore()
dotU.restore()
dotV.restore()
dotW.restore()
roo.shift(RIGHT * 10 * alpha)
# roo.move_to(np.array((1., 0., 0.)) * 5 * (alpha*2-1))
roo.rotate(3 * PI * alpha, axis=OUT, about_point=arrowU.get_start())
roo.shift(DOWN * (arrowU.get_start()[1]))
# roo.next_to(reference, UP)
minY = min(arrowU.get_end()[1], arrowV.get_end()[1], arrowW.get_end()[1])
maxY = max(arrowU.get_end()[1], arrowV.get_end()[1], arrowW.get_end()[1])
if minY < -Vphase:
roo.shift(UP * (-Vphase - minY))
elif maxY > Vphase:
roo.shift(DOWN * (maxY - Vphase))
dotU.move_to([arrowU.get_start()[0], arrowU.get_end()[1], 0])
dotV.move_to([arrowV.get_start()[0], arrowV.get_end()[1], 0])
dotW.move_to([arrowW.get_start()[0], arrowW.get_end()[1], 0])
pathU = TracedPath(dotU.get_center, color=YELLOW)
pathV = TracedPath(dotV.get_center, color=GREEN)
pathW = TracedPath(dotW.get_center, color=RED)
pathO = TracedPath(dot0.get_center, color=PINK)
pathU.set_color(color=YELLOW)
pathV.set_color(color=GREEN)
pathW.set_color(color=RED)
pathO.set_color(color=PINK)
self.add(pathU)
self.add(pathV)
self.add(pathW)
self.add(pathO)
self.play(
UpdateFromAlphaFunc(roo, update_rotate_move),
run_time=4
)
self.wait(1)
self.wait(3)
class MySPWM_two(Scene):
def construct(self):
grid = ScreenGrid()
self.add(grid)
Vphase = 0.866
'''line and word'''
lineP = Line(np.array([-5, Vphase, 0]), np.array([5, Vphase, 0]), color=PINK)
reference = DashedVMobject(Line(LEFT * 5, RIGHT * 5, color=GRAY))
lineN = Line(np.array([-5, -Vphase, 0]), np.array([5, -Vphase, 0]), color=PINK)
self.add(lineP)
self.add(lineN)
self.add(reference)
##########################################################################################
title1 = TextMobject("网侧变流器与机侧变流器处于同一直流系统")
title1.to_corner(UP)
title2 = TextMobject("交流中性点存在电势差")
title2.next_to(title1, DOWN)
self.play(
Write(title1),
Write(title2),
)
# basel = TexMobject(
# "\\sum_{n=1}^\\infty "
# "\\frac{1}{n^2} = \\frac{\\pi^2}{6}"
# )
# VGroup(title, basel).arrange(DOWN)
# self.play(
# Write(title),
# FadeInFrom(basel, UP),
# )
# self.wait()
#
# transform_title = TexMobject("That was a transform")
# transform_title.to_corner(UP + LEFT)
# self.play(
# Transform(title, transform_title),
# LaggedStart(*map(FadeOutAndShiftDown, basel)),
# )
# self.wait()
'''arrow'''
arrowU = Vector(UP, color=YELLOW)
arrowV = Vector(UP, color=GREEN)
arrowW = Vector(UP, color=RED)
dot0 = Dot(color=PINK, fill_opacity=1.0)
arrowU.move_to([0, 0.5, 0])
arrowV.move_to([0, 0.5, 0])
arrowW.move_to([0, 0.5, 0])
arrowV.rotate(-TAU / 3, about_point=ORIGIN)
arrowW.rotate(TAU / 3, about_point=ORIGIN)
'''circle'''
roo = VGroup(arrowU, arrowV, arrowW, dot0)
roo.move_to([-3, 0, 0])
#self.play(GrowFromCenter(roo))
#########################################################
arrowU2 = Vector(UP, color=YELLOW)
arrowV2 = Vector(UP, color=GREEN)
arrowW2 = Vector(UP, color=RED)
dot02 = Dot(color=PINK, fill_opacity=1.0)
arrowU2.move_to([0, 0.5, 0])
arrowV2.move_to([0, 0.5, 0])
arrowW2.move_to([0, 0.5, 0])
arrowV2.rotate(-TAU / 3, about_point=ORIGIN)
arrowW2.rotate(TAU / 3, about_point=ORIGIN)
'''circle'''
roo2 = VGroup(arrowU, arrowV, arrowW, dot0)
roo2.move_to([3, 0, 0])
#self.play(GrowFromCenter(roo2))
roo = VGroup(arrowU, arrowV, arrowW, dot0)
roo.move_to([-3, 0, 0])
roo2 = VGroup(arrowU2, arrowV2, arrowW2, dot02)
roo2.move_to([3, 0, 0])
roo.save_state()
roo2.save_state()
def update_rotate_move(roo, alpha):
roo.restore()
# roo.move_to(np.array((1., 0., 0.)) * 5 * (alpha*2-1))
roo.rotate(3 * PI * alpha, axis=OUT, about_point=arrowU.get_start())
roo.shift(DOWN * (arrowU.get_start()[1]))
# roo.next_to(reference, UP)
minY = min(arrowU.get_end()[1], arrowV.get_end()[1], arrowW.get_end()[1])
maxY = max(arrowU.get_end()[1], arrowV.get_end()[1], arrowW.get_end()[1])
if minY < -Vphase:
roo.shift(UP * (-Vphase - minY))
elif maxY > Vphase:
roo.shift(DOWN * (maxY - Vphase))
def update_rotate_move2(roo2, alpha):
roo2.restore()
# roo.move_to(np.array((1., 0., 0.)) * 5 * (alpha*2-1))
roo2.rotate(10 * PI * alpha, axis=OUT, about_point=arrowU2.get_start())
roo2.shift(DOWN * (arrowU2.get_start()[1]))
# roo.next_to(reference, UP)
minY2 = min(arrowU2.get_end()[1], arrowV2.get_end()[1], arrowW2.get_end()[1])
maxY2 = max(arrowU2.get_end()[1], arrowV2.get_end()[1], arrowW2.get_end()[1])
if minY2 < -Vphase:
roo2.shift(UP * (-Vphase - minY2))
elif maxY2 > Vphase:
roo2.shift(DOWN * (maxY2 - Vphase))
self.play(
UpdateFromAlphaFunc(roo, update_rotate_move),
UpdateFromAlphaFunc(roo2, update_rotate_move2),
run_time=10,
rate_func = linear
)
self.wait(1)
class AntiClockCircleRun(Scene):
def construct(self):
R2 = Circle(radius=2, color=GREEN)
self.add(R2)
R1 = RegularPolygon(n=3) # Dot(color=RED,fill_opacity=1.0)
self.add(R1)
def rr(x, y, z, t):
x = x + 2 * math.cos(2 * math.pi * t)
y = y + 2 * math.sin(2 * math.pi * t)
z = 0
return [x, y, z]
self.play(Homotopy(rr, R1), run_time=10, rate_func=linear)
self.wait()
class DotUpDown(Scene):
def construct(self):
dot = Dot()
text = TextMobject('this is some text').next_to(dot, RIGHT)
self.add(dot, text)
# group01 = VGroup(text,dot)
text.add_updater(lambda a: a.next_to(dot, RIGHT))
self.play(dot.shift, UP * 3)
self.play(dot.shift, DOWN * 3)
# 移除原先的绑定,下面这句无效,remove_updater不适合匿名函数,因此只能使用clear_updater
text.remove_updater(lambda a: a.next_to(dot, RIGHT))
# 清空绑定的所有关系
text.clear_updaters()
# 来来去去
self.play(dot.shift, UP * 4, rate_func=there_and_back, run_time=2)
class Xarrange01(Scene):
def construct(self):
square1, square2 = VGroup(
Square(color=RED),
Square(color=BLUE)
).scale(0.5).set_x(-5)
reference = DashedVMobject(Line(LEFT * 5, RIGHT * 5, color=GRAY))
self.add(square1, square2, reference)
square2.save_state()
def update_rotate_move(mob, alpha):
square2.restore()
square2.shift(RIGHT * 10 * alpha)
square2.rotate(3 * PI * alpha)
self.play(
square1.rotate, 3 * PI,
square1.move_to, [5, 0, 0],
UpdateFromAlphaFunc(square2, update_rotate_move),
run_time=4
)
self.wait()
class OpeningManimExample(Scene):
def construct(self):
title = TextMobject("This is some \\LaTeX")
basel = TexMobject(
"\\sum_{n=1}^\\infty "
"\\frac{1}{n^2} = \\frac{\\pi^2}{6}"
)
VGroup(title, basel).arrange(DOWN)
self.play(
Write(title),
FadeInFrom(basel, UP),
)
self.wait()
transform_title = TextMobject("That was a transform")
transform_title.to_corner(UP + LEFT)
self.play(
Transform(title, transform_title),
LaggedStart(*map(FadeOutAndShiftDown, basel)),
)
self.wait()
grid = NumberPlane()
grid_title = TextMobject("This is a grid")
grid_title.scale(1.5)
grid_title.move_to(transform_title)
self.add(grid, grid_title) # Make sure title is on top of grid
self.play(
FadeOut(title),
FadeInFromDown(grid_title),
ShowCreation(grid, run_time=3, lag_ratio=0.1),
)
self.wait()
grid_transform_title = TextMobject(
"That was a non-linear function \\\\"
"applied to the grid"
)
grid_transform_title.move_to(grid_title, UL)
grid.prepare_for_nonlinear_transform()
self.play(
grid.apply_function,
lambda p: p + np.array([
np.sin(p[1]),
np.sin(p[0]),
0,
]),
run_time=3,
)
self.wait()
self.play(
Transform(grid_title, grid_transform_title)
)
self.wait()
class SquareToCircle(Scene):
def construct(self):
circle = Circle()
square = Square()
square.flip(RIGHT)
square.rotate(-3 * TAU / 8)
circle.set_fill(PINK, opacity=0.5)
self.play(ShowCreation(square))
self.play(Transform(square, circle))
self.play(FadeOut(square))
class WarpSquare(Scene):
def construct(self):
square = Square()
self.play(ApplyPointwiseFunction(
lambda point: complex_to_R3(np.exp(R3_to_complex(point))),
square
))
self.wait()
class WriteStuff(Scene):
def construct(self):
example_text = TextMobject(
"This is some text",
tex_to_color_map={"text": YELLOW}
)
example_tex = TexMobject(
"\\sum_{k=1}^\\infty {1 \\over k^2} = {\\pi^2 \\over 6}",
)
group = VGroup(example_text, example_tex)
group.arrange(DOWN)
group.set_width(FRAME_WIDTH - 2 * LARGE_BUFF)
self.play(Write(example_text))
self.play(Write(example_tex))
self.wait()
class UpdatersExample(Scene):
def construct(self):
decimal = DecimalNumber(
0,
show_ellipsis=True,
num_decimal_places=3,
include_sign=True,
)
square = Square().to_edge(UP)
decimal.add_updater(lambda d: d.next_to(square, RIGHT))
decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))
self.add(square, decimal)
self.play(
square.to_edge, DOWN,
rate_func=there_and_back,
run_time=5,
)
self.wait()
class Bezier1(Scene_):
def construct(self):
hL = Line(color=BLUE).scale(7).shift(UP * 3)
dot0 = Dot(color=ORANGE).shift(UP * 2)
self.add(hL, dot0)
vg = VGroup()
doti = Dot(color=ORANGE).move_to(hL.get_start())
l_1 = Line(color=BLACK, stroke_width=1.5).put_start_and_end_on(dot0.get_center(), doti.get_center())
l_2 = l_1.copy().rotate(PI / 2).scale(10).set_color(PURPLE).set_stroke(width=6)
doti.save_state()
self.add(doti, l_1, l_2, vg)
def anim(obj, alpha):
doti.restore()
doti.shift(RIGHT * hL.get_length() * alpha)
l_1.put_start_and_end_on(dot0.get_center(), doti.get_center())
l_2.become(l_1.copy().rotate(PI / 2).scale(100).set_color(PURPLE).set_stroke(width=6))
vg.add(l_2.copy().set_stroke(width=2, color=PURPLE_E))
self.play(UpdateFromAlphaFunc(doti, anim), run_time=8, rate_func=linear)
self.wait()
class Bezier2(Scene_):
def construct(self):
dot_a = Dot(np.array([-3, -3, 0]), color=PURPLE_A)
dot_b = Dot(np.array([0, 3, 0]), color=PURPLE_A)
dot_c = Dot(np.array([3, -3, 0]), color=PURPLE_A)
l_1 = Line(color=BLUE).put_start_and_end_on(dot_a.get_center(), dot_b.get_center())
l_2 = Line(color=BLUE).put_start_and_end_on(dot_b.get_center(), dot_c.get_center())
l_3 = l_1.copy()
lineG = VGroup()
self.add(dot_a, dot_b, dot_c, l_1, l_2, l_3, lineG)
def anim(obj, alpha):
dot_a.move_to(l_1.point_from_proportion(alpha))
dot_b.move_to(l_2.point_from_proportion(alpha))
l_3.put_start_and_end_on(dot_a.get_center(), dot_b.get_center())
# if int(alpha*100) % 5 == 0: #加if之后会间歇的加入中间的包络线
# lineG.add(l_3.copy().set_stroke(width=2, color=BLUE_A))
lineG.add(l_3.copy().set_stroke(width=2, color=BLUE_A)) # 这行的话最后是用线填充这个区域
self.play(UpdateFromAlphaFunc(l_3, anim), run_time=8, rate_func=linear)
self.wait()
# See old_projects folder for many, many more
| [
"numpy.sin",
"numpy.array",
"numpy.arange"
] | [((394, 435), 'numpy.arange', 'np.arange', (['(0)', '(self.width + x_step)', 'x_step'], {}), '(0, self.width + x_step, x_step)\n', (403, 435), True, 'import numpy as np\n'), ((617, 659), 'numpy.arange', 'np.arange', (['(0)', '(self.height + y_step)', 'y_step'], {}), '(0, self.height + y_step, y_step)\n', (626, 659), True, 'import numpy as np\n'), ((1469, 1517), 'numpy.array', 'np.array', (['(-self.width / 2, -self.height / 2, 0)'], {}), '((-self.width / 2, -self.height / 2, 0))\n', (1477, 1517), True, 'import numpy as np\n'), ((1549, 1596), 'numpy.array', 'np.array', (['(-self.width / 2, self.height / 2, 0)'], {}), '((-self.width / 2, self.height / 2, 0))\n', (1557, 1596), True, 'import numpy as np\n'), ((1627, 1673), 'numpy.array', 'np.array', (['(self.width / 2, self.height / 2, 0)'], {}), '((self.width / 2, self.height / 2, 0))\n', (1635, 1673), True, 'import numpy as np\n'), ((3499, 3524), 'numpy.array', 'np.array', (['[-5, Vphase, 0]'], {}), '([-5, Vphase, 0])\n', (3507, 3524), True, 'import numpy as np\n'), ((3526, 3550), 'numpy.array', 'np.array', (['[5, Vphase, 0]'], {}), '([5, Vphase, 0])\n', (3534, 3550), True, 'import numpy as np\n'), ((3659, 3685), 'numpy.array', 'np.array', (['[-5, -Vphase, 0]'], {}), '([-5, -Vphase, 0])\n', (3667, 3685), True, 'import numpy as np\n'), ((3687, 3712), 'numpy.array', 'np.array', (['[5, -Vphase, 0]'], {}), '([5, -Vphase, 0])\n', (3695, 3712), True, 'import numpy as np\n'), ((7697, 7722), 'numpy.array', 'np.array', (['[-5, Vphase, 0]'], {}), '([-5, Vphase, 0])\n', (7705, 7722), True, 'import numpy as np\n'), ((7724, 7748), 'numpy.array', 'np.array', (['[5, Vphase, 0]'], {}), '([5, Vphase, 0])\n', (7732, 7748), True, 'import numpy as np\n'), ((7857, 7883), 'numpy.array', 'np.array', (['[-5, -Vphase, 0]'], {}), '([-5, -Vphase, 0])\n', (7865, 7883), True, 'import numpy as np\n'), ((7885, 7910), 'numpy.array', 'np.array', (['[5, -Vphase, 0]'], {}), '([5, -Vphase, 0])\n', (7893, 7910), True, 'import numpy as np\n'), 
((18007, 18028), 'numpy.array', 'np.array', (['[-3, -3, 0]'], {}), '([-3, -3, 0])\n', (18015, 18028), True, 'import numpy as np\n'), ((18066, 18085), 'numpy.array', 'np.array', (['[0, 3, 0]'], {}), '([0, 3, 0])\n', (18074, 18085), True, 'import numpy as np\n'), ((18123, 18143), 'numpy.array', 'np.array', (['[3, -3, 0]'], {}), '([3, -3, 0])\n', (18131, 18143), True, 'import numpy as np\n'), ((15058, 15070), 'numpy.sin', 'np.sin', (['p[1]'], {}), '(p[1])\n', (15064, 15070), True, 'import numpy as np\n'), ((15088, 15100), 'numpy.sin', 'np.sin', (['p[0]'], {}), '(p[0])\n', (15094, 15100), True, 'import numpy as np\n')] |
import sys
sys.path.append("../")
from autogl.datasets import build_dataset_from_name
from autogl.solver.classifier.link_predictor import AutoLinkPredictor
from autogl.module.train.evaluation import Auc
import yaml
import random
import torch
import numpy as np
if __name__ == "__main__":
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(
"auto link prediction", formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--dataset",
default="cora",
type=str,
help="dataset to use",
choices=[
"cora",
"pubmed",
"citeseer",
"coauthor_cs",
"coauthor_physics",
"amazon_computers",
"amazon_photo",
],
)
parser.add_argument(
"--configs",
type=str,
default="../configs/lp_gcn_benchmark.yml",
help="config to use",
)
# following arguments will override parameters in the config file
parser.add_argument("--hpo", type=str, default="tpe", help="hpo methods")
parser.add_argument(
"--max_eval", type=int, default=50, help="max hpo evaluation times"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--device", default=0, type=int, help="GPU device")
args = parser.parse_args()
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
seed = args.seed
# set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
dataset = build_dataset_from_name(args.dataset)
configs = yaml.load(open(args.configs, "r").read(), Loader=yaml.FullLoader)
configs["hpo"]["name"] = args.hpo
configs["hpo"]["max_evals"] = args.max_eval
autoClassifier = AutoLinkPredictor.from_config(configs)
# train
autoClassifier.fit(
dataset,
time_limit=3600,
evaluation_method=[Auc],
seed=seed,
train_split=0.85,
val_split=0.05,
)
autoClassifier.get_leaderboard().show()
# test
predict_result = autoClassifier.predict_proba()
pos_edge_index, neg_edge_index = (
dataset[0].test_pos_edge_index,
dataset[0].test_neg_edge_index,
)
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E)
link_labels[: pos_edge_index.size(1)] = 1.0
print(
"test auc: %.4f"
% (Auc.evaluate(predict_result, link_labels.detach().cpu().numpy()))
)
| [
"torch.manual_seed",
"argparse.ArgumentParser",
"autogl.solver.classifier.link_predictor.AutoLinkPredictor.from_config",
"random.seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"autogl.datasets.build_dataset_from_name",
"numpy.random.seed",
"torch.cuda.manual_seed",
"sys.path.append",
... | [((12, 34), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (27, 34), False, 'import sys\n'), ((376, 466), 'argparse.ArgumentParser', 'ArgumentParser', (['"""auto link prediction"""'], {'formatter_class': 'ArgumentDefaultsHelpFormatter'}), "('auto link prediction', formatter_class=\n ArgumentDefaultsHelpFormatter)\n", (390, 466), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((1411, 1436), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1434, 1436), False, 'import torch\n'), ((1528, 1545), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1539, 1545), False, 'import random\n'), ((1550, 1570), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1564, 1570), True, 'import numpy as np\n'), ((1575, 1598), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1592, 1598), False, 'import torch\n'), ((1606, 1631), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1629, 1631), False, 'import torch\n'), ((1782, 1819), 'autogl.datasets.build_dataset_from_name', 'build_dataset_from_name', (['args.dataset'], {}), '(args.dataset)\n', (1805, 1819), False, 'from autogl.datasets import build_dataset_from_name\n'), ((2008, 2046), 'autogl.solver.classifier.link_predictor.AutoLinkPredictor.from_config', 'AutoLinkPredictor.from_config', (['configs'], {}), '(configs)\n', (2037, 2046), False, 'from autogl.solver.classifier.link_predictor import AutoLinkPredictor\n'), ((2542, 2556), 'torch.zeros', 'torch.zeros', (['E'], {}), '(E)\n', (2553, 2556), False, 'import torch\n'), ((1446, 1480), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.device'], {}), '(args.device)\n', (1467, 1480), False, 'import torch\n'), ((1641, 1669), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1663, 1669), False, 'import torch\n')] |
import sys
sys.path.append('.') #get rid of this at some point with central test script or when package is built
import MSI.utilities.run_simulations_without_optimization as rswo
import pandas as pd
import numpy as np
#start here
files_to_include = [['Hong_0_updated.yaml'],
['Hong_2_updated.yaml'],
['Hong_3_updated.yaml'],
['Hong_1_updated.yaml'],
['Troe_4_updated.yaml','Troe_4_abs_updated.yaml'],
['Troe_5_updated.yaml','Troe_5_abs_updated.yaml'],
['Troe_6_updated.yaml','Troe_6_abs_updated.yaml'],
['Troe_7_updated.yaml','Troe_7_abs_updated.yaml'],
['Troe_8_updated.yaml','Troe_8_abs_updated.yaml'],
['Hong_HO2_fake_data_0_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_1_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_2_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_3_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_4_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_5_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_6_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_7_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_8_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_9_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_10_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_11_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_12_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_13_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_14_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Hong_HO2_fake_data_15_updated.yaml','Hong_fake_data_fitted_abs_updated.yaml'],
['Farooq_0.yaml'],
['Farooq_1.yaml'],
['Farooq_2.yaml'],
['Farooq_3.yaml']]
numer_of_iterations = 1
cti_file = 'FFCM1_custom_extra_reaction_updated.cti'
working_directory = 'MSI/data/hong_H2O2_fake_data'
reaction_uncertainty_csv = 'FFCM1_reaction_uncertainty_extra_reaction.csv'
master_reaction_equation_cti_name = 'master_reactions_FFCM1_optimized_extra_reaction.cti'
#rate_constant_target_value_data = 'burke_target_value_single_reactions.csv'
#this would be an empty string '' if you do not want to include it
run_with_k_target_values = 'On'
master_equation_reactions = ['H2O2 + OH <=> H2O + HO2',
'2 HO2 <=> H2O2 + O2',
'HO2 + OH <=> H2O + O2',
'2 OH <=> H2O + O',
'CH3 + HO2 <=> CH4 + O2',
'CH3 + HO2 <=> CH3O + OH']
#master_index = [2,3,4,5,6]
#master_index = [2,3,4,5,6,7]
#master_equation_uncertainty_df = pd.read_csv('MSI/data/test_data/six_parameter_fit_uncertainty_df.csv')
#this could be 'On'
#rate_constant_target_value_data = 'FFCM1_target_reactions_1_extra_reaction.csv'
#start here
six_parameter_fit_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-13.37032086, 32.42060027, 19.23022032, 6.843287462 , 36.62853824 ,-0.220309785 ,-0.099366346, -4.134352081]),
'n':np.array([1.948532282, -5.341557065, -3.337497841, -1.025292166, -5.813524857, 0.011862923 ,0.061801326, 0.581628835]),
'Ea':np.array([-0.463042822, 1.529151218, 0.808025472 ,0.359889935, -0.021309254, -0.098013004, -0.102022118, -0.097024727]),
'c':np.array([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149 ,0.001263134, 0.001236963, -0.000390567]),
'd':np.array([1.071992802, -2.780550365, -1.71391034 ,-0.274481751, -4.491132406, -0.054960894, 0.049553379, 0.270885383]),
'f':np.array([-0.027060156, 0.056903076, 0.041102936 ,0.001361221, 0.144385439, 0.003136796 ,0.001374015, -0.006089248])},
'2 HO2 <=> H2O2 + O2': {'A':np.array([-12.93733217, 24.39245077 ,17.73177606, 4.37803475, 33.44985889, 0.381601192 ,3.748890308]),
'n':np.array([1.872602872, -4.096806067, -3.09439453 ,-0.63226683, -5.125008418, -0.061610462, -0.677953862]),
'Ea':np.array([-0.463903763 ,1.259537237, 0.826684258 ,0.257400116, 0.803882706 ,2.20E-05, 0.181336266]),
'c':np.array([0.002069572, -0.008314769, -0.00424128 ,-0.002016113, 0.000134642 ,0.000122049 ,-0.001026567]),
'd':np.array([0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -0.050708107, -0.526284003]),
'f':np.array([-0.022628436, 0.023558844, 0.031573523 ,-0.00732987, 0.096573278 ,0.001668073, 0.01033547])},
'HO2 + OH <=> H2O + O2': {'A':np.array([-4.795727446, 6.426354909 ,4.878258417, 2.472791017, 7.856296474, 1.328033302 ,-3.457932692, -0.349839371, 2.331070924 ,2.403555921, -0.165397001, 0.246540172 ,0.722946077]),
'n':np.array([0.624241134, -1.321082842, -1.032242319, -0.36532386, -1.112545721, -0.188622956, 0.421083939 ,0.038859478 ,-0.360855106, -0.38989218, 0.029669899 ,-0.04371581, -0.130487515]),
'Ea':np.array([-0.259799111, 0.205620792 ,0.130799794, 0.137023666 ,0.379232542, 6.19E-02, -0.198196699, -0.023548432, 0.118069394 ,0.104383314 ,-0.003830947, 0.011566499 ,-0.073557828]),
'c':np.array([0.00161312, -0.001906694, -0.000863021, -0.00105112 ,-0.002185605, -0.000334461, 0.001817049 ,0.000170761, -0.000859313, -0.000653029, -3.11E-06 ,-6.37E-05, 0.00047058]),
'd':np.array([0.124499363, -0.645652135, -0.535188558, 0.052734001 ,-0.45181066, -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, 0.057288687, -0.012776017, -0.192422381]),
'f':np.array([0.002033109, -0.011099716, 0.005351213 ,-0.007623667, 0.005327017 ,0.001259485,0.00245957, 0.000976725 ,-0.004879845, 0.001903886 ,-0.001838669 ,0.000252269, 0.004691829])},
'2 OH <=> H2O + O': {'A': np.array([-5.40485067, 18.96061659 ,8.089301961, 6.953940096 ,-12.54280438, -3.264972401, 2.106487623 ,-1.657943467, 1.614935 ,-1.536463599]),
'n': np.array([0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, 0.466415264 ,-0.326136934, 0.355297684 ,-0.16618967, 0.253903734]),
'Ea': np.array([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -0.281992263, 0.099465537 ,0.030650483, 0.176069015 ,-0.056967886]),
'c': np.array([-0.003001658, -0.001870536, 0.003820535 ,-0.002753277, 0.014224162, 0.00032969 ,-0.000627241, -0.001081979, -0.002009835, 0.000255318]),
'd':np.array([0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728 ,0.229877892, -0.194395052, 1.033858025 ,0.527183366, 0.308743056]),
'f':np.array([-0.010053913, 0.025128322, 0.035579811 ,0.00515753 ,-0.0083511, -0.00512885, 0.003954, -0.029711993 ,-0.01986861, -0.007691647])},
'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),
'n':np.array([-0.00104,-.36888,.154462]),
'Ea':np.array([.504278,-.44379,-0.03181]),
'c':np.array([0,0,0]),
'd':np.array([0,0,0]),
'f':np.array([0,0,0])},
'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),
'n':np.array([-.04282,.150846]),
'Ea':np.array([0.024285,-0.02956]),
'c':np.array([0,0]),
'd':np.array([0,0]),
'f':np.array([0,0])}}
molecular_parameter_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-0.373074255, -5.658058364,-2.203911028,1.69333527,-7.110529947,-0.272049596,1.373125254,-0.644666166]),
'n':np.array([0.043611058, 0.15417925, -0.208413633, -0.306031876, 0.81053055, 0.031772359 ,-0.136901806, 0.073807424]),
'Ea':np.array([0.419762882, -1.301125209, -0.681648059, -0.091866582, -2.353326781, -0.064230907, 0.047721593 ,0.147941186])},
'2 HO2 <=> H2O2 + O2': {'A':np.array([-0.166005487, -6.797175212, -2.798300682, 1.973896891 ,-4.354910767, -0.082067357, -3.839749825]),
'n':np.array([0.018748596, 0.294710827 ,-0.135488286, -0.332967052, 0.4930396, 0.009470627 ,0.409095255]),
'Ea':np.array([0.459015825, -1.401810899, -0.722040616, -0.066133729, -1.52807633 ,-0.021832631, -0.411667639])},
'HO2 + OH <=> H2O + O2': {'A':np.array([-1.30109642, -11.63457509, -4.680271526, 0.782373804 , -0.016083278, 0.005513255 ,-1.738426278, -0.232013539, 0.884067816 ,-0.500473791, 0.399272687 ,0.062255923 ,-1.667253993]),
'n':np.array([0.152797314, 1.1181845, 0.306250902 ,-0.164846884, -0.008229148, -0.001531881, 0.195875814 ,0.026844834, -0.18238354 ,0.017363927, -0.055634983 ,-0.017324495, 0.218771679]),
'Ea':np.array([0.101558432, -1.638858106, -0.704325409, -0.119041648, -0.307281167, -0.04872945, 0.001603412 ,0.000324159, -0.08089174, -0.148811902, 0.027266121 ,-0.002907638, -0.237949453])},
'2 OH <=> H2O + O': {'A': np.array([0.299144373, -2.662684629, -6.643003014, 0.370230493 ,-3.354253502, -0.271981922, -0.581195748, 9.774024441 , 5.90328859, 2.272800133]),
'n': np.array([-0.028599275, -0.071787028, 0.572722706 ,-0.109709456, 0.381272207 ,0.03153973 ,0.061282516, -1.341475144, -0.835422411, -0.302994441]),
'Ea': np.array([0.535103651, -1.054606857, -0.989721261, -0.169631331, -1.099840578, -0.069647609, -0.101285313, 0.74522721, 0.352517552 ,0.205464658])},
'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),
'n':np.array([-0.00104,-.36888,.154462]),
'Ea':np.array([.504278,-.44379,-0.03181])},
'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),
'n':np.array([-.04282,.150846]),
'Ea':np.array([0.024285,-0.02956])}}
six_parameter_fit_nominal_parameters_dict = {'H2O2 + OH <=> H2O + HO2':{'A':4.64E-06,'n':5.605491008,'Ea':-5440.266692,'c':126875776.1,'d':0.000441194,'f':-5.35E-13},
'2 HO2 <=> H2O2 + O2':{'A':1.30E+04,'n':1.997152351,'Ea':-3628.04407,'c':93390973.44,'d':-0.000732521,'f':8.20E-12} ,
'HO2 + OH <=> H2O + O2':{'A':1.41E+18,'n':-2.05344973,'Ea':-232.0064051,'c':15243859.12,'d':-0.001187694,'f':8.01E-12},
'2 OH <=> H2O + O':{'A':354.5770856,'n':2.938741717,'Ea':-1836.492972,'c':12010735.18,'d':-4.87E-05,'f':1.22E-12},
'CH3 + HO2 <=> CH4 + O2':{'A':3.19e3,'n':2.670857,'Ea':-4080.73,'c':0.0,'d':0.0,'f':0.0},
'CH3 + HO2 <=> CH3O + OH':{'A':8.38e11,'n':.29,'Ea':-785.45,'c':0.0,'d':0.0,'f':0.0}}
MSI_st_instance_one = rswo.running_simulations_without_optimization(cti_file,
.01,
1,
1,
working_directory,
files_to_include,
reaction_uncertainty_csv,rate_constant_target_value_data,
master_equation_reactions = master_equation_reactions,
molecular_parameter_sensitivities = molecular_parameter_sensitivities,
six_parameter_fit_sensitivities = six_parameter_fit_sensitivities,
master_reaction_equation_cti_name = master_reaction_equation_cti_name,
master_index = master_index,
master_equation_uncertainty_df = master_equation_uncertainty_df,
six_paramter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict)
MSI_st_instance_one.multiple_shock_tube_runs(1)
experimental_dict_two_klip_reactions = MSI_st_instance_one.experiment_dictonaries | [
"MSI.utilities.run_simulations_without_optimization.running_simulations_without_optimization",
"numpy.array",
"sys.path.append"
] | [((13, 33), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (28, 33), False, 'import sys\n'), ((13742, 14376), 'MSI.utilities.run_simulations_without_optimization.running_simulations_without_optimization', 'rswo.running_simulations_without_optimization', (['cti_file', '(0.01)', '(1)', '(1)', 'working_directory', 'files_to_include', 'reaction_uncertainty_csv', 'rate_constant_target_value_data'], {'master_equation_reactions': 'master_equation_reactions', 'molecular_parameter_sensitivities': 'molecular_parameter_sensitivities', 'six_parameter_fit_sensitivities': 'six_parameter_fit_sensitivities', 'master_reaction_equation_cti_name': 'master_reaction_equation_cti_name', 'master_index': 'master_index', 'master_equation_uncertainty_df': 'master_equation_uncertainty_df', 'six_paramter_fit_nominal_parameters_dict': 'six_parameter_fit_nominal_parameters_dict'}), '(cti_file, 0.01, 1, 1,\n working_directory, files_to_include, reaction_uncertainty_csv,\n rate_constant_target_value_data, master_equation_reactions=\n master_equation_reactions, molecular_parameter_sensitivities=\n molecular_parameter_sensitivities, six_parameter_fit_sensitivities=\n six_parameter_fit_sensitivities, master_reaction_equation_cti_name=\n master_reaction_equation_cti_name, master_index=master_index,\n master_equation_uncertainty_df=master_equation_uncertainty_df,\n six_paramter_fit_nominal_parameters_dict=\n six_parameter_fit_nominal_parameters_dict)\n', (13787, 14376), True, 'import MSI.utilities.run_simulations_without_optimization as rswo\n'), ((3794, 3916), 'numpy.array', 'np.array', (['[-13.37032086, 32.42060027, 19.23022032, 6.843287462, 36.62853824, -\n 0.220309785, -0.099366346, -4.134352081]'], {}), '([-13.37032086, 32.42060027, 19.23022032, 6.843287462, 36.62853824,\n -0.220309785, -0.099366346, -4.134352081])\n', (3802, 3916), True, 'import numpy as np\n'), ((3981, 4104), 'numpy.array', 'np.array', (['[1.948532282, -5.341557065, -3.337497841, -1.025292166, 
-5.813524857, \n 0.011862923, 0.061801326, 0.581628835]'], {}), '([1.948532282, -5.341557065, -3.337497841, -1.025292166, -\n 5.813524857, 0.011862923, 0.061801326, 0.581628835])\n', (3989, 4104), True, 'import numpy as np\n'), ((4168, 4291), 'numpy.array', 'np.array', (['[-0.463042822, 1.529151218, 0.808025472, 0.359889935, -0.021309254, -\n 0.098013004, -0.102022118, -0.097024727]'], {}), '([-0.463042822, 1.529151218, 0.808025472, 0.359889935, -0.021309254,\n -0.098013004, -0.102022118, -0.097024727])\n', (4176, 4291), True, 'import numpy as np\n'), ((4355, 4476), 'numpy.array', 'np.array', (['[0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149, \n 0.001263134, 0.001236963, -0.000390567]'], {}), '([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149,\n 0.001263134, 0.001236963, -0.000390567])\n', (4363, 4476), True, 'import numpy as np\n'), ((4540, 4663), 'numpy.array', 'np.array', (['[1.071992802, -2.780550365, -1.71391034, -0.274481751, -4.491132406, -\n 0.054960894, 0.049553379, 0.270885383]'], {}), '([1.071992802, -2.780550365, -1.71391034, -0.274481751, -\n 4.491132406, -0.054960894, 0.049553379, 0.270885383])\n', (4548, 4663), True, 'import numpy as np\n'), ((4726, 4846), 'numpy.array', 'np.array', (['[-0.027060156, 0.056903076, 0.041102936, 0.001361221, 0.144385439, \n 0.003136796, 0.001374015, -0.006089248]'], {}), '([-0.027060156, 0.056903076, 0.041102936, 0.001361221, 0.144385439,\n 0.003136796, 0.001374015, -0.006089248])\n', (4734, 4846), True, 'import numpy as np\n'), ((4909, 5015), 'numpy.array', 'np.array', (['[-12.93733217, 24.39245077, 17.73177606, 4.37803475, 33.44985889, \n 0.381601192, 3.748890308]'], {}), '([-12.93733217, 24.39245077, 17.73177606, 4.37803475, 33.44985889, \n 0.381601192, 3.748890308])\n', (4917, 5015), True, 'import numpy as np\n'), ((5076, 5185), 'numpy.array', 'np.array', (['[1.872602872, -4.096806067, -3.09439453, -0.63226683, -5.125008418, -\n 0.061610462, -0.677953862]'], {}), '([1.872602872, 
-4.096806067, -3.09439453, -0.63226683, -5.125008418,\n -0.061610462, -0.677953862])\n', (5084, 5185), True, 'import numpy as np\n'), ((5248, 5350), 'numpy.array', 'np.array', (['[-0.463903763, 1.259537237, 0.826684258, 0.257400116, 0.803882706, 2.2e-05,\n 0.181336266]'], {}), '([-0.463903763, 1.259537237, 0.826684258, 0.257400116, 0.803882706,\n 2.2e-05, 0.181336266])\n', (5256, 5350), True, 'import numpy as np\n'), ((5413, 5521), 'numpy.array', 'np.array', (['[0.002069572, -0.008314769, -0.00424128, -0.002016113, 0.000134642, \n 0.000122049, -0.001026567]'], {}), '([0.002069572, -0.008314769, -0.00424128, -0.002016113, 0.000134642,\n 0.000122049, -0.001026567])\n', (5421, 5521), True, 'import numpy as np\n'), ((5583, 5694), 'numpy.array', 'np.array', (['[0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -\n 0.050708107, -0.526284003]'], {}), '([0.981856324, -1.847383095, -1.493544053, 0.016222685, -\n 3.428753345, -0.050708107, -0.526284003])\n', (5591, 5694), True, 'import numpy as np\n'), ((5755, 5860), 'numpy.array', 'np.array', (['[-0.022628436, 0.023558844, 0.031573523, -0.00732987, 0.096573278, \n 0.001668073, 0.01033547]'], {}), '([-0.022628436, 0.023558844, 0.031573523, -0.00732987, 0.096573278,\n 0.001668073, 0.01033547])\n', (5763, 5860), True, 'import numpy as np\n'), ((5925, 6117), 'numpy.array', 'np.array', (['[-4.795727446, 6.426354909, 4.878258417, 2.472791017, 7.856296474, \n 1.328033302, -3.457932692, -0.349839371, 2.331070924, 2.403555921, -\n 0.165397001, 0.246540172, 0.722946077]'], {}), '([-4.795727446, 6.426354909, 4.878258417, 2.472791017, 7.856296474,\n 1.328033302, -3.457932692, -0.349839371, 2.331070924, 2.403555921, -\n 0.165397001, 0.246540172, 0.722946077])\n', (5933, 6117), True, 'import numpy as np\n'), ((6176, 6371), 'numpy.array', 'np.array', (['[0.624241134, -1.321082842, -1.032242319, -0.36532386, -1.112545721, -\n 0.188622956, 0.421083939, 0.038859478, -0.360855106, -0.38989218, \n 0.029669899, -0.04371581, 
-0.130487515]'], {}), '([0.624241134, -1.321082842, -1.032242319, -0.36532386, -\n 1.112545721, -0.188622956, 0.421083939, 0.038859478, -0.360855106, -\n 0.38989218, 0.029669899, -0.04371581, -0.130487515])\n', (6184, 6371), True, 'import numpy as np\n'), ((6430, 6618), 'numpy.array', 'np.array', (['[-0.259799111, 0.205620792, 0.130799794, 0.137023666, 0.379232542, 0.0619, \n -0.198196699, -0.023548432, 0.118069394, 0.104383314, -0.003830947, \n 0.011566499, -0.073557828]'], {}), '([-0.259799111, 0.205620792, 0.130799794, 0.137023666, 0.379232542,\n 0.0619, -0.198196699, -0.023548432, 0.118069394, 0.104383314, -\n 0.003830947, 0.011566499, -0.073557828])\n', (6438, 6618), True, 'import numpy as np\n'), ((6679, 6867), 'numpy.array', 'np.array', (['[0.00161312, -0.001906694, -0.000863021, -0.00105112, -0.002185605, -\n 0.000334461, 0.001817049, 0.000170761, -0.000859313, -0.000653029, -\n 3.11e-06, -6.37e-05, 0.00047058]'], {}), '([0.00161312, -0.001906694, -0.000863021, -0.00105112, -0.002185605,\n -0.000334461, 0.001817049, 0.000170761, -0.000859313, -0.000653029, -\n 3.11e-06, -6.37e-05, 0.00047058])\n', (6687, 6867), True, 'import numpy as np\n'), ((6926, 7121), 'numpy.array', 'np.array', (['[0.124499363, -0.645652135, -0.535188558, 0.052734001, -0.45181066, -\n 0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, \n 0.057288687, -0.012776017, -0.192422381]'], {}), '([0.124499363, -0.645652135, -0.535188558, 0.052734001, -0.45181066,\n -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, \n 0.057288687, -0.012776017, -0.192422381])\n', (6934, 7121), True, 'import numpy as np\n'), ((7180, 7371), 'numpy.array', 'np.array', (['[0.002033109, -0.011099716, 0.005351213, -0.007623667, 0.005327017, \n 0.001259485, 0.00245957, 0.000976725, -0.004879845, 0.001903886, -\n 0.001838669, 0.000252269, 0.004691829]'], {}), '([0.002033109, -0.011099716, 0.005351213, -0.007623667, 0.005327017,\n 0.001259485, 0.00245957, 0.000976725, -0.004879845, 
0.001903886, -\n 0.001838669, 0.000252269, 0.004691829])\n', (7188, 7371), True, 'import numpy as np\n'), ((7431, 7576), 'numpy.array', 'np.array', (['[-5.40485067, 18.96061659, 8.089301961, 6.953940096, -12.54280438, -\n 3.264972401, 2.106487623, -1.657943467, 1.614935, -1.536463599]'], {}), '([-5.40485067, 18.96061659, 8.089301961, 6.953940096, -12.54280438,\n -3.264972401, 2.106487623, -1.657943467, 1.614935, -1.536463599])\n', (7439, 7576), True, 'import numpy as np\n'), ((7641, 7795), 'numpy.array', 'np.array', (['[0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, \n 0.466415264, -0.326136934, 0.355297684, -0.16618967, 0.253903734]'], {}), '([0.803274875, -3.167851673, -1.607661056, -1.041258197, \n 1.679914849, 0.466415264, -0.326136934, 0.355297684, -0.16618967, \n 0.253903734])\n', (7649, 7795), True, 'import numpy as np\n'), ((7855, 8003), 'numpy.array', 'np.array', (['[0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -\n 0.281992263, 0.099465537, 0.030650483, 0.176069015, -0.056967886]'], {}), '([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555,\n -0.281992263, 0.099465537, 0.030650483, 0.176069015, -0.056967886])\n', (7863, 8003), True, 'import numpy as np\n'), ((8068, 8223), 'numpy.array', 'np.array', (['[-0.003001658, -0.001870536, 0.003820535, -0.002753277, 0.014224162, \n 0.00032969, -0.000627241, -0.001081979, -0.002009835, 0.000255318]'], {}), '([-0.003001658, -0.001870536, 0.003820535, -0.002753277, \n 0.014224162, 0.00032969, -0.000627241, -0.001081979, -0.002009835, \n 0.000255318])\n', (8076, 8223), True, 'import numpy as np\n'), ((8281, 8435), 'numpy.array', 'np.array', (['[0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728, \n 0.229877892, -0.194395052, 1.033858025, 0.527183366, 0.308743056]'], {}), '([0.446957978, -1.467039994, -1.298391635, -0.402720385, \n 0.568106728, 0.229877892, -0.194395052, 1.033858025, 0.527183366, \n 0.308743056])\n', (8289, 8435), True, 'import 
numpy as np\n'), ((8493, 8636), 'numpy.array', 'np.array', (['[-0.010053913, 0.025128322, 0.035579811, 0.00515753, -0.0083511, -\n 0.00512885, 0.003954, -0.029711993, -0.01986861, -0.007691647]'], {}), '([-0.010053913, 0.025128322, 0.035579811, 0.00515753, -0.0083511, -\n 0.00512885, 0.003954, -0.029711993, -0.01986861, -0.007691647])\n', (8501, 8636), True, 'import numpy as np\n'), ((8701, 8741), 'numpy.array', 'np.array', (['[0.007845, -0.89278, -0.94908]'], {}), '([0.007845, -0.89278, -0.94908])\n', (8709, 8741), True, 'import numpy as np\n'), ((8805, 8845), 'numpy.array', 'np.array', (['[-0.00104, -0.36888, 0.154462]'], {}), '([-0.00104, -0.36888, 0.154462])\n', (8813, 8845), True, 'import numpy as np\n'), ((8911, 8951), 'numpy.array', 'np.array', (['[0.504278, -0.44379, -0.03181]'], {}), '([0.504278, -0.44379, -0.03181])\n', (8919, 8951), True, 'import numpy as np\n'), ((9016, 9035), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (9024, 9035), True, 'import numpy as np\n'), ((9102, 9121), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (9110, 9121), True, 'import numpy as np\n'), ((9188, 9207), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (9196, 9207), True, 'import numpy as np\n'), ((9276, 9306), 'numpy.array', 'np.array', (['[1.319108, -0.92151]'], {}), '([1.319108, -0.92151])\n', (9284, 9306), True, 'import numpy as np\n'), ((9374, 9404), 'numpy.array', 'np.array', (['[-0.04282, 0.150846]'], {}), '([-0.04282, 0.150846])\n', (9382, 9404), True, 'import numpy as np\n'), ((9472, 9502), 'numpy.array', 'np.array', (['[0.024285, -0.02956]'], {}), '([0.024285, -0.02956])\n', (9480, 9502), True, 'import numpy as np\n'), ((9571, 9587), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9579, 9587), True, 'import numpy as np\n'), ((9656, 9672), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9664, 9672), True, 'import numpy as np\n'), ((9741, 9757), 'numpy.array', 'np.array', (['[0, 0]'], 
{}), '([0, 0])\n', (9749, 9757), True, 'import numpy as np\n'), ((9830, 9954), 'numpy.array', 'np.array', (['[-0.373074255, -5.658058364, -2.203911028, 1.69333527, -7.110529947, -\n 0.272049596, 1.373125254, -0.644666166]'], {}), '([-0.373074255, -5.658058364, -2.203911028, 1.69333527, -\n 7.110529947, -0.272049596, 1.373125254, -0.644666166])\n', (9838, 9954), True, 'import numpy as np\n'), ((10011, 10131), 'numpy.array', 'np.array', (['[0.043611058, 0.15417925, -0.208413633, -0.306031876, 0.81053055, \n 0.031772359, -0.136901806, 0.073807424]'], {}), '([0.043611058, 0.15417925, -0.208413633, -0.306031876, 0.81053055, \n 0.031772359, -0.136901806, 0.073807424])\n', (10019, 10131), True, 'import numpy as np\n'), ((10195, 10319), 'numpy.array', 'np.array', (['[0.419762882, -1.301125209, -0.681648059, -0.091866582, -2.353326781, -\n 0.064230907, 0.047721593, 0.147941186]'], {}), '([0.419762882, -1.301125209, -0.681648059, -0.091866582, -\n 2.353326781, -0.064230907, 0.047721593, 0.147941186])\n', (10203, 10319), True, 'import numpy as np\n'), ((10381, 10493), 'numpy.array', 'np.array', (['[-0.166005487, -6.797175212, -2.798300682, 1.973896891, -4.354910767, -\n 0.082067357, -3.839749825]'], {}), '([-0.166005487, -6.797175212, -2.798300682, 1.973896891, -\n 4.354910767, -0.082067357, -3.839749825])\n', (10389, 10493), True, 'import numpy as np\n'), ((10554, 10660), 'numpy.array', 'np.array', (['[0.018748596, 0.294710827, -0.135488286, -0.332967052, 0.4930396, \n 0.009470627, 0.409095255]'], {}), '([0.018748596, 0.294710827, -0.135488286, -0.332967052, 0.4930396, \n 0.009470627, 0.409095255])\n', (10562, 10660), True, 'import numpy as np\n'), ((10722, 10833), 'numpy.array', 'np.array', (['[0.459015825, -1.401810899, -0.722040616, -0.066133729, -1.52807633, -\n 0.021832631, -0.411667639]'], {}), '([0.459015825, -1.401810899, -0.722040616, -0.066133729, -\n 1.52807633, -0.021832631, -0.411667639])\n', (10730, 10833), True, 'import numpy as np\n'), ((10897, 11093), 
'numpy.array', 'np.array', (['[-1.30109642, -11.63457509, -4.680271526, 0.782373804, -0.016083278, \n 0.005513255, -1.738426278, -0.232013539, 0.884067816, -0.500473791, \n 0.399272687, 0.062255923, -1.667253993]'], {}), '([-1.30109642, -11.63457509, -4.680271526, 0.782373804, -\n 0.016083278, 0.005513255, -1.738426278, -0.232013539, 0.884067816, -\n 0.500473791, 0.399272687, 0.062255923, -1.667253993])\n', (10905, 11093), True, 'import numpy as np\n'), ((11152, 11344), 'numpy.array', 'np.array', (['[0.152797314, 1.1181845, 0.306250902, -0.164846884, -0.008229148, -\n 0.001531881, 0.195875814, 0.026844834, -0.18238354, 0.017363927, -\n 0.055634983, -0.017324495, 0.218771679]'], {}), '([0.152797314, 1.1181845, 0.306250902, -0.164846884, -0.008229148, \n -0.001531881, 0.195875814, 0.026844834, -0.18238354, 0.017363927, -\n 0.055634983, -0.017324495, 0.218771679])\n', (11160, 11344), True, 'import numpy as np\n'), ((11403, 11599), 'numpy.array', 'np.array', (['[0.101558432, -1.638858106, -0.704325409, -0.119041648, -0.307281167, -\n 0.04872945, 0.001603412, 0.000324159, -0.08089174, -0.148811902, \n 0.027266121, -0.002907638, -0.237949453]'], {}), '([0.101558432, -1.638858106, -0.704325409, -0.119041648, -\n 0.307281167, -0.04872945, 0.001603412, 0.000324159, -0.08089174, -\n 0.148811902, 0.027266121, -0.002907638, -0.237949453])\n', (11411, 11599), True, 'import numpy as np\n'), ((11659, 11813), 'numpy.array', 'np.array', (['[0.299144373, -2.662684629, -6.643003014, 0.370230493, -3.354253502, -\n 0.271981922, -0.581195748, 9.774024441, 5.90328859, 2.272800133]'], {}), '([0.299144373, -2.662684629, -6.643003014, 0.370230493, -\n 3.354253502, -0.271981922, -0.581195748, 9.774024441, 5.90328859, \n 2.272800133])\n', (11667, 11813), True, 'import numpy as np\n'), ((11873, 12028), 'numpy.array', 'np.array', (['[-0.028599275, -0.071787028, 0.572722706, -0.109709456, 0.381272207, \n 0.03153973, 0.061282516, -1.341475144, -0.835422411, -0.302994441]'], {}), '([-0.028599275, 
-0.071787028, 0.572722706, -0.109709456, \n 0.381272207, 0.03153973, 0.061282516, -1.341475144, -0.835422411, -\n 0.302994441])\n', (11881, 12028), True, 'import numpy as np\n'), ((12088, 12243), 'numpy.array', 'np.array', (['[0.535103651, -1.054606857, -0.989721261, -0.169631331, -1.099840578, -\n 0.069647609, -0.101285313, 0.74522721, 0.352517552, 0.205464658]'], {}), '([0.535103651, -1.054606857, -0.989721261, -0.169631331, -\n 1.099840578, -0.069647609, -0.101285313, 0.74522721, 0.352517552, \n 0.205464658])\n', (12096, 12243), True, 'import numpy as np\n'), ((12303, 12343), 'numpy.array', 'np.array', (['[0.007845, -0.89278, -0.94908]'], {}), '([0.007845, -0.89278, -0.94908])\n', (12311, 12343), True, 'import numpy as np\n'), ((12407, 12447), 'numpy.array', 'np.array', (['[-0.00104, -0.36888, 0.154462]'], {}), '([-0.00104, -0.36888, 0.154462])\n', (12415, 12447), True, 'import numpy as np\n'), ((12513, 12553), 'numpy.array', 'np.array', (['[0.504278, -0.44379, -0.03181]'], {}), '([0.504278, -0.44379, -0.03181])\n', (12521, 12553), True, 'import numpy as np\n'), ((12620, 12650), 'numpy.array', 'np.array', (['[1.319108, -0.92151]'], {}), '([1.319108, -0.92151])\n', (12628, 12650), True, 'import numpy as np\n'), ((12718, 12748), 'numpy.array', 'np.array', (['[-0.04282, 0.150846]'], {}), '([-0.04282, 0.150846])\n', (12726, 12748), True, 'import numpy as np\n'), ((12816, 12846), 'numpy.array', 'np.array', (['[0.024285, -0.02956]'], {}), '([0.024285, -0.02956])\n', (12824, 12846), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Unit tests for kernel operations, tested for the forward and the backward pass
"""
import numpy as np
import pytest
from .ops_test_utils import unittest_helper, AA, I, precision, PRECISION_TO_TYPE, constant
from cntk.ops import AVG_POOLING, MAX_POOLING
from ...utils import sanitize_dtype_cntk
# Parametrization cases for test_op_convolution_without_padding:
# each entry is a (convolution map, convolution input) pair of nested lists
# with matching channel counts; shapes are noted inline.
CONVOLUTION_OPERANDS = [
    ([[[5., 6.], # (1, 2, 2) map
       [3., 4.]]],
     [[[1., 2.], # (1, 2, 2) input operand
       [7., 8.]]]),
    ([[[1., 2.], # (3, 2, 2) map
       [3., 4.]],
      [[1., 2.],
       [3., 4.]],
      [[1., 2.],
       [3., 4.]]],
     [[[1., 2.], # (3, 2, 2) input operand
       [3., 4.]],
      [[5., 6.],
       [7., 8.]],
      [[9., 10.],
       [11., 12.]]])
    ]
@pytest.mark.parametrize("convolution_map, convolution_input", CONVOLUTION_OPERANDS)
def test_op_convolution_without_padding(convolution_map, convolution_input, device_id, precision):
    """Forward/backward check of convolution with auto-padding disabled."""
    dt = PRECISION_TO_TYPE[precision]

    kernel = AA(convolution_map, dtype=dt)
    operand = AA(convolution_input, dtype=dt)

    # Prepend singleton batch and channel axes to both tensors.
    operand.shape = (1, 1) + operand.shape
    kernel.shape = (1, 1) + kernel.shape

    from scipy import signal
    # scipy's convolve flips the kernel internally, so flipping the spatial
    # axes up front makes the call compute the cross-correlation that the
    # convolution operator is expected to produce.
    expected_forward = AA(
        [[signal.convolve(kernel[..., ::-1, ::-1], operand, mode='valid')]])
    # The expected input gradient is the (unflipped) kernel itself.
    expected_backward_values = AA([[kernel]])

    a = I(shape=operand.shape,
          data_type=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    from cntk import convolution
    input_op = convolution(constant(value=kernel), a, auto_padding=[False])

    unittest_helper(input_op,
                    {a: operand}, expected_forward,
                    {a: expected_backward_values},
                    device_id=device_id, precision=precision)
# Parametrization cases for test_op_avg_pooling:
# (input_size, pooling_window, strides, expected forward result).
AVG_POOLING_DATA = [
    ([1, 2, 2, 4 ,3], # input_size
     (1, 2, 2, 2, 1), # pooling_window
     (1, 2, 2, 2, 1), # strides
     [[[[[20.5, 21.5, 22.5],
         [ 26.5, 27.5, 28.5]]]]]), # result
    ([1, 2, 4, 4 ,4],
     (1, 2, 2, 2, 2),
     (1, 2, 2, 2, 2),
     [[[[[ 43.5, 45.5],
         [ 51.5, 53.5]],
        [[ 75.5, 77.5],
         [ 83.5, 85.5]]]]]),
    ]
@pytest.mark.parametrize("input_size, pooling_window, strides, result", AVG_POOLING_DATA)
def test_op_avg_pooling(input_size, pooling_window, strides, result, device_id, precision):
    """Forward/backward check of average pooling."""
    dt = PRECISION_TO_TYPE[precision]

    # Operand holds the sequence 1, 2, 3, ... reshaped to input_size.
    operand = np.arange(1, np.prod(input_size) + 1, 1, dtype=dt).reshape(input_size)

    a = I(shape=operand.shape,
          data_type=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    # Every element contributes equally to the average, so each gradient
    # entry is 1 / (number of elements in the pooling window).
    gradient = (1 / np.prod(pooling_window)) * np.ones_like(operand)

    from cntk import pooling
    input_op = pooling(a, AVG_POOLING, pooling_window, strides, auto_padding=[True])

    unittest_helper(input_op,
                    {a: operand}, AA([[result]]), {a: [[gradient]]},
                    device_id=device_id, precision=precision)
# Parametrization cases for test_op_max_pooling:
# (input_size, pooling_window, strides, expected forward result).
MAX_POOLING_DATA = [
    ([1, 2, 2, 4 ,3], # input_size
     (1, 2, 2, 2, 1), # pooling_window
     (1, 2, 2, 2, 1), # strides
     [[[[[ 40., 41., 42.],
         [ 46., 47., 48.]]]]]), # result
    ([1, 2, 4, 4 ,4],
     (1, 2, 2, 2, 2),
     (1, 2, 2, 2, 2),
     [[[[[ 86., 88.],
         [ 94., 96.]],
        [[ 118., 120.],
         [ 126., 128.]]]]]),
    ]
@pytest.mark.parametrize("input_size, pooling_window, strides, result", MAX_POOLING_DATA)
def test_op_max_pooling(input_size, pooling_window, strides, result, device_id, precision):
    """Forward/backward check of max pooling."""
    dt = PRECISION_TO_TYPE[precision]

    # Operand holds the sequence 1, 2, 3, ... reshaped to input_size.
    operand = np.arange(1, np.prod(input_size) + 1, 1, dtype=dt).reshape(input_size)

    a = I(shape=operand.shape,
          data_type=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    # Gradient flows only through the winning (maximum) elements: mark each
    # position that equals one of the forward-pass maxima with 1.0.
    winners = np.asarray(result, dtype=dt).ravel().tolist()
    gradient = np.zeros_like(operand)
    for value in winners:
        gradient += np.asarray(operand == value)

    from cntk import pooling
    input_op = pooling(a, MAX_POOLING, pooling_window, strides)

    unittest_helper(input_op,
                    {a: operand}, AA([[result]]), {a: [[gradient]]},
                    device_id=device_id, precision=precision)
| [
"numpy.prod",
"numpy.ones_like",
"scipy.signal.convolve",
"cntk.pooling",
"cntk.convolution",
"numpy.asarray",
"pytest.mark.parametrize",
"numpy.zeros_like",
"numpy.arange"
] | [((938, 1025), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""convolution_map, convolution_input"""', 'CONVOLUTION_OPERANDS'], {}), "('convolution_map, convolution_input',\n CONVOLUTION_OPERANDS)\n", (961, 1025), False, 'import pytest\n'), ((2480, 2572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_size, pooling_window, strides, result"""', 'AVG_POOLING_DATA'], {}), "('input_size, pooling_window, strides, result',\n AVG_POOLING_DATA)\n", (2503, 2572), False, 'import pytest\n'), ((3906, 3998), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_size, pooling_window, strides, result"""', 'MAX_POOLING_DATA'], {}), "('input_size, pooling_window, strides, result',\n MAX_POOLING_DATA)\n", (3929, 3998), False, 'import pytest\n'), ((1807, 1857), 'cntk.convolution', 'convolution', (['constant_map', 'a'], {'auto_padding': '[False]'}), '(constant_map, a, auto_padding=[False])\n', (1818, 1857), False, 'from cntk import convolution\n'), ((2813, 2832), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (2820, 2832), True, 'import numpy as np\n'), ((2841, 2882), 'numpy.arange', 'np.arange', (['(1)', '(total_size + 1)', '(1)'], {'dtype': 'dt'}), '(1, total_size + 1, 1, dtype=dt)\n', (2850, 2882), True, 'import numpy as np\n'), ((3221, 3290), 'cntk.pooling', 'pooling', (['a', 'AVG_POOLING', 'pooling_window', 'strides'], {'auto_padding': '[True]'}), '(a, AVG_POOLING, pooling_window, strides, auto_padding=[True])\n', (3228, 3290), False, 'from cntk import pooling\n'), ((4239, 4258), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (4246, 4258), True, 'import numpy as np\n'), ((4267, 4308), 'numpy.arange', 'np.arange', (['(1)', '(total_size + 1)', '(1)'], {'dtype': 'dt'}), '(1, total_size + 1, 1, dtype=dt)\n', (4276, 4308), True, 'import numpy as np\n'), ((4506, 4534), 'numpy.asarray', 'np.asarray', (['result'], {'dtype': 'dt'}), '(result, dtype=dt)\n', (4516, 4534), True, 'import numpy as np\n'), 
((4663, 4691), 'numpy.zeros_like', 'np.zeros_like', (['input_operand'], {}), '(input_operand)\n', (4676, 4691), True, 'import numpy as np\n'), ((4867, 4915), 'cntk.pooling', 'pooling', (['a', 'MAX_POOLING', 'pooling_window', 'strides'], {}), '(a, MAX_POOLING, pooling_window, strides)\n', (4874, 4915), False, 'from cntk import pooling\n'), ((3147, 3174), 'numpy.ones_like', 'np.ones_like', (['input_operand'], {}), '(input_operand)\n', (3159, 3174), True, 'import numpy as np\n'), ((4745, 4781), 'numpy.asarray', 'np.asarray', (['(input_operand == element)'], {}), '(input_operand == element)\n', (4755, 4781), True, 'import numpy as np\n'), ((3120, 3143), 'numpy.prod', 'np.prod', (['pooling_window'], {}), '(pooling_window)\n', (3127, 3143), True, 'import numpy as np\n'), ((1485, 1544), 'scipy.signal.convolve', 'signal.convolve', (['flipped_conv_map', 'conv_input'], {'mode': '"""valid"""'}), "(flipped_conv_map, conv_input, mode='valid')\n", (1500, 1544), False, 'from scipy import signal\n')] |
# -*- coding: utf-8 -*-
"""Padding transformer, pad unequal length panel to max length or fixed length."""
import numpy as np
import pandas as pd
from sktime.transformations.base import BaseTransformer
__all__ = ["PaddingTransformer"]
__author__ = ["abostrom"]
class PaddingTransformer(BaseTransformer):
    """Pad a panel of unequal-length time series to a common, fixed length.

    Each series is right-padded with ``fill_value`` up to the target
    length. The target length is either the user-supplied ``pad_length``
    or, when that is ``None``, the longest series observed during ``fit``.

    Parameters
    ----------
    pad_length : int, optional (default=None) length to pad the series too.
        if None, will find the longest sequence and use instead.
    """

    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": False,  # is this an instance-wise transform?
        "X_inner_mtype": "nested_univ",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "None",  # which mtypes do _fit/_predict support for X?
        "fit_is_empty": False,
    }

    def __init__(self, pad_length=None, fill_value=0):
        self.pad_length = pad_length
        self.fill_value = fill_value
        super(PaddingTransformer, self).__init__()

    def _fit(self, X, y=None):
        """Fit transformer to X and y.

        Determines the padding length: either the configured ``pad_length``
        or, when that is None, the longest series found in X.

        Parameters
        ----------
        X : nested pandas DataFrame of shape [n_instances, n_features]
            each cell of X must contain pandas.Series
        y : ignored argument for interface compatibility

        Returns
        -------
        self : reference to self
        """
        if self.pad_length is not None:
            self.pad_length_ = self.pad_length
        else:
            n_rows = X.shape[0]
            rows = [X.iloc[idx, :].values for idx in range(n_rows)]
            self.pad_length_ = _get_max_length(rows)
        return self

    def _create_pad(self, series):
        # Allocate a fill_value-initialised buffer of the target length and
        # copy the (shorter or equal) series into its leading positions.
        padded = np.full(self.pad_length_, self.fill_value, float)
        n = len(series)
        padded[:n] = series.iloc[:n]
        return padded

    def _transform(self, X, y=None):
        """Transform X and return a padded version.

        Parameters
        ----------
        X : nested pandas DataFrame of shape [n_instances, n_features]
            each cell of X must contain pandas.Series
        y : ignored argument for interface compatibility

        Returns
        -------
        Xt : nested pandas DataFrame of shape [n_instances, n_features]
            each cell of Xt contains pandas.Series, padded to equal length

        Raises
        ------
        ValueError
            If X contains a series longer than the fitted/configured length.
        """
        n_rows = X.shape[0]
        rows = [X.iloc[idx, :].values for idx in range(n_rows)]
        if _get_max_length(rows) > self.pad_length_:
            raise ValueError(
                "Error: max_length of series \
is greater than the one found when fit or set."
            )
        padded_rows = [
            pd.Series([self._create_pad(series) for series in row]) for row in rows
        ]
        Xt = pd.DataFrame(padded_rows).applymap(pd.Series)
        return Xt
def _get_max_length(X):
def get_length(input):
return max(map(lambda series: len(series), input))
return max(map(get_length, X))
| [
"pandas.DataFrame",
"numpy.full"
] | [((2318, 2367), 'numpy.full', 'np.full', (['self.pad_length_', 'self.fill_value', 'float'], {}), '(self.pad_length_, self.fill_value, float)\n', (2325, 2367), True, 'import numpy as np\n'), ((3579, 3596), 'pandas.DataFrame', 'pd.DataFrame', (['pad'], {}), '(pad)\n', (3591, 3596), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
# Cython extension modules compiled by build_ext.
extensions = [
    Extension("ace", ["MCNPtools/ace.pyx"],
              include_dirs=[numpy.get_include()])  # ace.pyx uses the NumPy C API headers
]

setup(name='MCNPtools',
      version='0.1',
      description='Python scripts that are useful analyzing MCNP results and creating some ww for inputs in new runs.',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/sellitforcache/MCNPtools',
      packages=['MCNPtools'],  # include all packages under src
      cmdclass = {'build_ext': build_ext},  # compile .pyx sources with Cython's build_ext
      include_dirs = [numpy.get_include()],
      ext_modules = extensions,
      # NOTE(review): 'MCNPtools/' is a directory, not a script file — it looks
      # like a script name was truncated here; confirm the intended entry.
      scripts=['MCNPtools/convert2singlefile.py','MCNPtools/'],
      license="BSD3",
      )
| [
"numpy.get_include"
] | [((660, 679), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (677, 679), False, 'import numpy\n'), ((232, 251), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (249, 251), False, 'import numpy\n')] |
"""Load ASL BIDS filter class"""
import os
import json
import numpy as np
import nibabel as nib
from asldro.filters.basefilter import BaseFilter, FilterInputValidationError
from asldro.containers.image import NiftiImageContainer
from asldro.validators.parameters import (
Parameter,
ParameterValidator,
isinstance_validator,
)
from asldro.validators.user_parameter_input import SUPPORTED_ASL_CONTEXTS
class LoadAslBidsFilter(BaseFilter):
    """
    A filter that loads in ASL data in BIDS format, comprising of a NIFTI image file, json
    sidecar and tsv aslcontext file. After loading in the data, image containers are created
    using the volumes described in aslcontext. For each of these containers, the data in
    sidecar is added to the metadata object. In addition a metadata 'asl_context' is created
    which is a list of the corresponding volumes contained in each container. Any metadata
    entries that are an array and specific to each volume have only the corresponding values
    copied.

    **Inputs**

    Input Parameters are all keyword arguments for the :class:`LoadAslBidsFilter.add_inputs()`
    member function. They are also accessible via class constants, for example
    :class:`LoadAslBidsFilter.KEY_SIDECAR`

    :param 'image_filename': path and filename to the ASL NIFTI image (must end in .nii or.nii.gz)
    :type 'image_filename': str
    :param 'sidecar_filename': path and filename to the json sidecar (must end in .json)
    :type 'image_filename': str
    :param 'aslcontext_filename': path and filename to the aslcontext file (must end in .tsv). This
        must be a tab separated values file, with heading 'volume_type' and then entries which are
        either 'control', 'label', or 'm0scan'.
    :type 'aslcontext_filename': str

    **Outputs**

    Once run, the filter will populate the dictionary :class:`LoadAslBidsFilter.outputs` with the
    following entries

    :param 'source': the full ASL NIFTI image
    :type 'source': BaseImageContainer
    :param 'control': control volumes (as defined by aslcontext)
    :type 'control': BaseImageContainer
    :param 'label': label volumes (as defined by aslcontext)
    :type 'label': BaseImageContainer
    :param 'm0': m0 volumes (as defined by aslcontext)
    :type 'm0': BaseImageContainer
    """

    KEY_IMAGE_FILENAME = "image_filename"
    KEY_SIDECAR_FILENAME = "sidecar_filename"
    KEY_ASLCONTEXT_FILENAME = "aslcontext_filename"
    KEY_SOURCE = "source"
    KEY_CONTROL = "control"
    KEY_LABEL = "label"
    KEY_M0 = "m0"
    KEY_SIDECAR = "sidecar"

    # Maps output keys to the volume_type strings used in the aslcontext tsv.
    ASL_CONTEXT_MAPPING = {
        KEY_CONTROL: "control",
        KEY_LABEL: "label",
        KEY_M0: "m0scan",
    }
    # Sidecar list fields that are NOT per-volume arrays and so must not be
    # subset when splitting the image by volume type.
    LIST_FIELDS_TO_EXCLUDE = [
        "ScanningSequence",
        "ComplexImageComponent",
        "ImageType",
        "AcquisitionVoxelSize",
    ]

    def __init__(self):
        super().__init__(name="Load ASL BIDS")

    def _run(self):
        """
        Loads in the NIFTI image, json sidecar and tsv aslcontext, then creates image containers
        according to the content of the aslcontext.
        """
        # load in the NIFTI image
        image = nib.load(self.inputs[self.KEY_IMAGE_FILENAME])
        # load in the sidecar
        with open(self.inputs[self.KEY_SIDECAR_FILENAME], "r") as json_file:
            sidecar = json.load(json_file)
            json_file.close()

        # load in the aslcontext tsv
        with open(self.inputs[self.KEY_ASLCONTEXT_FILENAME], "r") as tsv_file:
            loaded_tsv = tsv_file.readlines()
            tsv_file.close()

        # get the ASL context array (skip the 'volume_type' header line)
        asl_context = [s.strip() for s in loaded_tsv][1:]

        # create the output source image
        self.outputs[self.KEY_SOURCE] = NiftiImageContainer(
            image, metadata=sidecar.copy()
        )
        self.outputs[self.KEY_SOURCE].metadata["asl_context"] = asl_context
        self.outputs[self.KEY_SIDECAR] = sidecar

        # iterate over 'control', 'label' and 'm0'. Determine which volumes correspond using
        # asl_context, then place the volumes into new cloned image containers and update
        # the metadata entry 'asl_context'
        for key in [self.KEY_CONTROL, self.KEY_LABEL, self.KEY_M0]:
            volume_indices = [
                i
                for (i, val) in enumerate(asl_context)
                if val == self.ASL_CONTEXT_MAPPING[key]
            ]
            # NOTE(review): a list comprehension is never None, so this check
            # is always True; retained to preserve the original behaviour of
            # creating the output key even when no volumes match.
            if volume_indices is not None:
                self.outputs[key] = self.outputs[self.KEY_SOURCE].clone()
                self.outputs[key].image = np.squeeze(
                    self.outputs[self.KEY_SOURCE].image[:, :, :, volume_indices]
                )
                self.outputs[key].metadata["asl_context"] = [
                    asl_context[i] for i in volume_indices
                ]
                # adjust any lists in the metadata that correspond to a value per volume
                for metadata_key in self.outputs[key].metadata.keys():
                    if metadata_key not in self.LIST_FIELDS_TO_EXCLUDE:
                        if isinstance(self.outputs[key].metadata[metadata_key], list):
                            if len(self.outputs[key].metadata[metadata_key]) == len(
                                asl_context
                            ):
                                self.outputs[key].metadata[metadata_key] = [
                                    self.outputs[key].metadata[metadata_key][i]
                                    for i in volume_indices
                                ]

    def _validate_inputs(self):
        """Checks that inputs meet their validation criteria

        'image_filename' must be a str, .nii or .nii.gz and exist on the file system
        'sidecar_filename' must be a str, end with .json and exist on the file system
        'aslcontext_filename' must be a str, end with .tsv, exist on the file system
        and contain 'volume_type' followed by a list comprised of 'control', 'label'
        or 'm0scan'

        :raises FilterInputValidationError: if any of the above criteria are not met
        """
        input_validator = ParameterValidator(
            parameters={
                self.KEY_IMAGE_FILENAME: Parameter(
                    validators=[
                        isinstance_validator(str),
                    ]
                ),
                self.KEY_SIDECAR_FILENAME: Parameter(
                    validators=[
                        isinstance_validator(str),
                    ]
                ),
                self.KEY_ASLCONTEXT_FILENAME: Parameter(
                    validators=[
                        isinstance_validator(str),
                    ]
                ),
            }
        )
        input_validator.validate(self.inputs, error_type=FilterInputValidationError)

        # Additional validation
        # 'image_filename' should end with .nii or .nii.gz
        if not self.inputs[self.KEY_IMAGE_FILENAME].endswith((".nii", ".nii.gz")):
            raise FilterInputValidationError(
                "LoadAslBidsFilter input 'image_filename' must be a .nii or .nii.gz file"
            )
        # 'sidecar_filename' should be a .json
        if not self.inputs[self.KEY_SIDECAR_FILENAME].endswith((".json")):
            raise FilterInputValidationError(
                "LoadAslBidsFilter input 'sidecar_filename' must be a .json"
            )
        # 'aslcontext_filename' should be a .tsv
        # fix: the message previously named the misspelled key 'aslcontex_filename'
        if not self.inputs[self.KEY_ASLCONTEXT_FILENAME].endswith((".tsv")):
            raise FilterInputValidationError(
                "LoadAslBidsFilter input 'aslcontext_filename' must be a .tsv"
            )

        # check the files actually exist
        for key in self.inputs.keys():
            # the file should exist
            if not os.path.exists(self.inputs[key]):
                raise FilterInputValidationError(f"Input {key} does not exist")

        # check that the contents of the aslcontext file are valid
        with open(self.inputs[self.KEY_ASLCONTEXT_FILENAME], "r") as tsv_file:
            loaded_tsv = tsv_file.readlines()
            tsv_file.close()
        asl_context = [s.strip() for s in loaded_tsv]
        # the first entry should be 'volume_type'
        # fix: a space was missing at the implicit string concatenations below,
        # producing messages like "does notstart with ..."
        if not asl_context[0] == "volume_type":
            raise FilterInputValidationError(
                f"{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does not "
                "start with the string 'volume_type'"
            )
        if not all(volume in SUPPORTED_ASL_CONTEXTS for volume in asl_context[1:]):
            raise FilterInputValidationError(
                f"{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does not "
                "contain valid asl context strings"
            )
        # length of asl_context should be the same as total number of volumes in image
        image = nib.load(self.inputs[self.KEY_IMAGE_FILENAME])
        if not len(asl_context[1:]) == image.dataobj.shape[3]:
            raise FilterInputValidationError(
                "The number of aslcontext entries must be equal to the number of "
                "volumes in the input image"
            )
| [
"os.path.exists",
"nibabel.load",
"asldro.validators.parameters.isinstance_validator",
"numpy.squeeze",
"asldro.filters.basefilter.FilterInputValidationError",
"json.load"
] | [((3176, 3222), 'nibabel.load', 'nib.load', (['self.inputs[self.KEY_IMAGE_FILENAME]'], {}), '(self.inputs[self.KEY_IMAGE_FILENAME])\n', (3184, 3222), True, 'import nibabel as nib\n'), ((8798, 8844), 'nibabel.load', 'nib.load', (['self.inputs[self.KEY_IMAGE_FILENAME]'], {}), '(self.inputs[self.KEY_IMAGE_FILENAME])\n', (8806, 8844), True, 'import nibabel as nib\n'), ((3352, 3372), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3361, 3372), False, 'import json\n'), ((6958, 7064), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['"""LoadAslBidsFilter input \'image_filename\' must be a .nii or .nii.gz file"""'], {}), '(\n "LoadAslBidsFilter input \'image_filename\' must be a .nii or .nii.gz file")\n', (6984, 7064), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((7231, 7324), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['"""LoadAslBidsFilter input \'sidecar_filename\' must be a .json"""'], {}), '(\n "LoadAslBidsFilter input \'sidecar_filename\' must be a .json")\n', (7257, 7324), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((7494, 7588), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['"""LoadAslBidsFilter input \'aslcontex_filename\' must be a .tsv"""'], {}), '(\n "LoadAslBidsFilter input \'aslcontex_filename\' must be a .tsv")\n', (7520, 7588), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((8257, 8385), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['f"""{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does notstart with the string \'volume_type\'"""'], {}), '(\n f"{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does notstart with the string \'volume_type\'"\n )\n', (8283, 8385), False, 'from asldro.filters.basefilter import BaseFilter, 
FilterInputValidationError\n'), ((8528, 8654), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['f"""{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does notcontain valid asl context strings"""'], {}), "(\n f'{self.inputs[self.KEY_ASLCONTEXT_FILENAME]} does notcontain valid asl context strings'\n )\n", (8554, 8654), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((8926, 9055), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['"""The number of aslcontext entries must be equal to the number ofvolumes in the input image"""'], {}), "(\n 'The number of aslcontext entries must be equal to the number ofvolumes in the input image'\n )\n", (8952, 9055), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((4598, 4670), 'numpy.squeeze', 'np.squeeze', (['self.outputs[self.KEY_SOURCE].image[:, :, :, volume_indices]'], {}), '(self.outputs[self.KEY_SOURCE].image[:, :, :, volume_indices])\n', (4608, 4670), True, 'import numpy as np\n'), ((7750, 7782), 'os.path.exists', 'os.path.exists', (['self.inputs[key]'], {}), '(self.inputs[key])\n', (7764, 7782), False, 'import os\n'), ((7806, 7863), 'asldro.filters.basefilter.FilterInputValidationError', 'FilterInputValidationError', (['f"""Input {key} does not exist"""'], {}), "(f'Input {key} does not exist')\n", (7832, 7863), False, 'from asldro.filters.basefilter import BaseFilter, FilterInputValidationError\n'), ((6226, 6251), 'asldro.validators.parameters.isinstance_validator', 'isinstance_validator', (['str'], {}), '(str)\n', (6246, 6251), False, 'from asldro.validators.parameters import Parameter, ParameterValidator, isinstance_validator\n'), ((6405, 6430), 'asldro.validators.parameters.isinstance_validator', 'isinstance_validator', (['str'], {}), '(str)\n', (6425, 6430), False, 'from asldro.validators.parameters import Parameter, ParameterValidator, isinstance_validator\n'), 
((6587, 6612), 'asldro.validators.parameters.isinstance_validator', 'isinstance_validator', (['str'], {}), '(str)\n', (6607, 6612), False, 'from asldro.validators.parameters import Parameter, ParameterValidator, isinstance_validator\n')] |
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
SMOOTH = 1e-6
def iou_numpy(outputs: np.ndarray, labels: np.ndarray, smooth: float = 1e-6):
    """Compute a thresholded IoU score between two binary masks.

    Parameters
    ----------
    outputs, labels : np.ndarray
        Boolean (or 0/1 integer) masks of identical shape; bitwise & / |
        are used for intersection and union.
    smooth : float, optional
        Smoothing constant avoiding division by zero for empty masks.
        Defaults to 1e-6 (the module-level SMOOTH value), so existing
        two-argument callers are unaffected.

    Returns
    -------
    float
        Mean thresholded IoU: IoU <= 0.5 scores 0; values above 0.5 are
        bucketed upward in steps of 0.05, giving scores in {0.1, ..., 1.0}.
    """
    # outputs = outputs.squeeze(2)
    intersection = (outputs & labels).sum((0, 1))
    union = (outputs | labels).sum((0, 1))

    iou = (intersection + smooth) / (union + smooth)

    # 20 * (iou - 0.5) maps (0.5, 1.0] onto (0, 10]; ceil then /10 buckets it.
    thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10

    return thresholded.mean()  # Or thresholded.mean()
# 1. Read the image as grayscale (flag 0)
img = cv.imread('/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train/0030fd0e6378/0030fd0e6378.png',0)
# 2. Threshold segmentation
thresholdValue=135
# fixed-threshold binary segmentation
ret, th1 = cv.threshold(img, thresholdValue, 255, cv.THRESH_BINARY)
# Otsu's method — the 200 passed here is ignored; the threshold is chosen automatically
ret,th2 = cv.threshold(img,200,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# to-zero thresholding: pixels below the threshold are set to 0, others kept
ret, th4 = cv.threshold(img, thresholdValue, 255, cv.THRESH_TOZERO)
# 3. Display the images
# NOTE(review): `titles` is defined but never used, and lists 6 entries for 4 images.
titles = ['original', 'th1', 'th2', 'th3', 'th4', 'th5']
images = [img, th1,th2, th4]
plt.figure(figsize=(10,6))
# Display with Matplotlib
for i in range(4):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i], 'gray')
    plt.xticks([]), plt.yticks([])  # hide the axes
plt.show()
"matplotlib.pyplot.imshow",
"numpy.clip",
"matplotlib.pyplot.xticks",
"cv2.threshold",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((470, 600), 'cv2.imread', 'cv.imread', (['"""/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train/0030fd0e6378/0030fd0e6378.png"""', '(0)'], {}), "(\n '/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train/0030fd0e6378/0030fd0e6378.png'\n , 0)\n", (479, 600), True, 'import cv2 as cv\n'), ((630, 686), 'cv2.threshold', 'cv.threshold', (['img', 'thresholdValue', '(255)', 'cv.THRESH_BINARY'], {}), '(img, thresholdValue, 255, cv.THRESH_BINARY)\n', (642, 686), True, 'import cv2 as cv\n'), ((697, 759), 'cv2.threshold', 'cv.threshold', (['img', '(200)', '(255)', '(cv.THRESH_BINARY + cv.THRESH_OTSU)'], {}), '(img, 200, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n', (709, 759), True, 'import cv2 as cv\n'), ((766, 822), 'cv2.threshold', 'cv.threshold', (['img', 'thresholdValue', '(255)', 'cv.THRESH_TOZERO'], {}), '(img, thresholdValue, 255, cv.THRESH_TOZERO)\n', (778, 822), True, 'import cv2 as cv\n'), ((921, 948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (931, 948), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as plt\n'), ((988, 1012), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (999, 1012), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1046), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[i]', '"""gray"""'], {}), "(images[i], 'gray')\n", (1027, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1065), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1061, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1081), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1077, 1081), True, 'import matplotlib.pyplot as plt\n'), ((356, 388), 'numpy.clip', 'np.clip', (['(20 * (iou - 0.5))', '(0)', '(10)'], {}), '(20 * (iou - 0.5), 0, 10)\n', (363, 388), True, 
'import numpy as np\n')] |
import numpy as np
import random
from boxenv import *
from agent import *
# Experiment configuration: number of discrete skills and condition label.
NB_SKILLS = 6
COND = 'OUR'
STATE_DIM = 2
DIM = STATE_DIM
# Policy network takes state concatenated with a one-hot skill vector.
policy_function = GaussianPolicyFunction(STATE_DIM + NB_SKILLS, 2)
policy = GaussianPolicy()
# Discriminator mapping goals to a distribution over skills.
d = SkillDiscriminator(DIM, NB_SKILLS)
# initial training task list
# TASKS = [(0.5, 0.8), (-0.5, 0.8)]
TASKS = [(1., 0.8), (0.33, 0.8), (-0.33, 0.8), (-1, 0.8)]
# create a stationary test task list (fixed seed so it is identical across runs)
rng = np.random.default_rng(1)
TEST_TASKS = rng.random((10,2))
# scale x-coordinates from [0, 1) to [-1, 1); pin all y-coordinates to 0.8
TEST_TASKS = TEST_TASKS * 2 - 1
TEST_TASKS[:,1] = 0.8
print('Testing zero-shot generalization on tasks ')
print(TEST_TASKS)
def compute_rewards(s1, s2, g):
    """Reward for moving from state s1 to s2: the reduction in distance to goal g.

    Positive when the step moved closer to the goal, negative when it
    moved away, zero when the distance is unchanged.
    """
    dist_before = np.linalg.norm(s1 - g)
    dist_after = np.linalg.norm(s2 - g)
    return dist_before - dist_after
# Evaluate zero-shot generalization: for each seed, load the trained policy and
# discriminator, then roll out skills sampled from the discriminator on each
# test task, accumulating the distance-reduction reward per episode.
rewards = []
for SEED in [123, 456, 789]:
    # seed all RNGs so each evaluation run is reproducible
    np.random.seed(SEED)
    random.seed(SEED)
    torch.manual_seed(SEED)
    box = BoxWorld()
    NB_TASKS = TEST_TASKS.shape[0]
    # load the trained policy / discriminator weights for this condition and seed
    policy_function.load_state_dict(torch.load(
        'models/{}/{}/policy_seed{}.pth'.format(COND, len(TASKS), SEED)))
    d.load_state_dict(torch.load(
        'models/{}/{}/variational_seed{}.pth'.format(COND, len(TASKS), SEED)))
    # skill distribution per test goal (discriminator outputs log-probs; exp -> probs)
    _, d_skills = d(torch.Tensor(TEST_TASKS))
    d_skills = d_skills.detach().exp()
    rewards.append([])
    for gid in range(NB_TASKS):
        rewards[-1].append([])
        for i in range(10):  # 10 evaluation episodes per goal
            # sample a skill
            w = np.random.choice(range(NB_SKILLS), p=d_skills[gid].numpy())
            w_onehot = np.zeros(NB_SKILLS)
            w_onehot[w] = 1
            s = box.reset()
            done = False
            states = []
            rewards[-1][gid].append(0)
            while not done:
                states.append(s)
                # policy input is the state concatenated with the one-hot skill
                s = torch.Tensor(np.concatenate((s, w_onehot)))
                # get action and logprobs
                mu, sigma = policy_function(s)
                unscaled_action, logprob, entropy = policy.forward(mu, sigma)
                # scale action to environment limits
                a = box.scale_action(unscaled_action.detach().numpy())
                # step the environment
                s, _, done = box.step(a)
                # accumulate the distance-reduction reward for this episode
                r = compute_rewards(states[-1], s, TEST_TASKS[gid])
                rewards[-1][gid][-1] += r
# shape: (n_seeds, n_tasks, n_episodes); report per-seed means and their spread
rewards = np.stack(rewards)
print(rewards.mean(-1).mean(-1))
print(rewards.mean())
print(np.std(rewards.mean(-1).mean(-1)))
| [
"numpy.random.default_rng",
"random.seed",
"numpy.stack",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"numpy.linalg.norm"
] | [((434, 458), 'numpy.random.default_rng', 'np.random.default_rng', (['(1)'], {}), '(1)\n', (455, 458), True, 'import numpy as np\n'), ((2404, 2421), 'numpy.stack', 'np.stack', (['rewards'], {}), '(rewards)\n', (2412, 2421), True, 'import numpy as np\n'), ((792, 814), 'numpy.linalg.norm', 'np.linalg.norm', (['(s1 - g)'], {}), '(s1 - g)\n', (806, 814), True, 'import numpy as np\n'), ((827, 849), 'numpy.linalg.norm', 'np.linalg.norm', (['(s2 - g)'], {}), '(s2 - g)\n', (841, 849), True, 'import numpy as np\n'), ((933, 953), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (947, 953), True, 'import numpy as np\n'), ((958, 975), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (969, 975), False, 'import random\n'), ((1624, 1643), 'numpy.zeros', 'np.zeros', (['NB_SKILLS'], {}), '(NB_SKILLS)\n', (1632, 1643), True, 'import numpy as np\n'), ((1882, 1911), 'numpy.concatenate', 'np.concatenate', (['(s, w_onehot)'], {}), '((s, w_onehot))\n', (1896, 1911), True, 'import numpy as np\n')] |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from execution.graph_executer import GraphExecuter
from execution.quantization_mode import QuantizationMode
from graph.manipulations import add_dimensions, calculate_liveness
from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group
from graph.types import Parameters, Transposable
from importer.tflite.new_tflite_graph_all import TfliteImporter
from reports.graph_reporter import GraphReporter
from utils.node_id import NodeId
from utils.tabular import TextTableRenderer
def verify_steps(steps, cnt):
    """Assert the step list has the expected length and only Parameters nodes."""
    assert len(steps) == cnt
    for step in steps:
        assert isinstance(step['node'], Parameters)
def test_load1(mnist_graph):
    """Smoke test: the MNIST tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mnist_graph, {})
    assert G
def test_load2(ir_graph):
    """Smoke test: the IR tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ir_graph, {})
    assert G
def test_load3(ssd_graph):
    """Smoke test: the SSD tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ssd_graph, {})
    assert G
def test_load4(cifar10_graph):
    """Smoke test: the CIFAR-10 tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(cifar10_graph, {})
    assert G
def test_load5(kws_graph):
    """Smoke test: the KWS tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(kws_graph, {})
    assert G
def test_load6(vww_graph):
    """Smoke test: the VWW tflite graph imports successfully."""
    tfi = TfliteImporter()
    G = tfi.create_graph(vww_graph, {})
    assert G
def test_load7(qvww_graph):
    """Quantized VWW: import with tensors+quantization; every node gets a qrec."""
    tfi = TfliteImporter()
    G = tfi.create_graph(qvww_graph, {'load_tensors': True, 'load_quantization': True})
    for node in G.nodes():
        assert NodeId(node) in G.quantization, "node %s doesn't have a qrec" % (node.name)
    assert G
def test_load8(mn2_graph):
    """MobileNet v2: import with tensors+quantization; every node gets a qrec."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mn2_graph, {'load_tensors': True, 'load_quantization': True})
    for node in G.nodes():
        assert NodeId(node) in G.quantization, "node %s doesn't have a qrec" % (node.name)
    assert G
def test_load9(mn1q_graph):
    """Quantized MobileNet v1: imports with tensors and quantization loaded."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mn1q_graph, {'load_tensors': True, 'load_quantization': True})
    assert G
def test_load10():
    """xor graph: imports and yields the expected 6 execution steps."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/xor.tflite", {'load_tensors': True})
    steps = add_dimensions(G)
    verify_steps(steps, 6)
    assert G
def test_load11():
    """ring graph: imports and yields the expected 11 execution steps."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/ring.tflite", {'load_tensors': True})
    steps = add_dimensions(G)
    verify_steps(steps, 11)
    assert G
def test_load12():
    """imu graph: imports and yields the expected 8 execution steps."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/imu.tflite", {'load_tensors': True})
    steps = add_dimensions(G)
    verify_steps(steps, 8)
    assert G
def test_add_dimension1(mnist_graph):
    """add_dimensions produces the expected 10 steps for the MNIST graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mnist_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 10)
def test_add_dimension2(ir_graph):
    """add_dimensions produces the expected 31 steps for the IR graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ir_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 31)
def test_add_dimension3(ssd_graph):
    """add_dimensions produces the expected 40 steps for the SSD graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ssd_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 40)
def test_add_dimension4(cifar10_graph):
    """add_dimensions produces the expected 16 steps for the CIFAR-10 graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(cifar10_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 16)
def test_add_dimension5(kws_graph):
    """add_dimensions produces the expected 9 steps for the KWS graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(kws_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 9)
def test_add_dimension6(vww_graph):
    """add_dimensions produces the expected 122 steps for the VWW graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(vww_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 122)
def test_add_dimension7(qvww_graph):
    """add_dimensions produces the expected 159 steps for the quantized VWW graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(qvww_graph, {})
    steps = add_dimensions(G)
    verify_steps(steps, 159)
def test_add_dimension8(mn3_graph):
    """MobileNet v3: node count matches DFS length and step count (160)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mn3_graph, {})
    assert len(list(G.dfs())) == 160
    steps = add_dimensions(G)
    verify_steps(steps, 160)
def test_liveness1(mnist_graph):
    """Liveness records: one per step minus graph outputs (MNIST: 10 - 1)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mnist_graph, {})
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 9  # no record for 1 output
def test_liveness2(ir_graph):
    """Liveness records for the IR graph (31 steps, 8 outputs)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ir_graph, {})
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 23  # no record for 8 outputs
def test_liveness3(ssd_graph):
    """Liveness records for the SSD graph (40 steps, 1 output)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ssd_graph, {})
    assert G
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 39  # no record for 1 output
def test_liveness4(cifar10_graph):
    """Liveness records for the CIFAR-10 graph (16 steps, 1 output)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(cifar10_graph, {})
    assert G
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 15  # no record for 1 output
def test_liveness5(kws_graph):
    """Liveness records for the KWS graph (9 steps, 1 output)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(kws_graph, {})
    assert G
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 8  # no record for 1 output
def test_liveness6(vww_graph):
    """Liveness records for the VWW graph (122 steps, 1 output)."""
    tfi = TfliteImporter()
    G = tfi.create_graph(vww_graph, {})
    assert G
    steps = add_dimensions(G)
    liveness = calculate_liveness(G, steps)
    assert len(liveness) == 121  # no record for 1 output
def test_adjust1(mnist_graph):
    """adjust_order leaves no residual transposes on the MNIST graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(mnist_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    assert all([not (node.transpose_in or node.transpose_out)
                for node in G.nodes() if isinstance(node, Transposable)]), "shouldn't have transposes"
def test_adjust2(ir_graph):
    """adjust_order leaves no residual transposes on the IR graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ir_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    assert all([not (node.transpose_in or node.transpose_out)
                for node in G.nodes() if isinstance(node, Transposable)]), "shouldn't have transposes"
def test_adjust3(ssd_graph):
    """adjust_order completes without error on the SSD graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(ssd_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
def test_adjust4(cifar10_graph):
    """adjust_order leaves no residual transposes on the CIFAR-10 graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(cifar10_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    assert all([not (node.transpose_in or node.transpose_out)
                for node in G.nodes() if isinstance(node, Transposable)]), "shouldn't have transposes"
def test_adjust5(kws_graph):
    """adjust_order leaves no residual transposes on the KWS graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph(kws_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    assert all([not (node.transpose_in or node.transpose_out)
                for node in G.nodes() if isinstance(node, Transposable)]), "shouldn't have transposes"
def test_adjust6():
    """An unsupported graph must raise NotImplementedError during adjust_order."""
    tfi = TfliteImporter()
    try:
        G = tfi.create_graph("tests/graph/character_recogniction_cnn_ocr.tflite",
                             {'load_tensors': True})
        # This graph has an insane concat which multiplies the output of a linear
        # layer. It will never be supported.
        G.add_dimensions()
        error = False
        G.adjust_order()
    except NotImplementedError:
        error = True
    assert error
def test_adjust_new():
    """adjust_order completes on the fake-quantized OCR graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/ocr_cnn_notile_fquant.tflite",
                         {'load_tensors': True, 'load_quantization': True})
    G.add_dimensions()
    G.adjust_order()
def test_adjust_new2():
    """adjust_order completes on SSDLite v2 with fixed-order outputs."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/ssdlite_v2_quant_ocr_nopostprocess.tflite",
                         {'load_tensors': True, 'load_quantization': True})
    G.add_dimensions()
    G['output_1'].fixed_order = True
    G['output_2'].fixed_order = True
    G.adjust_order()
def test_adjust7(concat_test_graph):
    """adjust_order + pow2 matcher on a concat graph; report generation works."""
    tfi = TfliteImporter()
    G = tfi.create_graph(concat_test_graph, {'load_tensors': True})
    G.node('input_1').fixed_order = True
    G.node('output_1').fixed_order = True
    G.node('output_2').fixed_order = True
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
    report = GraphReporter().report(G, None)
    renderer = TextTableRenderer(maxwidth=200)
    print(report.render(renderer))
    report = GraphReporter(split_dims=True).report(G, None)
def test_adjust8(qvww_graph):
    """adjust_order followed by the fuse_external_bias matcher on quantized VWW."""
    tfi = TfliteImporter()
    G = tfi.create_graph(qvww_graph, {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    matcher = get_fusion("fuse_external_bias")
    matcher.match(G)
    G.add_dimensions()
def test_adjust9(mn3q_graph, caplog):
    """adjust_order + scale8 matcher on quantized MobileNet v3."""
    caplog.set_level(logging.INFO)
    tfi = TfliteImporter()
    G = tfi.create_graph(mn3q_graph, {'load_tensors': True, 'load_quantization': True})
    G.add_dimensions()
    G.adjust_order()
    matcher = get_scale8_match_group()
    matcher.match(G)
    G.add_dimensions()
def test_adjust10(caplog):
    """adjust_order + scale8 matcher on quantized SSDLite v2."""
    caplog.set_level(logging.INFO)
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/ssdlite_v2_quant_ocr_nopostprocess.tflite",
                         {'load_tensors': True, 'load_quantization': True})
    G.add_dimensions()
    G.adjust_order()
    matcher = get_scale8_match_group()
    matcher.match(G)
    G.add_dimensions()
def test_adjust11():
    """adjust_order leaves no residual transposes on the imu graph."""
    tfi = TfliteImporter()
    G = tfi.create_graph("tests/graph/imu.tflite", {'load_tensors': True})
    G.add_dimensions()
    G.adjust_order()
    assert all([not (node.transpose_in or node.transpose_out)
                for node in G.nodes() if isinstance(node, Transposable)]), "shouldn't have transposes"
def test_validate_mn1_float(mn1f_graph):
    """Float MobileNet V1 execution matches the stored reference output."""
    importer = TfliteImporter()
    graph = importer.create_graph(mn1f_graph, {'load_tensors': True})
    graph.add_dimensions()
    # NOTE: no adjust_order() here — this test runs in the imported layout
    get_pow2_match_group().match(graph)
    graph.add_dimensions()
    image = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    image = image.reshape((224, 224, 3))
    outputs = GraphExecuter(graph, qrecs=graph.quantization).execute([image])
    reference = np.load('tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')
    assert np.max(np.abs(outputs[-1][0] - reference[0])) < 0.0001
def test_min(mn1q_graph):
    """Smoke test: the quantized MobileNet V1 graph merely imports without error."""
    importer = TfliteImporter()
    importer.create_graph(mn1q_graph, {'load_tensors': True, 'load_quantization': True})
def test_validate_mn1_quantized1(mn1q_graph, mn1f_graph):
    """Cross-check the quantized MobileNet V1 graph against its float twin.

    Checks, in order: dequantized biases stay close to the float biases,
    float execution matches the stored reference, float-emulated execution of
    the quantized graph stays close to the float reference, and fully
    dequantized execution stays within a looser tolerance.
    """
    # build the float graph with pow2 matching applied
    tfi = TfliteImporter()
    Gf = tfi.create_graph(mn1f_graph, {'load_tensors': True})
    Gf.add_dimensions()
    Gf.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(Gf)
    Gf.add_dimensions()
    # build the quantized graph the same way
    tfi = TfliteImporter()
    G = tfi.create_graph(mn1q_graph, {'load_tensors': True, 'load_quantization': True})
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
    # compare the biases of the filter contained in step 2 of each graph
    fpnode = Gf.graph_state.steps[2]['node']
    fpcnode = fpnode.contained_filters()[0]
    qpnode = G.graph_state.steps[2]['node']
    qpcnode = qpnode.contained_filters()[0]
    nid = NodeId(qpnode, qpcnode)
    qrec = G.quantization[nid]
    dqbiases = qrec.biases_q.get_dequantized(qpcnode.biases)
    assert np.max(np.abs(fpcnode.biases - dqbiases)) < 0.1
    # image is stored HWC; transpose to CHW for execution
    input_tensor = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    input_tensor = input_tensor.reshape((224, 224, 3)).transpose((2, 0, 1))
    # float graph vs stored float reference (tight tolerance)
    executer = GraphExecuter(Gf)
    foutput_tensors = executer.execute([input_tensor])
    foutput_tensor = np.load('tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')
    assert np.max(np.abs(foutput_tensors[-1][0] - foutput_tensor[0])) < 0.0001
    # quantized graph executed in float emulation vs float reference
    executer = GraphExecuter(G, qrecs=G.quantization)
    qfroutput_tensors = executer.execute([input_tensor], qmode=QuantizationMode.none())
    assert np.max(np.abs(qfroutput_tensors[-1][0] - foutput_tensor[0])) < 0.2
    # quantized graph with per-step dequantization vs stored quant reference
    executer = GraphExecuter(G, qrecs=G.quantization)
    qroutput_tensors = executer.execute([input_tensor], qmode=QuantizationMode.all_dequantize())
    output_tensor = np.load('tests/mobv1_valid/output_COCO_val2014_000000362331_0_quant.npy')
    # assert np.max(np.abs(qroutput_tensors[-1][0] - output_tensor[0])) < 0.16
    assert np.max(np.abs(qroutput_tensors[-1][0] - output_tensor[0])) < 0.28
def test_validate_mn1_quantized2(mn1q_graph):
    """Quantized MobileNet V1 survives adjustment plus pow2 matching."""
    importer = TfliteImporter()
    graph = importer.create_graph(mn1q_graph, {'load_tensors': True, 'load_quantization': True})
    graph.add_dimensions()
    graph.adjust_order()
    get_pow2_match_group().match(graph)
    graph.add_dimensions()
def test_validate_mn1_dequant_quantfloat(mn1q_graph):
    """Loading with dequantized weights gives the same results as float
    emulation of the quantized graph."""
    importer = TfliteImporter()
    quantGraph = importer.create_graph(mn1q_graph, {'load_tensors': True, 'load_quantization': True})
    quantGraph.add_dimensions()
    quantGraph.adjust_order()
    get_pow2_match_group().match(quantGraph)
    quantGraph.add_dimensions()
    dequantGraph = importer.create_graph(mn1q_graph, {'load_tensors': True, 'load_dequantized': True})
    dequantGraph.add_dimensions()
    dequantGraph.adjust_order()
    get_pow2_match_group().match(dequantGraph)
    dequantGraph.add_dimensions()
    # image is stored HWC; transpose to CHW for execution
    image = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    image = image.reshape((224, 224, 3)).transpose((2, 0, 1))
    qfOutputs = GraphExecuter(quantGraph, qrecs=quantGraph.quantization).execute(
        [image], qmode=QuantizationMode.none())
    dfOutputs = GraphExecuter(dequantGraph).execute([image])
    worst = max(np.max(np.abs(df[0] - qf[0])) for df, qf in zip(dfOutputs, qfOutputs))
    assert worst < 0.003
def test_mobv2_quant_asym_tf1_15_vwwvehicle():
    """Asymmetric-quantized MobileNet V2 (TF 1.15 export) imports and adjusts cleanly."""
    modelPath = 'tests/mobv2_valid/mobv2_vwwvehicle_quant_asym.tflite'
    importer = TfliteImporter()
    graph = importer.create_graph(modelPath, {'load_tensors': True, 'load_quantization': True})
    graph.add_dimensions()
    graph.adjust_order()
    get_scale8_match_group().match(graph)
    graph.add_dimensions()
| [
"graph.matches.matches.get_pow2_match_group",
"importer.tflite.new_tflite_graph_all.TfliteImporter",
"execution.graph_executer.GraphExecuter",
"numpy.abs",
"execution.quantization_mode.QuantizationMode.all_dequantize",
"graph.matches.matches.get_fusion",
"graph.matches.matches.get_scale8_match_group",
... | [((1421, 1437), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1435, 1437), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((1531, 1547), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1545, 1547), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((1639, 1655), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1653, 1655), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((1752, 1768), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1766, 1768), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((1865, 1881), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1879, 1881), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((1974, 1990), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (1988, 1990), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((2084, 2100), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (2098, 2100), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((2359, 2375), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (2373, 2375), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((2634, 2650), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (2648, 2650), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((2783, 2799), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (2797, 2799), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((2887, 2904), 
'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (2901, 2904), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((2976, 2992), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (2990, 2992), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3081, 3098), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (3095, 3098), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((3171, 3187), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (3185, 3187), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3275, 3292), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (3289, 3292), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((3383, 3399), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (3397, 3399), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3454, 3471), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (3468, 3471), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((3547, 3563), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (3561, 3563), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3615, 3632), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (3629, 3632), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((3709, 3725), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (3723, 3725), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3778, 3795), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', 
(3792, 3795), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((3876, 3892), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (3890, 3892), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((3949, 3966), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (3963, 3966), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4043, 4059), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4057, 4059), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((4112, 4129), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (4126, 4129), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4205, 4221), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4219, 4221), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((4274, 4291), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (4288, 4291), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4370, 4386), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4384, 4386), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((4440, 4457), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (4454, 4457), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4535, 4551), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4549, 4551), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((4641, 4658), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (4655, 4658), False, 'from graph.manipulations import add_dimensions, 
calculate_liveness\n'), ((4733, 4749), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4747, 4749), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((4804, 4821), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (4818, 4821), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4837, 4865), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (4855, 4865), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((4964, 4980), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (4978, 4980), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((5032, 5049), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (5046, 5049), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5065, 5093), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (5083, 5093), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5195, 5211), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (5209, 5211), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((5277, 5294), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (5291, 5294), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5310, 5338), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (5328, 5338), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5443, 5459), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (5457, 5459), False, 'from importer.tflite.new_tflite_graph_all import 
TfliteImporter\n'), ((5529, 5546), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (5543, 5546), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5562, 5590), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (5580, 5590), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5691, 5707), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (5705, 5707), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((5773, 5790), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (5787, 5790), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5806, 5834), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (5824, 5834), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((5934, 5950), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (5948, 5950), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((6016, 6033), 'graph.manipulations.add_dimensions', 'add_dimensions', (['G'], {}), '(G)\n', (6030, 6033), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((6049, 6077), 'graph.manipulations.calculate_liveness', 'calculate_liveness', (['G', 'steps'], {}), '(G, steps)\n', (6067, 6077), False, 'from graph.manipulations import add_dimensions, calculate_liveness\n'), ((6179, 6195), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (6193, 6195), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((6507, 6523), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (6521, 6523), False, 'from importer.tflite.new_tflite_graph_all import 
TfliteImporter\n'), ((6833, 6849), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (6847, 6849), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((6999, 7015), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (7013, 7015), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((7330, 7346), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (7344, 7346), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((7648, 7664), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (7662, 7664), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((8116, 8132), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (8130, 8132), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((8357, 8373), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (8371, 8373), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((8698, 8714), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (8712, 8714), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((8966, 8988), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (8986, 8988), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((9093, 9124), 'utils.tabular.TextTableRenderer', 'TextTableRenderer', ([], {'maxwidth': '(200)'}), '(maxwidth=200)\n', (9110, 9124), False, 'from utils.tabular import TextTableRenderer\n'), ((9262, 9278), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (9276, 9278), False, 'from importer.tflite.new_tflite_graph_all import 
TfliteImporter\n'), ((9398, 9430), 'graph.matches.matches.get_fusion', 'get_fusion', (['"""fuse_external_bias"""'], {}), "('fuse_external_bias')\n", (9408, 9430), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((9560, 9576), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (9574, 9576), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((9723, 9747), 'graph.matches.matches.get_scale8_match_group', 'get_scale8_match_group', ([], {}), '()\n', (9745, 9747), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((9866, 9882), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (9880, 9882), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((10099, 10123), 'graph.matches.matches.get_scale8_match_group', 'get_scale8_match_group', ([], {}), '()\n', (10121, 10123), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((10201, 10217), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (10215, 10217), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((10555, 10571), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (10569, 10571), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((10670, 10692), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (10690, 10692), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((10756, 10816), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/COCO_val2014_000000362331_0.npy"""'], {}), "('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')\n", (10763, 10816), True, 'import numpy as np\n'), ((10887, 10925), 
'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['G'], {'qrecs': 'G.quantization'}), '(G, qrecs=G.quantization)\n', (10900, 10925), False, 'from execution.graph_executer import GraphExecuter\n'), ((11001, 11074), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy"""'], {}), "('tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')\n", (11008, 11074), True, 'import numpy as np\n'), ((11191, 11207), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (11205, 11207), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((11366, 11382), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (11380, 11382), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((11505, 11527), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (11525, 11527), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((11585, 11601), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (11599, 11601), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((11748, 11770), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (11768, 11770), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((12003, 12026), 'utils.node_id.NodeId', 'NodeId', (['qpnode', 'qpcnode'], {}), '(qpnode, qpcnode)\n', (12009, 12026), False, 'from utils.node_id import NodeId\n'), ((12197, 12257), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/COCO_val2014_000000362331_0.npy"""'], {}), "('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')\n", (12204, 12257), True, 'import numpy as np\n'), ((12350, 12367), 'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['Gf'], {}), '(Gf)\n', (12363, 12367), 
False, 'from execution.graph_executer import GraphExecuter\n'), ((12444, 12517), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy"""'], {}), "('tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')\n", (12451, 12517), True, 'import numpy as np\n'), ((12613, 12651), 'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['G'], {'qrecs': 'G.quantization'}), '(G, qrecs=G.quantization)\n', (12626, 12651), False, 'from execution.graph_executer import GraphExecuter\n'), ((12834, 12872), 'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['G'], {'qrecs': 'G.quantization'}), '(G, qrecs=G.quantization)\n', (12847, 12872), False, 'from execution.graph_executer import GraphExecuter\n'), ((12991, 13064), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/output_COCO_val2014_000000362331_0_quant.npy"""'], {}), "('tests/mobv1_valid/output_COCO_val2014_000000362331_0_quant.npy')\n", (12998, 13064), True, 'import numpy as np\n'), ((13279, 13295), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (13293, 13295), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((13442, 13464), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (13462, 13464), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((13652, 13668), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (13666, 13668), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((13815, 13837), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (13835, 13837), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((14034, 14056), 'graph.matches.matches.get_pow2_match_group', 'get_pow2_match_group', ([], {}), '()\n', (14054, 14056), False, 'from 
graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((14125, 14185), 'numpy.load', 'np.load', (['"""tests/mobv1_valid/COCO_val2014_000000362331_0.npy"""'], {}), "('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')\n", (14132, 14185), True, 'import numpy as np\n'), ((14278, 14316), 'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['G'], {'qrecs': 'G.quantization'}), '(G, qrecs=G.quantization)\n', (14291, 14316), False, 'from execution.graph_executer import GraphExecuter\n'), ((14420, 14438), 'execution.graph_executer.GraphExecuter', 'GraphExecuter', (['Gdq'], {}), '(Gdq)\n', (14433, 14438), False, 'from execution.graph_executer import GraphExecuter\n'), ((14801, 14817), 'importer.tflite.new_tflite_graph_all.TfliteImporter', 'TfliteImporter', ([], {}), '()\n', (14815, 14817), False, 'from importer.tflite.new_tflite_graph_all import TfliteImporter\n'), ((14959, 14983), 'graph.matches.matches.get_scale8_match_group', 'get_scale8_match_group', ([], {}), '()\n', (14981, 14983), False, 'from graph.matches.matches import get_fusion, get_pow2_match_group, get_scale8_match_group\n'), ((14513, 14534), 'numpy.abs', 'np.abs', (['(df[0] - qf[0])'], {}), '(df[0] - qf[0])\n', (14519, 14534), True, 'import numpy as np\n'), ((14606, 14618), 'numpy.max', 'np.max', (['elem'], {}), '(elem)\n', (14612, 14618), True, 'import numpy as np\n'), ((2231, 2243), 'utils.node_id.NodeId', 'NodeId', (['node'], {}), '(node)\n', (2237, 2243), False, 'from utils.node_id import NodeId\n'), ((2505, 2517), 'utils.node_id.NodeId', 'NodeId', (['node'], {}), '(node)\n', (2511, 2517), False, 'from utils.node_id import NodeId\n'), ((9046, 9061), 'reports.graph_reporter.GraphReporter', 'GraphReporter', ([], {}), '()\n', (9059, 9061), False, 'from reports.graph_reporter import GraphReporter\n'), ((9173, 9203), 'reports.graph_reporter.GraphReporter', 'GraphReporter', ([], {'split_dims': '(True)'}), '(split_dims=True)\n', (9186, 9203), False, 'from 
reports.graph_reporter import GraphReporter\n'), ((11093, 11142), 'numpy.abs', 'np.abs', (['(routput_tensors[-1][0] - output_tensor[0])'], {}), '(routput_tensors[-1][0] - output_tensor[0])\n', (11099, 11142), True, 'import numpy as np\n'), ((12137, 12170), 'numpy.abs', 'np.abs', (['(fpcnode.biases - dqbiases)'], {}), '(fpcnode.biases - dqbiases)\n', (12143, 12170), True, 'import numpy as np\n'), ((12536, 12586), 'numpy.abs', 'np.abs', (['(foutput_tensors[-1][0] - foutput_tensor[0])'], {}), '(foutput_tensors[-1][0] - foutput_tensor[0])\n', (12542, 12586), True, 'import numpy as np\n'), ((12715, 12738), 'execution.quantization_mode.QuantizationMode.none', 'QuantizationMode.none', ([], {}), '()\n', (12736, 12738), False, 'from execution.quantization_mode import QuantizationMode\n'), ((12758, 12810), 'numpy.abs', 'np.abs', (['(qfroutput_tensors[-1][0] - foutput_tensor[0])'], {}), '(qfroutput_tensors[-1][0] - foutput_tensor[0])\n', (12764, 12810), True, 'import numpy as np\n'), ((12935, 12968), 'execution.quantization_mode.QuantizationMode.all_dequantize', 'QuantizationMode.all_dequantize', ([], {}), '()\n', (12966, 12968), False, 'from execution.quantization_mode import QuantizationMode\n'), ((13162, 13212), 'numpy.abs', 'np.abs', (['(qroutput_tensors[-1][0] - output_tensor[0])'], {}), '(qroutput_tensors[-1][0] - output_tensor[0])\n', (13168, 13212), True, 'import numpy as np\n'), ((14379, 14402), 'execution.quantization_mode.QuantizationMode.none', 'QuantizationMode.none', ([], {}), '()\n', (14400, 14402), False, 'from execution.quantization_mode import QuantizationMode\n')] |
import pandas as pd
import numpy as np
import scipy as sp
from scipy.special import expit as sigmoid_function
import matplotlib.pyplot as plt
import matplotlib as mpl
# apply the ggplot look to every figure produced below
mpl.style.use('ggplot')
def load_data(location):
    """Load the exercise's .mat file and return (features, labels) DataFrames.

    MATLAB stores digit '0' as class 10; the labels are remapped so they
    match Python's 0-based digit classes.
    """
    raw = sp.io.loadmat(location)
    features = pd.DataFrame(raw['X'])
    labels = pd.DataFrame(raw['y'], columns=['digit_class'])
    labels[labels == 10] = 0  # convert from matlab's 1-index to python's 0-index
    return features, labels
def visualize_digit_images_data(data, gridSize=(10, 10), desiredDigitIndices=None, title=None):
    """Plot a gridSize grid of 20x20 digit images drawn from *data*.

    Hand-picked digits (row indices in *desiredDigitIndices*) fill the first
    grid cells; the remaining cells are filled with randomly chosen digits.

    Rewritten with plain numpy because ``pd.Panel`` (removed in pandas 1.0)
    and ``sp.misc.toimage`` (removed in scipy 1.2) no longer exist; also
    generalized from the hard-coded 5000 rows to ``len(data)``.

    Parameters
    ----------
    data : pandas.DataFrame of shape (numDigits, 400) — flattened images.
    gridSize : (rows, cols) of the display grid.
    desiredDigitIndices : optional list of row indices to show first.
    title : optional plot title.
    """
    # reshape to (numDigits, 20, 20); the per-image transpose undoes
    # MATLAB's column-major flattening so the digits appear upright
    numDigits = len(data)
    pixelSquares = data.values.reshape(numDigits, 20, 20).transpose(0, 2, 1)
    if desiredDigitIndices is None:
        desiredDigitIndices = []
    desiredDigits = pixelSquares[list(desiredDigitIndices)]
    numRandom = gridSize[0] * gridSize[1] - len(desiredDigitIndices)
    randomDigits = pixelSquares[np.random.choice(numDigits, numRandom, replace=False)]
    allDigits = np.concatenate([desiredDigits, randomDigits], axis=0)
    # stitch the individual digits into one big image, row-major
    pixelRows = 20 * gridSize[0]
    pixelCols = 20 * gridSize[1]
    digitImage = np.zeros((pixelRows, pixelCols))
    digitToPlot = -1
    for i in range(0, pixelRows, 20):
        for j in range(0, pixelCols, 20):
            digitToPlot += 1
            digitImage[i:i+20, j:j+20] = allDigits[digitToPlot]
    # matplotlib accepts the raw 2-D array directly; no PIL conversion needed
    plt.figure()
    plt.imshow(digitImage, cmap=mpl.cm.Greys)
    plt.title(title if title is not None else '')
    return
# shamelessly stolen from my own hw2, where i had previously written this
def compute_cost(theta, features, response, regularizationParameter=0):
    """Return the (optionally L2-regularized) logistic-regression cost.

    The intercept parameter theta[0] is excluded from the penalty term, as
    is conventional for regularized logistic regression.
    """
    numSamples = len(features)
    penaltyMask = np.ones(len(features.columns))
    penaltyMask[0] = 0  # never regularize the intercept
    penalty = regularizationParameter * np.dot(penaltyMask, theta ** 2) / (2 * numSamples)
    # H(x; theta) = sigmoid(<theta, x>) for every sample at once
    hypothesis = sigmoid_function(np.dot(theta, features.T))
    logLikelihood = response * np.log(hypothesis) + (1 - response) * np.log(1 - hypothesis)
    return -logLikelihood.sum(axis=0) / numSamples + penalty
def compute_dCost_dTheta(theta, features, response, regularizationParameter=0):
    """Return the gradient of the regularized logistic cost w.r.t. theta.

    As in compute_cost, the intercept parameter theta[0] is never penalized.
    """
    numSamples = len(features)
    penaltyMask = np.ones(len(features.columns))
    penaltyMask[0] = 0  # leave the intercept unpenalized (elementwise, no sum)
    penalty = regularizationParameter * (penaltyMask * theta) / numSamples
    residual = sigmoid_function(np.dot(theta, features.T)) - response
    weighted = residual[:, np.newaxis] * features  # per-sample gradient rows
    return weighted.sum(axis=0) / numSamples + penalty
def train_one_vs_all(features, response, classes, regularizationParameter, numIters=500):
    """Fit one regularized logistic model per class and return a
    len(classes) x numFeatures matrix of optimal parameters.

    Note: mutates *features* in place by inserting an intercept column.
    """
    features.insert(0, 'intercept', 1)
    numParams = len(features.columns)
    optimalTheta = np.zeros((len(classes), numParams))
    for modelIdx, classLabel in enumerate(classes):
        print('Training model {0}'.format(classLabel))
        # one-hot encode the response and keep only this class's indicator column
        indicator = pd.get_dummies(response, columns=['digit_class'])['digit_class_' + str(classLabel)]
        result = sp.optimize.minimize(compute_cost,
                                      np.zeros(numParams),  # initial theta (1 x numParams)
                                      args=(features, indicator, regularizationParameter),
                                      jac=compute_dCost_dTheta,
                                      options={'maxiter': numIters},
                                      method='CG')
        optimalTheta[modelIdx, :] = result.x
    return optimalTheta
def predict_one_vs_all(features, optimalTheta, numClasses):
    """Return per-sample predicted class labels: the class whose linear score
    theta . x is largest (sigmoid is monotone, so no need to apply it).

    numClasses is a sanity check that the caller expects the same number of
    classes the models were trained with.
    """
    assert optimalTheta.shape[0] == numClasses, 'The passed number of classes is not the same as was used in training.'
    scores = np.dot(features, optimalTheta.T)
    return np.argmax(scores, axis=1)
def logistic_regression_main(dataLoc):
    """Part 1 of the homework: train one-vs-all logistic regression on the
    digit data, print overall accuracy and show a few sample predictions.

    Fixes: ``.ix`` (removed from pandas) replaced with ``.loc``; the random
    sample now draws indices in [0, len(data)) instead of the off-by-one
    [0, 5000] which could raise on index 5000; the printed accuracy is now
    the fraction of correct predictions (diff == 0), matching
    neural_networks_main, instead of the whole value_counts Series.
    """
    digitFeatures, digitResponse = load_data(dataLoc)
    optimalTheta = train_one_vs_all(digitFeatures, digitResponse, np.arange(0, 10, 1), 0.1)
    predictions = predict_one_vs_all(digitFeatures, optimalTheta, 10)
    digitResponse['logistic_regression_predictions'] = predictions
    # a prediction is correct exactly when the class difference is zero
    residuals = digitResponse['digit_class'] - predictions
    accuracy = residuals.value_counts().loc[0] / len(digitResponse)
    print('Logistic regression accuracy: {0}'.format(accuracy))
    digitFeatures.drop('intercept', axis=1, inplace=True)  # undo train_one_vs_all's in-place insert
    for i in np.random.randint(0, len(digitResponse), 10):
        titleStr = 'Predicted: {0}, Actual: {1}'.format(
            digitResponse.loc[i, 'logistic_regression_predictions'],
            digitResponse.loc[i, 'digit_class'])
        visualize_digit_images_data(digitFeatures, gridSize=(1, 1),
                                    desiredDigitIndices=[i],
                                    title=titleStr)
    return
def load_neural_network_weights(location):
    """Read the pre-trained layer weight matrices (Theta1, Theta2) from a
    .mat file and return them as a pair of DataFrames."""
    weights = sp.io.loadmat(location)
    return pd.DataFrame(weights['Theta1']), pd.DataFrame(weights['Theta2'])
def feed_forward_propagate_and_predict(features, layerParameters):
    """Run a full forward pass and return the arg-max class per example.

    For each layer a bias column of ones is prepended, the activations are
    multiplied by the layer's (transposed) weight matrix, and the sigmoid
    is applied.  The final layer's strongest activation gives the label.
    """
    activations = features.values  # pandas DataFrame -> plain ndarray
    for layer in layerParameters:
        biased = np.insert(activations, 0, 1, axis=1)  # prepend bias unit
        activations = sigmoid_function(np.dot(biased, layer.T))
    return np.argmax(activations, axis=1)
def neural_networks_main(dataLoc, weightLoc):
    """ Part 2 of the homework: classify digits with pre-trained weights.

    Loads the network weights and data, runs forward propagation, reports
    accuracy, and visualizes a handful of random predictions.
    """
    layerParams = load_neural_network_weights(weightLoc)
    digitFeatures, digitResponse = load_data(dataLoc)
    predictions = feed_forward_propagate_and_predict(digitFeatures, layerParams)
    # The pre-trained weights use MATLAB's 1-based labels where class 10
    # represents the digit 0; map back to 0-indexed digit labels.
    predictions = np.arange(1, 11, 1)[predictions]
    predictions[predictions == 10] = 0
    digitResponse['network_predictions'] = predictions  # must convert back to the 0-indexing
    diff = digitResponse['digit_class'] - digitResponse['network_predictions']
    # .loc replaces the deprecated/removed pandas .ix indexer; entry 0 of the
    # difference counts is the number of exact matches.
    accuracy = diff.value_counts().loc[0] / len(digitResponse)
    print('Neural network accuracy: {0}'.format(accuracy))
    for i in np.random.randint(0, 5001, 10):
        titleStr = 'Predicted: {0}, Actual: {1}'.format(
            digitResponse.loc[i, 'network_predictions'],
            digitResponse.loc[i, 'digit_class'])
        visualize_digit_images_data(digitFeatures, gridSize=(1, 1),
                                    desiredDigitIndices=[i],
                                    title=titleStr)
    return
if __name__ == '__main__':
    # NOTE(review): machine-specific absolute paths -- presumably the data
    # and weight files from a machine-learning course exercise 3 download;
    # update these for the local environment before running.
    dataLocation = r"C:\Users\ashamlian\Downloads\machine-learning-ex3\ex3\ex3data1.mat"
    weightLocation = r"C:\Users\ashamlian\Downloads\machine-learning-ex3\ex3\ex3weights.mat"
    # Part 1: one-vs-all logistic regression; Part 2: pre-trained network.
    logistic_regression_main(dataLocation)
    neural_networks_main(dataLocation, weightLocation)
"matplotlib.pyplot.imshow",
"numpy.insert",
"scipy.io.loadmat",
"numpy.log",
"numpy.argmax",
"pandas.get_dummies",
"scipy.misc.toimage",
"scipy.special.expit",
"numpy.zeros",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.random.randint",
"pandas.DataFrame",
"mat... | [((167, 190), 'matplotlib.style.use', 'mpl.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (180, 190), True, 'import matplotlib as mpl\n'), ((381, 404), 'scipy.io.loadmat', 'sp.io.loadmat', (['location'], {}), '(location)\n', (394, 404), True, 'import scipy as sp\n'), ((413, 436), 'pandas.DataFrame', 'pd.DataFrame', (["data['X']"], {}), "(data['X'])\n", (425, 436), True, 'import pandas as pd\n'), ((445, 493), 'pandas.DataFrame', 'pd.DataFrame', (["data['y']"], {'columns': "['digit_class']"}), "(data['y'], columns=['digit_class'])\n", (457, 493), True, 'import pandas as pd\n'), ((1481, 1529), 'pandas.concat', 'pd.concat', (['[desiredDigits, randomDigits]'], {'axis': '(0)'}), '([desiredDigits, randomDigits], axis=0)\n', (1490, 1529), True, 'import pandas as pd\n'), ((1679, 1711), 'numpy.zeros', 'np.zeros', (['(pixelRows, pixelCols)'], {}), '((pixelRows, pixelCols))\n', (1687, 1711), True, 'import numpy as np\n'), ((1999, 2026), 'scipy.misc.toimage', 'sp.misc.toimage', (['digitImage'], {}), '(digitImage)\n', (2014, 2026), True, 'import scipy as sp\n'), ((2031, 2043), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2041, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2089), 'matplotlib.pyplot.imshow', 'plt.imshow', (['digitImage'], {'cmap': 'mpl.cm.Greys'}), '(digitImage, cmap=mpl.cm.Greys)\n', (2058, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2152), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2145, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2693), 'numpy.dot', 'np.dot', (['interceptKnockOut', '(theta ** 2)'], {}), '(interceptKnockOut, theta ** 2)\n', (2662, 2693), True, 'import numpy as np\n'), ((2835, 2860), 'numpy.dot', 'np.dot', (['theta', 'features.T'], {}), '(theta, features.T)\n', (2841, 2860), True, 'import numpy as np\n'), ((2989, 3015), 'scipy.special.expit', 'sigmoid_function', (['features'], {}), '(features)\n', (3005, 3015), True, 'from scipy.special import 
expit as sigmoid_function\n'), ((3708, 3733), 'numpy.dot', 'np.dot', (['theta', 'features.T'], {}), '(theta, features.T)\n', (3714, 3733), True, 'import numpy as np\n'), ((6340, 6370), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5001)', '(10)'], {}), '(0, 5001, 10)\n', (6357, 6370), True, 'import numpy as np\n'), ((6856, 6879), 'scipy.io.loadmat', 'sp.io.loadmat', (['location'], {}), '(location)\n', (6869, 6879), True, 'import scipy as sp\n'), ((6895, 6923), 'pandas.DataFrame', 'pd.DataFrame', (["data['Theta1']"], {}), "(data['Theta1'])\n", (6907, 6923), True, 'import pandas as pd\n'), ((6939, 6967), 'pandas.DataFrame', 'pd.DataFrame', (["data['Theta2']"], {}), "(data['Theta2'])\n", (6951, 6967), True, 'import pandas as pd\n'), ((7340, 7367), 'numpy.argmax', 'np.argmax', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (7349, 7367), True, 'import numpy as np\n'), ((8057, 8087), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5001)', '(10)'], {}), '(0, 5001, 10)\n', (8074, 8087), True, 'import numpy as np\n'), ((3750, 3779), 'scipy.special.expit', 'sigmoid_function', (['dottedFeats'], {}), '(dottedFeats)\n', (3766, 3779), True, 'from scipy.special import expit as sigmoid_function\n'), ((5646, 5678), 'numpy.dot', 'np.dot', (['features', 'optimalTheta.T'], {}), '(features, optimalTheta.T)\n', (5652, 5678), True, 'import numpy as np\n'), ((5889, 5908), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(1)'], {}), '(0, 10, 1)\n', (5898, 5908), True, 'import numpy as np\n'), ((7217, 7250), 'numpy.insert', 'np.insert', (['features', '(0)', '(1)'], {'axis': '(1)'}), '(features, 0, 1, axis=1)\n', (7226, 7250), True, 'import numpy as np\n'), ((7263, 7288), 'numpy.dot', 'np.dot', (['features', 'layer.T'], {}), '(features, layer.T)\n', (7269, 7288), True, 'import numpy as np\n'), ((7308, 7327), 'scipy.special.expit', 'sigmoid_function', (['z'], {}), '(z)\n', (7324, 7327), True, 'from scipy.special import expit as sigmoid_function\n'), ((7668, 7687), 
'numpy.arange', 'np.arange', (['(1)', '(11)', '(1)'], {}), '(1, 11, 1)\n', (7677, 7687), True, 'import numpy as np\n'), ((3038, 3050), 'numpy.log', 'np.log', (['cost'], {}), '(cost)\n', (3044, 3050), True, 'import numpy as np\n'), ((3070, 3086), 'numpy.log', 'np.log', (['(1 - cost)'], {}), '(1 - cost)\n', (3076, 3086), True, 'import numpy as np\n'), ((4508, 4557), 'pandas.get_dummies', 'pd.get_dummies', (['response'], {'columns': "['digit_class']"}), "(response, columns=['digit_class'])\n", (4522, 4557), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
####################
def merge_dicts(list_of_dicts):
    """Merge a sequence of dicts into one dict of lists.

    Each key maps to the list of values it had across the input dicts,
    in input order.  E.g. ``[{'a': 1}, {'a': 2}] -> {'a': [1, 2]}``.
    """
    results = {}
    for d in list_of_dicts:
        # setdefault replaces the membership-test-then-append pattern.
        for key, value in d.items():
            results.setdefault(key, []).append(value)
    return results
####################
# Energy per comparator operation in Joules: 22 pJ divided by 32 and 16 --
# presumably a per-bit / per-lane normalisation of a published figure;
# TODO confirm the provenance of these divisors.
comp_pJ = 22. * 1e-12 / 32. / 16.
num_layers = 7       # number of network layers summarised per experiment
num_comparator = 8   # number of ADC comparator levels recorded per layer
# Pickled result dictionaries from earlier runs.
# NOTE(review): allow_pickle=True executes pickle on load -- only open
# result files from a trusted source.
results = np.load('results.npy', allow_pickle=True).item()
results_tf = np.load('results_tf.npy', allow_pickle=True).item()
# Sigma sweep used as the x-axis of every panel below.
x = np.array([0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, 0.13, 0.14, 0.15])
# Accumulators indexed [skip][cards][sigma_index][layer]; skip and cards
# are 0/1 flags taken from the result keys.
y_mean = np.zeros(shape=(2, 2, len(x), num_layers))
y_std = np.zeros(shape=(2, 2, len(x), num_layers))
y_mac_per_cycle = np.zeros(shape=(2, 2, len(x), num_layers))
y_mac_per_pJ = np.zeros(shape=(2, 2, len(x), num_layers))
y_mac = np.zeros(shape=(2, 2, len(x), num_layers))
y_cycle = np.zeros(shape=(2, 2, len(x), num_layers))
y_ron = np.zeros(shape=(2, 2, len(x), num_layers))
y_roff = np.zeros(shape=(2, 2, len(x), num_layers))
# Extra trailing axis: one slot per comparator level.
y_adc = np.zeros(shape=(2, 2, len(x), num_layers, num_comparator))
y_energy = np.zeros(shape=(2, 2, len(x), num_layers))
acc = results_tf['acc_tf']
for key in sorted(results.keys()):
    (skip, cards, sigma) = key
    layer_results = results[key]
    for layer in range(num_layers):
        # Collapse the per-example result dicts for this layer into
        # one dict of per-metric lists.
        example_results = merge_dicts(layer_results[layer])
        sigma_index = np.where(x == sigma)[0][0]
        y_mean[skip][cards][sigma_index][layer] = np.mean(example_results['mean'])
        y_std[skip][cards][sigma_index][layer] = np.mean(example_results['std'])
        y_mac_per_cycle[skip][cards][sigma_index][layer] = np.sum(example_results['nmac']) / np.sum(example_results['cycle'])
        y_mac[skip][cards][sigma_index][layer] = np.mean(example_results['nmac'])
        y_cycle[skip][cards][sigma_index][layer] = np.mean(example_results['cycle'])
        y_ron[skip][cards][sigma_index][layer] = np.sum(example_results['ron'])
        y_roff[skip][cards][sigma_index][layer] = np.sum(example_results['roff'])
        y_adc[skip][cards][sigma_index][layer] = np.sum(example_results['adc'], axis=0)
        # Energy model: 2e-16 J per on/off resistive event (presumably
        # per-read energy -- TODO confirm) plus comparator energy weighted
        # by comparator level 1..8.
        y_energy[skip][cards][sigma_index][layer] += y_ron[skip][cards][sigma_index][layer] * 2e-16
        y_energy[skip][cards][sigma_index][layer] += y_roff[skip][cards][sigma_index][layer] * 2e-16
        y_energy[skip][cards][sigma_index][layer] += np.sum(y_adc[skip][cards][sigma_index][layer] * np.array([1,2,3,4,5,6,7,8]) * comp_pJ)
        # print (skip, cards, y_adc[skip][cards][sigma_index][layer] * np.array([1,2,3,4,5,6,7,8]))
        # TMAC/W: total MACs (scaled to tera) divided by total Joules.
        y_mac_per_pJ[skip][cards][sigma_index][layer] = np.sum(example_results['nmac']) / 1e12 / np.sum(y_energy[skip][cards][sigma_index][layer])
####################
#print (np.around(y_mac_per_cycle[0, 0], 1))
#print (np.around(y_mac_per_cycle[1, 0], 1))
#print (np.around(y_mac_per_cycle[1, 1], 1))
# print (np.around(y_mean[1, 1], 3))
#print (np.around(y_std[0, 0], 3))
#print (np.around(y_std[1, 0], 3))
#print (np.around(y_std[1, 1], 3))
# Efficiency tables for the three configurations:
# baseline (0,0), skip (1,0) and cards (1,1).
print (np.around(y_mac_per_pJ[0, 0], 3))
print (np.around(y_mac_per_pJ[1, 0], 3))
print (np.around(y_mac_per_pJ[1, 1], 3))
####################
plot_layer = 4   # single layer whose curves are shown in the figure
####################
# 2x2 figure: throughput, efficiency, matmul error, and accuracy,
# each compared across baseline / skip / cards.
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(x, y_mac_per_cycle[0, 0, :, plot_layer], color='red', label='baseline')
axs[0, 0].plot(x, y_mac_per_cycle[1, 0, :, plot_layer], color='blue', label='skip')
axs[0, 0].plot(x, y_mac_per_cycle[1, 1, :, plot_layer], color='green', label='cards')
axs[0, 0].set_ylim(bottom=0)
axs[0, 0].set_ylabel("MAC / Cycle")
axs[0, 1].plot(x, y_mac_per_pJ[0, 0, :, plot_layer], color='red', label='baseline')
axs[0, 1].plot(x, y_mac_per_pJ[1, 0, :, plot_layer], color='blue', label='skip')
axs[0, 1].plot(x, y_mac_per_pJ[1, 1, :, plot_layer], color='green', label='cards')
axs[0, 1].set_ylim(bottom=0)
axs[0, 1].set_ylabel("TMAC / W")
axs[1, 0].plot(x, y_std[0, 0, :, plot_layer], color='red', label='baseline')
axs[1, 0].plot(x, y_std[1, 0, :, plot_layer], color='blue', label='skip')
axs[1, 0].plot(x, y_std[1, 1, :, plot_layer], color='green', label='cards')
axs[1, 0].set_ylim(bottom=0)
axs[1, 0].set_ylabel("MatMul Error STD")
axs[1, 1].plot(x, acc[0, 0, :], color='red', label='baseline')
axs[1, 1].plot(x, acc[1, 0, :], color='blue', label='skip')
axs[1, 1].plot(x, acc[1, 1, :], color='green', label='cards')
axs[1, 1].set_ylabel("Classification Accuracy")
####################
fig = plt.gcf()
fig.set_size_inches(8, 4.5)
fig.savefig('cards.png', dpi=300)
# plt.show()
####################
| [
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.gcf",
"numpy.array",
"numpy.sum",
"numpy.around",
"numpy.load",
"matplotlib.pyplot.subplots"
] | [((562, 637), 'numpy.array', 'np.array', (['[0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15]'], {}), '([0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15])\n', (570, 637), True, 'import numpy as np\n'), ((3293, 3311), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3305, 3311), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4516), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4514, 4516), True, 'import matplotlib.pyplot as plt\n'), ((3102, 3134), 'numpy.around', 'np.around', (['y_mac_per_pJ[0, 0]', '(3)'], {}), '(y_mac_per_pJ[0, 0], 3)\n', (3111, 3134), True, 'import numpy as np\n'), ((3144, 3176), 'numpy.around', 'np.around', (['y_mac_per_pJ[1, 0]', '(3)'], {}), '(y_mac_per_pJ[1, 0], 3)\n', (3153, 3176), True, 'import numpy as np\n'), ((3186, 3218), 'numpy.around', 'np.around', (['y_mac_per_pJ[1, 1]', '(3)'], {}), '(y_mac_per_pJ[1, 1], 3)\n', (3195, 3218), True, 'import numpy as np\n'), ((443, 484), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (450, 484), True, 'import numpy as np\n'), ((505, 549), 'numpy.load', 'np.load', (['"""results_tf.npy"""'], {'allow_pickle': '(True)'}), "('results_tf.npy', allow_pickle=True)\n", (512, 549), True, 'import numpy as np\n'), ((1527, 1559), 'numpy.mean', 'np.mean', (["example_results['mean']"], {}), "(example_results['mean'])\n", (1534, 1559), True, 'import numpy as np\n'), ((1609, 1640), 'numpy.mean', 'np.mean', (["example_results['std']"], {}), "(example_results['std'])\n", (1616, 1640), True, 'import numpy as np\n'), ((1820, 1852), 'numpy.mean', 'np.mean', (["example_results['nmac']"], {}), "(example_results['nmac'])\n", (1827, 1852), True, 'import numpy as np\n'), ((1905, 1938), 'numpy.mean', 'np.mean', (["example_results['cycle']"], {}), "(example_results['cycle'])\n", (1912, 1938), True, 'import numpy as np\n'), ((1989, 2019), 'numpy.sum', 'np.sum', 
(["example_results['ron']"], {}), "(example_results['ron'])\n", (1995, 2019), True, 'import numpy as np\n'), ((2070, 2101), 'numpy.sum', 'np.sum', (["example_results['roff']"], {}), "(example_results['roff'])\n", (2076, 2101), True, 'import numpy as np\n'), ((2151, 2189), 'numpy.sum', 'np.sum', (["example_results['adc']"], {'axis': '(0)'}), "(example_results['adc'], axis=0)\n", (2157, 2189), True, 'import numpy as np\n'), ((1702, 1733), 'numpy.sum', 'np.sum', (["example_results['nmac']"], {}), "(example_results['nmac'])\n", (1708, 1733), True, 'import numpy as np\n'), ((1736, 1768), 'numpy.sum', 'np.sum', (["example_results['cycle']"], {}), "(example_results['cycle'])\n", (1742, 1768), True, 'import numpy as np\n'), ((2738, 2787), 'numpy.sum', 'np.sum', (['y_energy[skip][cards][sigma_index][layer]'], {}), '(y_energy[skip][cards][sigma_index][layer])\n', (2744, 2787), True, 'import numpy as np\n'), ((1441, 1461), 'numpy.where', 'np.where', (['(x == sigma)'], {}), '(x == sigma)\n', (1449, 1461), True, 'import numpy as np\n'), ((2697, 2728), 'numpy.sum', 'np.sum', (["example_results['nmac']"], {}), "(example_results['nmac'])\n", (2703, 2728), True, 'import numpy as np\n'), ((2501, 2535), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8])\n', (2509, 2535), True, 'import numpy as np\n')] |
import collections
import logging
import pickle
from typing import Any, Dict, Hashable, Iterable, Iterator, Mapping, Optional, Sequence, Union
import warnings
import numpy as np
from smqtk_dataprovider import from_uri
from smqtk_descriptors import DescriptorElement
from smqtk_classifier.interfaces.classify_descriptor_supervised import ClassifyDescriptorSupervised
LOG = logging.getLogger(__name__)
try:
# noinspection PyPackageRequirements
import scipy.stats # type: ignore
except ImportError:
warnings.warn(
"scipy.stats not importable: SkLearnSvmClassifier will not be usable."
)
scipy = None
try:
from sklearn import svm
except ImportError:
warnings.warn(
"svm not importable: SkLearnSvmClassifier will not be usable."
)
svm = None
class SkLearnSvmClassifier (ClassifyDescriptorSupervised):
    """
    Classifier that wraps the SkLearn SVM (Support Vector Machine)
    SVC (C-Support Vector Classification) module.
    Model file paths are optional. If they are given and the file(s) exist,
    we will load them. If they do not, we treat the path(s) as the output
    path(s) for saving a model after calling ``train``. If this is None
    (default), no model is loaded nor output via training, thus any model
    trained will only exist in memory during the lifetime of this instance.
    :param svm_model_uri: Path to the model file.
    :param C: Regularization parameter passed to SkLearn SVM SVC model.
    :param kernel: Kernel type passed to SkLearn SVM SVC model.
    :param probability: Whether to enable probability estimates or not.
    :param calculate_class_weights: Whether to manually calculate the
        class weights to be passed to the SVM model or not.
        Defaults to true. If false, all classes will be given equal weight.
    :param normalize: Normalize input vectors to training and
        classification methods using ``numpy.linalg.norm``. This may either
        be ``None``, disabling normalization, or any valid value that
        could be passed to the ``ord`` parameter in ``numpy.linalg.norm``
        for 1D arrays. This is ``None`` by default (no normalization).
    """

    # noinspection PyDefaultArgument
    def __init__(
        self,
        svm_model_uri: Optional[str] = None,
        C: float = 2.0,  # Regularization parameter
        kernel: str = 'linear',  # Kernel type
        probability: bool = True,  # Enable probability estimates
        calculate_class_weights: bool = True,  # Enable calculation of class weights
        normalize: Optional[Union[int, float, str]] = None,
    ):
        super(SkLearnSvmClassifier, self).__init__()
        self.svm_model_uri = svm_model_uri
        # Element will be None if the input URI is None
        #: :type: None | smqtk.representation.DataElement
        self.svm_model_elem = \
            svm_model_uri and from_uri(svm_model_uri)
        self.C = C
        self.kernel = kernel
        self.probability = probability
        self.calculate_class_weights = calculate_class_weights
        self.normalize = normalize
        # Validate normalization parameter by trying it on a random vector
        # so a bad ``ord`` value fails fast at construction time.
        if normalize is not None:
            self._norm_vector(np.random.rand(8))
        # generated parameters
        self.svm_model: Optional[svm.SVC] = None
        self._reload_model()

    @classmethod
    def is_usable(cls) -> bool:
        # Usable only if both optional imports (scipy.stats, sklearn.svm)
        # succeeded at module load time.
        return None not in {scipy, svm}

    def get_config(self) -> Dict[str, Any]:
        return {
            "svm_model_uri": self.svm_model_uri,
            "C": self.C,
            "kernel": self.kernel,
            "probability": self.probability,
            "calculate_class_weights": self.calculate_class_weights,
            "normalize": self.normalize,
        }

    def _reload_model(self) -> None:
        """
        Reload SVM model from configured file path, if one is configured
        and its backing element is non-empty.
        """
        if self.svm_model_elem and not self.svm_model_elem.is_empty():
            svm_model_tmp_fp = self.svm_model_elem.write_temp()
            # SECURITY NOTE: pickle.load executes arbitrary code embedded in
            # the model bytes -- only load model elements from trusted sources.
            with open(svm_model_tmp_fp, 'rb') as f:
                self.svm_model = pickle.load(f)
            self.svm_model_elem.clean_temp()

    def _norm_vector(self, v: np.ndarray) -> np.ndarray:
        """
        Class standard array normalization. Normalized along max dimension (a=0
        for a 1D array, a=1 for a 2D array, etc.).
        :param v: Vector to normalize
        :return: Returns the normalized version of input array ``v``.
        """
        if self.normalize is not None:
            n = np.linalg.norm(v, self.normalize, v.ndim - 1,
                               keepdims=True)
            # replace 0's with 1's, preventing div-by-zero
            n[n == 0.] = 1.
            return v / n
        # Normalization off
        return v

    def has_model(self) -> bool:
        """
        :return: If this instance currently has a model loaded. If no model is
            present, classification of descriptors cannot happen.
        :rtype: bool
        """
        return self.svm_model is not None

    def _train(
        self,
        class_examples: Mapping[Hashable, Iterable[DescriptorElement]]
    ) -> None:
        train_labels = []
        train_vectors = []
        train_group_sizes: Dict = {}  # number of examples per class
        # Making SVM label assignment deterministic to lexicographical order
        # of the type repr.
        # -- Can't specifically guarantee that dict key types will all support
        #    less-than operator, however we can always get some kind of repr
        #    which is a string which does support less-than. In the common case
        #    keys will be strings and ints, but this "should" handle more
        #    exotic cases, at least for the purpose of ordering keys reasonably
        #    deterministically.
        # (Previously wrapped in ``enumerate`` with an unused index.)
        for l in sorted(class_examples, key=lambda e: str(e)):
            # requires a sequence, so making the iterable ``g`` a tuple
            g = class_examples[l]
            if not isinstance(g, collections.abc.Sequence):
                LOG.debug(' (expanding iterable into sequence)')
                g = tuple(g)
            train_group_sizes[l] = float(len(g))
            x = np.array(DescriptorElement.get_many_vectors(g))
            x = self._norm_vector(x)
            train_labels.extend([l] * x.shape[0])
            train_vectors.extend(x)
            del g, x
        assert len(train_labels) == len(train_vectors), \
            "Count mismatch between parallel labels and descriptor vectors" \
            "(%d != %d)" \
            % (len(train_labels), len(train_vectors))
        # Calculate class weights
        weights = None
        if self.calculate_class_weights:
            weights = {}
            # (john.moeller): The weighting should probably be the geometric
            # mean of the number of examples over the classes divided by the
            # number of examples for the current class.
            gmean = scipy.stats.gmean(list(train_group_sizes.values()))
            # (Previously wrapped in ``enumerate`` with an unused index.)
            for g in train_group_sizes:
                w = gmean / train_group_sizes[g]
                weights[g] = w
        self.svm_model = svm.SVC(C=self.C,
                                 kernel=self.kernel,
                                 probability=self.probability,
                                 class_weight=weights)
        LOG.debug("Training SVM model")
        self.svm_model.fit(train_vectors, train_labels)
        if self.svm_model_elem and self.svm_model_elem.writable():
            LOG.debug("Saving model to element (%s)", self.svm_model_elem)
            self.svm_model_elem.set_bytes(pickle.dumps(self.svm_model))

    def get_labels(self) -> Sequence[Hashable]:
        if self.svm_model is not None:
            return list(self.svm_model.classes_)
        else:
            raise RuntimeError("No model loaded")

    def _classify_arrays(self, array_iter: Union[np.ndarray, Iterable[np.ndarray]]) -> Iterator[Dict[Hashable, float]]:
        if self.svm_model is None:
            raise RuntimeError("No SVM model present for classification")
        # Dump descriptors into a matrix for normalization and use in
        # prediction.
        vec_mat = np.array(list(array_iter))
        vec_mat = self._norm_vector(vec_mat)
        svm_model_labels = self.get_labels()
        if self.svm_model.probability:
            proba_mat = self.svm_model.predict_proba(vec_mat)
            for proba in proba_mat:
                yield dict(zip(svm_model_labels, proba))
        else:
            # Without probability estimates, emit a one-hot mapping with
            # weight 1.0 on the predicted label.
            c_base = {label: 0.0 for label in svm_model_labels}
            proba_mat = self.svm_model.predict(vec_mat)
            for p in proba_mat:
                c = dict(c_base)
                c[p] = 1.0
                yield c
| [
"logging.getLogger",
"smqtk_descriptors.DescriptorElement.get_many_vectors",
"smqtk_dataprovider.from_uri",
"numpy.random.rand",
"pickle.dumps",
"pickle.load",
"numpy.linalg.norm",
"warnings.warn",
"sklearn.svm.SVC"
] | [((376, 403), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (393, 403), False, 'import logging\n'), ((514, 604), 'warnings.warn', 'warnings.warn', (['"""scipy.stats not importable: SkLearnSvmClassifier will not be usable."""'], {}), "(\n 'scipy.stats not importable: SkLearnSvmClassifier will not be usable.')\n", (527, 604), False, 'import warnings\n'), ((689, 766), 'warnings.warn', 'warnings.warn', (['"""svm not importable: SkLearnSvmClassifier will not be usable."""'], {}), "('svm not importable: SkLearnSvmClassifier will not be usable.')\n", (702, 766), False, 'import warnings\n'), ((7227, 7320), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'self.C', 'kernel': 'self.kernel', 'probability': 'self.probability', 'class_weight': 'weights'}), '(C=self.C, kernel=self.kernel, probability=self.probability,\n class_weight=weights)\n', (7234, 7320), False, 'from sklearn import svm\n'), ((2884, 2907), 'smqtk_dataprovider.from_uri', 'from_uri', (['svm_model_uri'], {}), '(svm_model_uri)\n', (2892, 2907), False, 'from smqtk_dataprovider import from_uri\n'), ((4562, 4622), 'numpy.linalg.norm', 'np.linalg.norm', (['v', 'self.normalize', '(v.ndim - 1)'], {'keepdims': '(True)'}), '(v, self.normalize, v.ndim - 1, keepdims=True)\n', (4576, 4622), True, 'import numpy as np\n'), ((3234, 3251), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (3248, 3251), True, 'import numpy as np\n'), ((4124, 4138), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4135, 4138), False, 'import pickle\n'), ((6258, 6295), 'smqtk_descriptors.DescriptorElement.get_many_vectors', 'DescriptorElement.get_many_vectors', (['g'], {}), '(g)\n', (6292, 6295), False, 'from smqtk_descriptors import DescriptorElement\n'), ((7698, 7726), 'pickle.dumps', 'pickle.dumps', (['self.svm_model'], {}), '(self.svm_model)\n', (7710, 7726), False, 'import pickle\n')] |
# author: <NAME>, <NAME>
# data: 2020-11-27
"""Creates eda plots for the pre-processed training data from the open hotel booking demand dataset
(from https://www.sciencedirect.com/science/article/pii/S2352340918315191#f0010). Saves the results
as csv and svg files.
Usage: eda_ms2.py --train=<train_data_file> --out_dir=<report_file>
Options:
--train=<train_data_file>  Path (including filename) to training data (which needs to be saved as a csv file)
--out_dir=<report_file> Path to directory where the plots should be saved
"""
# common packages
import numpy as np
import pandas as pd
from docopt import docopt
from pandas.api.types import CategoricalDtype
# Visualization packages
import altair as alt
import chromedriver_binary
from altair_saver import save
from selenium import webdriver
# NOTE(review): a Chrome webdriver is created as an import-time side effect,
# presumably so altair_saver's selenium backend can render charts to svg,
# but ``driver`` is never referenced again -- confirm it is actually needed.
driver = webdriver.Chrome()
# Save a vega-lite spec and a PNG blob for each plot in the notebook
# alt.renderers.enable("mimetype")
# Handle large data sets without embedding them in the notebook
alt.data_transformers.enable("data_server")
# Parse command-line arguments according to the module docstring (docopt).
opt = docopt(__doc__)
def main(train_data_file, report_file):
# read in train data set
try:
train_df = pd.read_csv(train_data_file)
print("Traning data set reading complete")
except Exception as ex:
print(ex)
print(type(ex))
exit(-99)
# split features and target
X_train, y_train = train_df.drop(columns=["is_canceled"]), train_df["is_canceled"]
# Seperate Resort and City Hotel:
resort_train = X_train.loc[(X_train["hotel"] == "Resort Hotel")].copy()
city_train = X_train.loc[(X_train["hotel"] == "City Hotel")].copy()
# numeric features distribution against target graph
numeric_features = [
"lead_time",
"stays_in_weekend_nights",
"stays_in_week_nights",
"adults",
"children",
"babies",
"previous_cancellations",
"previous_bookings_not_canceled",
"booking_changes",
"days_in_waiting_list",
"adr",
"required_car_parking_spaces",
"total_of_special_requests",
]
# categorical features against target graph
categorical_features = [
"hotel",
"meal",
"market_segment",
"distribution_channel",
"reserved_room_type",
"deposit_type",
"customer_type",
"is_repeated_guest",
]
train_df = train_df.copy()
train_df["is_canceled_cat"] = train_df["is_canceled"].apply(
lambda x: "Canceled" if x == 1 else "Not Canceled"
)
numeric_vs_target = (
(
alt.Chart(train_df)
.mark_line(interpolate="step")
.encode(
alt.X(alt.repeat(), type="quantitative"),
alt.Y("count()", title=""),
alt.Color("is_canceled_cat", title=""),
)
)
.properties(width=150, height=150)
.repeat(numeric_features, columns=4, title="Numeric features with target")
)
try:
numeric_vs_target.save(report_file + "/" + "numeric_vs_target.svg")
print("Numeric features with target graph generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
cat_vs_target = (
alt.Chart(train_df)
.mark_rect()
.encode(
alt.X(alt.repeat(), type="nominal"),
alt.Y("is_canceled_cat", title=""),
alt.Color("count()", title="Number of Observations"),
)
.properties(width=150, height=150)
.repeat(
categorical_features, columns=4, title="Categorical features with target"
)
)
try:
cat_vs_target.save(report_file + "/" + "cat_vs_target.svg")
print("Categorical features with target graph generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
# missing table
null_df = (
train_df.isna()
.sum()
.reset_index(name="missing_count")
.query("missing_count != 0")
)
null_df["missing_percentage"] = np.round(
null_df["missing_count"] / train_df.shape[0] * 100, 2
)
null_df = null_df.rename({"index": "feature"}, axis=1)
try:
null_df.to_csv(report_file + "/" + "missing_summary.csv")
print("Table with missing values generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
# correlation chart all variable
corr_df = train_df.corr().stack().reset_index(name="corr")
corr_df["round_corr"] = np.round(corr_df["corr"], 2)
corr_plot = (
alt.Chart(
corr_df.query("level_0 != 'is_canceled' & level_1 != 'is_canceled'"),
title="Feature Correlation",
)
.mark_rect()
.encode(
x="level_0",
y="level_1",
tooltip="corr",
color=alt.Color(
"corr", scale=alt.Scale(domain=(-1, 1), scheme="purpleorange")
),
)
.properties(width=500, height=500)
)
corr_text = (
alt.Chart(corr_df.query("level_0 != 'is_canceled' & level_1 != 'is_canceled'"))
.mark_text(size=8)
.encode(
x=alt.X("level_0", title="Features"),
y=alt.Y("level_1", title="Features"),
text="round_corr",
)
.properties(width=500, height=500)
)
corr_all = corr_plot + corr_text
try:
corr_all.save(report_file + "/" + "corr_all.svg")
print("Feature correlation graph generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
# correlation against target chart
corr_plot = (
alt.Chart(
corr_df[corr_df.level_1 == "is_canceled"], title="Feature Correlation"
)
.mark_rect()
.encode(
x=alt.X("level_0", title="Features"),
y=alt.Y("level_1", title="Target"),
tooltip="corr",
color=alt.Color(
"corr", scale=alt.Scale(domain=(-1, 1), scheme="purpleorange")
),
)
.properties(width=600)
)
corr_text = (
alt.Chart(corr_df[corr_df.level_1 == "is_canceled"])
.mark_text(size=8)
.encode(
x=alt.X("level_0", title="Features"),
y=alt.Y("level_1", title="Target"),
text="round_corr",
)
.properties(width=600)
)
corr_target = corr_plot + corr_text
try:
corr_target.save(report_file + "/" + "corr_target.svg")
print("Feature correlation with target graph generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
# feature examination charts
top_20_countries = (
X_train.groupby("country")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)[:20]
)
countries = (
alt.Chart(top_20_countries, title="Top 20 home country of guests")
.mark_bar()
.encode(
alt.X("counts", title="Guests numbers"),
alt.Y("country", sort="-x", title="Country"),
alt.Tooltip("country"),
)
)
X_train["adr_ac"] = X_train["adr"] / (X_train["adults"] + X_train["children"])
room_price = X_train[["hotel", "reserved_room_type", "adr_ac"]].sort_values(
"reserved_room_type"
)
room_price = (
alt.Chart(room_price)
.mark_boxplot(extent="min-max", clip=True)
.encode(
alt.X("adr_ac", title="Price [EUR]", scale=alt.Scale(domain=(0, 120))),
alt.Y("hotel", title="Hotel"),
color="hotel",
)
.facet(
"reserved_room_type",
columns=2,
title="Price per night and person for different room types",
)
)
resort_train["total_nights"] = (
resort_train["stays_in_weekend_nights"] + resort_train["stays_in_week_nights"]
)
city_train["total_nights"] = (
city_train["stays_in_weekend_nights"] + city_train["stays_in_week_nights"]
)
num_nights_resort = list(resort_train["total_nights"].value_counts().index)
num_bookings_resort = list(resort_train["total_nights"].value_counts())
rel_bookings_resort = (
resort_train["total_nights"].value_counts() / sum(num_bookings_resort) * 100
) # convert to percent
num_nights_city = list(city_train["total_nights"].value_counts().index)
num_bookings_city = list(city_train["total_nights"].value_counts())
rel_bookings_city = (
city_train["total_nights"].value_counts() / sum(num_bookings_city) * 100
) # convert to percent
resort_nights = pd.DataFrame(
{
"hotel": "Resort hotel",
"num_nights": num_nights_resort,
"rel_num_bookings": rel_bookings_resort,
}
)
city_nights = pd.DataFrame(
{
"hotel": "City hotel",
"num_nights": num_nights_city,
"rel_num_bookings": rel_bookings_city,
}
)
nights_data = pd.concat([resort_nights, city_nights], ignore_index=True)
nights_data
stay = (
alt.Chart(nights_data)
.mark_bar()
.encode(
alt.X("num_nights", title="Number of nights"),
alt.Y("rel_num_bookings", title="Percent of guests"),
color=alt.Color("hotel", legend=None),
)
.facet("hotel", title="Length of guests stay")
)
feature_exam = (countries.properties(height=300, width=200) | stay) & room_price
try:
feature_exam.save(report_file + "/" + "feature_exam.svg")
print("Feature examination chars generating complete")
except FileNotFoundError as fx:
print("Error in target file path")
print(fx)
print(type(fx))
except Exception as ex:
print(ex)
print(type(ex))
# price versus month graph
# Keep only the columns needed for the price-by-month chart.
prices_monthly = X_train[["hotel", "arrival_date_month", "adr_ac"]].sort_values(
    "arrival_date_month"
)
# order by month:
months_ordered = [
    "January",
    "February",
    "March",
    "April",
    "May",
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
# Make month an ordered categorical so sorting follows the calendar
# instead of alphabetical order.
prices_monthly["arrival_date_month"] = pd.Categorical(
    prices_monthly["arrival_date_month"], categories=months_ordered, ordered=True
)
prices_monthly = prices_monthly.sort_values("arrival_date_month")
# Scatter of nightly room price per month, colored by hotel.
prices_points = (
    alt.Chart(prices_monthly, title="Room price per night over the year")
    .mark_point()
    .encode(
        alt.X("arrival_date_month", title="Month", sort=months_ordered),
        alt.Y("adr_ac", title="Price [EUR]"),
        alt.Color("hotel"),
    )
    .properties(width=500, height=400)
)
# Reuse the same encoding but plot the monthly *mean* price as a line.
price_vs_month = prices_points.encode(y="mean(adr_ac)").mark_line()
try:
    price_vs_month.save(report_file + "/" + "price_vs_month.svg")
    print("Room price changing graph generating complete")
except FileNotFoundError as fx:
    print("Error in target file path")
    print(fx)
    print(type(fx))
except Exception as ex:
    # Best-effort save: report and continue.
    print(ex)
    print(type(ex))
# guest versus month graph
# Bookings per arrival month for each hotel (row count per month).
rguests_monthly = resort_train.groupby("arrival_date_month")["hotel"].count()
cguests_monthly = city_train.groupby("arrival_date_month")["hotel"].count()
rguest_data = pd.DataFrame(
    {
        "month": list(rguests_monthly.index),
        "hotel": "Resort hotel",
        "guests": list(rguests_monthly.values),
    }
)
cguest_data = pd.DataFrame(
    {
        "month": list(cguests_monthly.index),
        "hotel": "City hotel",
        "guests": list(cguests_monthly.values),
    }
)
guest_data = pd.concat([rguest_data, cguest_data], ignore_index=True)
# Calendar ordering again (months_ordered is defined earlier in the script).
guest_data["month"] = pd.Categorical(
    guest_data["month"], categories=months_ordered, ordered=True
)
guest_data = guest_data.sort_values("month")
# Dataset contains July and August date from 3 years, the other month from 2 years. Normalize data:
guest_data.loc[
    (guest_data["month"] == "July") | (guest_data["month"] == "August"), "guests"
] /= 3
guest_data.loc[
    ~((guest_data["month"] == "July") | (guest_data["month"] == "August")), "guests"
] /= 2
# Scatter of (year-normalized) guest counts per month, colored by hotel.
guests_points = (
    alt.Chart(guest_data, title="Number of guests over the year")
    .mark_point()
    .encode(
        alt.X("month", title="Month", sort=months_ordered),
        alt.Y("guests", title="Number of guests"),
        alt.Color("hotel"),
    )
    .properties(width=500, height=400)
)
# Same data rendered as a line chart.
guest_vs_month = guests_points.mark_line()
try:
    guest_vs_month.save(report_file + "/" + "guest_vs_month.svg")
    print("Guest number changing graph generating complete")
except FileNotFoundError as fx:
    print("Error in target file path")
    print(fx)
    print(type(fx))
except Exception as ex:
    # Best-effort save: report and continue.
    print(ex)
    print(type(ex))
# guest repeat booking with cancel history graph
guests_prev_cancel = X_train[
    ["is_repeated_guest", "previous_bookings_not_canceled"]
]
# Bar chart: total prior non-cancelled bookings, split by repeat-guest flag.
rep_guests_prev_cancel = (
    alt.Chart(
        guests_prev_cancel, title="Guests repeat booking with cancellation history"
    )
    .mark_bar()
    .encode(
        alt.X(
            "sum(previous_bookings_not_canceled)",
            title="Total number of previous bookings not cancelled",
        ),
        alt.Y("is_repeated_guest:O", title="Repeated guests"),
    )
)
try:
    rep_guests_prev_cancel.save(report_file + "/" + "rep_guests_prev_cancel.svg")
    print("Repeat booking with cancellation history graph generating complete")
except FileNotFoundError as fx:
    print("Error in target file path")
    print(fx)
    print(type(fx))
except Exception as ex:
    # Best-effort save: report and continue.
    print(ex)
    print(type(ex))
print("Successfully generate EDA results")
if __name__ == "__main__":
    # Entry point: `opt` is the docopt argument dict parsed at module level.
    main(opt["--train"], opt["--out_dir"])
| [
"pandas.read_csv",
"altair.Color",
"selenium.webdriver.Chrome",
"altair.Chart",
"altair.repeat",
"altair.Scale",
"pandas.Categorical",
"altair.data_transformers.enable",
"altair.X",
"altair.Y",
"altair.Tooltip",
"pandas.DataFrame",
"pandas.concat",
"docopt.docopt",
"numpy.round"
] | [((847, 865), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (863, 865), False, 'from selenium import webdriver\n'), ((1042, 1085), 'altair.data_transformers.enable', 'alt.data_transformers.enable', (['"""data_server"""'], {}), "('data_server')\n", (1070, 1085), True, 'import altair as alt\n'), ((1095, 1110), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1101, 1110), False, 'from docopt import docopt\n'), ((4463, 4526), 'numpy.round', 'np.round', (["(null_df['missing_count'] / train_df.shape[0] * 100)", '(2)'], {}), "(null_df['missing_count'] / train_df.shape[0] * 100, 2)\n", (4471, 4526), True, 'import numpy as np\n'), ((5077, 5105), 'numpy.round', 'np.round', (["corr_df['corr']", '(2)'], {}), "(corr_df['corr'], 2)\n", (5085, 5105), True, 'import numpy as np\n'), ((9598, 9715), 'pandas.DataFrame', 'pd.DataFrame', (["{'hotel': 'Resort hotel', 'num_nights': num_nights_resort,\n 'rel_num_bookings': rel_bookings_resort}"], {}), "({'hotel': 'Resort hotel', 'num_nights': num_nights_resort,\n 'rel_num_bookings': rel_bookings_resort})\n", (9610, 9715), True, 'import pandas as pd\n'), ((9800, 9911), 'pandas.DataFrame', 'pd.DataFrame', (["{'hotel': 'City hotel', 'num_nights': num_nights_city, 'rel_num_bookings':\n rel_bookings_city}"], {}), "({'hotel': 'City hotel', 'num_nights': num_nights_city,\n 'rel_num_bookings': rel_bookings_city})\n", (9812, 9911), True, 'import pandas as pd\n'), ((9996, 10054), 'pandas.concat', 'pd.concat', (['[resort_nights, city_nights]'], {'ignore_index': '(True)'}), '([resort_nights, city_nights], ignore_index=True)\n', (10005, 10054), True, 'import pandas as pd\n'), ((11328, 11426), 'pandas.Categorical', 'pd.Categorical', (["prices_monthly['arrival_date_month']"], {'categories': 'months_ordered', 'ordered': '(True)'}), "(prices_monthly['arrival_date_month'], categories=\n months_ordered, ordered=True)\n", (11342, 11426), True, 'import pandas as pd\n'), ((12916, 12972), 'pandas.concat', 'pd.concat', 
(['[rguest_data, cguest_data]'], {'ignore_index': '(True)'}), '([rguest_data, cguest_data], ignore_index=True)\n', (12925, 12972), True, 'import pandas as pd\n'), ((13002, 13078), 'pandas.Categorical', 'pd.Categorical', (["guest_data['month']"], {'categories': 'months_ordered', 'ordered': '(True)'}), "(guest_data['month'], categories=months_ordered, ordered=True)\n", (13016, 13078), True, 'import pandas as pd\n'), ((1218, 1246), 'pandas.read_csv', 'pd.read_csv', (['train_data_file'], {}), '(train_data_file)\n', (1229, 1246), True, 'import pandas as pd\n'), ((7900, 7939), 'altair.X', 'alt.X', (['"""counts"""'], {'title': '"""Guests numbers"""'}), "('counts', title='Guests numbers')\n", (7905, 7939), True, 'import altair as alt\n'), ((7954, 7998), 'altair.Y', 'alt.Y', (['"""country"""'], {'sort': '"""-x"""', 'title': '"""Country"""'}), "('country', sort='-x', title='Country')\n", (7959, 7998), True, 'import altair as alt\n'), ((8013, 8035), 'altair.Tooltip', 'alt.Tooltip', (['"""country"""'], {}), "('country')\n", (8024, 8035), True, 'import altair as alt\n'), ((14610, 14716), 'altair.X', 'alt.X', (['"""sum(previous_bookings_not_canceled)"""'], {'title': '"""Total number of previous bookings not cancelled"""'}), "('sum(previous_bookings_not_canceled)', title=\n 'Total number of previous bookings not cancelled')\n", (14615, 14716), True, 'import altair as alt\n'), ((14776, 14829), 'altair.Y', 'alt.Y', (['"""is_repeated_guest:O"""'], {'title': '"""Repeated guests"""'}), "('is_repeated_guest:O', title='Repeated guests')\n", (14781, 14829), True, 'import altair as alt\n'), ((8481, 8510), 'altair.Y', 'alt.Y', (['"""hotel"""'], {'title': '"""Hotel"""'}), "('hotel', title='Hotel')\n", (8486, 8510), True, 'import altair as alt\n'), ((10172, 10217), 'altair.X', 'alt.X', (['"""num_nights"""'], {'title': '"""Number of nights"""'}), "('num_nights', title='Number of nights')\n", (10177, 10217), True, 'import altair as alt\n'), ((10232, 10284), 'altair.Y', 'alt.Y', 
(['"""rel_num_bookings"""'], {'title': '"""Percent of guests"""'}), "('rel_num_bookings', title='Percent of guests')\n", (10237, 10284), True, 'import altair as alt\n'), ((11665, 11728), 'altair.X', 'alt.X', (['"""arrival_date_month"""'], {'title': '"""Month"""', 'sort': 'months_ordered'}), "('arrival_date_month', title='Month', sort=months_ordered)\n", (11670, 11728), True, 'import altair as alt\n'), ((11743, 11779), 'altair.Y', 'alt.Y', (['"""adr_ac"""'], {'title': '"""Price [EUR]"""'}), "('adr_ac', title='Price [EUR]')\n", (11748, 11779), True, 'import altair as alt\n'), ((11794, 11812), 'altair.Color', 'alt.Color', (['"""hotel"""'], {}), "('hotel')\n", (11803, 11812), True, 'import altair as alt\n'), ((13645, 13695), 'altair.X', 'alt.X', (['"""month"""'], {'title': '"""Month"""', 'sort': 'months_ordered'}), "('month', title='Month', sort=months_ordered)\n", (13650, 13695), True, 'import altair as alt\n'), ((13710, 13751), 'altair.Y', 'alt.Y', (['"""guests"""'], {'title': '"""Number of guests"""'}), "('guests', title='Number of guests')\n", (13715, 13751), True, 'import altair as alt\n'), ((13766, 13784), 'altair.Color', 'alt.Color', (['"""hotel"""'], {}), "('hotel')\n", (13775, 13784), True, 'import altair as alt\n'), ((5759, 5793), 'altair.X', 'alt.X', (['"""level_0"""'], {'title': '"""Features"""'}), "('level_0', title='Features')\n", (5764, 5793), True, 'import altair as alt\n'), ((5810, 5844), 'altair.Y', 'alt.Y', (['"""level_1"""'], {'title': '"""Features"""'}), "('level_1', title='Features')\n", (5815, 5844), True, 'import altair as alt\n'), ((6542, 6576), 'altair.X', 'alt.X', (['"""level_0"""'], {'title': '"""Features"""'}), "('level_0', title='Features')\n", (6547, 6576), True, 'import altair as alt\n'), ((6593, 6625), 'altair.Y', 'alt.Y', (['"""level_1"""'], {'title': '"""Target"""'}), "('level_1', title='Target')\n", (6598, 6625), True, 'import altair as alt\n'), ((6976, 7010), 'altair.X', 'alt.X', (['"""level_0"""'], {'title': '"""Features"""'}), 
"('level_0', title='Features')\n", (6981, 7010), True, 'import altair as alt\n'), ((7027, 7059), 'altair.Y', 'alt.Y', (['"""level_1"""'], {'title': '"""Target"""'}), "('level_1', title='Target')\n", (7032, 7059), True, 'import altair as alt\n'), ((7781, 7847), 'altair.Chart', 'alt.Chart', (['top_20_countries'], {'title': '"""Top 20 home country of guests"""'}), "(top_20_countries, title='Top 20 home country of guests')\n", (7790, 7847), True, 'import altair as alt\n'), ((10305, 10336), 'altair.Color', 'alt.Color', (['"""hotel"""'], {'legend': 'None'}), "('hotel', legend=None)\n", (10314, 10336), True, 'import altair as alt\n'), ((14447, 14538), 'altair.Chart', 'alt.Chart', (['guests_prev_cancel'], {'title': '"""Guests repeat booking with cancellation history"""'}), "(guests_prev_cancel, title=\n 'Guests repeat booking with cancellation history')\n", (14456, 14538), True, 'import altair as alt\n'), ((2856, 2882), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '""""""'}), "('count()', title='')\n", (2861, 2882), True, 'import altair as alt\n'), ((2901, 2939), 'altair.Color', 'alt.Color', (['"""is_canceled_cat"""'], {'title': '""""""'}), "('is_canceled_cat', title='')\n", (2910, 2939), True, 'import altair as alt\n'), ((3619, 3653), 'altair.Y', 'alt.Y', (['"""is_canceled_cat"""'], {'title': '""""""'}), "('is_canceled_cat', title='')\n", (3624, 3653), True, 'import altair as alt\n'), ((3668, 3720), 'altair.Color', 'alt.Color', (['"""count()"""'], {'title': '"""Number of Observations"""'}), "('count()', title='Number of Observations')\n", (3677, 3720), True, 'import altair as alt\n'), ((8439, 8465), 'altair.Scale', 'alt.Scale', ([], {'domain': '(0, 120)'}), '(domain=(0, 120))\n', (8448, 8465), True, 'import altair as alt\n'), ((2803, 2815), 'altair.repeat', 'alt.repeat', ([], {}), '()\n', (2813, 2815), True, 'import altair as alt\n'), ((3575, 3587), 'altair.repeat', 'alt.repeat', ([], {}), '()\n', (3585, 3587), True, 'import altair as alt\n'), ((5463, 5511), 
'altair.Scale', 'alt.Scale', ([], {'domain': '(-1, 1)', 'scheme': '"""purpleorange"""'}), "(domain=(-1, 1), scheme='purpleorange')\n", (5472, 5511), True, 'import altair as alt\n'), ((6381, 6467), 'altair.Chart', 'alt.Chart', (["corr_df[corr_df.level_1 == 'is_canceled']"], {'title': '"""Feature Correlation"""'}), "(corr_df[corr_df.level_1 == 'is_canceled'], title=\n 'Feature Correlation')\n", (6390, 6467), True, 'import altair as alt\n'), ((6717, 6765), 'altair.Scale', 'alt.Scale', ([], {'domain': '(-1, 1)', 'scheme': '"""purpleorange"""'}), "(domain=(-1, 1), scheme='purpleorange')\n", (6726, 6765), True, 'import altair as alt\n'), ((6862, 6914), 'altair.Chart', 'alt.Chart', (["corr_df[corr_df.level_1 == 'is_canceled']"], {}), "(corr_df[corr_df.level_1 == 'is_canceled'])\n", (6871, 6914), True, 'import altair as alt\n'), ((8291, 8312), 'altair.Chart', 'alt.Chart', (['room_price'], {}), '(room_price)\n', (8300, 8312), True, 'import altair as alt\n'), ((10097, 10119), 'altair.Chart', 'alt.Chart', (['nights_data'], {}), '(nights_data)\n', (10106, 10119), True, 'import altair as alt\n'), ((11541, 11610), 'altair.Chart', 'alt.Chart', (['prices_monthly'], {'title': '"""Room price per night over the year"""'}), "(prices_monthly, title='Room price per night over the year')\n", (11550, 11610), True, 'import altair as alt\n'), ((13529, 13590), 'altair.Chart', 'alt.Chart', (['guest_data'], {'title': '"""Number of guests over the year"""'}), "(guest_data, title='Number of guests over the year')\n", (13538, 13590), True, 'import altair as alt\n'), ((2694, 2713), 'altair.Chart', 'alt.Chart', (['train_df'], {}), '(train_df)\n', (2703, 2713), True, 'import altair as alt\n'), ((3496, 3515), 'altair.Chart', 'alt.Chart', (['train_df'], {}), '(train_df)\n', (3505, 3515), True, 'import altair as alt\n')] |
"""
Food Nonfood Classifier
"""
import tensorflow as tf
import numpy as np
class FoodNonfood(object):
    """Binary food / non-food image classifier backed by a frozen TF graph.

    Attributes:
        categories (list[str]): class names, index-aligned with the model
            output: ['food', 'nonfood'].
        model_version (str): identifier derived from the model file path.
        graph (tf.Graph): the imported inference graph.
    """

    def __init__(self, model_file=None):
        """
        Args:
            model_file (str, optional): path to a frozen ``.pb`` graph file.
                Falls back to the bundled MobileNet model when omitted.
        """
        self.categories = ['food', 'nonfood']
        self.model_version = None
        # Bug fix: ``model_file`` was previously accepted here but never
        # forwarded, so a custom model path was silently ignored.
        self.load_graph(model_file)

    def load_graph(self, model_file=None):
        """Load a frozen GraphDef from disk into ``self.graph``.

        Args:
            model_file (str, optional): model file name; defaults to the
                bundled retrained MobileNet graph.
        """
        if model_file is None:
            model_file = '../models/retrained_mobilenet_v2_035_224.pb'
        graph = tf.Graph()
        graph_def = tf.GraphDef()
        with open(model_file, 'rb') as f:
            graph_def.ParseFromString(f.read())
        with graph.as_default():
            tf.import_graph_def(graph_def)
        self.graph = graph
        # NOTE(review): [-2] picks the *parent directory* name of the model
        # file path, not the file name — confirm this is intentional.
        self.model_version = model_file.split('/')[-2]

    def _get_operations(self):
        """Return the (input, output) operations of the imported graph."""
        input_layer = 'Placeholder'
        output_layer = 'final_result'
        # import_graph_def prefixes all op names with 'import/' by default.
        input_operation = self.graph.get_operation_by_name('import/{}'.format(input_layer))
        output_operation = self.graph.get_operation_by_name('import/{}'.format(output_layer))
        return input_operation, output_operation

    def predict(self, filename):
        """Classify a single image file.

        Args:
            filename (str): input image file path

        Returns:
            str: predicted category, "food" or "nonfood"
        """
        with tf.Session(graph=self.graph) as sess:
            t = read_tensor_from_image_file(sess, filename)
            input_operation, output_operation = self._get_operations()
            result = sess.run(
                output_operation.outputs[0],
                {input_operation.outputs[0]: t})
            # argmax over the score vector selects the most likely class.
            prediction = self.categories[np.squeeze(result).argmax()]
        return prediction
def read_tensor_from_image_file(
        sess,
        file_name,
        input_height=224,
        input_width=224,
        channels=3,
        input_mean=0,
        input_std=255):
    """Decode a JPEG file into a normalized float batch of shape (1, H, W, C).

    Args:
        sess: active TF session used to evaluate the decode pipeline.
        file_name (str): path to the JPEG image on disk.
        input_height (int): target height after bilinear resize.
        input_width (int): target width after bilinear resize.
        channels (int): number of color channels to decode.
        input_mean (float): value subtracted from every pixel.
        input_std (float): value every pixel is divided by.

    Returns:
        np.ndarray: the evaluated, normalized image tensor.
    """
    raw_bytes = tf.read_file(file_name)
    decoded = tf.image.decode_jpeg(raw_bytes, channels=channels)
    as_float = tf.cast(decoded, tf.float32)
    # Add a leading batch dimension so the resize op sees a 4-D tensor.
    batched = tf.expand_dims(as_float, 0)
    resized = tf.image.resize_bilinear(batched, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    return sess.run(normalized)
| [
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"tensorflow.GraphDef",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.subtract",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.read_file",
"tensorflow.image.decode_jpeg"
] | [((1936, 1959), 'tensorflow.read_file', 'tf.read_file', (['file_name'], {}), '(file_name)\n', (1948, 1959), True, 'import tensorflow as tf\n'), ((1979, 2031), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': 'channels'}), '(file_reader, channels=channels)\n', (1999, 2031), True, 'import tensorflow as tf\n'), ((2051, 2084), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (2058, 2084), True, 'import tensorflow as tf\n'), ((2105, 2136), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (2119, 2136), True, 'import tensorflow as tf\n'), ((2151, 2219), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (2175, 2219), True, 'import tensorflow as tf\n'), ((492, 502), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (500, 502), True, 'import tensorflow as tf\n'), ((523, 536), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (534, 536), True, 'import tensorflow as tf\n'), ((2272, 2306), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (2283, 2306), True, 'import tensorflow as tf\n'), ((673, 703), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (692, 703), True, 'import tensorflow as tf\n'), ((1336, 1364), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (1346, 1364), True, 'import tensorflow as tf\n'), ((1679, 1697), 'numpy.squeeze', 'np.squeeze', (['result'], {}), '(result)\n', (1689, 1697), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
# Optimization algorithm (gradient ascent for logistic regression).
# Sigmoid function: f(x) = 1 / (1 + e**(-x))
# Sigmoid input: x = w0*x0 + w1*x1 + w2*x2 + ... + wn*xn
def loadDataSet():
    """Load the sample data set from ``./data.csv``.

    Each line is expected to hold two whitespace-separated numeric features
    followed by an integer class label.  A constant 1.0 is prepended to every
    feature row to act as the bias/intercept term.

    Returns:
        tuple[list, list]: (dataMat, labelMat) where dataMat is a list of
        ``[1.0, x1, x2]`` rows and labelMat the matching integer labels.
    """
    dataMat = []
    labelMat = []
    # Context manager guarantees the file handle is closed
    # (the original opened the file and never closed it).
    with open('./data.csv') as fr:
        for line in fr:
            lineArr = line.strip().split()
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
def sigmoid(inX):
    """Logistic sigmoid 1 / (1 + e**(-x)); accepts scalars or numpy arrays."""
    exp_term = np.exp(-inX)
    return 1.0 / (1 + exp_term)
def gradAscent(dataMathIn, classLabels):
    """Fit logistic-regression weights by batch gradient ascent.

    Args:
        dataMathIn: m x n list of feature rows (bias column included).
        classLabels: length-m list of 0/1 class labels.

    Returns:
        np.matrix: n x 1 column of weights after 500 fixed-step iterations.
    """
    features = np.mat(dataMathIn)                 # (m, n)
    targets = np.mat(classLabels).transpose()     # (m, 1)
    _, n_features = np.shape(features)
    step_size = 0.001    # learning rate
    n_iterations = 500
    weights = np.ones((n_features, 1))
    for _ in range(n_iterations):
        # Logistic prediction for every sample (sigmoid, inlined).
        predictions = 1.0 / (1 + np.exp(-(features * weights)))
        residual = targets - predictions
        # Maximum-likelihood gradient step.
        weights = weights + step_size * features.transpose() * residual
    return weights
if __name__ == '__main__':
    # Demo entry point: fit the model on ./data.csv and print the weights.
    dataMat, labelMat = loadDataSet()
    print(gradAscent(dataMat, labelMat))
# print(dataSet)
"numpy.exp",
"numpy.mat",
"numpy.shape",
"numpy.ones"
] | [((590, 608), 'numpy.mat', 'np.mat', (['dataMathIn'], {}), '(dataMathIn)\n', (596, 608), True, 'import numpy as np\n'), ((667, 687), 'numpy.shape', 'np.shape', (['dataMatrix'], {}), '(dataMatrix)\n', (675, 687), True, 'import numpy as np\n'), ((767, 782), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (774, 782), True, 'import numpy as np\n'), ((495, 507), 'numpy.exp', 'np.exp', (['(-inX)'], {}), '(-inX)\n', (501, 507), True, 'import numpy as np\n'), ((624, 643), 'numpy.mat', 'np.mat', (['classLabels'], {}), '(classLabels)\n', (630, 643), True, 'import numpy as np\n')] |
# Import all libraries we will use
import random
import numpy as np
import cv2
def create_image(p, heigth=1080, width=1920, diameter=50):
    """Generate a white image tiled with random black shapes and save it.

    At each point of a ``diameter``-spaced grid, with probability ``p`` one
    shape (circle, square or triangle, chosen uniformly) is drawn in black.
    The result is written to ``bernoulli<int(p*100)>M.png`` in the current
    working directory.

    Args:
        p (float): per-cell probability of drawing a shape, in [0, 1].
        heigth (int): image height in pixels (default 1080; previously
            hard-coded).
        width (int): image width in pixels (default 1920; previously
            hard-coded).
        diameter (int): grid spacing and shape size in pixels (default 50;
            previously hard-coded).
    """
    # Shapes are shrunk to 70% of the cell so neighbours don't touch.
    x_correction = int(0.7 * diameter / 2)
    y_correction = int(0.7 * diameter / 2)
    # White canvas (BGR, uint8).
    img = np.ones((heigth, width, 3), np.uint8) * 255
    hcount = int(diameter / 2)
    while hcount < (heigth - 3):
        wcount = int(diameter / 2)
        while wcount < (width - 3):
            if random.uniform(0, 1) >= (1 - p):
                # Pick one of three shapes uniformly.
                shape = random.uniform(0, 3)
                if shape < 1.0:
                    cv2.circle(img, (wcount, hcount), int(diameter / 2), [0, 0, 0], -1)
                elif shape < 2.0:
                    cv2.rectangle(img, (wcount - x_correction, hcount - y_correction),
                                  (wcount + x_correction, hcount + y_correction), [0, 0, 0], -1)
                else:
                    pt1 = (wcount, hcount - y_correction)
                    pt2 = (wcount - x_correction, hcount + y_correction)
                    pt3 = (wcount + x_correction, hcount + y_correction)
                    triangle_cnt = np.array([pt1, pt2, pt3])
                    cv2.drawContours(img, [triangle_cnt], 0, (0, 0, 0), -1)
            wcount += diameter
        hcount += diameter
    # Encode the probability (as a percentage) in the output file name.
    pct = int(p * 100)
    cv2.imwrite("bernoulli" + str(pct) + "M" + ".png", img)
if __name__ == '__main__':
    # Generate one sample image with an 8% shape probability per grid cell.
    create_image(0.08)
| [
"cv2.rectangle",
"random.uniform",
"cv2.drawContours",
"numpy.ones",
"numpy.array"
] | [((326, 363), 'numpy.ones', 'np.ones', (['(heigth, width, 3)', 'np.uint8'], {}), '((heigth, width, 3), np.uint8)\n', (333, 363), True, 'import numpy as np\n'), ((512, 532), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (526, 532), False, 'import random\n'), ((567, 587), 'random.uniform', 'random.uniform', (['(0)', '(3)'], {}), '(0, 3)\n', (581, 587), False, 'import random\n'), ((760, 893), 'cv2.rectangle', 'cv2.rectangle', (['img', '(wcount - x_correction, hcount - y_correction)', '(wcount + x_correction, hcount + y_correction)', '[0, 0, 0]', '(-1)'], {}), '(img, (wcount - x_correction, hcount - y_correction), (wcount +\n x_correction, hcount + y_correction), [0, 0, 0], -1)\n', (773, 893), False, 'import cv2\n'), ((1229, 1254), 'numpy.array', 'np.array', (['[pt1, pt2, pt3]'], {}), '([pt1, pt2, pt3])\n', (1237, 1254), True, 'import numpy as np\n'), ((1275, 1330), 'cv2.drawContours', 'cv2.drawContours', (['img', '[triangle_cnt]', '(0)', '(0, 0, 0)', '(-1)'], {}), '(img, [triangle_cnt], 0, (0, 0, 0), -1)\n', (1291, 1330), False, 'import cv2\n')] |
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from numpy.testing import assert_array_equal
from mygrad import Tensor
from tests.custom_strategies import tensors, valid_constant_arg
# Hypothesis strategy producing any real (non-complex) numpy dtype:
# signed/unsigned integers or floats.
real_types = (
    hnp.integer_dtypes() | hnp.unsigned_integer_dtypes() | hnp.floating_dtypes()
)
@given(
    tensor=tensors(dtype=real_types),
    dest_type=real_types,
    data=st.data(),
)
def test_astype(tensor: Tensor, dest_type: np.dtype, data: st.DataObject):
    """``Tensor.astype`` yields a detached copy in the requested dtype."""
    tensor = +tensor # give tensor a creator
    constant = data.draw(valid_constant_arg(dest_type), label="constant")
    new_tensor = tensor.astype(dest_type, constant=constant)
    # Reference: a Tensor constructed directly with the target dtype/constant.
    expected_tensor = Tensor(tensor, dtype=dest_type, constant=constant)
    assert new_tensor is not tensor
    assert new_tensor.constant is expected_tensor.constant
    # The cast result is detached from the computational graph.
    assert tensor.creator is not None
    assert new_tensor.creator is None
    assert new_tensor.dtype == dest_type
    assert new_tensor.shape == tensor.shape
    # The underlying array is a copy, not a view of the original data.
    assert new_tensor.data is not tensor.data
    assert_array_equal(new_tensor.data, expected_tensor.data)
@settings(max_examples=30)
@pytest.mark.parametrize(
    "type_strategy",
    [hnp.integer_dtypes(), hnp.unsigned_integer_dtypes(), hnp.floating_dtypes()],
)
@given(data=st.data())
def test_upcast_roundtrip(type_strategy, data: st.DataObject):
    """Casting thin -> wide -> thin within one dtype family is lossless."""
    # Draw two dtypes from the same family, sorted narrow-to-wide by itemsize.
    thin, wide = data.draw(
        st.tuples(type_strategy, type_strategy).map(
            lambda x: sorted(x, key=lambda y: np.dtype(y).itemsize)
        )
    )
    # Finite-valued array of the narrow dtype, wrapped in a Tensor.
    orig_tensor = data.draw(
        hnp.arrays(
            dtype=thin,
            shape=hnp.array_shapes(),
            elements=hnp.from_dtype(thin).filter(np.isfinite),
        ).map(Tensor)
    )
    roundtripped_tensor = orig_tensor.astype(wide).astype(thin)
    assert_array_equal(orig_tensor, roundtripped_tensor)
@pytest.mark.parametrize("src_constant", [True, False])
@pytest.mark.parametrize("dst_constant", [None, "match"])
@pytest.mark.parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"])
def test_nocopy(src_constant: bool, dst_constant, casting):
    """``astype`` with ``copy=False`` and an unchanged dtype returns the
    very same tensor object rather than a new one."""
    source = Tensor([1.0, 2.0], constant=src_constant)
    # "match" means: request the same constant-flag the source already has.
    target_constant = src_constant if dst_constant == "match" else dst_constant
    result = source.astype(
        source.dtype, copy=False, casting=casting, constant=target_constant
    )
    assert result is source
| [
"numpy.dtype",
"tests.custom_strategies.valid_constant_arg",
"hypothesis.extra.numpy.unsigned_integer_dtypes",
"hypothesis.strategies.data",
"hypothesis.strategies.tuples",
"pytest.mark.parametrize",
"hypothesis.extra.numpy.integer_dtypes",
"mygrad.Tensor",
"tests.custom_strategies.tensors",
"hypo... | [((1173, 1198), 'hypothesis.settings', 'settings', ([], {'max_examples': '(30)'}), '(max_examples=30)\n', (1181, 1198), False, 'from hypothesis import given, settings\n'), ((1908, 1962), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""src_constant"""', '[True, False]'], {}), "('src_constant', [True, False])\n", (1931, 1962), False, 'import pytest\n'), ((1964, 2020), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dst_constant"""', "[None, 'match']"], {}), "('dst_constant', [None, 'match'])\n", (1987, 2020), False, 'import pytest\n'), ((2022, 2108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""casting"""', "['no', 'equiv', 'safe', 'same_kind', 'unsafe']"], {}), "('casting', ['no', 'equiv', 'safe', 'same_kind',\n 'unsafe'])\n", (2045, 2108), False, 'import pytest\n'), ((355, 376), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {}), '()\n', (374, 376), True, 'import hypothesis.extra.numpy as hnp\n'), ((754, 804), 'mygrad.Tensor', 'Tensor', (['tensor'], {'dtype': 'dest_type', 'constant': 'constant'}), '(tensor, dtype=dest_type, constant=constant)\n', (760, 804), False, 'from mygrad import Tensor\n'), ((1112, 1169), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['new_tensor.data', 'expected_tensor.data'], {}), '(new_tensor.data, expected_tensor.data)\n', (1130, 1169), False, 'from numpy.testing import assert_array_equal\n'), ((1852, 1904), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['orig_tensor', 'roundtripped_tensor'], {}), '(orig_tensor, roundtripped_tensor)\n', (1870, 1904), False, 'from numpy.testing import assert_array_equal\n'), ((2173, 2214), 'mygrad.Tensor', 'Tensor', (['[1.0, 2.0]'], {'constant': 'src_constant'}), '([1.0, 2.0], constant=src_constant)\n', (2179, 2214), False, 'from mygrad import Tensor\n'), ((300, 320), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {}), '()\n', (318, 320), True, 'import hypothesis.extra.numpy as hnp\n'), 
((323, 352), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {}), '()\n', (350, 352), True, 'import hypothesis.extra.numpy as hnp\n'), ((621, 650), 'tests.custom_strategies.valid_constant_arg', 'valid_constant_arg', (['dest_type'], {}), '(dest_type)\n', (639, 650), False, 'from tests.custom_strategies import tensors, valid_constant_arg\n'), ((400, 425), 'tests.custom_strategies.tensors', 'tensors', ([], {'dtype': 'real_types'}), '(dtype=real_types)\n', (407, 425), False, 'from tests.custom_strategies import tensors, valid_constant_arg\n'), ((462, 471), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (469, 471), True, 'import hypothesis.strategies as st\n'), ((1251, 1271), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {}), '()\n', (1269, 1271), True, 'import hypothesis.extra.numpy as hnp\n'), ((1273, 1302), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {}), '()\n', (1300, 1302), True, 'import hypothesis.extra.numpy as hnp\n'), ((1304, 1325), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {}), '()\n', (1323, 1325), True, 'import hypothesis.extra.numpy as hnp\n'), ((1342, 1351), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (1349, 1351), True, 'import hypothesis.strategies as st\n'), ((1452, 1491), 'hypothesis.strategies.tuples', 'st.tuples', (['type_strategy', 'type_strategy'], {}), '(type_strategy, type_strategy)\n', (1461, 1491), True, 'import hypothesis.strategies as st\n'), ((1672, 1690), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {}), '()\n', (1688, 1690), True, 'import hypothesis.extra.numpy as hnp\n'), ((1543, 1554), 'numpy.dtype', 'np.dtype', (['y'], {}), '(y)\n', (1551, 1554), True, 'import numpy as np\n'), ((1713, 1733), 'hypothesis.extra.numpy.from_dtype', 'hnp.from_dtype', (['thin'], {}), '(thin)\n', (1727, 1733), True, 'import hypothesis.extra.numpy as hnp\n')] |
from __future__ import print_function, division, absolute_import
import numpy as np
from . import models
import multiprocessing as multi
from collections import MutableMapping
from ..plasma import plasma
########################################
# Physical constants, DO NOT OVERWRITE #
########################################
q = 1.6e-19  # elementary charge [C]
c = 2.998e8  # speed of light [m/s]
mp = 1.672e-27  # proton mass [kg]
class Sensor(object):
    """A representation for a camera sensor for the Fabry Perot

    Attributes:
        nx (int): number of pixels in the x direction
        ny (int): number of pixels in the y direction
        px_size (float): Pixel size in mm
        x0 (float): x location of the center
        y0 (float): y location of the center
        sensor (np.ndarray): 2D array representing the sensor where each
            element is the count value for that pixel
    """

    def __init__(self, nx, ny, px_size=0.004):
        super(Sensor, self).__init__()
        self.nx = int(nx)
        self.ny = int(ny)
        self.sensor = np.zeros((nx, ny))
        self.px_size = px_size
        self.x0 = nx / 2.0
        self.y0 = ny / 2.0
        # Delay the creation of the (potentially large) R matrix until it
        # is actually needed.
        self._R = None

    def create_Rgrid(self):
        """Creates a 2D matrix of radii values (in pixels) for each pixel.

        NOTE(review): np.meshgrid(x, y) returns arrays of shape (ny, nx),
        which is the transpose of ``self.sensor``'s (nx, ny) shape.  The two
        only agree for square sensors — confirm the intended convention
        before relying on non-square geometries.

        Returns:
            np.ndarray: radii of shape (ny, nx)
        """
        nx = self.nx
        ny = self.ny
        # Pixel coordinates are 1-based, matching the original convention.
        x = np.arange(1, nx + 1, 1)
        y = np.arange(1, ny + 1, 1)

        XX, YY = np.meshgrid(x, y)
        R = np.sqrt((XX - self.x0) ** 2 + (YY - self.y0) ** 2)
        return R

    @property
    def R(self):
        """np.ndarray: 2D array of radii values for each pixel (lazily built)."""
        if self._R is None:
            self._R = self.create_Rgrid()
        return self._R

    def calculate_emission(self, etalon, light_source, nprocs=4):
        """Calculates emission from a light source through an etalon onto the sensor

        Args:
            etalon (Etalon): representation of the etalon
            light_source (LightSource): representation of the light source
            nprocs (int): number of worker processes to use
        """
        self.sensor = etalon.calculate_emission(self, light_source, nprocs=nprocs)

    @classmethod
    def from_dict(cls, sensor):
        """Creates an instance of Sensor from a dictionary

        Args:
            sensor (dict): dictionary representing a sensor

        Returns:
            Sensor: a new instance of a Sensor
        """
        nx = sensor.get('nx', 1024)
        ny = sensor.get('ny', 1024)
        px_size = sensor.get('px_size', 0.004)
        return cls(nx, ny, px_size=px_size)

    def to_dict(self):
        """Returns a dictionary representation of a Sensor

        Returns:
            dict: a dictionary representation of a Sensor
        """
        return {'nx': self.nx, 'ny': self.ny, 'px_size': self.px_size}

    def __repr__(self):
        class_name = type(self).__name__
        return '{}({!r}, {!r}, px_size={!r})'.format(class_name, self.nx, self.ny, self.px_size)
class Etalon(object):
    """
    Class that represents an etalon for a Fabry-Perot spectrometer

    Attributes:
        L (float): focal length of lens for the camera
        d (float): etalon spacing
        F (float): finesse of etalon
    """

    def __init__(self, L, d, F):
        super(Etalon, self).__init__()
        self.L = L
        self.d = d
        self.F = F

    def calculate_emission(self, sensor, light_source, nprocs=4):
        """Calculates the emission onto a sensor from a light source

        Note: This uses multiprocessing for speed.  Each process loops over
        sub-chunks of its radius array because of memory constraints when
        the sensor is large.

        Args:
            sensor (Sensor): represents a camera sensor
            light_source (LightSource): represents a light source for the Fabry Perot
            nprocs (int): number of processes to use

        Returns:
            np.ndarray: shape matches sensor.sensor.shape
        """
        r = sensor.R
        px_size = sensor.px_size

        w = light_source.wavelength
        mu = light_source.mu
        amp = light_source.amplitude
        vel = light_source.velocity
        temp = light_source.temperature

        # One chunk of radii per worker process.
        split_r = np.array_split(r.flatten(), nprocs)

        procs = []
        sigs = {}
        out = multi.Queue()
        labels = ['{0}'.format(x) for x in range(nprocs)]
        for k in range(nprocs):
            p = multi.Process(target=Etalon._calculate_emission,
                              args=(split_r[k], self.L / px_size, self.d, self.F, w, mu, amp, temp, vel),
                              kwargs={'out': out, 'label': labels[k]})
            procs.append(p)
            p.start()

        # Drain the result queue *before* joining: a child blocked putting
        # onto a full queue would otherwise never exit and join() would hang.
        for i in range(nprocs):
            tup = out.get()
            sigs[tup[0]] = tup[1]

        for p in procs:
            p.join()

        # Reassemble the chunks in worker-label order so the flattened
        # emission lines up with the flattened radius grid.
        emission = []
        for k in labels:
            emission.append(sigs[k])
        emission = np.concatenate(emission)
        emission.shape = r.shape

        return emission

    @staticmethod
    def _calculate_emission(r, L, d, F, w, mu, amp, temp, vel, out=None, label=None, noise=True):
        """Helper function for calculating emission

        Note: This is utilized by calculate_emission for multiprocessing.

        Args:
            r (np.ndarray): radii in pixels
            L (float): focal length of camera lens in pixels
            d (float): etalon spacing in mm
            F (float): finesse of etalon
            w (float): wavelength in nm
            mu (float): relative mass
            amp (float): amplitude of line
            temp (float): ion temperature in eV
            vel (float): velocity of ion in m/s
            out (multiprocessing.Queue): output queue
            label (str): label for the output being put into the output queue
            noise (bool): add per-pixel normal noise with scale sqrt(signal)

        Returns:
            np.ndarray (optional) only if not using for multiprocessing
        """
        # Memory is a problem here because of broadcasting, so split the
        # radii into chunks and evaluate the forward model chunk by chunk.
        model = []
        r_split = np.array_split(r.flatten(), 1000)
        for r_sp in r_split:
            model.append(models.forward_model(r_sp, L, d, F, w, mu, amp, temp, vel))
        model = np.concatenate(model)

        if noise:
            print(label, 'adding noise to the image')
            # Bug fix: this loop previously used the Python-2-only name
            # ``xrange`` (NameError on Python 3), and the periodic debug
            # print drew a *second* random sample instead of showing the
            # one actually added.
            for i in range(len(model)):
                sample = np.random.normal(scale=np.sqrt(model[i]))
                model[i] = model[i] + sample
                if i % 100 == 0:
                    print(model[i], sample)

        if out and label:
            print(label)
            out.put((label, model))
        else:
            return model

    def __repr__(self):
        class_name = type(self).__name__
        return "{}({!r}, {!r}, {!r})".format(class_name, self.L, self.d, self.F)

    @classmethod
    def from_dict(cls, etalon):
        """Creates an instance of Etalon from a dictionary

        Args:
            etalon (dict): dictionary representing a etalon

        Returns:
            Etalon: a new instance of an Etalon
        """
        L = etalon.get('L', 150.0)
        d = etalon.get('d', 0.88)
        F = etalon.get('F', 21.0)
        return cls(L, d, F)

    def to_dict(self):
        """Returns a dictionary representing itself

        Returns:
            dict: a dictionary representing itself
        """
        return {"L": self.L, "d": self.d, "F": self.F}
class LightSource(object):
    """A representation of a light source for a Fabry-Perot spectrometer

    Attributes:
        temperature (float): temperature of the emitting ion in eV
        wavelength (float): wavelength of the light emitted in nm
        mu (float): relative mass of the ion
        amplitude (float): amplitude of the light emitted (you can choose your units here...)
        velocity (VelocityProfile or float): velocity of the emitting ion in m/s
    """
    def __init__(self, Ti, w, mu, velocity, amplitude=1):
        super(LightSource, self).__init__()
        self.temperature = Ti
        self.wavelength = w
        self.mu = mu
        self.amplitude = amplitude
        self.velocity = velocity

    def __repr__(self):
        class_name = type(self).__name__
        return "{}({!r}, {!r}, {!r}, {!r}, amplitude={!r})".format(
            class_name, self.temperature, self.wavelength, self.mu,
            self.velocity, self.amplitude)

    @classmethod
    def from_dict(cls, light_source):
        """Creates a new instance of LightSource from a dictionary

        Args:
            light_source (dict): dictionary representing a light source

        Returns:
            LightSource
        """
        from collections.abc import MutableMapping
        temperature = light_source.get('temperature', 0.5)
        wavelength = light_source.get('wavelength', 488.0)
        mu = light_source.get('mu', 40.0)
        amplitude = light_source.get('amplitude', 1.0)
        velocity = light_source.get('velocity', 0.0)
        if isinstance(velocity, MutableMapping):
            # BUG FIX: the original called velocity.pop('class_name') after a
            # .get with a default, so a mapping *without* 'class_name' raised
            # KeyError; pop with a default handles that case gracefully and
            # falls back to a zero velocity.
            vel_name = velocity.pop('class_name', None)
            vel_class = globals().get(vel_name, None) if vel_name else None
            if vel_class is None:
                velocity = 0.0
            else:
                velocity = vel_class(**velocity)
        return cls(temperature, wavelength, mu, velocity, amplitude=amplitude)

    def to_dict(self):
        """Returns a dict representing itself"""
        velocity = 0.0
        if self.velocity is not None:
            velocity = self.velocity
            try:
                velocity = velocity.to_dict()
            except AttributeError:
                # plain float velocities have no to_dict; store them as-is
                pass
        return {
            'temperature': self.temperature,
            'wavelength': self.wavelength,
            'mu': self.mu,
            'amplitude': self.amplitude,
            'velocity': velocity,
        }
class UniformPlasma(LightSource):
    """A representation of a uniform density and uniform Ti plasma with ion
    species mu emitting light for a Fabry-Perot spectrometer

    Attributes:
        mu (float): relative mass of the ion
        velocity (Union[VelocityProfile,float]): velocity of the emitting ion in m/s
        ne (float): electron density in cm^-3
        pec (float): photon emissivity coefficient (need to decide on units here)
    """
    def __init__(self, ne, Ti, pec, w, velocity=None, mu=40.0):
        super(UniformPlasma, self).__init__(Ti, w, mu, velocity)
        self.ne = ne
        self.pec = pec
        self.mu = mu

    def ion_emission(self, r, wavelength, cos_theta=None):
        """Calculates ion emission at a radius r for wavelengths provided

        Args:
            r (Union[float, np.ndarray]): radii to calculate ion emission at
            wavelength (Union[float, np.ndarray]): wavelength to calculate emission line
                profile
            cos_theta (Union[float, np.ndarray]): cos(theta) to project velocity onto a unit
                vector an angle theta from the toroidal direction

        Returns:
            np.ndarray
        """
        velocity = 0.0
        if self.velocity is not None:
            velocity = self.velocity
        if callable(velocity):
            # radial velocity profile: evaluate it at r
            velocity = velocity(r)
        if cos_theta is not None:
            # project the (toroidal) velocity onto the viewing chord
            # (debug prints from the original removed)
            velocity = velocity * np.asarray(cos_theta)
        line_profile = self.gaussian(wavelength, velocity)
        emission = self.ne ** 2 * self.pec * line_profile / (4 * np.pi)
        return emission

    def chord_emission(self, impact_factor, wavelength):
        """Integrates ion emission along a viewing chord.

        Args:
            impact_factor (Union[float, np.ndarray]): chord impact parameter;
                must be >= 0
            wavelength (np.ndarray): wavelengths for the line profile

        Returns:
            np.ndarray: radiance per wavelength

        Raises:
            ValueError: if impact_factor is negative
        """
        b = np.asarray(impact_factor)
        if np.max(b) < 0.0:
            raise ValueError('impact_factor must be greater than or equal to zero')
        max_radii = 150.0
        x_max = np.sqrt(max_radii ** 2 - b ** 2)
        x_arr = np.linspace(0.0, x_max, 1000)
        # x_arr and subsequent arrays must broadcast against the wavelength axis
        x_arr = x_arr[np.newaxis, :]
        w = wavelength[:, np.newaxis]
        cos_theta = b / np.sqrt(b ** 2 + x_arr ** 2)
        rarr = np.sqrt(x_arr ** 2 + b ** 2)
        emission = self.ion_emission(rarr, w, cos_theta=cos_theta)
        # factor 2: integrate over half the chord and double by symmetry
        radiance = 2.0 * np.trapz(emission, x=x_arr, axis=1)
        return radiance

    def gaussian(self, wavelength, velocity):
        """Calculates doppler broadened and shifted gaussian

        Args:
            wavelength (Union[float, np.ndarray]): wavelength to calculate emission line profile
            velocity (Union[float, np.ndarray]): velocity of ion for doppler shift

        Returns:
            np.ndarray: normalized gaussian line profile
        """
        w = np.asarray(wavelength)
        v = np.asarray(velocity)
        sigma = self.sigma
        w_shift = self.calculate_shift(v)
        norm = np.sqrt(2 * np.pi) * sigma
        return np.exp(-0.5 * (w - w_shift) ** 2 / sigma ** 2) / norm

    @property
    def sigma(self):
        """Thermal doppler broadening"""
        return np.sqrt(q * self.temperature / self.mass) * self.wavelength / c

    def calculate_shift(self, velocity):
        """Calculate doppler shift from the ion velocity

        Args:
            velocity (Union[float, np.ndarray]): velocity in m/s

        Returns:
            np.ndarray
        """
        return self.wavelength * (1.0 - velocity / c)

    @property
    def mass(self):
        """Mass of ion in kg"""
        return self.mu * mp

    def __repr__(self):
        class_name = type(self).__name__
        return "{}({!r}, {!r}, {!r}, {!r}, velocity={!r}, mu={!r})".format(
            class_name, self.ne, self.temperature, self.pec, self.wavelength,
            self.velocity, self.mu)

    def to_dict(self):
        """Returns a dict representation
        """
        velocity = 0.0
        if self.velocity is not None:
            velocity = self.velocity
            try:
                velocity = velocity.to_dict()
            except AttributeError:
                # plain float velocities have no to_dict; store them as-is
                pass
        return {
            'temperature': self.temperature,
            'wavelength': self.wavelength,
            'mu': self.mu,
            'velocity': velocity,
            'pec': self.pec,
            'ne': self.ne
        }

    @classmethod
    def from_dict(cls, plasma):
        """Creates a new instance of UniformPlasma from dict

        Args:
            plasma (dict): dictionary representation of a UniformPlasma

        Returns:
            UniformPlasma
        """
        from collections.abc import MutableMapping
        temperature = plasma.get('temperature', 0.5)
        wavelength = plasma.get('wavelength', 488.0)
        mu = plasma.get('mu', 40.0)
        pec = plasma.get('pec')
        ne = plasma.get("ne", 1e12)
        velocity = plasma.get('velocity', 0.0)
        if isinstance(velocity, MutableMapping):
            # BUG FIX: same as LightSource.from_dict in the original — pop
            # without a default raised KeyError when 'class_name' was absent.
            vel_name = velocity.pop('class_name', None)
            vel_class = globals().get(vel_name, None) if vel_name else None
            if vel_class is None:
                velocity = 0.0
            else:
                velocity = vel_class(**velocity)
        return cls(ne, temperature, pec, wavelength, velocity=velocity, mu=mu)
class VelocityProfile(object):
    """Edge-driven toroidal velocity profile.

    Attributes:
        Vmax (float): peak velocity
        max_radius (float): radius where the peak velocity occurs
        length_scale (float): scale of the inward velocity gradient
        edge_scale (float): scale of the outer edge gradient
        R0 (float): radial location of the edge
        offset (float): central offset velocity
    """
    def __init__(self, Vmax, max_radius, length_scale, edge_scale, R0=140.0, offset=0.0):
        super(VelocityProfile, self).__init__()
        self.Vmax = Vmax
        self.max_radius = max_radius
        self.length_scale = length_scale
        self.edge_scale = edge_scale
        self.R0 = R0
        self.offset = offset

    def vphi(self, r):
        """Toroidal velocity evaluated at r.

        Args:
            r (Union[float, np.ndarray]): radii to evaluate vphi at

        Returns:
            np.ndarray: velocities rescaled so the peak equals Vmax
        """
        radii = np.asarray(r)
        shape = self.right_edge_profile(radii) * self.gaussian(radii)
        # normalize so the largest value of the combined shape reaches Vmax
        return shape * (self.Vmax / np.max(shape))

    def right_edge_profile(self, r):
        """Outer-edge roll-off of the profile.

        Args:
            r (Union[float, np.ndarray]): radii to evaluate at

        Returns:
            np.ndarray: values in (0, 1), 0.5 exactly at r == R0
        """
        edge = np.tanh((r - self.R0) / self.edge_scale)
        return 0.5 * (1.0 - edge)

    def gaussian(self, r):
        """Inward velocity-gradient bump centered at max_radius.

        Args:
            r (Union[float, np.ndarray]): radii to evaluate at

        Returns:
            np.ndarray: offset/Vmax at large |r - max_radius|, 1 at the peak
        """
        ratio = self.offset / self.Vmax
        bump = np.exp(-(r - self.max_radius) ** 2 / self.length_scale ** 2)
        return (1 - ratio) * bump + ratio

    def __call__(self, r):
        return self.vphi(r)

    def __repr__(self):
        cls = type(self).__name__
        return "{}({!r},{!r},{!r},{!r},R0={!r},offset={!r})".format(
            cls, self.Vmax, self.max_radius, self.length_scale,
            self.edge_scale, self.R0, self.offset)

    def to_dict(self):
        """Return a dict representation of this VelocityProfile.

        Returns:
            dict: attribute values plus the concrete class name
        """
        payload = {'class_name': type(self).__name__}
        payload.update(Vmax=self.Vmax, max_radius=self.max_radius,
                       length_scale=self.length_scale,
                       edge_scale=self.edge_scale,
                       R0=self.R0, offset=self.offset)
        return payload

    @classmethod
    def from_dict(cls, velocity):
        """Build a VelocityProfile from a dict representation.

        Args:
            velocity (dict): dict representation of a VelocityProfile

        Returns:
            VelocityProfile
        """
        required = ('Vmax', 'max_radius', 'length_scale', 'edge_scale')
        kwargs = {name: velocity.get(name) for name in required}
        return cls(R0=velocity.get('R0', 140.0),
                   offset=velocity.get('offset', 0.0), **kwargs)
# def PCX_Plasma(LightSource):
#
# def __init__(self, Ti, w, mu, velocity_outer, R_outer, Lnu, impact_factor):
# super(PCX_Plasma, self).__init__(Ti, w, mu, velocity_outer)
# self.impact_factor = impact_factor
# r, _, _ = plasma.calculate_r_theta_x_from_impact_factor(self.impact_factor, rmax=40, npts=500)
#
# self.velocity =
| [
"numpy.trapz",
"numpy.sqrt",
"multiprocessing.Process",
"numpy.asarray",
"numpy.tanh",
"numpy.max",
"numpy.exp",
"numpy.zeros",
"numpy.linspace",
"numpy.concatenate",
"numpy.meshgrid",
"multiprocessing.Queue",
"numpy.arange"
] | [((1010, 1028), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (1018, 1028), True, 'import numpy as np\n'), ((1402, 1425), 'numpy.arange', 'np.arange', (['(1)', '(nx + 1)', '(1)'], {}), '(1, nx + 1, 1)\n', (1411, 1425), True, 'import numpy as np\n'), ((1438, 1461), 'numpy.arange', 'np.arange', (['(1)', '(ny + 1)', '(1)'], {}), '(1, ny + 1, 1)\n', (1447, 1461), True, 'import numpy as np\n'), ((1480, 1497), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1491, 1497), True, 'import numpy as np\n'), ((1511, 1561), 'numpy.sqrt', 'np.sqrt', (['((XX - self.x0) ** 2 + (YY - self.y0) ** 2)'], {}), '((XX - self.x0) ** 2 + (YY - self.y0) ** 2)\n', (1518, 1561), True, 'import numpy as np\n'), ((4373, 4386), 'multiprocessing.Queue', 'multi.Queue', ([], {}), '()\n', (4384, 4386), True, 'import multiprocessing as multi\n'), ((5014, 5038), 'numpy.concatenate', 'np.concatenate', (['emission'], {}), '(emission)\n', (5028, 5038), True, 'import numpy as np\n'), ((6292, 6313), 'numpy.concatenate', 'np.concatenate', (['model'], {}), '(model)\n', (6306, 6313), True, 'import numpy as np\n'), ((11873, 11898), 'numpy.asarray', 'np.asarray', (['impact_factor'], {}), '(impact_factor)\n', (11883, 11898), True, 'import numpy as np\n'), ((12054, 12086), 'numpy.sqrt', 'np.sqrt', (['(max_radii ** 2 - b ** 2)'], {}), '(max_radii ** 2 - b ** 2)\n', (12061, 12086), True, 'import numpy as np\n'), ((12103, 12132), 'numpy.linspace', 'np.linspace', (['(0.0)', 'x_max', '(1000)'], {}), '(0.0, x_max, 1000)\n', (12114, 12132), True, 'import numpy as np\n'), ((12413, 12441), 'numpy.sqrt', 'np.sqrt', (['(x_arr ** 2 + b ** 2)'], {}), '(x_arr ** 2 + b ** 2)\n', (12420, 12441), True, 'import numpy as np\n'), ((13288, 13310), 'numpy.asarray', 'np.asarray', (['wavelength'], {}), '(wavelength)\n', (13298, 13310), True, 'import numpy as np\n'), ((13323, 13343), 'numpy.asarray', 'np.asarray', (['velocity'], {}), '(velocity)\n', (13333, 13343), True, 'import numpy as np\n'), ((16844, 
16857), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (16854, 16857), True, 'import numpy as np\n'), ((4493, 4666), 'multiprocessing.Process', 'multi.Process', ([], {'target': 'Etalon._calculate_emission', 'args': '(split_r[k], self.L / px_size, self.d, self.F, w, mu, amp, temp, vel)', 'kwargs': "{'out': out, 'label': labels[k]}"}), "(target=Etalon._calculate_emission, args=(split_r[k], self.L /\n px_size, self.d, self.F, w, mu, amp, temp, vel), kwargs={'out': out,\n 'label': labels[k]})\n", (4506, 4666), True, 'import multiprocessing as multi\n'), ((11546, 11567), 'numpy.asarray', 'np.asarray', (['cos_theta'], {}), '(cos_theta)\n', (11556, 11567), True, 'import numpy as np\n'), ((11910, 11919), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (11916, 11919), True, 'import numpy as np\n'), ((12368, 12396), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 + x_arr ** 2)'], {}), '(b ** 2 + x_arr ** 2)\n', (12375, 12396), True, 'import numpy as np\n'), ((12584, 12619), 'numpy.trapz', 'np.trapz', (['emission'], {'x': 'x_arr', 'axis': '(1)'}), '(emission, x=x_arr, axis=1)\n', (12592, 12619), True, 'import numpy as np\n'), ((13428, 13446), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (13435, 13446), True, 'import numpy as np\n'), ((13471, 13517), 'numpy.exp', 'np.exp', (['(-0.5 * (w - w_shift) ** 2 / sigma ** 2)'], {}), '(-0.5 * (w - w_shift) ** 2 / sigma ** 2)\n', (13477, 13517), True, 'import numpy as np\n'), ((17028, 17039), 'numpy.max', 'np.max', (['vel'], {}), '(vel)\n', (17034, 17039), True, 'import numpy as np\n'), ((17611, 17671), 'numpy.exp', 'np.exp', (['(-(r - self.max_radius) ** 2 / self.length_scale ** 2)'], {}), '(-(r - self.max_radius) ** 2 / self.length_scale ** 2)\n', (17617, 17671), True, 'import numpy as np\n'), ((13617, 13658), 'numpy.sqrt', 'np.sqrt', (['(q * self.temperature / self.mass)'], {}), '(q * self.temperature / self.mass)\n', (13624, 13658), True, 'import numpy as np\n'), ((17305, 17345), 'numpy.tanh', 'np.tanh', (['((r - self.R0) 
/ self.edge_scale)'], {}), '((r - self.R0) / self.edge_scale)\n', (17312, 17345), True, 'import numpy as np\n'), ((6513, 6530), 'numpy.sqrt', 'np.sqrt', (['model[i]'], {}), '(model[i])\n', (6520, 6530), True, 'import numpy as np\n'), ((6624, 6641), 'numpy.sqrt', 'np.sqrt', (['model[i]'], {}), '(model[i])\n', (6631, 6641), True, 'import numpy as np\n')] |
from sklearn.tree import DecisionTreeClassifier
import unittest
import pandas as pd
import optuna
from optuna.samplers import TPESampler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import warnings
from avaliacao import Experimento, OtimizacaoObjetivoArvoreDecisao,OtimizacaoObjetivoRandomForest
from resultado import Resultado,Fold
from metodo import ScikitLearnAprendizadoDeMaquina
class TestResultado(unittest.TestCase):
    """Unit tests for the Resultado metrics (macro F1 and accuracy)."""
    # Ground truth / predictions with three classes (0, 1, 2)
    y = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2])
    predict_y = np.array([0, 1, 1, 2, 2, 1, 2, 1, 2, 0, 2, 2, 1])
    # Degenerate single-class case
    y_zero = np.array([0] * 13)
    predict_y_zero = np.array([0] * 13)

    def test_macro_f1(self):
        resultado = Resultado(TestResultado.y, TestResultado.predict_y)
        # hand-computed per-class precision/recall for the arrays above
        precision = [1 / 2, 1 / 5, 4 / 6]
        recall = [1 / 2, 1 / 3, 4 / 8]
        f1_esp = [2 * p * r / (p + r) for p, r in zip(precision, recall)]
        macro_f1 = np.average(f1_esp)
        self.assertAlmostEqual(resultado.macro_f1, macro_f1, msg="Macro F1 não está com o valor esperado")

    def test_acuracia(self):
        resultado = Resultado(TestResultado.y, TestResultado.predict_y)
        # 6 of the 13 predictions above match the ground truth
        self.assertAlmostEqual(resultado.acuracia, 6 / 13, msg="Acuracia não está com o valor esperado")
class Dados:
    """Fixture data shared by the test cases below: a training frame, a test
    frame and the full dataset, each with a ``realClass`` target column."""
    # 10-row training partition (binary target)
    df_treino = pd.DataFrame({"A":[1, 1, 2, 2, 3, 4, 4, 5, 6, 1],
                        "B": [True,False,True,False,True,False,False,False,False,True],
                        "C":[23, 3, 123, 55, 12,33,44,21,55,22],
                        "D":[1, 1, 1, 1, 1, 1 , 1 , 1 , 1 , 1 ],
                        "realClass":[1,1,0,0,0,1,1,0,1,0]})
    # 15-row test partition (binary target)
    df_teste = pd.DataFrame({"A":[1,1,1,2,3,3,3,3,4,4,4,4,5,5,5],
                        "B": [True,False,True,True,False,True,True,False,True,True,False,True,True,False,True],
                        "C":[333,-1,5,333,-12,52,3323,-12,52,3323,-41,53,3333,-12,51],
                        "D":[2, 2, 3,2, 2, 3,2, 23, 3,2, 21, 3,2, 22, 3],
                        "realClass":[1,0,1,1,0,1,1,0,1,1,0,0,0,0,1]})
    # 25-row full dataset (three classes) used for k-fold generation
    df_dados = pd.DataFrame({"A":[1, 1, 2, 2, 3, 4, 4, 5, 6,1,1,1,1,2,3,3,3,3,4,4,4,4,5,5,5],
                        "B": [True,False,True,False,True,False,False,False,False,True,
                                True,False,True,True,False,True,True,False,True,True,False,True,True,False,True],
                        "C":[23, 3, 123, 55, 12,33,44,21,55,22,333,-1,5,333,-12,52,3323,-12,52,3323,-41,53,3333,-12,51],
                        "D":[1, 1, 1, 1, 1, 1, 1, 1, 1,1,2, 2, 3,2, 2, 3,2, 23, 3,2, 21, 3,2, 22, 3],
                        "realClass":[1,1,1,1,2,2,2,2,2,2,2,2,2,2,0,1,1,0,1,1,0,0,0,0,1]})
class MetodoTest(unittest.TestCase):
    """Tests the ScikitLearnAprendizadoDeMaquina wrapper via a decision tree."""
    def test_eval(self):
        # deterministic tree so the expected metric values are stable
        estimator = DecisionTreeClassifier(random_state=1)
        metodo = ScikitLearnAprendizadoDeMaquina(estimator)
        resultado = metodo.eval(Dados.df_treino, Dados.df_teste, "realClass")
        self.assertListEqual(
            list(Dados.df_teste["realClass"]), list(resultado.y),
            "A lista de classe alvo da partição de teste não é a esperada")
        macro_f1 = resultado.macro_f1
        acuracia = resultado.acuracia
        print(f"Macro f1: {macro_f1} Acuracia: {acuracia}")
        self.assertAlmostEqual(macro_f1, 0.5982142857142857, msg="Macro F1 não está com o valor esperado")
        self.assertAlmostEqual(acuracia, 0.6, msg="Acuracia não está com o valor esperado")
class TestFold(unittest.TestCase):
    """Structural checks for Fold partitioning: k-fold generation and the
    nested validation folds."""
    @staticmethod
    def folds_test(tester,df_dados,folds,k,is_cross_validation,num_repeticao):
        # accumulate the union of all test partitions to check full coverage
        lstTeste = set()
        for i,f in enumerate(folds):
            ids_teste = set(f.df_data_to_predict.index.values.tolist())
            ids_treino = set(f.df_treino.index.values.tolist())
            # check that train and test share no instances
            itens_comuns = ids_teste & ids_treino
            tester.assertTrue(len(itens_comuns)==0,f"Existem instancias iguais no treino e na amostra para predição: {itens_comuns} no fold #{i} repeticao #{num_repeticao}")
            # check that every instance was used (train + test == full dataset)
            tester.assertEqual(len(df_dados),len(ids_teste)+len(ids_treino),f"A soma do itens do treino e dos itens para predição não está igual ao dataset completo no fold #{i} repeticao #{num_repeticao}")
            # check that this test partition does not reappear in another fold
            for j,fj in enumerate(folds):
                if(i!=j):
                    ids_teste_j = set(fj.df_data_to_predict.index.values.tolist())
                    itens_comuns = ids_teste & ids_teste_j
                    tester.assertTrue(len(itens_comuns)==0,f"Instancias no teste do fold {i} repeticao #{num_repeticao} também foi usado em no fold {j}. Indices comuns:{itens_comuns}")
            lstTeste = lstTeste | set(f.df_data_to_predict.index.values.tolist())
            # per-fold size check moved to test_gerar_k_folds (kept for reference)
            #if(i<k-1):
            #    tester.assertEqual(tam_fold,len(ids_teste),"O tamanho do partição deveria ser floor(numero_de_itens/val_k) - exceto o ultimo que deve possuir mais.")
            #else:
            #    tester.assertTrue(len(ids_teste)>=tam_fold, "No ultimo fold, o tamanho da particao deve ser maior ou igual a floor(numero_de_itens/val_k)")
        # check that every instance ended up in some test partition
        if(is_cross_validation):
            tester.assertEqual(len(lstTeste),len(df_dados),"Algumas instancias não foram usadas no teste.")
    def test_gerar_k_folds(self):
        k = 7
        num_repeticoes = 3
        #print("DADOS: "+str(len(TestFold.df_dados)))
        tam_fold = len(Dados.df_dados)//k
        folds = Fold.gerar_k_folds(Dados.df_dados,col_classe="realClass",val_k=k,num_repeticoes=num_repeticoes,seed=1)
        # check that k folds were created per repetition
        self.assertEqual(k*num_repeticoes,len(folds),"O número de folds criado não é quantidade solicitada")
        # check that the data was shuffled (expected order is seed-dependent)
        arr_lista_fold0 = list(folds[0].df_data_to_predict.index.values)
        self.assertTrue(arr_lista_fold0!=[0,1,2], "A lista não foi embaralhada!")
        self.assertListEqual(arr_lista_fold0,[14, 13, 17], "A lista não foi embaralhada corretamente! Não esqueça de usar a seed=seed+num_repeticoes")
        # check that the data was partitioned correctly,
        # validating each repetition separately
        for repeticao_i in range(num_repeticoes):
            folds_por_repeticao = folds[repeticao_i*k:repeticao_i*k+k]
            TestFold.folds_test(self,Dados.df_dados,folds_por_repeticao,k,True,repeticao_i)
            for i,f in enumerate(folds_por_repeticao):
                ids_teste = set(f.df_data_to_predict.index.values.tolist())
                ids_treino = set(f.df_treino.index.values.tolist())
                # check that each test partition has the expected size
                if(i<k-1):
                    self.assertEqual(tam_fold,len(ids_teste),"O tamanho do partição deveria ser floor(numero_de_itens/val_k) - exceto o ultimo que deve possuir mais.")
                else:
                    self.assertTrue(len(ids_teste)>=tam_fold, "No ultimo fold, o tamanho da particao deve ser maior ou igual a floor(numero_de_itens/val_k)")
    def test_arr_validacao(self):
        # build a fold with nested validation folds (2 repetitions x 3 folds)
        fold = Fold(Dados.df_treino,Dados.df_teste,"realClass", num_folds_validacao=3,num_repeticoes_validacao=2)
        # check that 6 validation folds were created
        self.assertEqual(len(fold.arr_folds_validacao),6,"Foi solicitado 2 execuções de 3 folds, ou seja, no final 6 folds")
        # validation folds must not have nested validation of their own
        for fold_validacao in fold.arr_folds_validacao:
            self.assertEqual(len(fold_validacao.arr_folds_validacao),0,"O fold de validação não possuirá validação")
        # run the structural checks on each repetition separately
        arr_folds_execucao_1 = fold.arr_folds_validacao[:3]
        TestFold.folds_test(self,Dados.df_treino,arr_folds_execucao_1,3,True,1)
        arr_folds_execucao_2 = fold.arr_folds_validacao[3:]
        TestFold.folds_test(self,Dados.df_treino,arr_folds_execucao_2,3,True,1)
class ExperimentoTest(unittest.TestCase):
    """End-to-end tests for Experimento: average macro F1 and per-fold results."""
    def setUp(self):
        # scikit-learn / optuna emit many deprecation warnings during the runs
        warnings.simplefilter("ignore")
    def get_experimento(self,ml_method=None,ClasseObjetivoOtimizacao=OtimizacaoObjetivoArvoreDecisao):
        """Builds an Experimento over 5 folds (each with 2x3 validation folds).

        Args:
            ml_method: estimator to evaluate; ``None`` creates a fresh
                DecisionTreeClassifier(min_samples_split=1, random_state=1).
            ClasseObjetivoOtimizacao: Optuna objective class to optimize.

        Returns:
            Experimento
        """
        # BUG FIX: the estimator used to be created *in the default argument*,
        # i.e. built once at definition time and then shared (and re-fitted)
        # across every call/test.  A None sentinel gives each call its own
        # fresh estimator.
        if ml_method is None:
            ml_method = DecisionTreeClassifier(min_samples_split=1,random_state=1)
        folds = Fold.gerar_k_folds(Dados.df_dados,val_k=5,col_classe="realClass",
                                    num_repeticoes=1,seed=1,
                                    num_folds_validacao=3,num_repeticoes_validacao=2)
        exp = Experimento(folds,ml_method, ClasseObjetivoOtimizacao, num_trials=10,
                    sampler=optuna.samplers.TPESampler(seed=1, n_startup_trials=3))
        return exp
    def test_macro_f1_avg(self):
        exp = self.get_experimento()
        # expected value is tied to seed=1 and the fixture data
        self.assertAlmostEqual(exp.macro_f1_avg, 0.39380952380952383, msg="Valor inesperado de Macro F1")
    def test_resultados(self):
        exp = self.get_experimento()
        fold = exp.folds[0]
        # per-fold macro F1 expected under seed=1
        arrExpMacroF1 =[0.16666666666666666,0.4444444444444444,
                        0.48888888888888893,0.6190476190476191, 0.24999999999999997]
        exp.calcula_resultados()
        for i,macro_f1 in enumerate(arrExpMacroF1):
            # calcula_resultados must return Resultado objects, not floats
            self.assertTrue(type(exp.resultados[i]) == Resultado, "O método calcula_resultados deve retornar uma lista de objetos da classe Resultado e não float.")
            print(f"Fold: {i} Macro F1: {exp.resultados[i].macro_f1}")
            self.assertAlmostEqual(macro_f1,exp.resultados[i].macro_f1,msg=f"A Macro F1 do fold {i} não está com o valor esperado.")
class TestObjetivoOtimizacaoRF(unittest.TestCase):
    """Checks the random-forest Optuna objective: expected parameter names,
    seed-pinned best-trial values, and the optimization metric."""
    def test_otimizacao(self):
        fold = Fold(Dados.df_treino,Dados.df_teste,"realClass", num_folds_validacao=3,num_repeticoes_validacao=2)
        otimiza_fold = OtimizacaoObjetivoRandomForest(fold)
        # fixed-seed TPE sampler so the best trial is deterministic
        tpe_sampler = TPESampler(n_startup_trials = 10,seed=1)
        study_TP = optuna.create_study(sampler=tpe_sampler, direction="maximize")
        study_TP.optimize(otimiza_fold, n_trials=30)
        for trial in study_TP.trials:
            print(trial.params)
        # the objective must expose exactly these hyperparameter names
        arr_params_to_test = ["min_samples_split", "max_features", "num_arvores"]
        for param_name in arr_params_to_test:
            self.assertTrue(param_name in study_TP.best_trial.params, f"Não foi encontrado o parametro '{param_name}' certifique-se se você nomeou o parametro devidamente")
        # expected values are tied to seed=1 and the search-space definition
        self.assertAlmostEqual(study_TP.best_trial.params["min_samples_split"],0.19829036364801306,places=5,msg="Otimização não deu resultado esperado")
        self.assertAlmostEqual(study_TP.best_trial.params["max_features"],0.1939553705810037,places=5,msg="Otimização não deu resultado esperado")
        self.assertAlmostEqual(study_TP.best_trial.params["num_arvores"],5,msg="Otimização não deu resultado esperado")
        print(f"Melhor execução: {study_TP.best_trial.params}")
        # hand-built confusion case to pin the optimization metric value
        result = Resultado(np.array([1,1,1,1,0,0,0,0]),np.array([1,1,0,0,1,1,1,0]))
        result_metrica = otimiza_fold.resultado_metrica_otimizacao(result)
        print(f"Resultado: {result_metrica}")
        self.assertAlmostEqual(result_metrica,0.3650793650793651,places=5,msg="Resultado da metrica de otimização não deu resultado esperado")
if __name__ == "__main__":
    # Run the whole test suite when executed as a script.
    unittest.main()
| [
"resultado.Resultado",
"metodo.ScikitLearnAprendizadoDeMaquina",
"numpy.average",
"sklearn.tree.DecisionTreeClassifier",
"unittest.main",
"resultado.Fold.gerar_k_folds",
"numpy.array",
"resultado.Fold",
"warnings.simplefilter",
"pandas.DataFrame",
"optuna.samplers.TPESampler",
"optuna.create_s... | [((517, 566), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2]'], {}), '([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2])\n', (525, 566), True, 'import numpy as np\n'), ((571, 620), 'numpy.array', 'np.array', (['[0, 1, 1, 2, 2, 1, 2, 1, 2, 0, 2, 2, 1]'], {}), '([0, 1, 1, 2, 2, 1, 2, 1, 2, 0, 2, 2, 1])\n', (579, 620), True, 'import numpy as np\n'), ((622, 671), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (630, 671), True, 'import numpy as np\n'), ((681, 730), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (689, 730), True, 'import numpy as np\n'), ((1333, 1599), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 1, 2, 2, 3, 4, 4, 5, 6, 1], 'B': [True, False, True, False, True,\n False, False, False, False, True], 'C': [23, 3, 123, 55, 12, 33, 44, 21,\n 55, 22], 'D': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'realClass': [1, 1, 0, 0,\n 0, 1, 1, 0, 1, 0]}"], {}), "({'A': [1, 1, 2, 2, 3, 4, 4, 5, 6, 1], 'B': [True, False, True,\n False, True, False, False, False, False, True], 'C': [23, 3, 123, 55, \n 12, 33, 44, 21, 55, 22], 'D': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n 'realClass': [1, 1, 0, 0, 0, 1, 1, 0, 1, 0]})\n", (1345, 1599), True, 'import pandas as pd\n'), ((1662, 2041), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5], 'B': [True, False, \n True, True, False, True, True, False, True, True, False, True, True, \n False, True], 'C': [333, -1, 5, 333, -12, 52, 3323, -12, 52, 3323, -41,\n 53, 3333, -12, 51], 'D': [2, 2, 3, 2, 2, 3, 2, 23, 3, 2, 21, 3, 2, 22, \n 3], 'realClass': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1]}"], {}), "({'A': [1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5], 'B': [\n True, False, True, True, False, True, True, False, True, True, False, \n True, True, False, True], 'C': [333, -1, 5, 333, -12, 52, 3323, -12, 52,\n 3323, -41, 
53, 3333, -12, 51], 'D': [2, 2, 3, 2, 2, 3, 2, 23, 3, 2, 21,\n 3, 2, 22, 3], 'realClass': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1]})\n", (1674, 2041), True, 'import pandas as pd\n'), ((2055, 2644), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 1, 2, 2, 3, 4, 4, 5, 6, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5,\n 5, 5], 'B': [True, False, True, False, True, False, False, False, False,\n True, True, False, True, True, False, True, True, False, True, True, \n False, True, True, False, True], 'C': [23, 3, 123, 55, 12, 33, 44, 21, \n 55, 22, 333, -1, 5, 333, -12, 52, 3323, -12, 52, 3323, -41, 53, 3333, -\n 12, 51], 'D': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 2, 2, 3, 2, 23, 3,\n 2, 21, 3, 2, 22, 3], 'realClass': [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, \n 2, 2, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1]}"], {}), "({'A': [1, 1, 2, 2, 3, 4, 4, 5, 6, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4,\n 4, 4, 4, 5, 5, 5], 'B': [True, False, True, False, True, False, False, \n False, False, True, True, False, True, True, False, True, True, False, \n True, True, False, True, True, False, True], 'C': [23, 3, 123, 55, 12, \n 33, 44, 21, 55, 22, 333, -1, 5, 333, -12, 52, 3323, -12, 52, 3323, -41,\n 53, 3333, -12, 51], 'D': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 2, 2, \n 3, 2, 23, 3, 2, 21, 3, 2, 22, 3], 'realClass': [1, 1, 1, 1, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1]})\n", (2067, 2644), True, 'import pandas as pd\n'), ((11606, 11621), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11619, 11621), False, 'import unittest\n'), ((770, 821), 'resultado.Resultado', 'Resultado', (['TestResultado.y', 'TestResultado.predict_y'], {}), '(TestResultado.y, TestResultado.predict_y)\n', (779, 821), False, 'from resultado import Resultado, Fold\n'), ((979, 997), 'numpy.average', 'np.average', (['f1_esp'], {}), '(f1_esp)\n', (989, 997), True, 'import numpy as np\n'), ((1152, 1203), 'resultado.Resultado', 'Resultado', (['TestResultado.y', 'TestResultado.predict_y'], {}), '(TestResultado.y, 
TestResultado.predict_y)\n', (1161, 1203), False, 'from resultado import Resultado, Fold\n'), ((2709, 2747), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2731, 2747), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2765, 2807), 'metodo.ScikitLearnAprendizadoDeMaquina', 'ScikitLearnAprendizadoDeMaquina', (['clf_dtree'], {}), '(clf_dtree)\n', (2796, 2807), False, 'from metodo import ScikitLearnAprendizadoDeMaquina\n'), ((5698, 5808), 'resultado.Fold.gerar_k_folds', 'Fold.gerar_k_folds', (['Dados.df_dados'], {'col_classe': '"""realClass"""', 'val_k': 'k', 'num_repeticoes': 'num_repeticoes', 'seed': '(1)'}), "(Dados.df_dados, col_classe='realClass', val_k=k,\n num_repeticoes=num_repeticoes, seed=1)\n", (5716, 5808), False, 'from resultado import Resultado, Fold\n'), ((7364, 7469), 'resultado.Fold', 'Fold', (['Dados.df_treino', 'Dados.df_teste', '"""realClass"""'], {'num_folds_validacao': '(3)', 'num_repeticoes_validacao': '(2)'}), "(Dados.df_treino, Dados.df_teste, 'realClass', num_folds_validacao=3,\n num_repeticoes_validacao=2)\n", (7368, 7469), False, 'from resultado import Resultado, Fold\n'), ((8258, 8289), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8279, 8289), False, 'import warnings\n'), ((8329, 8388), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'min_samples_split': '(1)', 'random_state': '(1)'}), '(min_samples_split=1, random_state=1)\n', (8351, 8388), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((8465, 8618), 'resultado.Fold.gerar_k_folds', 'Fold.gerar_k_folds', (['Dados.df_dados'], {'val_k': '(5)', 'col_classe': '"""realClass"""', 'num_repeticoes': '(1)', 'seed': '(1)', 'num_folds_validacao': '(3)', 'num_repeticoes_validacao': '(2)'}), "(Dados.df_dados, val_k=5, col_classe='realClass',\n num_repeticoes=1, seed=1, num_folds_validacao=3, num_repeticoes_validacao=2\n )\n", (8483, 
8618), False, 'from resultado import Resultado, Fold\n'), ((10012, 10117), 'resultado.Fold', 'Fold', (['Dados.df_treino', 'Dados.df_teste', '"""realClass"""'], {'num_folds_validacao': '(3)', 'num_repeticoes_validacao': '(2)'}), "(Dados.df_treino, Dados.df_teste, 'realClass', num_folds_validacao=3,\n num_repeticoes_validacao=2)\n", (10016, 10117), False, 'from resultado import Resultado, Fold\n'), ((10134, 10170), 'avaliacao.OtimizacaoObjetivoRandomForest', 'OtimizacaoObjetivoRandomForest', (['fold'], {}), '(fold)\n', (10164, 10170), False, 'from avaliacao import Experimento, OtimizacaoObjetivoArvoreDecisao, OtimizacaoObjetivoRandomForest\n'), ((10193, 10232), 'optuna.samplers.TPESampler', 'TPESampler', ([], {'n_startup_trials': '(10)', 'seed': '(1)'}), '(n_startup_trials=10, seed=1)\n', (10203, 10232), False, 'from optuna.samplers import TPESampler\n'), ((10253, 10315), 'optuna.create_study', 'optuna.create_study', ([], {'sampler': 'tpe_sampler', 'direction': '"""maximize"""'}), "(sampler=tpe_sampler, direction='maximize')\n", (10272, 10315), False, 'import optuna\n'), ((11254, 11288), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 0, 0, 0, 0]'], {}), '([1, 1, 1, 1, 0, 0, 0, 0])\n', (11262, 11288), True, 'import numpy as np\n'), ((11282, 11316), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 1, 1, 1, 0]'], {}), '([1, 1, 0, 0, 1, 1, 1, 0])\n', (11290, 11316), True, 'import numpy as np\n'), ((8799, 8853), 'optuna.samplers.TPESampler', 'optuna.samplers.TPESampler', ([], {'seed': '(1)', 'n_startup_trials': '(3)'}), '(seed=1, n_startup_trials=3)\n', (8825, 8853), False, 'import optuna\n')] |
"""
This module is designed for final visualization code.
"""
# import all the necessory python packages
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import statistics as stats
import pylab as pl
import pandas as pd
# Set specific parameters for the visualizations
# Font sizes used throughout this module (titles, legends, tick labels).
large = 22; med = 16; small = 12
params = {'axes.titlesize': large,
          'legend.fontsize': med,
          'figure.figsize': (16, 10),
          'axes.labelsize': med,
          'xtick.labelsize': med,
          'ytick.labelsize': med,
          'figure.titlesize': large}
# Apply the sizes globally and pick a light seaborn-style theme.
plt.rcParams.update(params)
plt.style.use('seaborn-whitegrid')
sns.set_style("white")
def create_sample_dists(df, y_var=None, x_var=None, categories=None, samplesize=30, numsamples=400):
    """Build bootstrap sampling distributions of the mean for each category.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data containing the `x_var` and `y_var` columns.
    y_var : str
        Name of the numeric column to sample from.
    x_var : str
        Name of the (string) column matched against each category label.
    categories : list of str, optional
        Category labels; rows whose `x_var` value contains the label are
        used. Defaults to no categories (empty result).
    samplesize : int
        Number of observations drawn (with replacement) per sample.
    numsamples : int
        Number of samples, i.e. number of means returned per category.

    Returns
    -------
    list of numpy.ndarray
        One array of `numsamples` sample means per category.
    """
    # Fixed seed keeps the bootstrap reproducible across calls.
    np.random.seed(5)
    # Avoid the mutable-default-argument pitfall (`categories=[]`).
    if categories is None:
        categories = []
    dflist = []
    for cat in categories:
        dftemp = df.loc[df[x_var].str.contains(cat)][y_var]
        # Draw `numsamples` columns of `samplesize` observations each.
        sampler = np.random.choice(dftemp, size=(samplesize, numsamples))
        # Column-wise means -> one sampling distribution of the mean.
        dflist.append(sampler.mean(axis=0))
    return dflist
def overlapping_density(package=None, input_vars=None, target_vars=None, categories=None, output_image_name=None):
    """Draw overlapping density curves for a list of sample distributions.

    `package` selects the backend: 'sns' draws seaborn KDE curves labelled
    by `categories`; 'matplotlib' draws plain line plots. `target_vars` and
    `output_image_name` are currently unused (saving is commented out).
    """
    # Set size of figure
    fig = plt.figure(figsize=(16, 10), dpi=80)
    sns.set(color_codes=True)
    # Starter code for figuring out which package to use
    if package == "sns":
        for counter,value in enumerate(input_vars):
            sns.kdeplot(value, label=categories[counter],shade=True)
            # NOTE(review): the styling calls below re-run on every loop
            # iteration; presumably they were meant to run once after the
            # loop -- confirm before restructuring.
            plt.title('Overlapping mean desnsity', fontsize=large)#, figure = fig)
            # NOTE(review): 'xyz' is passed as the legend labels argument;
            # looks like a leftover placeholder -- verify.
            plt.legend('xyz', fontsize=med)
            plt.xlabel('Means', fontsize=med)#, figure = fig)
            plt.ylabel('Sample counts', fontsize=med)#, figure = fig)
            plt.xticks(fontsize=med)
            plt.yticks(fontsize=med)
    elif package == 'matplotlib':
        for variable in input_vars:
            plt.plot(variable, label=None, linewidth=None, color=None, figure = fig)
    # plt.savefig(f'img/{output_image_name}.png', transparent = True, figure = fig)
    # return fig
def boxplot_plot(package=None, input_vars=None, target_vars=None):
    """Create the (currently empty) figure for a box plot.

    Mirrors the interface of the overlapping density plot: `package` should
    be 'sns' or 'matplotlib' (anything else is an error once implemented),
    `input_vars` are the x categories to plot and `target_vars` the y
    variable being compared. Plotting is not implemented yet; only the
    16x10 inch, 80 dpi figure is created.
    """
    plt.figure(figsize=(16, 10), dpi=80)
def commercial_ticket_plots(df, target_vars = None, input_vars= None, output_image_name=None):
    """Draw citation counts as a stacked bar chart on the current figure.

    Groups `df` by (`input_vars`, `target_vars`), counts the rows in each
    group, and plots the counts per `input_vars` value stacked by
    `target_vars`. `output_image_name` is currently unused; returns None.
    """
    # plot graph
    df.groupby([input_vars,target_vars]).size().unstack().plot(kind='bar',stacked=True)
    # NOTE(review): tick labels come from df[input_vars].unique(); this
    # assumes their order matches the grouped bar order -- confirm.
    row_keys = df[input_vars].unique()
    plt.xlabel('Vehicles colors', fontsize=med)#, figure = fig)
    plt.ylabel('Citation rates', fontsize=med)#, figure = fig)
    plt.title('Commercial vehicles', fontsize=large)#, figure = fig)
    plt.xticks(np.arange(len(row_keys)),row_keys, fontsize=med)#,rotation=0
    # plt.xticks(fontsize=med)
    plt.yticks(fontsize=med)
    # return fig
def color_plot(arr, categories=None, output_image_name=None):
    """Plot ticketed vs. non-ticketed rates as a stacked bar chart.

    `arr` is a 2-D array-like of per-group samples: each row is averaged
    into a "ticketed" rate and its complement becomes the "non-ticketed"
    rate, and the two are stacked per category. `categories` supplies the
    x tick labels (four tick positions are hard-coded).
    `output_image_name` is currently unused.
    """
    ticketed_rates = np.asarray(arr).mean(axis=1)
    stacked_rates = np.vstack((ticketed_rates, 1 - ticketed_rates))
    pd.DataFrame(stacked_rates.T).plot(kind='bar', stacked=True)
    plt.xlabel('Vehicle makes', fontsize=med)
    plt.ylabel('Citation rates', fontsize=med)
    plt.title('Ticketed vs Non-Ticketed vehicle colors')
    # Four fixed tick positions labelled with the given categories.
    plt.xticks(np.arange(4), categories, rotation=0)
    plt.legend(labels=['Ticketed', 'Non-Ticketed'])
def visualization_three(output_image_name):
    """Placeholder for a third visualization; not implemented yet."""
    pass
def visualization_four(output_image_name):
    """Placeholder for a fourth visualization; not implemented yet."""
    pass
| [
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.arange",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"numpy.vstack",
"pandas.DataFrame",
"matplotlib.pyplot.xticks... | [((572, 599), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (591, 599), True, 'import matplotlib.pyplot as plt\n'), ((600, 634), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (613, 634), True, 'import matplotlib.pyplot as plt\n'), ((635, 657), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (648, 657), True, 'import seaborn as sns\n'), ((762, 779), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (776, 779), True, 'import numpy as np\n'), ((1423, 1459), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)', 'dpi': '(80)'}), '(figsize=(16, 10), dpi=80)\n', (1433, 1459), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1489), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (1471, 1489), True, 'import seaborn as sns\n'), ((2954, 2990), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)', 'dpi': '(80)'}), '(figsize=(16, 10), dpi=80)\n', (2964, 2990), True, 'import matplotlib.pyplot as plt\n'), ((3305, 3348), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Vehicles colors"""'], {'fontsize': 'med'}), "('Vehicles colors', fontsize=med)\n", (3315, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3369, 3411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Citation rates"""'], {'fontsize': 'med'}), "('Citation rates', fontsize=med)\n", (3379, 3411), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3480), 'matplotlib.pyplot.title', 'plt.title', (['"""Commercial vehicles"""'], {'fontsize': 'large'}), "('Commercial vehicles', fontsize=large)\n", (3441, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3609, 3633), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'med'}), '(fontsize=med)\n', (3619, 3633), True, 'import matplotlib.pyplot as plt\n'), ((3789, 3824), 'numpy.vstack', 'np.vstack', (['(arr_list, 1 
- arr_list)'], {}), '((arr_list, 1 - arr_list))\n', (3798, 3824), True, 'import numpy as np\n'), ((3832, 3855), 'pandas.DataFrame', 'pd.DataFrame', (['arr_lst.T'], {}), '(arr_lst.T)\n', (3844, 3855), True, 'import pandas as pd\n'), ((3929, 3970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Vehicle makes"""'], {'fontsize': 'med'}), "('Vehicle makes', fontsize=med)\n", (3939, 3970), True, 'import matplotlib.pyplot as plt\n'), ((3975, 4017), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Citation rates"""'], {'fontsize': 'med'}), "('Citation rates', fontsize=med)\n", (3985, 4017), True, 'import matplotlib.pyplot as plt\n'), ((4022, 4074), 'matplotlib.pyplot.title', 'plt.title', (['"""Ticketed vs Non-Ticketed vehicle colors"""'], {}), "('Ticketed vs Non-Ticketed vehicle colors')\n", (4031, 4074), True, 'import matplotlib.pyplot as plt\n'), ((4147, 4194), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Ticketed', 'Non-Ticketed']"}), "(labels=['Ticketed', 'Non-Ticketed'])\n", (4157, 4194), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1075), 'numpy.random.choice', 'np.random.choice', (['dftemp'], {'size': '(samplesize, numsamples)'}), '(dftemp, size=(samplesize, numsamples))\n', (1036, 1075), True, 'import numpy as np\n'), ((4090, 4102), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (4099, 4102), True, 'import numpy as np\n'), ((1637, 1694), 'seaborn.kdeplot', 'sns.kdeplot', (['value'], {'label': 'categories[counter]', 'shade': '(True)'}), '(value, label=categories[counter], shade=True)\n', (1648, 1694), True, 'import seaborn as sns\n'), ((1706, 1760), 'matplotlib.pyplot.title', 'plt.title', (['"""Overlapping mean desnsity"""'], {'fontsize': 'large'}), "('Overlapping mean desnsity', fontsize=large)\n", (1715, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1820), 'matplotlib.pyplot.legend', 'plt.legend', (['"""xyz"""'], {'fontsize': 'med'}), "('xyz', fontsize=med)\n", (1799, 1820), True, 'import matplotlib.pyplot as plt\n'), 
((1833, 1866), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Means"""'], {'fontsize': 'med'}), "('Means', fontsize=med)\n", (1843, 1866), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1936), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sample counts"""'], {'fontsize': 'med'}), "('Sample counts', fontsize=med)\n", (1905, 1936), True, 'import matplotlib.pyplot as plt\n'), ((1975, 1999), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 'med'}), '(fontsize=med)\n', (1985, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2036), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'med'}), '(fontsize=med)\n', (2022, 2036), True, 'import matplotlib.pyplot as plt\n'), ((3746, 3761), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (3756, 3761), True, 'import numpy as np\n'), ((2132, 2202), 'matplotlib.pyplot.plot', 'plt.plot', (['variable'], {'label': 'None', 'linewidth': 'None', 'color': 'None', 'figure': 'fig'}), '(variable, label=None, linewidth=None, color=None, figure=fig)\n', (2140, 2202), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import os
import importlib.util
import logging
import random
import numpy as np
from renormalizer.utils.utils import sizeof_fmt
logger = logging.getLogger(__name__)
# Environment variable used to select the CUDA device id.
GPU_KEY = "RENO_GPU"
USE_GPU = False
if importlib.util.find_spec("cupy"):
    # cupy is importable: run on the GPU and alias it as `xp`.
    import cupy as xp
    gpu_id = os.environ.get(GPU_KEY, 0)
    logger.info(f"Using GPU: {gpu_id}")
    xp.cuda.Device(gpu_id).use()
    USE_GPU = True
else:
    gpu_id = os.environ.get(GPU_KEY, None)
    if gpu_id is not None:
        logger.warning(f"Cupy is not installed. Setting {GPU_KEY} to {gpu_id} has no effect.")
    # Fall back to numpy under the same alias so downstream code can stay
    # backend-agnostic via `xp`.
    xp = np
#USE_GPU = False
#xp = np
# Backend name exposed to contraction code (presumably opt_einsum -- confirm).
if not USE_GPU:
    logger.info("use numpy as backend")
    OE_BACKEND = "numpy"
else:
    logger.info("use cupy as backend")
    OE_BACKEND = "cupy"
# Seed every RNG in use for reproducible runs.
xp.random.seed(2019)
np.random.seed(9012)
random.seed(1092)
class Backend:
    """Global numeric-backend configuration.

    Holds the floating-point precision (32/64 bit) used throughout the
    package and offers GPU memory helpers that are no-ops on the numpy
    backend. Exactly one instance (the module-level ``backend``) may exist.
    """

    _init_once_flag = False

    def __new__(cls):
        # Guard against accidental re-construction of the singleton.
        if cls._init_once_flag:
            raise RuntimeError("Backend should only be initialized once")
        cls._init_once_flag = True
        return super().__new__(cls)

    def __init__(self):
        # Once the first matrix product has run, the dtype is frozen
        # (see the property setters below).
        self.first_mp = False
        self._real_dtype = None
        self._complex_dtype = None
        self.use_64bits()

    def free_all_blocks(self):
        """Release cached GPU memory blocks (no-op without a GPU)."""
        if USE_GPU:
            xp.get_default_memory_pool().free_all_blocks()

    def log_memory_usage(self, header=""):
        """Log used/total GPU memory, prefixed by `header` (no-op without a GPU)."""
        if not USE_GPU:
            return
        mempool = xp.get_default_memory_pool()
        logger.info(f"{header} GPU memory used/Total: {sizeof_fmt(mempool.used_bytes())}/{sizeof_fmt(mempool.total_bytes())}")

    def sync(self):
        """Block until the GPU finishes queued work (single-GPU only)."""
        if USE_GPU:
            xp.cuda.device.Device(gpu_id).synchronize()

    def use_32bits(self):
        """Switch to float32/complex64 precision."""
        logger.info("use 32 bits")
        self.dtypes = (np.float32, np.complex64)

    def use_64bits(self):
        """Switch to float64/complex128 precision."""
        logger.info("use 64 bits")
        self.dtypes = (np.float64, np.complex128)

    @property
    def is_32bits(self) -> bool:
        return self._real_dtype == np.float32

    @property
    def real_dtype(self):
        return self._real_dtype

    @real_dtype.setter
    def real_dtype(self, tp):
        # Refuse changes after the first matrix product has been performed.
        if self.first_mp:
            raise RuntimeError("Can't alter backend data type")
        self._real_dtype = tp

    @property
    def complex_dtype(self):
        return self._complex_dtype

    @complex_dtype.setter
    def complex_dtype(self, tp):
        if self.first_mp:
            raise RuntimeError("Can't alter backend data type")
        self._complex_dtype = tp

    @property
    def dtypes(self):
        return self.real_dtype, self.complex_dtype

    @dtypes.setter
    def dtypes(self, target):
        self.real_dtype, self.complex_dtype = target
backend = Backend()
| [
"logging.getLogger",
"cupy.random.seed",
"cupy.cuda.Device",
"cupy.cuda.device.Device",
"os.environ.get",
"random.seed",
"numpy.random.seed",
"cupy.get_default_memory_pool"
] | [((166, 193), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (183, 193), False, 'import logging\n'), ((790, 810), 'cupy.random.seed', 'xp.random.seed', (['(2019)'], {}), '(2019)\n', (804, 810), True, 'import cupy as xp\n'), ((811, 831), 'numpy.random.seed', 'np.random.seed', (['(9012)'], {}), '(9012)\n', (825, 831), True, 'import numpy as np\n'), ((832, 849), 'random.seed', 'random.seed', (['(1092)'], {}), '(1092)\n', (843, 849), False, 'import random\n'), ((307, 333), 'os.environ.get', 'os.environ.get', (['GPU_KEY', '(0)'], {}), '(GPU_KEY, 0)\n', (321, 333), False, 'import os\n'), ((445, 474), 'os.environ.get', 'os.environ.get', (['GPU_KEY', 'None'], {}), '(GPU_KEY, None)\n', (459, 474), False, 'import os\n'), ((1386, 1414), 'cupy.get_default_memory_pool', 'xp.get_default_memory_pool', ([], {}), '()\n', (1412, 1414), True, 'import cupy as xp\n'), ((1554, 1582), 'cupy.get_default_memory_pool', 'xp.get_default_memory_pool', ([], {}), '()\n', (1580, 1582), True, 'import cupy as xp\n'), ((378, 400), 'cupy.cuda.Device', 'xp.cuda.Device', (['gpu_id'], {}), '(gpu_id)\n', (392, 400), True, 'import cupy as xp\n'), ((1797, 1826), 'cupy.cuda.device.Device', 'xp.cuda.device.Device', (['gpu_id'], {}), '(gpu_id)\n', (1818, 1826), True, 'import cupy as xp\n')] |
#!/usr/bin/env python
"""Provides some common functionality for cop robots.
Much of a cop's functionality is defined by the ``robot`` module, but
this module provides cops with the tools it uses to hunt the robbers,
such as:
* sensors (both human and camera) to collect environment information;
* a fusion_engine (either particle or gaussian mixture) to make sense
of the environment information;
* animation to display its understanding of the world to the human.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
import numpy as np
from shapely.geometry import Point
from cops_and_robots.robo_tools.robot import Robot, ImaginaryRobot
from cops_and_robots.robo_tools.iRobot_create import iRobotCreate
from cops_and_robots.robo_tools.planner import MissionPlanner
from cops_and_robots.fusion.fusion_engine import FusionEngine
from cops_and_robots.fusion.camera import Camera
from cops_and_robots.robo_tools.questioner import Questioner
from cops_and_robots.human_tools.human import Human
from cops_and_robots.map_tools.map_elements import MapObject
class Cop(Robot):
    """The Cop subclass of the generic robot type.
    Cops extend the functionality of basic robots, providing sensing (both
    camera-based and human) and a fusion engine.
    .. image:: img/classes_Cop.png
    Parameters
    ----------
    name : str, optional
        The cop's name (defaults to 'Deckard').
    pose : list of float, optional
        The cop's initial [x, y, theta] (defaults to [0, 0.5, 90]).
    fusion_engine_type : {'particle','gauss_sum'}
        For particle filters or gaussian mixture filters, respectively.
    planner_type: {'simple', 'particle', 'MAP'}
        The cop's own type of planner.
    robber_model: {'stationary', 'random walk', 'clockwise',
        'counterclockwise'}
        The type of planner this cop believes robbers use.
    Attributes
    ----------
    fusion_engine
    planner
    found_robbers : dict
        All robbers found so far.
    sensors : dict
        All sensors owned by the cop.
    mission_statuses : {'searching', 'capturing', 'retired'}
        The possible mission-level statuses of any cop, where:
            * `searching` means the cop is exploring the environment;
            * `capturing` means the cop has detected a robber and is moving
            to capture it;
            * `retired` means all robbers have been captured.
    """
    # Per-subsystem default configs; merged with the *_cfg kwargs in __init__.
    mission_planner_defaults = {}
    goal_planner_defaults = {'type_': 'greedy',
                             'use_target_as_goal': False}
    path_planner_defaults = {'type_': 'direct'}
    questioner_defaults = {}
    fusion_engine_defaults = {'probability_type': 'gauss sum',
                              'use_STM': False,
                              'use_velocity': False,
                              }
    def __init__(self,
                 name,
                 pose=[0, 0, 90],
                 pose_source='python',
                 web_interface_topic='python',
                 ask_every_n=0,
                 robber_model='static',
                 other_robot_names={},
                 mission_planner_cfg={},
                 goal_planner_cfg={},
                 path_planner_cfg={},
                 map_cfg={},
                 fusion_engine_cfg={},
                 camera_cfg={},
                 questioner_cfg={},
                 rosbag_process=None,
                 **kwargs):
        # Use class defaults for kwargs not included
        mp_cfg = Cop.mission_planner_defaults.copy()
        mp_cfg.update(mission_planner_cfg)
        gp_cfg = Cop.goal_planner_defaults.copy()
        gp_cfg.update(goal_planner_cfg)
        pp_cfg = Cop.path_planner_defaults.copy()
        pp_cfg.update(path_planner_cfg)
        # q_cfg is kept on self until add_human_sensor consumes it.
        self.q_cfg = Cop.questioner_defaults.copy()
        self.q_cfg.update(questioner_cfg)
        fe_cfg = Cop.fusion_engine_defaults.copy()
        fe_cfg.update(fusion_engine_cfg)
        # Superclass and compositional attributes
        super(Cop, self).__init__(name,
                                  pose=pose,
                                  pose_source=pose_source,
                                  create_mission_planner=False,
                                  goal_planner_cfg=gp_cfg,
                                  path_planner_cfg=pp_cfg,
                                  map_cfg=map_cfg,
                                  color_str='darkgreen')
        # Tracking attributes
        self.other_robot_names = other_robot_names
        self.missing_robber_names = self.other_robot_names['robbers']
        try:
            self.distracting_robot_names = self.other_robot_names['distractors']
        except KeyError:
            self.distracting_robot_names = []
            logging.debug('No distractors.')
        self.found_robbers = {}
        # Create mission planner
        self.mission_planner = CopMissionPlanner(self, **mp_cfg)
        # Fusion and sensor attributes
        # <>TODO: Fusion Engine owned and refrenced from imaginary robber?
        self.fusion_engine = FusionEngine(fe_cfg['probability_type'],
                                          self.missing_robber_names,
                                          self.map.feasible_layer,
                                          robber_model,
                                          rosbag_process=rosbag_process,
                                          use_STM=fe_cfg['use_STM'],
                                          use_velocity=fe_cfg['use_velocity'],
                                          )
        self.sensors = {}
        self.ask_every_n = ask_every_n
        self.sensors['camera'] = Camera((0, 0, 0),
                                        element_dict=self.map.element_dict,
                                        **camera_cfg)
        self.map.dynamic_elements.append(self.sensors['camera'].viewcone)
        # Add self to map
        self.map.add_cop(self.map_obj)
        # Make others
        self.make_others()
    def add_human_sensor(self, human_sensor):
        # Grab questioner config then unbloat Cop's attribute space
        q_cfg = self.q_cfg
        del self.q_cfg
        # Add human sensor after robbers have been made
        self.sensors['human'] = human_sensor
        # Configure questioner
        # Drop targets that are not known robbers before handing the order over.
        target_order = q_cfg['target_order']
        for target in target_order:
            if target not in self.other_robot_names['robbers']:
                target_order.remove(target)
        del q_cfg['target_order']
        self.questioner = Questioner(human_sensor=self.sensors['human'],
                                    target_order=target_order,
                                    **q_cfg)
    def make_others(self):
        # <>TODO: Make generic, so each robot has an idea of all others
        # <>TODO: Move to back to Robot
        """Generate robot objects for all other robots.
        Create personal belief (not necessarily true!) of other robots,
        largely regarding their map positions. Their positions are
        known to the 'self' robot, but this function will be expanded
        in the future to include registration between robots: i.e.,
        optional pose and information sharing instead of predetermined
        sharing.
        """
        # Robot MapObject
        # All imaginary robots share the same circular footprint (iRobot Create).
        shape_pts = Point([0, 0, 0]).buffer(iRobotCreate.DIAMETER / 2)\
            .exterior.coords
        # <>TODO: Implement imaginary class for more robust models
        self.missing_robbers = {}
        for name in self.missing_robber_names:
            self.missing_robbers[name] = ImaginaryRobot(name)
            # Add robber objects to map
            self.missing_robbers[name].map_obj = MapObject(name,
                                                           shape_pts[:],
                                                           has_relations=False,
                                                           blocks_camera=False,
                                                           color_str='none')
            # <>TODO: allow no display individually for each robber
            self.map.add_robber(self.missing_robbers[name].map_obj)
            # All will be at 0,0,0 until actually pose is given.
            # init_pose =
            # self.missing_robbers[name].map_obj.move_absolute(init_pose)
        self.distracting_robots = {}
        for name in self.distracting_robot_names:
            self.distracting_robots[name] = ImaginaryRobot(name)
            self.distracting_robots[name].map_obj = MapObject(name,
                                                              shape_pts[:],
                                                              has_relations=False,
                                                              blocks_camera=False,
                                                              color_str='none')
            self.map.add_robot(self.distracting_robots[name].map_obj)
            # <>TODO: Add config similar to plot_robbers in map_cfg
            # Distractors stay hidden until spotted (see update()).
            self.distracting_robots[name].map_obj.visible = False
    def update(self, i=0):
        super(Cop, self).update(i=i)
        # Update sensor and fusion information
        # irobber - Imaginary robber
        for irobber in self.missing_robbers.values():
            point = Point(irobber.pose2D.pose[0:2])
            # Try to visually spot a robber
            if self.sensors['camera'].viewcone.shape.contains(point):
                # Quick and dirty hack to continue experiment.
                logging.info('{} found!'.format(irobber.name))
                # Capture handling below is deliberately disabled (dead code)
                # so the experiment keeps running after a sighting.
                if False:
                    self.map.found_robber(irobber.map_obj)
                    logging.info('{} captured!'.format(irobber.name))
                    self.mission_planner.found_robber(irobber.name)
                    self.fusion_engine.filters[irobber.name].robber_detected(irobber.pose2D.pose)
                    self.found_robbers.update({irobber.name:
                                               self.missing_robbers.pop(irobber.name)})
                    self.questioner.remove_target(irobber.name)
            # Update robber's shapes
            else:
                self.missing_robbers[irobber.name].map_obj.move_absolute(
                    irobber.pose2D.pose)
        for idistractor in self.distracting_robots.values():
            point = Point(idistractor.pose2D.pose[0:2])
            # Try to visually spot a robot
            if self.sensors['camera'].viewcone.shape.contains(point):
                logging.info('{} found, but it is not a robber!'
                             .format(idistractor.name))
                if not idistractor.map_obj.visible:
                    idistractor.map_obj.visible = True
                    idistractor.map_obj.color = 'cornflowerblue'
            # Update robber's shapes
            self.distracting_robots[idistractor.name].map_obj.move_absolute(
                idistractor.pose2D.pose)
        # Update probability model
        # save_file = 'data/ACC 2016/output/'
        save_file = None
        self.fusion_engine.update(self.pose2D.pose, self.sensors,
                                  self.missing_robbers)
        # Ask a question
        # <>TODO: Key error, make sure target is reassigned.
        # NOTE(review): dict.iteritems() is Python 2 API -- this module
        # presumably targets Python 2; confirm before porting to Python 3.
        priors = {}
        for name, filter_ in self.fusion_engine.filters.iteritems():
            if name == 'combined':
                continue
            priors[name] = filter_.probability
            if not hasattr(priors[name], 'pos'):
                priors[name]._discretize(bounds=self.map.bounds, res=0.1)
        #<>TODO: Generalize
        # Hard-coded to the robber 'Roy' for the current experiment.
        pos = self.missing_robbers['Roy'].pose2D.pose[:2]
        pos = np.array([pos])
        robot_positions = {'Roy': pos}
        self.questioner.ask(priors, i, robot_positions=robot_positions)
class CopMissionPlanner(MissionPlanner):
    """Mission-level planner for a Cop robot.

    Tracks which robber the cop is currently hunting (``self.target``) and
    transitions the cop between the 'searching', 'capturing' and 'retired'
    mission statuses.
    """
    mission_statuses = ['searching', 'capturing', 'retired']
    def __init__(self, robot, mission_status='searching', target_order=None):
        # The first entry of the target order (if any) is the initial target.
        if target_order is None:
            target = None
        else:
            target = target_order[0]
        self.target_order = target_order
        super(CopMissionPlanner, self).__init__(robot,
                                                mission_status=mission_status,
                                                target=target)
    def update(self):
        """Update the cop's high-level mission status.
        Update the cop's status from one of:
            1. retired (all robbers have been captured)
            2. searching (moving around to gather information)
        """
        # <>TODO: @Matt Hacky, reimplement. Human sensor method needs clean up in general
        try:
            sensor_target = self.robot.sensors['human'].statement.target
        except AttributeError:
            # No human statement available yet.
            sensor_target = ''
        if self.target is None or sensor_target == self.target:
            self.robot.goal_planner.goal_status = 'without a goal'
            logging.info('Recieved an update, replanning goal')
        # Use `==`, not `is`: identity of equal strings/ints is a CPython
        # implementation detail, not a language guarantee.
        if self.mission_status == 'searching':
            if len(self.robot.missing_robbers) == 0:
                self.mission_status = 'retired'
                self.stop_all_movement()
    def found_robber(self, name):
        """Remove a captured robber from the target order and advance.

        Parameters
        ----------
        name : str
            Name of the robber that was captured.
        """
        # Without an explicit target order, fall back to default behavior.
        if self.target_order is None:
            return
        try:
            self.target_order.remove(name)
        except ValueError:
            # `name` was never in (or already removed from) the order.
            logging.info('{} is not in target order'.format(name))
            return
        if self.target_order == []:
            self.target_order = None
            logging.info('Completed target order')
        else:
            self.target = self.target_order[0]
            logging.info('New target: {}'.format(self.target))
| [
"logging.debug",
"cops_and_robots.fusion.fusion_engine.FusionEngine",
"cops_and_robots.robo_tools.robot.ImaginaryRobot",
"shapely.geometry.Point",
"numpy.array",
"cops_and_robots.map_tools.map_elements.MapObject",
"logging.info",
"cops_and_robots.fusion.camera.Camera",
"cops_and_robots.robo_tools.qu... | [((5248, 5458), 'cops_and_robots.fusion.fusion_engine.FusionEngine', 'FusionEngine', (["fe_cfg['probability_type']", 'self.missing_robber_names', 'self.map.feasible_layer', 'robber_model'], {'rosbag_process': 'rosbag_process', 'use_STM': "fe_cfg['use_STM']", 'use_velocity': "fe_cfg['use_velocity']"}), "(fe_cfg['probability_type'], self.missing_robber_names, self.\n map.feasible_layer, robber_model, rosbag_process=rosbag_process,\n use_STM=fe_cfg['use_STM'], use_velocity=fe_cfg['use_velocity'])\n", (5260, 5458), False, 'from cops_and_robots.fusion.fusion_engine import FusionEngine\n'), ((5844, 5911), 'cops_and_robots.fusion.camera.Camera', 'Camera', (['(0, 0, 0)'], {'element_dict': 'self.map.element_dict'}), '((0, 0, 0), element_dict=self.map.element_dict, **camera_cfg)\n', (5850, 5911), False, 'from cops_and_robots.fusion.camera import Camera\n'), ((6731, 6817), 'cops_and_robots.robo_tools.questioner.Questioner', 'Questioner', ([], {'human_sensor': "self.sensors['human']", 'target_order': 'target_order'}), "(human_sensor=self.sensors['human'], target_order=target_order,\n **q_cfg)\n", (6741, 6817), False, 'from cops_and_robots.robo_tools.questioner import Questioner\n'), ((11894, 11909), 'numpy.array', 'np.array', (['[pos]'], {}), '([pos])\n', (11902, 11909), True, 'import numpy as np\n'), ((7782, 7802), 'cops_and_robots.robo_tools.robot.ImaginaryRobot', 'ImaginaryRobot', (['name'], {}), '(name)\n', (7796, 7802), False, 'from cops_and_robots.robo_tools.robot import Robot, ImaginaryRobot\n'), ((7892, 7985), 'cops_and_robots.map_tools.map_elements.MapObject', 'MapObject', (['name', 'shape_pts[:]'], {'has_relations': '(False)', 'blocks_camera': '(False)', 'color_str': '"""none"""'}), "(name, shape_pts[:], has_relations=False, blocks_camera=False,\n color_str='none')\n", (7901, 7985), False, 'from cops_and_robots.map_tools.map_elements import MapObject\n'), ((8650, 8670), 'cops_and_robots.robo_tools.robot.ImaginaryRobot', 
'ImaginaryRobot', (['name'], {}), '(name)\n', (8664, 8670), False, 'from cops_and_robots.robo_tools.robot import Robot, ImaginaryRobot\n'), ((8723, 8816), 'cops_and_robots.map_tools.map_elements.MapObject', 'MapObject', (['name', 'shape_pts[:]'], {'has_relations': '(False)', 'blocks_camera': '(False)', 'color_str': '"""none"""'}), "(name, shape_pts[:], has_relations=False, blocks_camera=False,\n color_str='none')\n", (8732, 8816), False, 'from cops_and_robots.map_tools.map_elements import MapObject\n'), ((9489, 9520), 'shapely.geometry.Point', 'Point', (['irobber.pose2D.pose[0:2]'], {}), '(irobber.pose2D.pose[0:2])\n', (9494, 9520), False, 'from shapely.geometry import Point\n'), ((10550, 10585), 'shapely.geometry.Point', 'Point', (['idistractor.pose2D.pose[0:2]'], {}), '(idistractor.pose2D.pose[0:2])\n', (10555, 10585), False, 'from shapely.geometry import Point\n'), ((13262, 13313), 'logging.info', 'logging.info', (['"""Recieved an update, replanning goal"""'], {}), "('Recieved an update, replanning goal')\n", (13274, 13313), False, 'import logging\n'), ((4940, 4972), 'logging.debug', 'logging.debug', (['"""No distractors."""'], {}), "('No distractors.')\n", (4953, 4972), False, 'import logging\n'), ((13854, 13892), 'logging.info', 'logging.info', (['"""Completed target order"""'], {}), "('Completed target order')\n", (13866, 13892), False, 'import logging\n'), ((7511, 7527), 'shapely.geometry.Point', 'Point', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (7516, 7527), False, 'from shapely.geometry import Point\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LightSource
from mpl_toolkits.basemap import Basemap, shiftgrid, cm
from osgeo import gdal
print("Reading csv")
# Columns used below: 0 and 1 feed m(row[1], row[0]) as (lon, lat) -- so
# presumably 0 = latitude, 1 = longitude; 2 = numeric date key;
# 3 = polarity value. Confirm against the producer of happiness.csv.
csv = np.genfromtxt('data/happiness.csv', delimiter=',')
dataByDate = {}
print("Grouping rows by date")
for row in csv:
    dateKey = str(int(row[2]))
    if dateKey not in dataByDate:
        dataByDate[dateKey] = []
    dataByDate[dateKey].append([row[0], row[1], row[3]])
print("Plotting data")
for dateKey in dataByDate.keys():
    print("Generating plot for " + dateKey)
    fig = plt.figure()
    # Main axis
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # Title slicing assumes a 10-digit YYYYMMDDHH key -- TODO confirm.
    ax.set_title(dateKey[0:4] + "-" + dateKey[4:6] + "-" + dateKey[6:8] + " H" + dateKey[8:10])
    # Europe bounding box
    lon1 = -27
    lon2 = 45
    lat1 = 33
    lat2 = 73
    # Create the map
    m = Basemap(resolution="l", projection="merc", ax=ax, lat_ts=(lon1 + lon2) / 2,
                llcrnrlat=lat1, urcrnrlat=lat2, llcrnrlon=lon1, urcrnrlon=lon2)
    # Draw regions
    m.drawcoastlines()
    m.drawcountries()
    # Draw parallels and meridians
    m.drawparallels(np.arange(0, 90, 10), labels=[1, 0, 0, 1])
    m.drawmeridians(np.arange(0, 360, 15), labels=[1, 0, 0, 1])
    # Draw the levels as a color mesh
    # Each grid cell is `scalef` projected map units on a side.
    scalef = 100000
    shape = (int(m.urcrnry / scalef), int(m.urcrnrx / scalef))
    data = np.zeros(shape=shape, dtype=float)
    for row in dataByDate[dateKey]:
        x, y = m(row[1], row[0])
        x = int(x / scalef)
        y = int(y / scalef)
        if y >= 0 and y < shape[0] and x >= 0 and x < shape[1]:
            # NOTE(review): running pairwise average -- this weights later
            # samples more heavily and averages the first sample with the 0
            # initial cell value; verify this is the intended aggregation.
            data[y][x] = (data[y][x] + row[2]) / 2
    x = np.linspace(m.llcrnrx, m.urcrnrx, data.shape[1])
    y = np.linspace(m.llcrnry, m.urcrnry, data.shape[0])
    xx, yy = np.meshgrid(x, y)
    colormesh = m.pcolormesh(xx, yy, data, vmin=0.0, vmax=1.0, cmap="plasma", shading="gouraud")
    # Add a color bar
    cb = m.colorbar(colormesh, label="Polarity")
    # Save the plot
    plt.savefig("./out/" + dateKey + ".png")
    plt.close()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.zeros",
"mpl_toolkits.basemap.Basemap",
"numpy.linspace",
"numpy.meshgrid",
"numpy.genfromtxt",
"numpy.arange"
] | [((201, 251), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/happiness.csv"""'], {'delimiter': '""","""'}), "('data/happiness.csv', delimiter=',')\n", (214, 251), True, 'import numpy as np\n'), ((583, 595), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (593, 595), True, 'import matplotlib.pyplot as plt\n'), ((867, 1010), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'resolution': '"""l"""', 'projection': '"""merc"""', 'ax': 'ax', 'lat_ts': '((lon1 + lon2) / 2)', 'llcrnrlat': 'lat1', 'urcrnrlat': 'lat2', 'llcrnrlon': 'lon1', 'urcrnrlon': 'lon2'}), "(resolution='l', projection='merc', ax=ax, lat_ts=(lon1 + lon2) / 2,\n llcrnrlat=lat1, urcrnrlat=lat2, llcrnrlon=lon1, urcrnrlon=lon2)\n", (874, 1010), False, 'from mpl_toolkits.basemap import Basemap, shiftgrid, cm\n'), ((1384, 1418), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape', 'dtype': 'float'}), '(shape=shape, dtype=float)\n', (1392, 1418), True, 'import numpy as np\n'), ((1669, 1717), 'numpy.linspace', 'np.linspace', (['m.llcrnrx', 'm.urcrnrx', 'data.shape[1]'], {}), '(m.llcrnrx, m.urcrnrx, data.shape[1])\n', (1680, 1717), True, 'import numpy as np\n'), ((1726, 1774), 'numpy.linspace', 'np.linspace', (['m.llcrnry', 'm.urcrnry', 'data.shape[0]'], {}), '(m.llcrnry, m.urcrnry, data.shape[0])\n', (1737, 1774), True, 'import numpy as np\n'), ((1789, 1806), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1800, 1806), True, 'import numpy as np\n'), ((2001, 2041), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./out/' + dateKey + '.png')"], {}), "('./out/' + dateKey + '.png')\n", (2012, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2046, 2057), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2055, 2057), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1164), 'numpy.arange', 'np.arange', (['(0)', '(90)', '(10)'], {}), '(0, 90, 10)\n', (1153, 1164), True, 'import numpy as np\n'), ((1207, 1228), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(15)'], {}), '(0, 
360, 15)\n', (1216, 1228), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the TU Delft nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BAS NIJHOLT BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# General functions used for saving, processing, and generating data.
from datetime import timedelta
import functools
from glob import glob
import inspect
from itertools import product
import os
import subprocess
import sys
import numpy as np
import pandas as pd
from toolz import partition_all
from multiprocessing import pool
assert sys.version_info >= (3, 6), 'Use Python ≥3.6'
def upd(d, **kwargs):
    """Return a shallow copy of dict `d` with `kwargs` merged in.

    The input dict is left unmodified; the copy preserves `d`'s type
    (via its own ``copy`` method).
    """
    merged = d.copy()
    merged.update(kwargs)
    return merged
def run_simulation(func, vals, parameters, fname_i, N=None, overwrite=False):
    """Run a simulation where one loops over `vals`. The simulation
    yields len(vals) results, but by using `N`, you can split it up
    in parts of length N.
    Parameters
    ----------
    func : function
        Function that takes a list of arguments: `vals`.
    vals : list
        Arguments for `func`.
    parameters : dict
        Dictionary that is saved with the data, used for constant
        parameters.
    fname_i : str
        Name for the resulting HDF5 files. If the simulation is
        split up in parts by using the `N` argument, it needs to
        be a formatteble string, for example 'file_{}'.
    N : int
        Number of results in each pandas.DataFrame.
    overwrite : bool
        Overwrite the file even if it already exists.
    Notes
    -----
    An earlier version took an ipyparallel `lview`
    (LoadBalancedView) argument; the mapping here goes through
    `multiprocessing.pool` instead (see NOTE below).
    """
    # Default N is effectively "no splitting": one part holds everything.
    if N is None:
        N = 1000000
    if len(vals) > N:
        raise Exception('You need to split up vals in smaller parts')
    # Ceiling division: how many output files will be produced.
    N_files = len(vals) // N + (0 if len(vals) % N == 0 else 1)
    print('`vals` will be split in {} files.'.format(N_files))
    time_elapsed = 0
    parts_done = 0
    for i, chunk in enumerate(partition_all(N, vals)):
        fname = fname_i  # per-part naming disabled: #.replace('{}', '{:03d}') .format(i)
        print('Busy with file: {}.'.format(fname))
        if not os.path.exists(fname) or overwrite:
            # NOTE(review): `pool` is the `multiprocessing.pool` *module*;
            # `map_async` normally lives on a Pool instance — confirm this
            # call works in the deployment environment.
            map_async = pool.map_async(func, chunk)
            # map_async = lview.map_async(func, chunk)
            map_async.wait_interactive()
            result = map_async.result()
            df = pd.DataFrame(result)
            df = df.assign(**parameters)
            # Record the git commit for reproducibility of the data.
            df = df.assign(git_hash=get_git_revision_hash())
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            df.to_hdf(fname, 'all_data', mode='w', complib='zlib', complevel=9)
            # Print useful information
            N_files_left = N_files - (i + 1)
            parts_done += 1
            time_elapsed += map_async.elapsed
            # Simple ETA: mean time per finished part times parts remaining.
            time_left = timedelta(seconds=(time_elapsed / parts_done) *
                                  N_files_left)
            print_str = ('Saved {}, {} more files to go, {} time left '
                         'before everything is done.')
            print(print_str.format(fname, N_files_left, time_left))
        else:
            print('File: {} was already done.'.format(fname))
def change_var_name(func, from_name, to_name):
    """Rename parameter `from_name` of `func` to `to_name`.

    Returns a wrapper whose reported signature (``inspect.signature``)
    shows `to_name` in place of `from_name`; calling the wrapper with
    ``to_name=...`` forwards the value to `func` as ``from_name=...``.

    Parameters
    ----------
    func : callable
        Function whose parameter should be renamed.
    from_name : str
        Name of the existing parameter of `func`.
    to_name : str
        New, externally visible name for that parameter.

    Returns
    -------
    callable
        Wrapper around `func` with the renamed signature.
    """
    sig = inspect.signature(func)
    pars = [(name, value) for name, value in sig.parameters.items()]
    new_pars = []
    for k, v in pars:
        # BUGFIX: compare with `!=`, not `is not` — string identity only
        # works by accident of CPython interning and is not guaranteed.
        if k != from_name:
            new_pars.append(v)
        else:
            new_pars.append(inspect.Parameter(to_name, v.kind,
                                              default=v.default))

    def wrapped(*args, **kwargs):
        # Translate the renamed keyword back to what `func` expects.
        kwargs[from_name] = kwargs.pop(to_name)
        return func(*args, **kwargs)

    wrapped.__signature__ = inspect.Signature(parameters=new_pars)
    return wrapped
def parse_params(params):
    """Evaluate string values of `params` in place, when possible.

    Each string value is handed to ``eval``; values whose evaluation
    raises ``NameError`` are kept as the original string.  WARNING:
    ``eval`` on untrusted input is unsafe — only use on trusted configs.
    """
    for key, val in params.items():
        if not isinstance(val, str):
            continue
        try:
            params[key] = eval(val)
        except NameError:
            pass
    return params
def combine_dfs(pattern, fname=None):
    """Concatenate all HDF5 files matching glob `pattern` into one DataFrame.

    Files are read in sorted order and the combined frame gets a fresh
    integer index.  If `fname` is given, the result is also written to
    that HDF5 file (directories are created as needed).
    """
    matching = sorted(glob(pattern))
    frames = [pd.read_hdf(path) for path in matching]
    combined = pd.concat(frames)
    combined = combined.reset_index(drop=True)
    if fname is not None:
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        combined.to_hdf(fname, 'all_data', mode='w', complib='zlib', complevel=9)
    return combined
def lat_from_syst(syst):
    """Return the unique site family (lattice) of `syst`'s sites.

    Raises an Exception when the sites belong to more than one family.
    """
    families = {site.family for site in syst.sites}
    if len(families) > 1:
        raise Exception('No unique lattice in the system.')
    return list(families)[0]
def memoize(obj):
    """Memoization decorator keyed on the stringified call arguments.

    The cache dict is exposed as the ``cache`` attribute of both the
    original callable and the returned wrapper.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            cache[key] = obj(*args, **kwargs)
            return cache[key]
    return memoizer
def named_product(**items):
    """Cartesian product of keyword-iterables, as a list of dicts.

    ``named_product(a=[1, 2], b=['x'])`` yields one dict per combination,
    e.g. ``{'a': 1, 'b': 'x'}``.
    """
    keys = list(items.keys())
    return [dict(zip(keys, combo)) for combo in product(*items.values())]
def get_git_revision_hash():
    """Get the git hash to save with data to ensure reproducibility."""
    raw = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    text = raw.decode("utf-8")
    return text.replace('\n', '')
def find_nearest(array, value):
    """Return the element of `array` closest to `value`."""
    distances = _np.abs(_np.array(array) - value)
    best = distances.argmin()
    return array[best]
def remove_unhashable_columns(df):
    """Return a copy of `df` without columns whose first entry is unhashable.

    Hashability is probed on the first row only (via `hashable`), so the
    frame must be non-empty.
    """
    out = df.copy()
    bad_cols = [c for c in out.columns if not hashable(out[c].iloc[0])]
    for c in bad_cols:
        out.drop(c, axis=1, inplace=True)
    return out
def hashable(v):
    """Determine whether `v` can be hashed."""
    try:
        hash(v)
        return True
    except TypeError:
        return False
def drop_constant_columns(df):
    """Return `df` restricted to the columns that are not constant.

    Adapted from http://stackoverflow.com/a/20210048/3447047.
    Unhashable columns are removed first and the index is reset; then
    every column equal to its own first value everywhere is dropped.
    """
    df = remove_unhashable_columns(df)
    df = df.reset_index(drop=True)
    # BUGFIX: `.ix` was deprecated in pandas 0.20 and removed in 1.0;
    # `.iloc[0]` is the positional equivalent (first row).
    return df.loc[:, (df != df.iloc[0]).any()]
| [
"subprocess.check_output",
"os.path.exists",
"inspect.Signature",
"multiprocessing.pool.map_async",
"itertools.product",
"inspect.signature",
"functools.wraps",
"os.path.dirname",
"numpy.array",
"pandas.read_hdf",
"inspect.Parameter",
"pandas.DataFrame",
"datetime.timedelta",
"toolz.partit... | [((4684, 4707), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (4701, 4707), False, 'import inspect\n'), ((5172, 5210), 'inspect.Signature', 'inspect.Signature', ([], {'parameters': 'new_pars'}), '(parameters=new_pars)\n', (5189, 5210), False, 'import inspect\n'), ((5496, 5509), 'glob.glob', 'glob', (['pattern'], {}), '(pattern)\n', (5500, 5509), False, 'from glob import glob\n'), ((6014, 6034), 'functools.wraps', 'functools.wraps', (['obj'], {}), '(obj)\n', (6029, 6034), False, 'import functools\n'), ((6491, 6544), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (6514, 6544), False, 'import subprocess\n'), ((3417, 3439), 'toolz.partition_all', 'partition_all', (['N', 'vals'], {}), '(N, vals)\n', (3430, 3439), False, 'from toolz import partition_all\n'), ((3629, 3656), 'multiprocessing.pool.map_async', 'pool.map_async', (['func', 'chunk'], {}), '(func, chunk)\n', (3643, 3656), False, 'from multiprocessing import pool\n'), ((3809, 3829), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (3821, 3829), True, 'import pandas as pd\n'), ((4258, 4317), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(time_elapsed / parts_done * N_files_left)'}), '(seconds=time_elapsed / parts_done * N_files_left)\n', (4267, 4317), False, 'from datetime import timedelta\n'), ((5530, 5544), 'pandas.read_hdf', 'pd.read_hdf', (['f'], {}), '(f)\n', (5541, 5544), True, 'import pandas as pd\n'), ((5652, 5674), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (5667, 5674), False, 'import os\n'), ((6355, 6369), 'itertools.product', 'product', (['*vals'], {}), '(*vals)\n', (6362, 6369), False, 'from itertools import product\n'), ((3569, 3590), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (3583, 3590), False, 'import os\n'), ((3956, 3978), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3971, 
3978), False, 'import os\n'), ((4922, 4975), 'inspect.Parameter', 'inspect.Parameter', (['to_name', 'v.kind'], {'default': 'v.default'}), '(to_name, v.kind, default=v.default)\n', (4939, 4975), False, 'import inspect\n'), ((6721, 6736), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (6729, 6736), True, 'import numpy as np\n')] |
from commlib import qam_constellation
import matplotlib.pyplot as plt
import numpy as np
# SNR-per-bit values (dB) to sweep and the QAM orders to evaluate.
SNRbdBs = np.arange(0.5, 25, 0.5)
n = np.arange(1,7,1)  # NOTE(review): appears unused below — confirm before removing
Ms = np.array([4, 16, 64, 256])
Ms = Ms.astype(int)
# Error-rate estimates, indexed [SNR point, QAM order].
Pest = np.zeros( [SNRbdBs.size, Ms.size] )
Pebt = np.zeros( [SNRbdBs.size, Ms.size] )
threshold = 1e-4  # NOTE(review): unused in this chunk — confirm
# Fill the tables: one constellation object per (SNR, M) combination.
for i, SNRbdB in enumerate(SNRbdBs):
    for j, M in enumerate(Ms):
        c = qam_constellation(M = M, SNRbdB = SNRbdB)
        Pest[i,j] = c.ser()  # plotted below as SER
        Pebt[i,j] = c.ber()  # plotted below as BER
        print('M = %d, SNRbdB = %6.2f Pe = %e' % (M,SNRbdB, Pest[i,j]))
plt.close('all')
# Figure 1 collects the SER curves, figure 2 the BER curves — one line per M.
for j, M in enumerate(Ms):
    plt.figure(1)
    plt.semilogy( SNRbdBs, Pest[:,j], label = 'M = %d' % M)
    plt.figure(2)
    plt.semilogy( SNRbdBs, Pebt[:,j], label = 'M = %d' % M)
plt.figure(1)
plt.xlabel('SNRb [dB]')
plt.ylabel('SER')
plt.legend()
plt.ylim([1e-5, 1])
plt.figure(2)
plt.xlabel('SNRb [dB]')
plt.ylabel('BER')
plt.legend()
plt.ylim([1e-5, 1])
| [
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"commlib.qam_constellation",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"numpy.arange"
] | [((100, 123), 'numpy.arange', 'np.arange', (['(0.5)', '(25)', '(0.5)'], {}), '(0.5, 25, 0.5)\n', (109, 123), True, 'import numpy as np\n'), ((128, 146), 'numpy.arange', 'np.arange', (['(1)', '(7)', '(1)'], {}), '(1, 7, 1)\n', (137, 146), True, 'import numpy as np\n'), ((150, 176), 'numpy.array', 'np.array', (['[4, 16, 64, 256]'], {}), '([4, 16, 64, 256])\n', (158, 176), True, 'import numpy as np\n'), ((205, 238), 'numpy.zeros', 'np.zeros', (['[SNRbdBs.size, Ms.size]'], {}), '([SNRbdBs.size, Ms.size])\n', (213, 238), True, 'import numpy as np\n'), ((248, 281), 'numpy.zeros', 'np.zeros', (['[SNRbdBs.size, Ms.size]'], {}), '([SNRbdBs.size, Ms.size])\n', (256, 281), True, 'import numpy as np\n'), ((554, 570), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (563, 570), True, 'import matplotlib.pyplot as plt\n'), ((760, 773), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (770, 773), True, 'import matplotlib.pyplot as plt\n'), ((774, 797), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SNRb [dB]"""'], {}), "('SNRb [dB]')\n", (784, 797), True, 'import matplotlib.pyplot as plt\n'), ((798, 815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SER"""'], {}), "('SER')\n", (808, 815), True, 'import matplotlib.pyplot as plt\n'), ((816, 828), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (826, 828), True, 'import matplotlib.pyplot as plt\n'), ((829, 849), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1e-05, 1]'], {}), '([1e-05, 1])\n', (837, 849), True, 'import matplotlib.pyplot as plt\n'), ((854, 867), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (864, 867), True, 'import matplotlib.pyplot as plt\n'), ((868, 891), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SNRb [dB]"""'], {}), "('SNRb [dB]')\n", (878, 891), True, 'import matplotlib.pyplot as plt\n'), ((892, 909), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""BER"""'], {}), "('BER')\n", (902, 909), True, 'import matplotlib.pyplot as plt\n'), 
((910, 922), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (920, 922), True, 'import matplotlib.pyplot as plt\n'), ((923, 943), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1e-05, 1]'], {}), '([1e-05, 1])\n', (931, 943), True, 'import matplotlib.pyplot as plt\n'), ((603, 616), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (613, 616), True, 'import matplotlib.pyplot as plt\n'), ((621, 674), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['SNRbdBs', 'Pest[:, j]'], {'label': "('M = %d' % M)"}), "(SNRbdBs, Pest[:, j], label='M = %d' % M)\n", (633, 674), True, 'import matplotlib.pyplot as plt\n'), ((681, 694), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (691, 694), True, 'import matplotlib.pyplot as plt\n'), ((699, 752), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['SNRbdBs', 'Pebt[:, j]'], {'label': "('M = %d' % M)"}), "(SNRbdBs, Pebt[:, j], label='M = %d' % M)\n", (711, 752), True, 'import matplotlib.pyplot as plt\n'), ((383, 420), 'commlib.qam_constellation', 'qam_constellation', ([], {'M': 'M', 'SNRbdB': 'SNRbdB'}), '(M=M, SNRbdB=SNRbdB)\n', (400, 420), False, 'from commlib import qam_constellation\n')] |
""" Utility functions operating on operation matrices """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.linalg as _spl
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
import warnings as _warnings
import collections as _collections
from . import jamiolkowski as _jam
from . import matrixtools as _mt
from . import lindbladtools as _lt
from . import basistools as _bt
from ..objects.basis import Basis as _Basis, ExplicitBasis as _ExplicitBasis, DirectSumBasis as _DirectSumBasis
from ..objects.label import Label as _Label
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
def _flat_mut_blks(i, j, blockDims):
# like _mut(i,j,dim).flatten() but works with basis *blocks*
N = sum(blockDims)
mx = _np.zeros((N, N), 'd'); mx[i, j] = 1.0
ret = _np.zeros(sum([d**2 for d in blockDims]), 'd')
i = 0; off = 0
for d in blockDims:
ret[i:i + d**2] = mx[off:off + d, off:off + d].flatten()
i += d**2; off += d
return ret
def _hack_sqrtm(A):
sqrt, _ = _spl.sqrtm(A, disp=False) # Travis found this scipy function
# to be incorrect in certain cases (we need a workaround)
if _np.any(_np.isnan(sqrt)): # this is sometimes a good fallback when sqrtm doesn't work.
ev, U = _np.linalg.eig(A)
sqrt = _np.dot(U, _np.dot(_np.diag(_np.sqrt(ev)), _np.linalg.inv(U)))
return sqrt
def fidelity(A, B):
    """
    Returns the quantum state fidelity between density
    matrices A and B given by :

      F = Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2

    To compute process fidelity, pass this function the
    Choi matrices of the two processes, or just call
    :function:`entanglement_fidelity` with the operation matrices.

    Parameters
    ----------
    A : numpy array
        First density matrix.

    B : numpy array
        Second density matrix.

    Returns
    -------
    float
        The resulting fidelity.
    """
    # Rank-1 shortcut: if A = |v><v| (one nonzero eigenvalue) then
    # F = <v|B|v> * ev, avoiding the unreliable matrix square root.
    evals, U = _np.linalg.eig(A)
    if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
        # special case when A is rank 1, A = vec * vec^T and sqrt(A) = A
        ivec = _np.argmax(evals)
        vec = U[:, ivec:(ivec + 1)]
        F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(B, vec)).real  # vec^T * B * vec
        return float(F)

    evals, U = _np.linalg.eig(B)
    if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
        # special case when B is rank 1 (recall fidelity is symmetric in its args)
        ivec = _np.argmax(evals)
        vec = U[:, ivec:(ivec + 1)]
        F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(A, vec)).real  # vec^T * A * vec
        return float(F)

    #if _np.array_equal(A, B): return 1.0  # HACK - some cases when A and B are perfectly equal sqrtm(A) fails...
    # General case: use the guarded square root (scipy's sqrtm can fail).
    sqrtA = _hack_sqrtm(A)  # _spl.sqrtm(A)
    # Sanity-check the square root — sqrtm is known to misbehave for
    # rank-deficient matrices; warn (but continue) if it looks wrong.
    if _np.linalg.norm(_np.dot(sqrtA, sqrtA) - A) > 1e-8:
        evals = _np.linalg.eigvals(A)
        _warnings.warn(("sqrtm(A) failure when computing fidelity - beware result. "
                        "Maybe due to rank defficiency - eigenvalues of A are: %s") % evals)

    F = (_mt.trace(_hack_sqrtm(_np.dot(sqrtA, _np.dot(B, sqrtA)))).real)**2  # Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2
    return float(F)
def frobeniusdist(A, B):
    """Frobenius distance between matrices A and B:

      sqrt( sum( (A_ij-B_ij)^2 ) )

    Parameters
    ----------
    A, B : numpy array
        The matrices to compare.

    Returns
    -------
    float
        The resulting frobenius distance.
    """
    difference = A - B
    return _mt.frobeniusnorm(difference)
def frobeniusdist2(A, B):
    """Squared Frobenius distance between matrices A and B:

      sum( (A_ij-B_ij)^2 )

    Parameters
    ----------
    A, B : numpy array
        The matrices to compare.

    Returns
    -------
    float
        The resulting squared frobenius distance.
    """
    difference = A - B
    return _mt.frobeniusnorm2(difference)
def residuals(A, B):
    """Element-wise residuals between two matrices, as a flat array.

    Parameters
    ----------
    A, B : numpy array
        The matrices to compare.

    Returns
    -------
    np.array
        The flattened residuals A - B.
    """
    difference = A - B
    return difference.flatten()
def tracenorm(A):
    """Trace norm of matrix A:  Tr( sqrt{ A^dagger * A } ).

    Parameters
    ----------
    A : numpy array
        The matrix to compute the trace norm of.
    """
    is_hermitian = _np.linalg.norm(A - _np.conjugate(A.T)) < 1e-8
    if is_hermitian:
        # Hermitian: eigenvalues are real, so sum their magnitudes.
        return _np.sum(_np.abs(_np.linalg.eigvals(A)))
    # General case: sum of singular values (positive by construction).
    return _np.sum(_np.linalg.svd(A, compute_uv=False))
def tracedist(A, B):
    """Trace distance between matrices A and B:

      D = 0.5 * Tr( sqrt{ (A-B)^dagger * (A-B) } )

    Parameters
    ----------
    A, B : numpy array
        The matrices to compute the distance between.
    """
    delta = A - B
    return 0.5 * tracenorm(delta)
def diamonddist(A, B, mxBasis='pp', return_x=False):
    """
    Returns the approximate diamond norm describing the difference between gate
    matrices A and B given by :

      D = ||A - B ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1

    Parameters
    ----------
    A, B : numpy array
        The *gate* matrices to use when computing the diamond norm.

    mxBasis : Basis object
        The source and destination basis, respectively. Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    return_x : bool, optional
        Whether to return a numpy array encoding the state (rho) at
        which the maximal trace distance occurs.

    Returns
    -------
    dm : float
        Diamond norm

    W : numpy array
        Only returned if `return_x = True`. Encodes the state rho, such that
        `dm = trace( |(J(A)-J(B)).T * W| )`.
    """
    mxBasis = _bt.build_basis_for_matrix(A, mxBasis)

    #currently cvxpy is only needed for this function, so don't import until here
    import cvxpy as _cvxpy

    #Check if using version < 1.0
    old_cvxpy = bool(tuple(map(int, _cvxpy.__version__.split('.'))) < (1, 0))

    # SDP formulation: the primal SDP from arXiv:1207.5726v2, Sec 3.2:
    #   Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
    #   Subject to  [[ I otimes rho0,  X      ],
    #                [ X.dag,  I otimes rho1 ]]  >> 0
    #   where rho0, rho1 are density matrices and X is a linear operator.
    # Here J(phi) = sum_ij Phi(Eij) otimes Eij and < A, B > = Tr(A.dag B).

    #Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by
    # density mx dimension (`smallDim`) below
    JAstd = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
    JBstd = _jam.fast_jamiolkowski_iso_std(B, mxBasis)

    #Do this *after* the fast_jamiolkowski_iso calls above because these will convert
    # A & B to a "single-block" basis representation when mxBasis has multiple blocks.
    dim = JAstd.shape[0]
    smallDim = int(_np.sqrt(dim))
    JAstd *= smallDim  # see above comment
    JBstd *= smallDim  # see above comment
    assert(dim == JAstd.shape[1] == JBstd.shape[0] == JBstd.shape[1])

    jamiolkowski_matrix = JBstd - JAstd

    # CVXPY (in these versions) doesn't handle complex values, so split
    # J = K + i*L and embed the complex PSD constraint into a real one.
    K = jamiolkowski_matrix.real  # J.real
    L = jamiolkowski_matrix.imag  # J.imag

    if old_cvxpy:
        Y = _cvxpy.Variable(dim, dim)  # X.real
        Z = _cvxpy.Variable(dim, dim)  # X.imag
        sig0 = _cvxpy.Variable(smallDim, smallDim)  # rho0.real
        sig1 = _cvxpy.Variable(smallDim, smallDim)  # rho1.real
        tau0 = _cvxpy.Variable(smallDim, smallDim)  # rho1.imag
        tau1 = _cvxpy.Variable(smallDim, smallDim)  # rho1.imag
    else:
        Y = _cvxpy.Variable(shape=(dim, dim))  # X.real
        Z = _cvxpy.Variable(shape=(dim, dim))  # X.imag
        sig0 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho0.real
        sig1 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho1.real
        tau0 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho1.imag
        tau1 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho1.imag

    ident = _np.identity(smallDim, 'd')

    objective = _cvxpy.Maximize(_cvxpy.trace(K.T * Y + L.T * Z))
    constraints = [_cvxpy.bmat([
        [_cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
        [Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
        [_cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
        [-Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]]) >> 0,
        _cvxpy.bmat([[sig0, -tau0],
                     [tau0, sig0]]) >> 0,
        _cvxpy.bmat([[sig1, -tau1],
                     [tau1, sig1]]) >> 0,
        sig0 == sig0.T,
        sig1 == sig1.T,
        tau0 == -tau0.T,
        tau1 == -tau1.T,
        _cvxpy.trace(sig0) == 1.,
        _cvxpy.trace(sig1) == 1.]

    prob = _cvxpy.Problem(objective, constraints)
    try:
        prob.solve(solver="CVXOPT")
#        prob.solve(solver="ECOS")
#       prob.solve(solver="SCS")#This always fails
    except _cvxpy.error.SolverError as e:
        _warnings.warn("CVXPY failed: %s - diamonddist returning -2!" % str(e))
        return (-2, _np.zeros((dim, dim))) if return_x else -2
    except Exception:
        # BUGFIX: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit; only catch genuine errors here.
        _warnings.warn("CVXOPT failed (uknown err) - diamonddist returning -2!")
        return (-2, _np.zeros((dim, dim))) if return_x else -2

    if return_x:
        X = Y.value + 1j * Z.value  # encodes state at which maximum trace-distance occurs
        return prob.value, X
    else:
        return prob.value
def jtracedist(A, B, mxBasis='pp'):  # Jamiolkowski trace distance: Tr(|J(A)-J(B)|)
    """Jamiolkowski trace distance between operation matrices A and B:

      D = 0.5 * Tr( sqrt{ (J(A)-J(B))^2 } )

    where J(.) is the Jamiolkowski isomorphism map that maps an operation
    matrix to its corresponding Choi matrix.

    Parameters
    ----------
    A, B : numpy array
        The matrices to compute the distance between.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively. Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).
    """
    choiA = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
    choiB = _jam.fast_jamiolkowski_iso_std(B, mxBasis)
    return tracedist(choiA, choiB)
def entanglement_fidelity(A, B, mxBasis='pp'):
    """
    Returns the "entanglement" process fidelity between gate
    matrices A and B given by :

      F = Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2

    where J(.) is the Jamiolkowski isomorphism map that maps a operation matrix
    to it's corresponding Choi Matrix.

    Parameters
    ----------
    A, B : numpy array
        The matrices to compute the fidelity between.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The basis of the matrices. Allowed values are Matrix-unit (std),
        Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
        (or a custom basis object).
    """
    d2 = A.shape[0]

    def isTP(x):
        # A trace-preserving superoperator (in a basis whose first element
        # is the normalized identity, e.g. 'pp'/'gm') has first row
        # (1, 0, ..., 0).
        # BUGFIX: the original checked `range(d2)`, which includes column 0
        # and so required x[0,0] to be close to BOTH 1 and 0 — isTP was
        # always False and the TP/unitary shortcut below was unreachable.
        return _np.isclose(x[0, 0], 1.0) and all(
            [_np.isclose(x[0, i], 0) for i in range(1, d2)])

    def isUnitary(x):
        return _np.allclose(_np.identity(d2, 'd'), _np.dot(x, x.conjugate().T))

    if isTP(A) and isTP(B) and isUnitary(B):  # then assume TP-like gates & use simpler formula
        TrLambda = _np.trace(_np.dot(A, B.conjugate().T))  # same as using _np.linalg.inv(B)
        d2 = A.shape[0]
        return TrLambda / d2

    # General case: fidelity of the (normalized) Choi matrices.
    JA = _jam.jamiolkowski_iso(A, mxBasis, mxBasis)
    JB = _jam.jamiolkowski_iso(B, mxBasis, mxBasis)
    return fidelity(JA, JB)
def average_gate_fidelity(A, B, mxBasis='pp'):
    """
    Computes the average gate fidelity (AGF) between two gates.

    Average gate fidelity (F_g) is related to entanglement fidelity
    (F_p) via:  F_g = (d * F_p + 1)/(1 + d),  where d is the Hilbert
    space dimension (Phys. Lett. A 303 249-252 (2002)).

    Parameters
    ----------
    A : array or gate
        The gate to compute the AGI to B of. E.g., an imperfect
        implementation of B.

    B : array or gate
        The gate to compute the AGI to A of. E.g., the target gate
        corresponding to A.

    mxBasis : {"std","gm","pp"} or Basis object, optional
        The basis of the matrices.

    Returns
    -------
    AGI : float
        The AGI of A to B.
    """
    d = int(round(_np.sqrt(A.shape[0])))
    proc_fid = entanglement_fidelity(A, B, mxBasis=mxBasis)
    return float((d * proc_fid + 1) / (1 + d))
def average_gate_infidelity(A, B, mxBasis="gm"):
    """
    Computes the average gate infidelity (AGI) between two gates.

    AGI is 1 minus the average gate fidelity; see
    Phys. Lett. A 303 249-252 (2002).

    Parameters
    ----------
    A : array or gate
        The gate to compute the AGI to B of. E.g., an imperfect
        implementation of B.

    B : array or gate
        The gate to compute the AGI to A of. E.g., the target gate
        corresponding to A.

    mxBasis : {"std","gm","pp"} or Basis object, optional
        The basis of the matrices.

    Returns
    ----------
    AGI : float
        The AGI of A to B.
    """
    agf = average_gate_fidelity(A, B, mxBasis)
    return 1 - agf
def entanglement_infidelity(A, B, mxBasis='pp'):
    """
    Returns the entanglement infidelity (EI) between gate
    matrices A and B given by :

      EI = 1 - Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2

    where J(.) is the Jamiolkowski isomorphism map that maps a operation
    matrix to its corresponding Choi Matrix.

    Parameters
    ----------
    A, B : numpy array
        The matrices to compute the fidelity between.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The basis of the matrices.

    Returns
    -------
    EI : float
        The EI of A to B.
    """
    fid = entanglement_fidelity(A, B, mxBasis)
    return 1 - float(fid)
def gateset_infidelity(mdl, target_model, itype='EI',
                       weights=None, mxBasis=None):
    """
    Weighted average-over-gates infidelity between `mdl` and `target_model`.

    If `itype` is 'EI' the per-gate infidelity is the entanglement
    infidelity; if 'AGI' it is the average gate infidelity (the two are
    related by a dimension-dependent constant).  This is the quantity
    RB error rates are sometimes claimed to be directly related to.

    Parameters
    ----------
    mdl : Model
        The model to calculate the average infidelity, to `target_model`, of.

    target_model : Model
        The model to calculate the average infidelity, to `mdl`, of.

    itype : str, optional
        The infidelity type: 'EI' or 'AGI'.

    weights : dict, optional
        Maps gate labels to (possibly unnormalized) weights used for the
        average; None means a uniform average.

    mxBasis : {"std","gm","pp"} or Basis object, optional
        The basis of the models; taken from `mdl` when None.

    Returns
    -------
    float
        The weighted average-over-gates infidelity between the two models.
    """
    assert itype in ('AGI', 'EI'), \
        "The infidelity type must be `AGI` (average gate infidelity) or `EI` (entanglement infidelity)"

    if mxBasis is None:
        mxBasis = mdl.basis

    total_weight = 0
    weighted_infids = []
    for lbl in list(target_model.operations.keys()):
        if itype == 'AGI':
            infid = average_gate_infidelity(mdl.operations[lbl],
                                            target_model.operations[lbl], mxBasis=mxBasis)
        else:  # 'EI' (guaranteed by the assert above)
            infid = entanglement_infidelity(mdl.operations[lbl],
                                            target_model.operations[lbl], mxBasis=mxBasis)
        w = 1 if weights is None else weights[lbl]
        weighted_infids.append(w * infid)
        total_weight += w

    assert total_weight > 0, "The sum of the weights should be positive!"
    return _np.sum(weighted_infids) / total_weight
def unitarity(A, mxBasis="gm"):
    """
    The "unitarity" of a channel (Wallman et al., "Estimating the
    Coherence of noise", NJP 17 113020 (2015), Prop 1):

      u(A) = Tr( A_u^{\\dagger} A_u ) / (d^2 - 1),

    where A_u is the unital submatrix of A (A with its first row and
    column removed, in a basis whose first element is the normalized
    identity) and d is the Hilbert space dimension.

    Parameters
    ----------
    A : array or gate
        The gate for which the unitarity is to be computed.

    mxBasis : {"std","gm","pp"} or a Basis object, optional
        The basis of the matrix.

    Returns
    ----------
    u : float
        The unitarity of the gate A.
    """
    d = int(round(_np.sqrt(A.shape[0])))
    basisMxs = _bt.basis_matrices(mxBasis, A.shape[0])

    # The unital-submatrix trick requires the first basis element to be
    # (proportional to) the identity; convert to 'gm' otherwise.
    if _np.allclose(basisMxs[0], _np.identity(d, 'd')):
        B = A
    else:
        B = _bt.change_basis(A, mxBasis, "gm")

    unital = B[1:d**2, 1:d**2]
    return _np.trace(_np.dot(unital.conj().T, unital)) / (d**2 - 1)
def fidelity_upper_bound(operationMx):
    """
    Get an upper bound on the fidelity of the given
    operation matrix with any unitary operation matrix.

    The closeness of the result to one tells
    how "unitary" the action of operationMx is.

    Parameters
    ----------
    operationMx : numpy array
        The operation matrix to act on.

    Returns
    -------
    float
        The resulting upper bound on fidelity(operationMx, anyUnitaryGateMx)
    """
    # Work with the (std-basis) Choi matrix: its largest eigenvalue bounds
    # the overlap with any rank-1 (i.e. unitary) Choi matrix.
    choi = _jam.jamiolkowski_iso(operationMx, choiMxBasis="std")
    choi_evals, choi_evecs = _np.linalg.eig(choi)
    maxF_direct = max([_np.sqrt(max(ev.real, 0.0)) for ev in choi_evals]) ** 2

    iMax = _np.argmax([ev.real for ev in choi_evals])  # index of maximum eigenval
    closestVec = choi_evecs[:, iMax:(iMax + 1)]

    # Build the rank-1 Choi matrix from the dominant eigenvector and
    # normalize it to unit trace (a valid Jamiolkowski state).
    closestJmx = _np.kron(closestVec, _np.transpose(_np.conjugate(closestVec)))  # closest rank-1 Jmx
    closestJmx /= _mt.trace(closestJmx)  # normalize so trace of Jmx == 1.0

    maxF = fidelity(choi, closestJmx)

    if not _np.isnan(maxF):
        # Cross-check the fidelity computation against the direct
        # eigenvalue-based bound; they should agree closely.
        assert(abs(maxF - maxF_direct) < 1e-6)
    else:
        maxF = maxF_direct  # case when maxF is nan, due to scipy sqrtm function being buggy - just use direct F

    closestOpMx = _jam.jamiolkowski_iso_inv(closestJmx, choiMxBasis="std")
    return maxF, closestOpMx
#closestU_evals, closestU_evecs = _np.linalg.eig(closestUnitaryGateMx)
#print "DEBUG: U = \n", closestUnitaryGateMx
#print "DEBUG: closest U evals = ",closestU_evals
#print "DEBUG: evecs = \n",closestU_evecs
def get_povm_map(model, povmlbl):
    """
    Constructs a gate-like quantity for the POVM within `model`.

    This is done by embedding the `k`-outcome classical output space of the POVM
    in the Hilbert-Schmidt space of `k` by `k` density matrices by placing the
    classical probability distribution along the diagonal of the density matrix.
    Currently, this is only implemented for the case when `k` equals `d`, the
    dimension of the POVM's Hilbert space.

    Parameters
    ----------
    model : Model
        The model supplying the POVM effect vectors and the basis those
        vectors are in.

    povmlbl : str
        The POVM label

    Returns
    -------
    numpy.ndarray
        The matrix of the "POVM map" in the `model.basis` basis.
    """
    # Dense effect vectors (columns); their transpose forms the B(H) -> S_k map.
    povmVectors = [v.todense()[:, None] for v in model.povms[povmlbl].values()]
    if isinstance(model.basis, _DirectSumBasis):  # HACK - need to get this to work with general bases
        blkDims = [int(_np.sqrt(comp.dim)) for comp in model.basis.component_bases]
    else:
        blkDims = [int(round(_np.sqrt(model.dim)))]  # [d] where density matrix is dxd

    nV = len(povmVectors)
    #assert(d**2 == model.dim), "Model dimension (%d) is not a perfect square!" % model.dim
    #assert( nV**2 == d ), "Can only compute POVM metrics when num of effects == H space dimension"
    # I don't think above assert is needed - should work in general (Robin?)
    povm_mx = _np.concatenate(povmVectors, axis=1).T  # "povm map" ( B(H) -> S_k ) (shape= nV,model.dim)

    # Embed the k-outcome classical space along the diagonal of a density mx:
    # column i of the embedding is the (vectorized) diagonal matrix unit E_ii.
    Sk_embedding_in_std = _np.zeros((model.dim, nV))
    for i in range(nV):
        Sk_embedding_in_std[:, i] = _flat_mut_blks(i, i, blkDims)

    std_to_basis = model.basis.reverse_transform_matrix("std")  # _bt.transform_matrix("std", model.basis, blkDims)
    assert(std_to_basis.shape == (model.dim, model.dim))

    return _np.dot(std_to_basis, _np.dot(Sk_embedding_in_std, povm_mx))
def povm_fidelity(model, targetModel, povmlbl):
    """
    Computes the process (entanglement) fidelity between POVM maps.

    Parameters
    ----------
    model, targetModel : Model
        Models containing the two POVMs to compare.

    povmlbl : str
        The POVM label

    Returns
    -------
    float
    """
    # Build both POVMs as gate-like maps, then compare them as processes.
    map_of_model = get_povm_map(model, povmlbl)
    map_of_target = get_povm_map(targetModel, povmlbl)
    return entanglement_fidelity(map_of_model, map_of_target, targetModel.basis)
def povm_jtracedist(model, targetModel, povmlbl):
    """
    Computes the Jamiolkowski trace distance between POVM maps using :func:`jtracedist`.

    Parameters
    ----------
    model, targetModel : Model
        Models containing the two POVMs to compare.

    povmlbl : str
        The POVM label

    Returns
    -------
    float
    """
    # Build both POVMs as gate-like maps, then take the J-trace distance.
    map_of_model = get_povm_map(model, povmlbl)
    map_of_target = get_povm_map(targetModel, povmlbl)
    return jtracedist(map_of_model, map_of_target, targetModel.basis)
def povm_diamonddist(model, targetModel, povmlbl):
    """
    Computes the diamond distance between POVM maps using :func:`diamonddist`.

    Parameters
    ----------
    model, targetModel : Model
        Models containing the two POVMs to compare.

    povmlbl : str
        The POVM label

    Returns
    -------
    float
    """
    # Build both POVMs as gate-like maps, then take the diamond distance.
    map_of_model = get_povm_map(model, povmlbl)
    map_of_target = get_povm_map(targetModel, povmlbl)
    return diamonddist(map_of_model, map_of_target, targetModel.basis)
#decompose operation matrix into axis of rotation, etc
def decompose_gate_matrix(operationMx):
    """
    Compute how the action of a operation matrix can be
    decomposed into fixed points, axes of rotation,
    angles of rotation, and decays.  Also determines
    whether a gate appears to be valid and/or unitary.

    Parameters
    ----------
    operationMx : numpy array
        The operation matrix to act on.

    Returns
    -------
    dict
        A dictionary describing the decomposed action. Keys are:

        'isValid' : bool
            whether decomposition succeeded
        'isUnitary' : bool
            whether operationMx describes unitary action
        'fixed point' : numpy array
            the fixed point of the action
        'axis of rotation' : numpy array or nan
            the axis of rotation
        'decay of diagonal rotation terms' : float
            decay of diagonal terms
        'rotating axis 1' : numpy array or nan
            1st axis orthogonal to axis of rotation
        'rotating axis 2' : numpy array or nan
            2nd axis orthogonal to axis of rotation
        'decay of off diagonal rotation terms' : float
            decay of off-diagonal terms
        'pi rotations' : float
            angle of rotation in units of pi radians
    """
    op_evals, op_evecs = _np.linalg.eig(_np.asarray(operationMx))

    TOL = 1e-4  # 1e-7

    # Classify the eigenvalues of the gate:
    #  1) unit eigenvalues (close to 1.0) -> candidate fixed points
    unit_eval_indices = [i for (i, ev) in enumerate(op_evals) if abs(ev - 1.0) < TOL]
    #unit_eval_indices = [ i for (i,ev) in enumerate(op_evals) if ev > (1.0-TOL) ]

    #  2) complex-conjugate pairs -> candidate rotation planes
    conjpair_eval_indices = []
    for (i, ev) in enumerate(op_evals):
        if i in unit_eval_indices: continue  # don't include the unit eigenvalues in the conjugate pair count
        # don't include existing conjugate pairs
        if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue
        for (j, ev2) in enumerate(op_evals[i + 1:]):
            if abs(ev - _np.conjugate(ev2)) < TOL:
                conjpair_eval_indices.append((i, j + (i + 1)))
                break  # don't pair i-th eigenvalue with any other (pairs should be disjoint)

    #  3) what remains is either an unpaired real or an unpaired complex eval
    real_eval_indices = []  # indices of real eigenvalues that are not units or a part of any conjugate pair
    complex_eval_indices = []  # indices of complex eigenvalues that are not units or a part of any conjugate pair
    for (i, ev) in enumerate(op_evals):
        if i in unit_eval_indices: continue  # don't include the unit eigenvalues
        if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue  # don't include the conjugate pairs
        if abs(ev.imag) < TOL: real_eval_indices.append(i)
        else: complex_eval_indices.append(i)

    nQubits = _np.log2(operationMx.shape[0]) / 2
    if nQubits == 1:  # decomposition is only implemented for single-qubit (4x4) superoperators
        #Special case: if have two conjugate pairs, check if one (or both) are real
        # and break the one with the largest (real) value into two unpaired real evals.
        if len(conjpair_eval_indices) == 2:
            iToBreak = None
            if abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL and \
               abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL:
                # NOTE(review): argmax here is taken over the *index tuples*
                # (conjpair_eval_indices[k][0]), not over op_evals[...] — this
                # looks suspicious; confirm intent before relying on it.
                iToBreak = _np.argmax([_np.real(conjpair_eval_indices[0][0]), _np.real(conjpair_eval_indices[1][0])])
            elif abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL: iToBreak = 0
            elif abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL: iToBreak = 1

            if iToBreak is not None:
                real_eval_indices.append(conjpair_eval_indices[iToBreak][0])
                real_eval_indices.append(conjpair_eval_indices[iToBreak][1])
                del conjpair_eval_indices[iToBreak]

        #Find eigenvector corresponding to fixed point (or closest we can get).   This
        # should be a unit eigenvalue with identity eigenvector.
        if len(unit_eval_indices) > 0:
            #Find linear least squares solution within possibly degenerate unit-eigenvalue eigenspace
            # of eigenvector closest to identity density mx (the desired fixed point), then orthogonalize
            # the remaining eigenvectors w.r.t this one.
            A = _np.take(op_evecs, unit_eval_indices, axis=1)
            b = _np.array([[1], [0], [0], [0]], 'd')  # identity density mx
            x = _np.dot(_np.linalg.pinv(_np.dot(A.T, A)), _np.dot(A.T, b))
            fixedPtVec = _np.dot(A, x)  # fixedPtVec / _np.linalg.norm(fixedPtVec)
            fixedPtVec = fixedPtVec[:, 0]

            iLargestContrib = _np.argmax(_np.abs(x))  # index of gate eigenvector which contributed the most
            for ii, i in enumerate(unit_eval_indices):
                if ii == iLargestContrib:
                    op_evecs[:, i] = fixedPtVec
                    iFixedPt = i
                else:
                    # Gram-Schmidt: orthogonalize the remaining unit-eigenvalue
                    # eigenvectors against the fixed point and each other.
                    op_evecs[:, i] = op_evecs[:, i] - _np.vdot(fixedPtVec, op_evecs[:, i]) * fixedPtVec
                    for jj, j in enumerate(unit_eval_indices[:ii]):
                        if jj == iLargestContrib: continue
                        op_evecs[:, i] = op_evecs[:, i] - _np.vdot(op_evecs[:, j], op_evecs[:, i]) * op_evecs[:, j]
                    op_evecs[:, i] /= _np.linalg.norm(op_evecs[:, i])
        elif len(real_eval_indices) > 0:
            # just take eigenvector corresponding to the largest real eigenvalue?
            #iFixedPt = real_eval_indices[ _np.argmax( [ op_evals[i] for i in real_eval_indices ] ) ]

            # ...OR take eigenvector corresponding to a real unpaired eigenvalue closest to identity:
            idmx = _np.array([[1], [0], [0], [0]], 'd')  # identity density mx
            iFixedPt = real_eval_indices[_np.argmin([_np.linalg.norm(op_evecs[i] - idmx) for i in real_eval_indices])]
        else:
            #No unit or real eigenvalues => two complex conjugate pairs or unpaired complex evals --> bail out
            return {'isValid': False, 'isUnitary': False, 'msg': "All evals are complex."}

        #Find eigenvector corresponding to axis of rotation: find the *largest* unpaired real/unit eval
        indsToConsider = (unit_eval_indices + real_eval_indices)[:]
        del indsToConsider[indsToConsider.index(iFixedPt)]  # don't consider fixed pt evec

        if len(indsToConsider) > 0:
            iRotAxis = indsToConsider[_np.argmax([op_evals[i] for i in indsToConsider])]
        else:
            #No unit or real eigenvalues => an unpaired complex eval --> bail out
            return {'isValid': False, 'isUnitary': False, 'msg': "Unpaired complex eval."}

        #There are only 2 eigenvalues left -- hopefully a conjugate pair giving rotation
        inds = list(range(4))
        del inds[inds.index(iFixedPt)]
        del inds[inds.index(iRotAxis)]
        if abs(op_evals[inds[0]] - _np.conjugate(op_evals[inds[1]])) < TOL:
            iConjPair1, iConjPair2 = inds
        else:
            return {'isValid': False, 'isUnitary': False, 'msg': "No conjugate pair for rotn."}

        # A unitary superoperator has (at least) two unit eigenvalues:
        # the fixed point and the rotation axis.
        return {'isValid': True,
                'isUnitary': bool(len(unit_eval_indices) >= 2),
                'fixed point': op_evecs[:, iFixedPt],
                'axis of rotation': op_evecs[:, iRotAxis],
                'rotating axis 1': op_evecs[:, iConjPair1],
                'rotating axis 2': op_evecs[:, iConjPair2],
                'decay of diagonal rotation terms': 1.0 - abs(op_evals[iRotAxis]),
                'decay of off diagonal rotation terms': 1.0 - abs(op_evals[iConjPair1]),
                'pi rotations': _np.angle(op_evals[iConjPair1]) / _np.pi,
                'msg': "Success"}
    else:
        return {'isValid': False,
                'isUnitary': False,
                'msg': "Unsupported number of qubits: %d" % nQubits}
def state_to_dmvec(psi):
    """
    Return the vectorized density matrix that acts as the state `psi`.

    This is the outer-product map |psi> => |psi><psi| with the result
    flattened, i.e. `dot(psi, conjugate(psi).T)`.

    Parameters
    ----------
    psi : numpy array
        The state vector.

    Returns
    -------
    numpy array
        The vectorized density matrix.
    """
    col = psi.reshape((psi.size, 1))  # force column-vector shape
    density_mx = _np.dot(col, _np.conjugate(col.T))
    return density_mx.flatten()
def dmvec_to_state(dmvec, tol=1e-6):
    """
    Compute the pure state describing the action of density matrix vector `dmvec`.

    If `dmvec` represents a mixed state, ValueError is raised.

    Parameters
    ----------
    dmvec : numpy array
        The vectorized density matrix, assumed to be in the standard (matrix
        unit) basis.

    tol : float, optional
        tolerance for determining whether an eigenvalue is zero.

    Returns
    -------
    numpy array
        The pure state, as a column vector of shape = (N,1)

    Raises
    ------
    ValueError
        If `dmvec` has more than one eigenvalue above `tol` (a mixed state)
        or none at all (the zero matrix).
    """
    d2 = dmvec.size; d = int(round(_np.sqrt(d2)))
    dm = dmvec.reshape((d, d))
    evals, evecs = _np.linalg.eig(dm)

    # A pure-state density matrix has exactly one nonzero eigenvalue;
    # find it and reject anything mixed (two or more above tolerance).
    k = None
    for i, ev in enumerate(evals):
        if abs(ev) > tol:
            if k is None: k = i
            else: raise ValueError("Cannot convert mixed dmvec to pure state!")
    # FIX: error message previously read "puse state"
    if k is None: raise ValueError("Cannot convert zero dmvec to pure state!")

    psi = evecs[:, k] * _np.sqrt(evals[k])
    psi.shape = (d, 1)
    return psi
def unitary_to_process_mx(U):
    """
    Compute the super-operator which acts on (row)-vectorized
    density matrices from a unitary operator (matrix) U which
    acts on state vectors.  This super-operator is given by
    the tensor product of U and conjugate(U), i.e. kron(U,U.conj).

    Parameters
    ----------
    U : numpy array
        The unitary matrix which acts on state vectors.

    Returns
    -------
    numpy array
        The super-operator process matrix.
    """
    # Since A X B --row-vectorize--> kron(A, B.T) * vec(X), the map
    # rho -> U rho U^dag becomes kron(U, conjugate(U)).
    Uc = _np.conjugate(U)
    return _np.kron(U, Uc)
def process_mx_to_unitary(superop):
    """
    Compute the unitary corresponding to the (unitary-action!)
    super-operator `superop` which acts on (row)-vectorized
    density matrices.  The super-operator must be of the form
    `kron(U,U.conj)` or an error will be thrown.

    Parameters
    ----------
    superop : numpy array
        The superoperator matrix which acts on vectorized
        density matrices (in the 'std' matrix-unit basis).

    Returns
    -------
    numpy array
        The unitary matrix which acts on state vectors.
    """
    d2 = superop.shape[0]
    d = int(round(_np.sqrt(d2)))
    U = _np.empty((d, d), 'complex')

    for i in range(d):
        # Apply superop to |i><i|, yielding U|i><i|U^dag.
        eii = _np.zeros((d, d), 'd')
        eii[i, i] = 1.0
        UiiU = _np.dot(superop, eii.flat).reshape((d, d))

        if i > 0:
            # Fix the relative phase of column i by applying superop to
            # |i><0| (giving U|i><0|U^dag) and acting on the already-found
            # column 0.
            ei0 = _np.zeros((d, d), 'd')
            ei0[i, 0] = 1.0
            Ui0U = _np.dot(superop, ei0.flat).reshape((d, d))
            col = _np.dot(Ui0U, U[:, 0])
        else:
            # Column 0 (up to a global phase): take the eigenvector of
            # U|0><0|U^dag with the largest-magnitude eigenvalue — more
            # robust than projecting a random state.
            #TODO: assert other eigenvalues are much smaller?
            evals, evecs = _np.linalg.eig(UiiU)
            col = evecs[:, _np.argmax(_np.abs(evals))]
            col /= _np.linalg.norm(col)
        U[:, i] = col

    return U
def spam_error_generator(spamvec, target_spamvec, mxBasis, typ="logGTi"):
    """
    Construct an error generator from a SPAM vector and it's target.

    Computes the value of the error generator given by
    `errgen = log( diag(spamvec / target_spamvec) )`, where division is
    element-wise.  This results in a (non-unique) error generator matrix
    `E` such that `spamvec = exp(E) * target_spamvec`.

    Note: This is currently of very limited use, as the above algorithm fails
    whenever `target_spamvec` has zero elements where `spamvec` doesn't.

    Parameters
    ----------
    spamvec : ndarray
        The SPAM vector.

    target_spamvec : ndarray
        The target SPAM vector.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    typ : {"logGTi"}
        The type of error generator to compute.  Allowed values are:

        - "logGTi" : errgen = log( diag(spamvec / target_spamvec) )

    Returns
    -------
    errgen : ndarray
        The error generator.
    """
    assert(typ == "logGTi"), "Only logGTi type is supported so far"

    # rho = exp(E) rho0  =>  approximate exp(E) by the diagonal map
    # A = diag(rho / rho0), then recover E = log(A).
    n = len(spamvec)
    ratios = []
    for num, den in zip(spamvec, target_spamvec):
        if _np.isclose(den, 0.0):
            # 0/0 is taken as 1 by convention; nonzero/0 is unrecoverable.
            if not _np.isclose(num, den):
                raise ValueError("Cannot take spam_error_generator")
            ratios.append(1)
        else:
            ratios.append(num / den)

    diag_map = _np.zeros((n, n), 'd')  # type assumes this is density-mx evolution
    diag_map[_np.diag_indices(n)] = ratios
    return _spl.logm(diag_map)
def error_generator(gate, target_op, mxBasis, typ="logG-logT"):
    """
    Construct the error generator from a gate and its target.

    Computes the value of the error generator given by
    errgen = log( inv(target_op) * gate ), so that
    gate = target_op * exp(errgen).

    Parameters
    ----------
    gate : ndarray
        The operation matrix

    target_op : ndarray
        The target operation matrix

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    typ : {"logG-logT", "logTiG", "logGTi"}
        The type of error generator to compute.  Allowed values are:

        - "logG-logT" : errgen = log(gate) - log(target_op)
        - "logTiG" : errgen = log( dot(inv(target_op), gate) )
        - "logGTi" : errgen = log( dot(gate,inv(target_op)) )

    Returns
    -------
    errgen : ndarray
        The error generator.

    Raises
    ------
    ValueError
        If `typ` is unrecognized, or if a real-valued generator could not
        be constructed.
    """
    TOL = 1e-8  # tolerance on imaginary parts when requiring a real log

    if typ == "logG-logT":
        try:
            logT = _mt.unitary_superoperator_matrix_log(target_op, mxBasis)
        except AssertionError:  # if not unitary, fall back to just taking the real log
            logT = _mt.real_matrix_log(target_op, "raise", TOL)  # make a fuss if this can't be done
        # approximate log(gate) using log(target) as the starting branch choice
        logG = _mt.approximate_matrix_log(gate, logT)

        # Both logG and logT *should* be real, so we just take the difference.
        if _np.linalg.norm(_np.imag(logG)) < TOL and \
           _np.linalg.norm(_np.imag(logT)) < TOL:
            return _np.real(logG - logT)

        #Otherwise, there could be branch cut issues or worse, so just
        # raise an error for now (maybe return a dummy if needed elsewhere?)
        raise ValueError("Could not construct a real logarithms for the "
                         "'logG-logT' generator. Perhaps you should use "
                         "the 'logTiG' or 'logGTi' generator instead?")

    elif typ == "logTiG":
        target_op_inv = _spl.inv(target_op)
        try:
            errgen = _mt.near_identity_matrix_log(_np.dot(target_op_inv, gate), TOL)
        except AssertionError:  # not near the identity, fall back to the real log
            _warnings.warn(("Near-identity matrix log failed; falling back "
                            "to approximate log for logTiG error generator"))
            errgen = _mt.real_matrix_log(_np.dot(target_op_inv, gate), "warn", TOL)

        # last resort: approximate log anchored at zero
        if _np.linalg.norm(errgen.imag) > TOL:
            _warnings.warn("Falling back to approximate log for logTiG error generator")
            errgen = _mt.approximate_matrix_log(_np.dot(target_op_inv, gate),
                                                _np.zeros(gate.shape, 'd'), TOL=TOL)

    elif typ == "logGTi":
        target_op_inv = _spl.inv(target_op)
        try:
            errgen = _mt.near_identity_matrix_log(_np.dot(gate, target_op_inv), TOL)
        except AssertionError as e:  # not near the identity, fall back to the real log
            _warnings.warn(("Near-identity matrix log failed; falling back "
                            "to approximate log for logGTi error generator:\n%s") % str(e))
            errgen = _mt.real_matrix_log(_np.dot(gate, target_op_inv), "warn", TOL)

        # last resort: approximate log anchored at zero
        if _np.linalg.norm(errgen.imag) > TOL:
            _warnings.warn("Falling back to approximate log for logGTi error generator")
            errgen = _mt.approximate_matrix_log(_np.dot(gate, target_op_inv),
                                                _np.zeros(gate.shape, 'd'), TOL=TOL)

    else:
        raise ValueError("Invalid error-generator type: %s" % typ)

    if _np.linalg.norm(_np.imag(errgen)) > TOL:
        raise ValueError("Could not construct a real generator!")
        #maybe this is actually ok, but a complex error generator will
        # need to be plotted differently, etc -- TODO
    return _np.real(errgen)
def operation_from_error_generator(error_gen, target_op, typ="logG-logT"):
    """
    Construct a gate from an error generator and a target gate.

    Inverts the computation fone in :func:`error_generator`, returning the
    gate built from `error_gen` and `target_op`, with the exact combination
    rule determined by `typ`.

    Parameters
    ----------
    error_gen : ndarray
        The error generator matrix

    target_op : ndarray
        The target operation matrix

    typ : {"logG-logT", "logTiG", "logGTi"}
        How the error generator was defined; the gate is reconstructed as:

        - "logG-logT" : gate = exp( error_gen + log(target_op) )
        - "logTiG"    : gate = dot( target_op, exp(error_gen) )
        - "logGTi"    : gate = dot( exp(error_gen), target_op )

    Returns
    -------
    ndarray
        The operation matrix.
    """
    # Guard-clause dispatch on the generator convention.
    if typ == "logG-logT":
        return _spl.expm(error_gen + _spl.logm(target_op))
    if typ == "logTiG":
        return _np.dot(target_op, _spl.expm(error_gen))
    if typ == "logGTi":
        return _np.dot(_spl.expm(error_gen), target_op)
    raise ValueError("Invalid error-generator type: %s" % typ)
def std_scale_factor(dim, projection_type):
    """
    Returns the multiplicative scaling that should be applied to the output of
    :func"`std_error_generators`, before using them as projectors, in order to
    compute the "standard" reported projection onto that type of error (i.e.
    the coefficient of the standard generator terms built un-normalized-Paulis).

    Parameters
    ----------
    dim : int
        The dimension of the error generators; also the associated gate
        dimension.  This must be a perfect square, as `sqrt(dim)`
        is the dimension of density matrices. For a single qubit, dim == 4.

    projection_type : {"hamiltonian", "stochastic", "affine"}
        The type/class of error generators to get the scaling for.

    Returns
    -------
    float
    """
    d = int(_np.sqrt(dim))  # density-matrix dimension

    if projection_type == "hamiltonian":
        # so projection is coefficient of Hamiltonian term (w/un-normalized Paulis)
        return 1.0 / (d * _np.sqrt(2))
    if projection_type == "stochastic":
        # so projection is coefficient of P*rho*P stochastic term in generator (w/un-normalized Paulis)
        return 1.0 / d
    if projection_type == "affine":
        # so projection is coefficient of P affine term in generator (w/un-normalized Paulis)
        return 1.0
    raise ValueError("Invalid projection_type argument: %s"
                     % projection_type)
def std_error_generators(dim, projection_type, projection_basis):
    """
    Compute the gate error generators for a standard set of errors which
    correspond to "Hamiltonian"- or "Stochastic"-type errors in terms of the
    elements of the specified basis.

    Parameters
    ----------
    dim : int
        The dimension of the error generators to be returned.  This is also the
        associated gate dimension, and must be a perfect square, as `sqrt(dim)`
        is the dimension of density matrices. For a single qubit, dim == 4.

    projection_type : {"hamiltonian", "stochastic", "affine"}
        The type of error generators to construct.  If "hamiltonian", then the
        Hamiltonian generators which take a density matrix rho -> -i*[ H, rho ]
        for Pauli-product matrix H. If "stochastic", then the Stochastic error
        generators which take rho -> P*rho*P for Pauli-product matrix P.  If
        "affine", then the affine generators which take rho -> P.

    projection_basis : {'std', 'gm', 'pp', 'qt'}
        Which basis is used to construct the error generators.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm),
        Pauli-product (pp) and Qutrit (qt).

    Returns
    -------
    generators : numpy.ndarray
        An array of shape (#basis-elements,dim,dim).  `generators[i]` is the
        generator corresponding to the ith basis matrix in the
        *std* (matrix unit) basis.  (Note that in most cases #basis-elements
        == dim, so the size of `generators` is (dim,dim,dim) ).  Each
        generator is normalized so that as a vector it has unit Frobenius norm.
    """
    d2 = dim
    d = int(_np.sqrt(d2))
    assert(_np.isclose(d * d, d2))  # d2 must be a perfect square

    #Get a list of the basis matrices
    basis_mxs = _bt.basis_matrices(projection_basis, d2)
    assert(len(basis_mxs) <= d2)  # OK if there are fewer basis matrices (e.g. for bases w/multiple blocks)

    # Dispatch table: basis matrix -> Lindbladian of the requested type.
    to_lindbladian = {"hamiltonian": _lt.hamiltonian_to_lindbladian,
                      "stochastic": _lt.stochastic_lindbladian,
                      "affine": _lt.affine_lindbladian}

    generators = _np.empty((len(basis_mxs), d2, d2), 'complex')
    for i, basisMx in enumerate(basis_mxs):
        make_lindbladian = to_lindbladian.get(projection_type)
        if make_lindbladian is None:
            raise ValueError("Invalid projection_type argument: %s"
                             % projection_type)
        generators[i] = make_lindbladian(basisMx)  # in std basis

        nrm = _np.linalg.norm(generators[i].flat)
        if not _np.isclose(nrm, 0):
            generators[i] /= nrm  # normalize projector
            assert(_np.isclose(_np.linalg.norm(generators[i].flat), 1.0))

    return generators
def std_errgen_projections(errgen, projection_type, projection_basis,
                           mxBasis="gm", return_generators=False,
                           return_scale_fctr=False):
    """
    Compute the projections of a gate error generator onto generators
    for a standard set of errors constructed from the elements of a
    specified basis.

    Parameters
    ----------
    errgen: : ndarray
        The error generator matrix to project.

    projection_type : {"hamiltonian", "stochastic", "affine"}
        The type of error generators to project the gate error generator onto.
        If "hamiltonian", then use the Hamiltonian generators which take a density
        matrix rho -> -i*[ H, rho ] for Pauli-product matrix H.  If "stochastic",
        then use the Stochastic error generators which take rho -> P*rho*P for
        Pauli-product matrix P (recall P is self adjoint).  If "affine", then
        use the affine error generators which take rho -> P (superop is |P>><<1|).

    projection_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    return_generators : bool, optional
        If True, return the error generators projected against along with the
        projection values themselves.

    return_scale_fctr : bool, optional
        If True, also return the scaling factor that was used to multiply the
        projections onto *normalized* error generators to get the returned
        values.

    Returns
    -------
    projections : numpy.ndarray
        An array of length equal to the number of elements in the
        basis used to construct the projectors.  Typically this is
        is also the dimension of the gate (e.g. 4 for a single qubit).

    generators : numpy.ndarray
        Only returned when `return_generators == True`.  An array of shape
        (#basis-els,op_dim,op_dim) such that  `generators[i]` is the
        generator corresponding to the i-th basis element.  Note
        that these matricies are in the *std* (matrix unit) basis.

    scale : float
        Only returned when `return_scale_fctr == True`.  A multiplicative
        scaling constant that *has already been applied* to `projections`.
    """
    # Convert the error generator into the std (matrix-unit) basis, where the
    # standard generators from std_error_generators() live.
    if isinstance(mxBasis, _Basis):
        errgen_std = _bt.change_basis(errgen, mxBasis, mxBasis.equivalent('std'))

        #expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
        errgen_std = _bt.resize_std_mx(errgen_std, 'expand', mxBasis.equivalent('std'),
                                       mxBasis.simple_equivalent('std'))
    else:
        errgen_std = _bt.change_basis(errgen, mxBasis, "std")

    d2 = errgen_std.shape[0]
    d = int(_np.sqrt(d2))
    # nQubits = _np.log2(d)

    #Get a list of the d2 generators (in correspondence with the
    # basis matrices given by _basis.pp_matrices(d), etc.)
    lindbladMxs = std_error_generators(d2, projection_type, projection_basis)  # in std basis

    assert(len(lindbladMxs) <= d2)  # can be fewer projection matrices (== length of projection_basis)
    assert(_np.isclose(d * d, d2))  # d2 must be a perfect square

    projections = _np.empty(len(lindbladMxs), 'd')
    for i, lindbladMx in enumerate(lindbladMxs):
        # Frobenius inner product of the error generator with each projector.
        proj = _np.real_if_close(_np.vdot(errgen_std.flatten(), lindbladMx.flatten()), tol=1000)

        #assert(_np.isreal(proj)), "non-real projection: %s" % str(proj) #just a warning now
        if not _np.isreal(proj):
            _warnings.warn("Taking abs() of non-real projection: %s" % str(proj))
            proj = abs(proj)
        projections[i] = proj

    # Rescale to the "standard" (un-normalized-Pauli) convention.
    scaleFctr = std_scale_factor(d2, projection_type)
    projections *= scaleFctr
    lindbladMxs /= scaleFctr  # so projections * generators give original

    ret = [projections]
    if return_generators: ret.append(lindbladMxs)
    if return_scale_fctr: ret.append(scaleFctr)
    return ret[0] if len(ret) == 1 else tuple(ret)
def _assert_shape(ar, shape, sparse=False):
""" Asserts ar.shape == shape ; works with sparse matrices too """
if not sparse or len(shape) == 2:
assert(ar.shape == shape), \
"Shape mismatch: %s != %s!" % (str(ar.shape), str(shape))
else:
if len(shape) == 3: # first "dim" is a list
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or ar[0].shape == (shape[1], shape[2])), \
"Shape mismatch: %s != %s!" % (str(ar[0].shape), str(shape[1:]))
elif len(shape) == 4: # first 2 dims are lists
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or len(ar[0]) == shape[1]), \
"Second dim mismatch: %d != %d!" % (len(ar[0]), shape[1])
assert(shape[0] == 0 or shape[1] == 0 or ar[0][0].shape == (shape[2], shape[3])), \
"Shape mismatch: %s != %s!" % (str(ar[0][0].shape), str(shape[2:]))
else:
raise NotImplementedError("Number of dimensions must be <= 4!")
def lindblad_error_generators(dmbasis_ham, dmbasis_other, normalize,
                              other_mode="all"):
    """
    Compute the superoperator-generators corresponding to Lindblad terms.
    This routine computes the Hamiltonian and Non-Hamiltonian ("other")
    superoperator generators which correspond to the terms of the Lindblad
    expression:
    L(rho) = sum_i( h_i [A_i,rho] ) +
        sum_ij( o_ij * (B_i rho B_j^dag -
        0.5( rho B_j^dag B_i + B_j^dag B_i rho) ) )
    where {A_i} and {B_i} are bases (possibly the same) for Hilbert Schmidt
    (density matrix) space with the identity element removed so that each
    A_i and B_i are traceless.  If we write L(rho) in terms of superoperators
    H_i and O_ij,
    L(rho) = sum_i( h_i H_i(rho) ) + sum_ij( o_ij O_ij(rho) )
    then this function computes the matrices for H_i and O_ij using the given
    density matrix basis.  Thus, if `dmbasis` is expressed in the standard
    basis (as it should be), the returned matrices are also in this basis.
    If these elements are used as projectors it may be useful to normalize
    them (by setting `normalize=True`).  Note, however, that these projectors
    are not all orthogonal - in particular the O_ij's are not orthogonal to
    one another.

    Parameters
    ----------
    dmbasis_ham : list
        A list of basis matrices {B_i} *including* the identity as the first
        element, for the returned Hamiltonian-type error generators.  This
        argument is easily obtained by call to :func:`pp_matrices` or a
        similar function.  The matrices are expected to be in the standard
        basis, and should be traceless except for the identity.  Matrices
        should be NumPy arrays or SciPy CSR sparse matrices.
    dmbasis_other : list
        A list of basis matrices {B_i} *including* the identity as the first
        element, for the returned Stochastic-type error generators.  This
        argument is easily obtained by call to :func:`pp_matrices` or a
        similar function.  The matrices are expected to be in the standard
        basis, and should be traceless except for the identity.  Matrices
        should be NumPy arrays or SciPy CSR sparse matrices.
    normalize : bool
        Whether or not generators should be normalized so that
        numpy.linalg.norm(generator.flat) == 1.0  Note that the generators
        will still, in general, be non-orthogonal.
    other_mode : {"diagonal", "diag_affine", "all"}
        Which non-Hamiltonian Lindblad error generators to construct.
        Allowed values are: `"diagonal"` (only the diagonal Stochastic
        generators are returned; that is, the generators corresponding to the
        `i==j` terms in the Lindblad expression.), `"diag_affine"` (diagonal +
        affine generators), and `"all"` (all generators).

    Returns
    -------
    ham_generators : numpy.ndarray or list of SciPy CSR matrices
        If dense matrices where given, an array of shape (d-1,d,d), where d is
        the size of the basis, i.e. d == len(dmbasis).  `ham_generators[i]`
        gives the matrix for H_i.  If sparse matrices were given, a list
        of shape (d,d) CSR matrices.
    other_generators : numpy.ndarray or list of lists of SciPy CSR matrices
        If dense matrices where given, An array of shape (d-1,d-1,d,d),
        (2,d-1,d,d), or (d-1,d,d), where d is the size of the basis, for
        `other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
        respectively.  For instance, in the `"all"` case,
        `other_generators[i,j]` gives the matrix for O_ij.  If sparse matrices
        were given, the all but the final 2 dimensions are lists (e.g. the
        `"all"` case returns a list of lists of shape (d,d) CSR matrices).
    """
    # Either basis list may be None / empty; the corresponding generator
    # output is then None.
    if dmbasis_ham is not None:
        ham_mxs = dmbasis_ham  # list of basis matrices (assumed to be in std basis)
        ham_nMxs = len(ham_mxs)  # usually == d2, but not necessary (e.g. w/maxWeight)
    else:
        ham_nMxs = 0
    if dmbasis_other is not None:
        other_mxs = dmbasis_other  # list of basis matrices (assumed to be in std basis)
        other_nMxs = len(other_mxs)  # usually == d2, but not necessary (e.g. w/maxWeight)
    else:
        other_nMxs = 0
    # Infer matrix dimension and sparseness from whichever basis is non-empty.
    if ham_nMxs > 0:
        d = ham_mxs[0].shape[0]
        sparse = _sps.issparse(ham_mxs[0])
    elif other_nMxs > 0:
        d = other_mxs[0].shape[0]
        sparse = _sps.issparse(other_mxs[0])
    else:
        d = 0  # will end up returning no generators
        sparse = False
    d2 = d**2
    # Use sparse-aware norm/identity helpers so the same code path serves
    # both dense NumPy arrays and SciPy CSR matrices.
    normfn = _spsl.norm if sparse else _np.linalg.norm
    identityfn = (lambda d: _sps.identity(d, 'd', 'csr')) if sparse else _np.identity
    if ham_nMxs > 0 and other_nMxs > 0:
        assert(other_mxs[0].shape[0] == ham_mxs[0].shape[0]), \
            "Bases must have the same dimension!"
    if ham_nMxs > 0:
        # First basis element must be (a scalar multiple of) the identity;
        # it is skipped when building generators below.
        assert(_np.isclose(normfn(ham_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
            "The first matrix in 'dmbasis_ham' must be the identity"
        hamLindbladTerms = [None] * (ham_nMxs - 1) if sparse else \
            _np.empty((ham_nMxs - 1, d2, d2), 'complex')
        for i, B in enumerate(ham_mxs[1:]):  # don't include identity
            hamLindbladTerms[i] = _lt.hamiltonian_to_lindbladian(B, sparse)  # in std basis
            if normalize:
                norm = normfn(hamLindbladTerms[i])  # same as norm(term.flat)
                if not _np.isclose(norm, 0):
                    hamLindbladTerms[i] /= norm  # normalize projector
                    assert(_np.isclose(normfn(hamLindbladTerms[i]), 1.0))
    else:
        hamLindbladTerms = None
    if other_nMxs > 0:
        assert(_np.isclose(normfn(other_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
            "The first matrix in 'dmbasis_other' must be the identity"
        if other_mode == "diagonal":
            # Only the i==j Lindblad terms: one generator per basis element.
            otherLindbladTerms = [None] * (other_nMxs - 1) if sparse else \
                _np.empty((other_nMxs - 1, d2, d2), 'complex')
            for i, Lm in enumerate(other_mxs[1:]):  # don't include identity
                otherLindbladTerms[i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
                if normalize:
                    norm = normfn(otherLindbladTerms[i])  # same as norm(term.flat)
                    if not _np.isclose(norm, 0):
                        otherLindbladTerms[i] /= norm  # normalize projector
                        assert(_np.isclose(normfn(otherLindbladTerms[i]), 1.0))
        elif other_mode == "diag_affine":
            # Row 0 holds diagonal-stochastic generators, row 1 affine ones.
            otherLindbladTerms = [[None] * (other_nMxs - 1)] * 2 if sparse else \
                _np.empty((2, other_nMxs - 1, d2, d2), 'complex')
            for i, Lm in enumerate(other_mxs[1:]):  # don't include identity
                otherLindbladTerms[0][i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
                otherLindbladTerms[1][i] = _lt.affine_lindbladian(Lm, sparse)
                if normalize:
                    for k in (0, 1):
                        norm = normfn(otherLindbladTerms[k][i])  # same as norm(term.flat)
                        if not _np.isclose(norm, 0):
                            otherLindbladTerms[k][i] /= norm  # normalize projector
                            assert(_np.isclose(normfn(otherLindbladTerms[k][i]), 1.0))
        else:  # other_mode == "all"
            # Full (d-1)x(d-1) grid of O_ij generators.
            otherLindbladTerms = \
                [[None] * (other_nMxs - 1) for i in range(other_nMxs - 1)] if sparse else \
                _np.empty((other_nMxs - 1, other_nMxs - 1, d2, d2), 'complex')
            for i, Lm in enumerate(other_mxs[1:]):  # don't include identity
                for j, Ln in enumerate(other_mxs[1:]):  # don't include identity
                    #print("DEBUG NONHAM LIND (%d,%d)" % (i,j)) #DEBUG!!!
                    otherLindbladTerms[i][j] = _lt.nonham_lindbladian(Lm, Ln, sparse)
                    if normalize:
                        norm = normfn(otherLindbladTerms[i][j])  # same as norm(term.flat)
                        if not _np.isclose(norm, 0):
                            otherLindbladTerms[i][j] /= norm  # normalize projector
                            assert(_np.isclose(normfn(otherLindbladTerms[i][j]), 1.0))
                    #I don't think this is true in general, but appears to be true for "pp" basis (why?)
                    #if j < i: # check that other[i,j] == other[j,i].C, i.e. other is Hermitian
                    #    assert(_np.isclose(_np.linalg.norm(
                    #                otherLindbladTerms[i][j]-
                    #                otherLindbladTerms[j][i].conjugate()),0))
    else:
        otherLindbladTerms = None
    #Check for orthogonality - otherLindblad terms are *not* orthogonal!
    #N = otherLindbladTerms.shape[0]
    #for i in range(N):
    #    for j in range(N):
    #        v1 = otherLindbladTerms[i,j].flatten()
    #        for k in range(N):
    #            for l in range(N):
    #                if k == i and l == j: continue
    #                v2 = otherLindbladTerms[k,l].flatten()
    #                if not _np.isclose(0, _np.vdot(v1,v2)):
    #                    print("%d,%d <-> %d,%d dot = %g [%g]" % (i,j,k,l,_np.vdot(v1,v2),_np.dot(v1,v2)))
    #                    #print("v1 = ",v1)
    #                    #print("v2 = ",v2)
    #                    #  assert(False)
    #                #assert(_np.isclose(0, _np.vdot(v1,v2)))
    #Check hamiltonian error gens are orthogonal to others
    #N = otherLindbladTerms.shape[0]
    #for i,hlt in enumerate(hamLindbladTerms):
    #    v1 = hlt.flatten()
    #    for j in range(N):
    #        for k in range(N):
    #            v2 = otherLindbladTerms[j,k].flatten()
    #            assert(_np.isclose(0, _np.vdot(v1,v2)))
    return hamLindbladTerms, otherLindbladTerms
def lindblad_errgen_projections(errgen, ham_basis,
                                other_basis, mxBasis="gm",
                                normalize=True, return_generators=False,
                                other_mode="all", sparse=False):
    """
    Compute the projections of a gate error generator onto generators
    for the Lindblad-term errors when expressed in the given
    "projection basis".

    Parameters
    ----------
    errgen: : ndarray
        The error generator matrix to project.
    ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct the Hamiltonian-type lindblad error
        Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt), list of numpy arrays, or a custom basis object.
    other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct the Stochastic-type lindblad error
        Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt), list of numpy arrays, or a custom basis object.
    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source basis. Allowed values are Matrix-unit (std),
        Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).
    normalize : bool, optional
        Whether or not the generators being projected onto are normalized, so
        that numpy.linalg.norm(generator.flat) == 1.0.  Note that the generators
        will still, in general, be non-orthogonal.
    return_generators : bool, optional
        If True, return the error generators projected against along with the
        projection values themseves.
    other_mode : {"diagonal", "diag_affine", "all"}
        Which non-Hamiltonian Lindblad error projections to obtain.
        Allowed values are: `"diagonal"` (only the diagonal Stochastic),
        `"diag_affine"` (diagonal + affine generators), and `"all"`
        (all generators).
    sparse : bool, optional
        Whether to create sparse or dense basis matrices when strings
        are given as `ham_basis` and `other_basis`

    Returns
    -------
    ham_projections : numpy.ndarray
        An array of length d-1, where d is the dimension of the gate,
        giving the projections onto the Hamiltonian-type Lindblad terms.
    other_projections : numpy.ndarray
        An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
        of the gate, for `other_mode` equal to `"all"`, `"diag_affine"`, or
        `"diagonal"`, respectively.  Values give the projections onto the
        non-Hamiltonian-type Lindblad terms.
    ham_generators : numpy.ndarray
        The Hamiltonian-type Lindblad term generators, as would be returned
        from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
        Shape is (d-1,d,d), and `ham_generators[i]` is in the standard basis.
    other_generators : numpy.ndarray
        The Stochastic-type Lindblad term generators, as would be returned
        from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
        Shape is (d-1,d-1,d,d), (2,d-1,d,d), or (d-1,d,d) for `other_mode`
        equal to `"all"`, `"diag_affine"`, or `"diagonal"`, respectively,
        and `other_generators[i]` is in the std basis.
    """
    # Work in the standard basis, flattening the error generator into a
    # column vector so projections become a linear least-squares problem.
    errgen_std = _bt.change_basis(errgen, mxBasis, "std")
    if _sps.issparse(errgen_std):
        errgen_std_flat = errgen_std.tolil().reshape(
            (errgen_std.shape[0] * errgen_std.shape[1], 1)).tocsr()  # b/c lil's are only type that can reshape...
    else:
        errgen_std_flat = errgen_std.flatten()
    errgen_std = None  # ununsed below, and sparse reshape doesn't copy, so mark as None
    d2 = errgen.shape[0]
    d = int(_np.sqrt(d2))
    #nQubits = _np.log2(d)
    #Get a list of the generators in corresspondence with the
    # specified basis elements.
    if isinstance(ham_basis, _Basis):
        hamBasisMxs = ham_basis.elements
    elif isinstance(ham_basis, str):
        hamBasisMxs = _bt.basis_matrices(ham_basis, d2, sparse=sparse)
    else:
        hamBasisMxs = ham_basis
    if isinstance(other_basis, _Basis):
        otherBasisMxs = other_basis.elements
    elif isinstance(other_basis, str):
        otherBasisMxs = _bt.basis_matrices(other_basis, d2, sparse=sparse)
    else:
        otherBasisMxs = other_basis
    hamGens, otherGens = lindblad_error_generators(
        hamBasisMxs, otherBasisMxs, normalize, other_mode)  # in std basis
    if hamBasisMxs is not None:
        bsH = len(hamBasisMxs)  # basis size (not necessarily d2)
    else: bsH = 0
    if otherBasisMxs is not None:
        bsO = len(otherBasisMxs)  # basis size (not necessarily d2)
    else: bsO = 0
    # Re-derive sparseness from the actual basis matrices (string inputs may
    # have produced either form).
    if bsH > 0: sparse = _sps.issparse(hamBasisMxs[0])
    elif bsO > 0: sparse = _sps.issparse(otherBasisMxs[0])
    else: sparse = False  # default?
    assert(_np.isclose(d * d, d2))  # d2 must be a perfect square
    if bsH > 0:
        _assert_shape(hamGens, (bsH - 1, d2, d2), sparse)
    if bsO > 0:
        if other_mode == "diagonal":
            _assert_shape(otherGens, (bsO - 1, d2, d2), sparse)
        elif other_mode == "diag_affine":
            _assert_shape(otherGens, (2, bsO - 1, d2, d2), sparse)
        else:  # other_mode == "all"
            _assert_shape(otherGens, (bsO - 1, bsO - 1, d2, d2), sparse)
    #Perform linear least squares solve to find "projections" onto each otherGens element - defined so that
    # sum_i projection_i * otherGen_i = (errgen_std-ham_errgen) as well as possible.
    #ham_error_gen = _np.einsum('i,ijk', hamProjs, hamGens)
    #other_errgen = errgen_std - ham_error_gen #what's left once hamiltonian errors are projected out
    #Do linear least squares soln to expressing errgen_std as a linear combo
    # of the lindblad generators
    if bsH > 0:
        if not sparse:
            H = hamGens.reshape((bsH - 1, d2**2)).T  # ham generators == columns
            Hdag = H.T.conjugate()
            #Do linear least squares: this is what takes the bulk of the time
            # Solve the normal equations H^dag H x = H^dag b for the projections.
            hamProjs = _np.linalg.solve(_np.dot(Hdag, H), _np.dot(Hdag, errgen_std_flat))
            hamProjs.shape = (hamGens.shape[0],)
        else:
            rows = [hamGen.tolil().reshape((1, d2**2)) for hamGen in hamGens]
            H = _sps.vstack(rows, 'csr').transpose()
            Hdag = H.copy().transpose().conjugate()
            #Do linear least squares: this is what takes the bulk of the time
            if _mt.safenorm(errgen_std_flat) < 1e-8:  # protect against singular RHS
                hamProjs = _np.zeros(bsH - 1, 'd')
            else:
                hamProjs = _spsl.spsolve(Hdag.dot(H), Hdag.dot(errgen_std_flat))
                if _sps.issparse(hamProjs): hamProjs = hamProjs.toarray().flatten()
            hamProjs.shape = (bsH - 1,)
    else:
        hamProjs = None
    if bsO > 0:
        if not sparse:
            # Stack the "other" generators as columns; the column count
            # depends on other_mode.
            if other_mode == "diagonal":
                O = otherGens.reshape((bsO - 1, d2**2)).T  # other generators == columns
            elif other_mode == "diag_affine":
                O = otherGens.reshape((2 * (bsO - 1), d2**2)).T  # other generators == columns
            else:
                O = otherGens.reshape(((bsO - 1)**2, d2**2)).T  # other generators == columns
            Odag = O.T.conjugate()
            #Do linear least squares: this is what takes the bulk of the time
            otherProjs = _np.linalg.solve(_np.dot(Odag, O), _np.dot(Odag, errgen_std_flat))
            if other_mode == "diagonal":
                otherProjs.shape = (otherGens.shape[0],)
            elif other_mode == "diag_affine":
                otherProjs.shape = (2, otherGens.shape[1])
            else:
                otherProjs.shape = (otherGens.shape[0], otherGens.shape[1])
        else:
            if other_mode == "diagonal":
                rows = [oGen.tolil().reshape((1, d2**2)) for oGen in otherGens]
                O = _sps.vstack(rows, 'csr').transpose()  # other generators == columns
            else:  # "diag_affine" or "all"
                rows = [oGen.tolil().reshape((1, d2**2)) for oGenRow in otherGens for oGen in oGenRow]
                O = _sps.vstack(rows, 'csr').transpose()  # other generators == columns
            Odag = O.copy().transpose().conjugate()  # TODO: maybe conjugate copies data?
            #Do linear least squares: this is what takes the bulk of the time
            if _mt.safenorm(errgen_std_flat) < 1e-8:  # protect against singular RHS
                if other_mode == "diagonal": otherProjs = _np.zeros(bsO - 1, 'd')
                elif other_mode == "diag_affine": otherProjs = _np.zeros((2, bsO - 1), 'd')
                else: otherProjs = _np.zeros((bsO - 1, bsO - 1), 'd')
            else:
                otherProjs = _spsl.spsolve(Odag.dot(O), Odag.dot(errgen_std_flat))
                if _sps.issparse(otherProjs): otherProjs = otherProjs.toarray().flatten()
                if other_mode == "diagonal":
                    otherProjs.shape = (bsO - 1,)
                elif other_mode == "diag_affine":
                    otherProjs.shape = (2, bsO - 1)
                else:  # other_mode == "all"
                    otherProjs.shape = (bsO - 1, bsO - 1)
    else:
        otherProjs = None
    #check err gens are linearly independent -- but can take a very long time, so comment out!
    #assert(_np.linalg.matrix_rank(H,1e-7) == H.shape[1])
    #assert(_np.linalg.matrix_rank(O,1e-7) == O.shape[1])
    #if False: # further check against older (slower) version
    #    M = _np.concatenate( (hamGens.reshape((bs-1,d2**2)).T, otherGens.reshape(((bs-1)**2,d2**2)).T), axis=1)
    #    assert(_np.linalg.matrix_rank(M,1e-7) == M.shape[1]) #check err gens are linearly independent
    #    Mdag = M.T.conjugate()
    #    print("DB D: %.1f" % (time.time()-t)); t = time.time()
    #    projs = _np.linalg.solve(_np.dot(Mdag,M), _np.dot(Mdag,errgen_std_flat))
    #    hamProjs_chk = projs[0:(bs-1)]
    #    otherProjs_chk = projs[(bs-1):]
    #    assert(_np.linalg.norm(hamProjs-hamProjs_chk) < 1e-6)
    #    assert(_np.linalg.norm(otherProjs-otherProjs_chk) < 1e-6)
    if return_generators:
        return hamProjs, otherProjs, hamGens, otherGens
    else:
        return hamProjs, otherProjs
def projections_to_lindblad_terms(hamProjs, otherProjs, ham_basis, other_basis,
                                  other_mode="all", return_basis=True):
    """
    Converts the projections of an error generator onto basis elements into
    the Lindblad-term dictionary and basis used to individually specify
    Lindblad terms.

    Parameters
    ----------
    hamProjs : numpy.ndarray
        An array of length d-1, where d is the dimension of the projected error
        generator, giving the projections onto the Hamiltonian-type Lindblad
        terms.
    otherProjs : numpy.ndarray
        An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
        of the projected error generator, for `other_mode` equal to `"all"`,
        `"diag_affine"`, or `"diagonal"`, respectively.  Values give the
        projections onto the non-Hamiltonian-type Lindblad terms.
    ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct `hamProjs`.  Allowed values are Matrix-unit
        (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt), list of
        numpy arrays, or a custom basis object.
    other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct `otherProjs`.  Allowed values are
        Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt),
        list of numpy arrays, or a custom basis object.
    other_mode : {"diagonal", "diag_affine", "all"}
        Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
        Allowed values are: `"diagonal"` (only the diagonal Stochastic),
        `"diag_affine"` (diagonal + affine generators), and `"all"`
        (all generators).
    return_basis : bool, optional
        Whether to return a :class:`Basis` containing the elements
        corresponding to labels within the returned `Ltermdict`.

    Returns
    -------
    Ltermdict : dict
        Keys are `(termType, basisLabel1, <basisLabel2>)`
        tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
        `"A"` (Affine).  Hamiltonian and Affine terms always have a single basis
        label (so key is a 2-tuple) whereas Stochastic tuples have 1 basis label
        to indicate a *diagonal* term and otherwise have 2 basis labels to
        specify off-diagonal non-Hamiltonian Lindblad terms.  Basis labels
        are taken from `ham_basis` and `other_basis`.  Values are complex
        coefficients (the projections).
    basis : Basis
        A single basis containing all the basis labels used in `Ltermdict` (and
        *only* those elements).  Only returned when `return_basis == True`.
    """
    assert(not (ham_basis is None and other_basis is None)), \
        "At least one of `ham_basis` and `other_basis` must be non-None"
    # Make None => length-0 arrays so iteration code works below (when basis is None)
    if hamProjs is None: hamProjs = _np.empty(0, 'd')
    if otherProjs is None:
        otherProjs = _np.empty(0, 'd') if other_mode == "diagonal" \
            else _np.empty((0, 0), 'd')
    # Construct a pair of dictionaries describing all of the
    # Lindblad-terms:
    #   Ltermdict keys= ('H',basisLbl), ('S',basisLbl), or ('S',bLbl1,bLbl2)
    #             vals= coefficients of these terms (projections from errgen)
    #   basisdict keys= basis labels (just has to match Ltermdict keys)
    #             vals= basis matrices - can be either sparse or dense
    Ltermdict = _collections.OrderedDict()
    basisdict = _collections.OrderedDict()
    if return_basis:
        def set_basis_el(blbl, bel):
            """ Sets an elment of basisdict, checking for consistency """
            if blbl in basisdict:
                # Same label must always map to the same matrix.
                assert(_mt.safenorm(basisdict[blbl] - bel) < 1e-8), "Ambiguous basis el label %s" % blbl
            else:
                basisdict[blbl] = bel
    else:
        # No basis requested: recording elements is a no-op.
        def set_basis_el(blbl, bel):
            pass
    #Add Hamiltonian error elements
    if ham_basis is not None:
        ham_lbls = ham_basis.labels
        ham_mxs = ham_basis.elements  # can be sparse
        assert(len(ham_mxs[1:]) == len(hamProjs))
        for coeff, lbl, bmx in zip(hamProjs, ham_lbls[1:], ham_mxs[1:]):  # skip identity
            Ltermdict[('H', lbl)] = coeff
            set_basis_el(lbl, bmx)
    else:
        ham_lbls = []
    #Add "other" error elements
    if other_basis is not None:
        other_lbls = other_basis.labels
        other_mxs = other_basis.elements  # can be sparse
        if other_mode == "diagonal":
            assert(len(other_mxs[1:]) == len(otherProjs))
            for coeff, lbl, bmx in zip(otherProjs, other_lbls[1:], other_mxs[1:]):  # skip identity
                Ltermdict[('S', lbl)] = coeff
                set_basis_el(lbl, bmx)
        elif other_mode == "diag_affine":
            # Row 0 of otherProjs => stochastic ('S'); row 1 => affine ('A').
            assert((2, len(other_mxs[1:])) == otherProjs.shape)
            for coeff, lbl, bmx in zip(otherProjs[0], other_lbls[1:], other_mxs[1:]):  # skip identity
                Ltermdict[('S', lbl)] = coeff
                set_basis_el(lbl, bmx)
            for coeff, lbl, bmx in zip(otherProjs[1], other_lbls[1:], other_mxs[1:]):  # skip identity
                Ltermdict[('A', lbl)] = coeff
                set_basis_el(lbl, bmx)
        else:
            # other_mode == "all": full matrix of 'S' terms keyed by label pairs.
            assert((len(other_mxs[1:]), len(other_mxs[1:])) == otherProjs.shape)
            for i, (lbl1, bmx1) in enumerate(zip(other_lbls[1:], other_mxs[1:])):  # skip identity
                set_basis_el(lbl1, bmx1)
                for j, (lbl2, bmx2) in enumerate(zip(other_lbls[1:], other_mxs[1:])):  # skip identity
                    set_basis_el(lbl2, bmx2)
                    Ltermdict[('S', lbl1, lbl2)] = otherProjs[i, j]
    else:
        other_lbls = []
    #Turn basisdict into a Basis to return
    if return_basis:
        # Reuse an existing basis when one covers all the labels used.
        if ham_basis == other_basis:
            basis = ham_basis
        elif ham_basis is None or set(ham_lbls).issubset(set(other_lbls)):
            basis = other_basis
        elif other_basis is None or set(other_lbls).issubset(set(ham_lbls)):
            basis = ham_basis
        else:
            #Create an ExplictBasis using the matrices in basisdict plus the identity
            sparse = True; real = True
            if ham_basis is not None:
                elshape = ham_basis.elshape
                sparse = sparse and ham_basis.sparse
                real = real and ham_basis.real
            if other_basis is not None:
                elshape = other_basis.elshape
                sparse = sparse and other_basis.sparse
                real = real and other_basis.real
            d = elshape[0]
            Id = _sps.identity(d, 'complex', 'csr') / _np.sqrt(d) if sparse \
                else _np.identity(d, 'complex') / _np.sqrt(d)
            lbls = ['I'] + list(basisdict.keys())
            mxs = [Id] + list(basisdict.values())
            basis = _ExplicitBasis(mxs, lbls, name=None,
                                   real=real, sparse=sparse)
        return Ltermdict, basis
    else:
        return Ltermdict
def lindblad_terms_to_projections(Ltermdict, basis, other_mode="all"):
    """
    Convert a set of Lindblad terms into a dense matrix/grid of projections.
    Essentially the inverse of :function:`projections_to_lindblad_terms`.

    Parameters
    ----------
    Ltermdict : dict
        A dictionary specifying which Linblad terms are present in the gate
        parameteriztion.  Keys are `(termType, basisLabel1, <basisLabel2>)`
        tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
        `"A"` (Affine).  Hamiltonian and Affine terms always have a single basis
        label (so key is a 2-tuple) whereas Stochastic tuples with 1 basis label
        indicate a *diagonal* term, and are the only types of terms allowed when
        `nonham_mode != "all"`.  Otherwise, Stochastic term tuples can include 2
        basis labels to specify "off-diagonal" non-Hamiltonian Lindblad terms.
        Basis labels can be strings or integers.  Values are complex
        coefficients (error rates).
    basis : Basis, optional
        A basis mapping the labels used in the keys of `Ltermdict` to
        basis matrices (e.g. numpy arrays or Scipy sparse matrices).  The
        first element of this basis should be an identity element, and
        will be propagated to the returned `ham_basis` and `other_basis`.
    other_mode : {"diagonal", "diag_affine", "all"}
        Which non-Hamiltonian terms are allowed in `Ltermdict`.
        Allowed values are: `"diagonal"` (only the diagonal Stochastic),
        `"diag_affine"` (diagonal + affine generators), and `"all"`
        (all generators).

    Returns
    -------
    hamProjs : numpy.ndarray
        An array of length `basisdim-1`, giving the projections onto a
        full set of the Hamiltonian-type Lindblad terms (onto each element of
        `ham_basis`).
    otherProjs : numpy.ndarray
        An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d=`basisdim`
        for `other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
        respectively.  Values give the projections onto the non-Hamiltonian
        -type Lindblad terms.
    ham_basis: Basis
        The basis used to construct `hamProjs`.
    other_basis : Basis
        The basis used to construct `otherProjs`.
    hamBasisIndices : OrderedDict
        A dictionary mapping the some or all of the basis labels of `basisdict`
        to the integers 0 to `len(ham_basis)`.  These are indices into
        `hamProjs`, giving the projection associated with each Hamiltonian
        basis element.
    otherBasisIndices : OrderedDict
        A dictionary mapping the some or all of the basis labels of `basisdict`
        to the integers 0 to `len(other_basis)`.  These are row and column
        indices into `otherProjs`, giving the projection associated with each
        pair of "other" basis elements (or single basis element if
        `other_mode!="all"`).
    """
    #Separately enumerate the (distinct) basis elements used for Hamiltonian
    # and non-Hamiltonian error terms
    #print("DB: lindblad term to proj: \n",Ltermdict,"\n",basis)
    hamBasisLabels = []
    otherBasisLabels = []
    # First pass: collect which basis labels appear, validating key shapes.
    for termLbl, coeff in Ltermdict.items():
        if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:])  # e.g. "HXX" => ('H','XX')
        termType = termLbl[0]
        if termType == "H":  # Hamiltonian
            assert(len(termLbl) == 2), "Hamiltonian term labels should have form ('H',<basis element label>)"
            if termLbl[1] not in hamBasisLabels:
                hamBasisLabels.append(termLbl[1])
        elif termType == "S":  # Stochastic
            if other_mode in ("diagonal", "diag_affine"):
                assert(len(termLbl) == 2), "Stochastic term labels should have form ('S',<basis element label>)"
                if termLbl[1] not in otherBasisLabels:
                    otherBasisLabels.append(termLbl[1])
            else:
                assert(len(termLbl) == 3), "Stochastic term labels should have form ('S',<bel1>, <bel2>)"
                if termLbl[1] not in otherBasisLabels:
                    otherBasisLabels.append(termLbl[1])
                if termLbl[2] not in otherBasisLabels:
                    otherBasisLabels.append(termLbl[2])
        elif termType == "A":  # Affine
            assert(other_mode == "diag_affine"), "Affine labels are only allowed in an affine mode"
            assert(len(termLbl) == 2), "Affine term labels should have form ('A',<basis element label>)"
            if termLbl[1] not in otherBasisLabels:
                otherBasisLabels.append(termLbl[1])
    #Construct bases
    # Note: the lists of basis matrices shouldn't contain the identity, since
    # the terms above shouldn't contain identity terms - but `basis` should
    # contain an identity element as it's first element, so add this identity el
    # to non-empty bases (empty bases stay empty!) to be consistent with the
    # rest of the framework (bases *have* Ids)
    sparse = basis.sparse
    if set(hamBasisLabels) == set(basis.labels):
        ham_basis = basis
    else:
        Id = basis[0]
        ham_basis_mxs = [basis[bl] for bl in hamBasisLabels]
        if len(ham_basis_mxs) > 0:
            ham_basis = _ExplicitBasis([Id] + ham_basis_mxs, ['I'] + hamBasisLabels,
                                       name=None, real=True, sparse=sparse)
        else:
            ham_basis = _ExplicitBasis(ham_basis_mxs, name=None, real=True, sparse=sparse)
    if set(otherBasisLabels) == set(basis.labels):
        other_basis = basis
    else:
        Id = basis[0]
        other_basis_mxs = [basis[bl] for bl in otherBasisLabels]
        if len(other_basis_mxs) > 0:
            other_basis = _ExplicitBasis([Id] + other_basis_mxs, ['I'] + otherBasisLabels,
                                         name=None, real=True, sparse=sparse)
        else:
            other_basis = _ExplicitBasis(other_basis_mxs, name=None, real=True, sparse=sparse)
    bsH, bsO = len(ham_basis), len(other_basis)
    #print("DB: constructed ham_basis = ",ham_basis)
    #print("DB: other basis = ",other_basis)
    #Create projection (term coefficient) arrays - or return None if
    # the corresponding basis is empty (as per our convention)
    hamProjs = _np.zeros(bsH - 1, 'complex') if bsH > 0 else None
    if bsO > 0:
        if other_mode == "diagonal":  # OK if this runs for 'auto' too since then len(otherBasisIndices) == 0
            otherProjs = _np.zeros(bsO - 1, 'complex')
        elif other_mode == "diag_affine":
            otherProjs = _np.zeros((2, bsO - 1), 'complex')
        else:
            otherProjs = _np.zeros((bsO - 1, bsO - 1), 'complex')
    else: otherProjs = None
    #Fill arrays
    hamBasisIndices = {lbl: i - 1 for i, lbl in enumerate(ham_basis.labels)}  # -1 to compensate for identity as
    otherBasisIndices = {lbl: i - 1 for i, lbl in enumerate(other_basis.labels)}  # first element (not in projections).
    # Second pass: scatter each coefficient into its slot in the arrays.
    for termLbl, coeff in Ltermdict.items():
        if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:])  # e.g. "HXX" => ('H','XX')
        termType = termLbl[0]
        if termType == "H":  # Hamiltonian
            k = hamBasisIndices[termLbl[1]]  # index of coefficient in array
            hamProjs[k] = coeff
        elif termType == "S":  # Stochastic
            if other_mode == "diagonal":
                k = otherBasisIndices[termLbl[1]]  # index of coefficient in array
                otherProjs[k] = coeff
            elif other_mode == "diag_affine":
                k = otherBasisIndices[termLbl[1]]  # index of coefficient in array
                otherProjs[0, k] = coeff
            else:  # other_mode == "all"
                k = otherBasisIndices[termLbl[1]]  # index of row in "other" coefficient matrix
                j = otherBasisIndices[termLbl[2]]  # index of col in "other" coefficient matrix
                otherProjs[k, j] = coeff
        elif termType == "A":  # Affine
            assert(other_mode == "diag_affine")
            k = otherBasisIndices[termLbl[1]]  # index of coefficient in array
            otherProjs[1, k] = coeff
    return hamProjs, otherProjs, ham_basis, other_basis
def lindblad_projections_to_paramvals(hamProjs, otherProjs, param_mode="cptp",
                                      other_mode="all", truncate=True):
    """
    Construct the array of Lindblad-gate parameter values from the separate
    arrays of Hamiltonian and non-Hamiltonian Lindblad-term projections.
    When `cptp=True`, this function handles parameterizing the projections
    to that for (real) parameter values correspond to projections for a valid
    CPTP gate (e.g. by parameterizing the Cholesky decomposition of `otherProjs`
    instead of otherProjs itself).  This function is closely related to
    implementation details of the LindbladOp class.

    Parameters
    ----------
    hamProjs : numpy.ndarray
        An array of length d-1, where d is the gate dimension, giving the
        projections onto a full set of the Hamiltonian-type Lindblad terms.
    otherProjs : numpy.ndarray
        An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the gate
        dimension, for `other_mode` equal to `"all"`,`"diag_affine"`, or
        `"diagonal"`, respectively.  Values give the projections onto a full
        set of non-Hamiltonian-type Lindblad terms.
    param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
        Describes how values in `hamProjs` and `otherProj` relate to the
        returned parameter values.  Allowed values are:
        `"unconstrained"` (projs are independent unconstrained parameters),
        `"cptp"` (independent parameters but constrained so map is CPTP),
        `"reldepol"` (all non-Ham. diagonal projs take the *same* value),
        `"depol"` (same as `"reldepol"` but projs must be *positive*)
    other_mode : {"diagonal", "diag_affine", "all"}
        Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
        Allowed values are: `"diagonal"` (only the diagonal Stochastic),
        `"diag_affine"` (diagonal + affine generators), and `"all"`.
    truncate : bool, optional
        Whether to truncate the projections onto the Lindblad terms in
        order to meet constraints (e.g. to preserve CPTP) when necessary.
        If False, then an error is thrown when the given projections
        cannot be parameterized as specified.

    Returns
    -------
    numpy.ndarray
        A 1D array of real parameter values consisting of d-1 Hamiltonian
        values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
        values for `other_mode` equal to `"all"`, `"diag_affine"`, or
        `"diagonal"`, respectively.
    """
    # Hamiltonian coefficients must be real; they are passed through directly.
    if hamProjs is not None:
        assert(_np.isclose(_np.linalg.norm(hamProjs.imag), 0)), \
            "Hamiltoian projections (coefficients) are not all real!"
        hamParams = hamProjs.real
    else:
        hamParams = _np.empty(0, 'd')
    if otherProjs is not None:
        if other_mode == "diagonal":
            assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
                "Diagonal stochastic projections (coefficients) are not all real!"
            if param_mode == "depol":  # otherParams is a *single-element* 1D vector of the sqrt of each diagonal el
                assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
                    "Lindblad coefficients are not CPTP (truncate == False)!"
                assert(truncate or all([_np.isclose(v, otherProjs[0]) for v in otherProjs])), \
                    "Diagonal lindblad coefficients are not equal (truncate == False)!"
                otherProj = _np.mean(otherProjs.clip(1e-16, 1e100))
                # NOTE(review): _np.array(scalar,'d') is 0-d, not shape (1,);
                # the .flat in the final concatenate still yields one element.
                otherParams = _np.array(_np.sqrt(_np.real(otherProj)), 'd')  # shape (1,)
            elif param_mode == "cptp":  # otherParams is a 1D vector of the sqrts of diagonal els
                assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
                    "Lindblad coefficients are not CPTP (truncate == False)!"
                # Clip to a tiny positive floor so sqrt stays real.
                otherProjs = otherProjs.clip(1e-16, 1e100)
                otherParams = _np.sqrt(otherProjs.real)  # shape (bsO-1,)
            else:  # "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
                otherParams = otherProjs.real  # shape (bsO-1,)
        elif other_mode == "diag_affine":
            assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
                "Diagonal stochastic and affine projections (coefficients) are not all real!"
            if param_mode == "depol":  # otherParams is a single depol value + unconstrained affine coeffs
                assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
                    "Lindblad coefficients are not CPTP (truncate == False)!"
                assert(truncate or all([_np.isclose(v, otherProjs[0, 0]) for v in otherProjs[0]])), \
                    "Diagonal lindblad coefficients are not equal (truncate == False)!"
                depolProj = _np.mean(otherProjs[0, :].clip(1e-16, 1e100))
                otherParams = _np.concatenate(([_np.sqrt(_np.real(depolProj))],
                                               otherProjs[1].real))  # shape (1+(bsO-1),)
            elif param_mode == "cptp":  # Note: does not constrained affine coeffs to CPTP
                assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
                    "Lindblad coefficients are not CPTP (truncate == False)!"
                diagParams = _np.sqrt(_np.real(otherProjs[0, :]).clip(1e-16, 1e100))  # shape (bsO-1,)
                otherParams = _np.concatenate((diagParams, otherProjs[1].real))  # diag + affine params
            else:  # param_mode == "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
                otherParams = otherProjs.real  # shape (2,bsO-1)
        else:  # other_mode == "all"
            assert(_np.isclose(_np.linalg.norm(otherProjs - otherProjs.T.conjugate()), 0)
                   ), "Other projection/coefficient mx is not Hermitian!"
            assert(param_mode != "depol"), "`depol` is not supported when `other_mode == 'all'`"
            bsO = otherProjs.shape[0] + 1  # +1 to keep convention that this is the basis (w/Identity) size
            otherParams = _np.empty((bsO - 1, bsO - 1), 'd')
            if param_mode == "cptp":  # otherParams mx stores Cholesky decomp
                #push any slightly negative evals of otherProjs positive so that
                # the Cholesky decomp will work.
                evals, U = _np.linalg.eig(otherProjs)
                Ui = _np.linalg.inv(U)
                assert(truncate or all([ev >= -1e-12 for ev in evals])), \
                    "Lindblad coefficients are not CPTP (truncate == False)!"
                pos_evals = evals.clip(1e-16, 1e100)
                otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
                try:
                    Lmx = _np.linalg.cholesky(otherProjs)
                # if Lmx not postitive definite, try again with 1e-12 (same lines as above)
                except _np.linalg.LinAlgError:  # pragma: no cover
                    pos_evals = evals.clip(1e-12, 1e100)  # pragma: no cover
                    otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))  # pragma: no cover
                    Lmx = _np.linalg.cholesky(otherProjs)  # pragma: no cover
                # Pack the complex lower-triangular Cholesky factor into a real
                # matrix: diagonal -> real parts; (i,j)/(j,i) -> real/imag parts.
                for i in range(bsO - 1):
                    assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
                    otherParams[i, i] = Lmx[i, i].real
                    for j in range(i):
                        otherParams[i, j] = Lmx[i, j].real
                        otherParams[j, i] = Lmx[i, j].imag
            else:  # param_mode == "unconstrained": otherParams mx stores otherProjs (hermitian) directly
                for i in range(bsO - 1):
                    assert(_np.linalg.norm(_np.imag(otherProjs[i, i])) < IMAG_TOL)
                    otherParams[i, i] = otherProjs[i, i].real
                    for j in range(i):
                        otherParams[i, j] = otherProjs[i, j].real
                        otherParams[j, i] = otherProjs[i, j].imag
    else:
        otherParams = _np.empty(0, 'd')
    assert(not _np.iscomplexobj(hamParams))  # params should always
    assert(not _np.iscomplexobj(otherParams))  # be *real*
    return _np.concatenate((hamParams, otherParams.flat))
def paramvals_to_lindblad_projections(paramvals, ham_basis_size,
other_basis_size, param_mode="cptp",
other_mode="all", Lmx=None):
"""
Construct the separate arrays of Hamiltonian and non-Hamiltonian
Lindblad-term projections from the array of Lindblad-gate parameter values.
This function essentially performs the inverse of
:function:`lindblad_projections_to_paramvals`.
Parameters
----------
paramvals : numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
values followed by either (d-1)^2 or just d-1 non-Hamiltonian
values (the latter when `other_mode in ('diagonal','diag_affine')`).
ham_basis_size, other_basis_size : int
The number of elements in the Hamiltonian and non-Hamiltonian
bases used to construct `paramvals`. As such, `ham_basis_size`
gives the offset into `paramvals` where the non-Hamiltonian
parameters begin.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Specifies how the Lindblad-term coefficients are mapped to the set of
(real) parameter values. This really just applies to the "other"
(non-Hamiltonian) coefficients. "unconstrained" means that ranging
over the parameter values lets the coefficient-matrix vary over all
matrices, "cptp" restricts this to postitive matrices. "depol"
maps all of the coefficients to the *same, positive* parameter (only
available for "diagonal" and "diag_affine" other-modes), and "reldepol"
does the same thing but without the positivity constraint.
other_mode : {"all", "diagonal", "diag_affine"}
Specifies the structure of the matrix of other (non-Hamiltonian)
coefficients. If d is the gate dimension, "all" means a (d-1,d-1)
matrix is used; "diagonal" means just the (d2-1,) diagonal of this
matrix is used; "diag_affine" means the coefficients are in a (2,d2-1)
array with the diagonal-term coefficients being the first row and the
affine coefficients being the second row.
Lmx : ndarray, optional
Scratch space that is used to store the lower-triangular
Cholesky decomposition matrix that is used to construct
the "other" projections when there is a CPTP constraint.
Returns
-------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1) or (d-1,) or (2,d-1) where d is the gate
dimension, giving the projections onto a full set of non-Hamiltonian
-type Lindblad terms (see `other_mode` above).
"""
bsH = ham_basis_size
bsO = other_basis_size
if Lmx is None:
Lmx = _np.zeros((bsO - 1, bsO - 1), 'complex') if bsO > 0 else None
# self.paramvals = [hamCoeffs] + [otherParams]
# where hamCoeffs are *real* and of length d2-1 (self.dim == d2)
if bsH > 0:
hamCoeffs = paramvals[0:bsH - 1]
nHam = bsH - 1
else:
hamCoeffs = None
nHam = 0
#built up otherCoeffs based on param_mode and nonham_mode
if bsO > 0:
if other_mode == "diagonal":
otherParams = paramvals[nHam:]
expected_shape = (1,) if (param_mode in ("depol", "reldepol")) else (bsO - 1,)
assert(otherParams.shape == expected_shape)
if param_mode in ("depol", "reldepol"):
otherParams = otherParams[0] * _np.ones(bsO - 1, 'd') # replicate single param bsO-1 times
if param_mode in ("cptp", "depol"):
otherCoeffs = otherParams**2 # Analagous to L*L_dagger
else: # "unconstrained"
otherCoeffs = otherParams
elif other_mode == "diag_affine":
if param_mode in ("depol", "reldepol"):
otherParams = paramvals[nHam:].reshape((1 + bsO - 1,))
otherCoeffs = _np.empty((2, bsO - 1), 'd') # leave as real type b/c doesn't have complex entries
if param_mode == "depol":
otherCoeffs[0, :] = otherParams[0]**2
else:
otherCoeffs[0, :] = otherParams[0]
otherCoeffs[1, :] = otherParams[1:]
else:
otherParams = paramvals[nHam:].reshape((2, bsO - 1))
if param_mode == "cptp":
otherCoeffs = otherParams.copy()
otherCoeffs[0, :] = otherParams[0]**2
else: # param_mode == "unconstrained"
#otherCoeffs = _np.empty((2,bsO-1),'complex')
otherCoeffs = otherParams
else: # other_mode == "all"
otherParams = paramvals[nHam:].reshape((bsO - 1, bsO - 1))
if param_mode == "cptp":
# otherParams is an array of length (bs-1)*(bs-1) that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = otherParams[i,i]
# Lmx[i,j] = otherParams[i,j] + 1j*otherParams[j,i] (i > j)
for i in range(bsO - 1):
Lmx[i, i] = otherParams[i, i]
for j in range(i):
Lmx[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
#The matrix of (complex) "other"-coefficients is build by
# assuming Lmx is its Cholesky decomp; means otherCoeffs
# is pos-def.
# NOTE that the Cholesky decomp with all positive real diagonal
# elements is *unique* for a given positive-definite otherCoeffs
# matrix, but we don't care about this uniqueness criteria and so
# the diagonal els of Lmx can be negative and that's fine -
# otherCoeffs will still be posdef.
otherCoeffs = _np.dot(Lmx, Lmx.T.conjugate())
#DEBUG - test for pos-def
#evals = _np.linalg.eigvalsh(otherCoeffs)
#DEBUG_TOL = 1e-16; #print("EVALS DEBUG = ",evals)
#assert(all([ev >= -DEBUG_TOL for ev in evals]))
else: # param_mode == "unconstrained"
#otherParams holds otherCoeff real and imaginary parts directly
otherCoeffs = _np.empty((bsO - 1, bsO - 1), 'complex')
for i in range(bsO - 1):
otherCoeffs[i, i] = otherParams[i, i]
for j in range(i):
otherCoeffs[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
otherCoeffs[j, i] = otherParams[i, j] - 1j * otherParams[j, i]
else:
otherCoeffs = None
return hamCoeffs, otherCoeffs
#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with
# calls to this one and unitary_to_processmx
def rotation_gate_mx(r, mxBasis="gm"):
    """
    Construct a rotation operation matrix.

    The returned matrix corresponds to the unitary
    `exp(-i * (r[0]/2*PP[0]*sqrt(d) + r[1]/2*PP[1]*sqrt(d) + ...) )`
    where `PP` is the array of Pauli-product matrices from
    `pp_matrices(d)` and `d = sqrt(len(r)+1)`.  The division by 2 is
    conventional, and the sqrt(d) factor un-normalizes the `pp_matrices`
    output so the generators equal products of *standard* Pauli matrices.

    Parameters
    ----------
    r : tuple
        Coefficients, one per non-identity Pauli-product basis element.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively: Matrix-unit (std),
        Gell-Mann (gm), Pauli-product (pp), Qutrit (qt), or a custom Basis.

    Returns
    -------
    numpy array
        a d^2 x d^2 operation matrix in the specified basis.
    """
    nAngles = len(r)
    d = int(round(_np.sqrt(nAngles + 1)))
    assert(d**2 == nAngles + 1), "Invalid number of rotation angles"

    # Pauli-product matrices in the std basis; index 0 is the identity,
    # which carries no rotation coefficient.
    pp = _bt.basis_matrices('pp', d**2)
    assert(nAngles == len(pp[1:]))

    # Accumulate the (Hermitian) generator of the rotation.
    scale = _np.sqrt(d)  # hoisted: un-normalization factor
    generator = _np.zeros((d, d), 'complex')
    for coeff, pauli_prod in zip(r, pp[1:]):
        generator = generator + coeff / 2.0 * pauli_prod * scale

    unitary = _spl.expm(-1j * generator)
    return _bt.change_basis(unitary_to_process_mx(unitary), 'std', mxBasis)
def project_model(model, targetModel,
                  projectiontypes=('H', 'S', 'H+S', 'LND'),
                  genType="logG-logT"):
    """
    Construct one or more new models by projecting the error generator of
    `model` onto some sub-space then reconstructing.

    Parameters
    ----------
    model : Model
        The model whose error generator should be projected.

    targetModel : Model
        The set of target (ideal) gates.

    projectiontypes : tuple of {'H','S','H+S','LND','LNDCP'}
        Which projections to use.  The length of this tuple gives the
        number of `Model` objects returned.  Allowed values are:

        - 'H' = Hamiltonian errors
        - 'S' = Stochastic Pauli-channel errors
        - 'H+S' = both of the above error types
        - 'LND' = errgen projected to a normal (CPTP) Lindbladian
        - 'LNDF' = errgen projected to an unrestricted (full) Lindbladian

    genType : {"logG-logT", "logTiG"}
        The type of error generator to compute.  Allowed values are:

        - "logG-logT" : errgen = log(gate) - log(target_op)
        - "logTiG" : errgen = log( dot(inv(target_op), gate) )

    Returns
    -------
    projected_models : list of Models
        Elements are projected versions of `model` corresponding to
        the elements of `projectiontypes`.

    Nps : list of parameter counts
        Integer parameter counts for each model in `projected_models`.
        Useful for computing the expected log-likelihood or chi2.
    """
    opLabels = list(model.operations.keys())  # operation labels
    basis = model.basis

    #The projection basis needs to be a basis for density matrices
    # (i.e. 2x2 mxs in 1Q case) rather than superoperators (4x4 mxs
    # in 1Q case) - which is what model.basis is.  So, we just extract
    # a builtin basis name for the projection basis.
    if basis.name in ('pp', 'gm', 'std', 'qt'):
        proj_basis_name = basis.name
    else:
        proj_basis_name = 'pp'  # model.basis is weird so just use paulis as projection basis

    if basis.name != targetModel.basis.name:
        raise ValueError("Basis mismatch between model (%s) and target (%s)!"
                         % (model.basis.name, targetModel.basis.name))

    # Note: set to "full" parameterization so we can set the gates below
    # regardless of what parameterization the original model had.
    gsDict = {}; NpDict = {}  # per-projection-type output model and parameter count
    for p in projectiontypes:
        gsDict[p] = model.copy()
        gsDict[p].set_all_parameterizations("full")
        NpDict[p] = 0

    # One error generator per operation, computed against the target gate.
    errgens = [error_generator(model.operations[gl],
                               targetModel.operations[gl],
                               targetModel.basis, genType)
               for gl in opLabels]

    for gl, errgen in zip(opLabels, errgens):
        # NOTE: ham_error_gen / sto_error_gen / lnd_error_gen are only bound
        # when the corresponding projection type was requested; the guards
        # below mirror the guards that later *use* these locals.
        if ('H' in projectiontypes) or ('H+S' in projectiontypes):
            hamProj, hamGens = std_errgen_projections(
                errgen, "hamiltonian", proj_basis_name, basis, True)
            #ham_error_gen = _np.einsum('i,ijk', hamProj, hamGens)
            ham_error_gen = _np.tensordot(hamProj, hamGens, (0, 0))
            ham_error_gen = _bt.change_basis(ham_error_gen, "std", basis)

        if ('S' in projectiontypes) or ('H+S' in projectiontypes):
            stoProj, stoGens = std_errgen_projections(
                errgen, "stochastic", proj_basis_name, basis, True)
            #sto_error_gen = _np.einsum('i,ijk', stoProj, stoGens)
            sto_error_gen = _np.tensordot(stoProj, stoGens, (0, 0))
            sto_error_gen = _bt.change_basis(sto_error_gen, "std", basis)

        if ('LND' in projectiontypes) or ('LNDF' in projectiontypes):
            HProj, OProj, HGens, OGens = \
                lindblad_errgen_projections(
                    errgen, proj_basis_name, proj_basis_name, basis, normalize=False,
                    return_generators=True)
            #Note: return values *can* be None if an empty/None basis is given
            #lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \
            #    _np.einsum('ij,ijkl', OProj, OGens)
            lnd_error_gen = _np.tensordot(HProj, HGens, (0, 0)) + \
                _np.tensordot(OProj, OGens, ((0, 1), (0, 1)))
            lnd_error_gen = _bt.change_basis(lnd_error_gen, "std", basis)

        targetOp = targetModel.operations[gl]

        # Rebuild each requested model's gate from the projected error
        # generator, and tally the number of free parameters it uses.
        if 'H' in projectiontypes:
            gsDict['H'].operations[gl] = operation_from_error_generator(
                ham_error_gen, targetOp, genType)
            NpDict['H'] += len(hamProj)

        if 'S' in projectiontypes:
            gsDict['S'].operations[gl] = operation_from_error_generator(
                sto_error_gen, targetOp, genType)
            NpDict['S'] += len(stoProj)

        if 'H+S' in projectiontypes:
            gsDict['H+S'].operations[gl] = operation_from_error_generator(
                ham_error_gen + sto_error_gen, targetOp, genType)
            NpDict['H+S'] += len(hamProj) + len(stoProj)

        if 'LNDF' in projectiontypes:
            gsDict['LNDF'].operations[gl] = operation_from_error_generator(
                lnd_error_gen, targetOp, genType)
            NpDict['LNDF'] += HProj.size + OProj.size

        if 'LND' in projectiontypes:
            # Make the "other" coefficient matrix positive semidefinite
            # (i.e. CP) by clipping its negative eigenvalues to zero.
            evals, U = _np.linalg.eig(OProj)
            pos_evals = evals.clip(0, 1e100)  # clip negative eigenvalues to 0
            OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U)))
            #OProj_cp is now a pos-def matrix
            #lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \
            #    _np.einsum('ij,ijkl', OProj_cp, OGens)
            lnd_error_gen_cp = _np.tensordot(HProj, HGens, (0, 0)) + \
                _np.tensordot(OProj_cp, OGens, ((0, 1), (0, 1)))
            lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis)

            gsDict['LND'].operations[gl] = operation_from_error_generator(
                lnd_error_gen_cp, targetOp, genType)
            NpDict['LND'] += HProj.size + OProj.size

        #Removed attempt to contract H+S to CPTP by removing positive stochastic projections,
        # but this doesn't always return the gate to being CPTP (maybe b/c of normalization)...
        #sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens)
        #  # (only negative stochastic projections OK)
        #sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp)
        #gsHSCP.operations[gl] = _tools.operation_from_error_generator(
        #    ham_error_gen, targetOp, genType) #+sto_error_gen_cp

    #DEBUG!!!
    #print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_evals(model))
    #print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_evals(gsDict['LND']))

    #Check for CPTP where expected
    #assert(_tools.sum_of_negative_choi_evals(gsHSCP) < 1e-6)
    #assert(_tools.sum_of_negative_choi_evals(gsDict['LND']) < 1e-6)

    #Collect and return requested results:
    ret_gs = [gsDict[p] for p in projectiontypes]
    ret_Nps = [NpDict[p] for p in projectiontypes]
    return ret_gs, ret_Nps
def get_a_best_case_gauge_transform(gate_mx, target_gate_mx, returnAll=False):
    """
    Returns a gauge transformation that maps `gate_mx` into a matrix that is
    co-diagonal with `target_gate_mx`, i.e. they share a common set of
    eigenvectors.

    Gauge transformations effectively change the basis of all the gates in a
    model.  From the perspective of a single gate a gauge transformation
    leaves its eigenvalues the same and changes its eigenvectors.  This
    function finds a *real* transformation that transforms the eigenspaces of
    `gate_mx` so that there exists a set of eigenvectors which diagonalize
    both `gate_mx` and `target_gate_mx`.

    Parameters
    ----------
    gate_mx, target_gate_mx : numpy.ndarray
        The gate and target-gate matrices.

    returnAll : bool, optional
        If true, also return the matrices of eigenvectors
        for `Ugate` for gate_mx and `Utgt` for target_gate_mx such
        that `U = dot(Utgt, inv(Ugate))` is real.

    Returns
    -------
    U : numpy.ndarray
        A gauge transformation such that if `epgate = U * gate_mx * U_inv`,
        then `epgate` (which has the same eigenvalues as `gate_mx`), can be
        diagonalized with a set of eigenvectors that also diagonalize
        `target_gate_mx`.  Furthermore, `U` is real.

    Ugate, Utgt : numpy.ndarray
        only if `returnAll == True`.  See above.
    """
    # NOTE(review): this function previously contained a second, older
    # implementation *after* the final return statement (guarded by an
    # `if True:` that always returned) - that unreachable code has been
    # removed; behavior is unchanged.
    #
    # A complication that must be dealt with is that the eigenvalues of
    # `target_gate_mx` can be degenerate, and so matching up eigenvalues
    # can't be done *just* based on value.  Our algorithm consists of two steps:
    # 1) match gate & target eigenvalues based on value, ensuring conjugacy
    #    relationships between eigenvalues are preserved.
    # 2) for each eigenvalue/vector of `gate`, project the eigenvector onto
    #    the eigenspace of `tgt_gate` corresponding to the matched eigenvalue.
    #    (treat conj-pair eigenvalues of `gate` together).

    # We want a matrix that gauge-transforms gate_mx into a matrix as close to
    # target_gate_mx as possible, i.e. that puts gate_mx's eigenvalues in the
    # eigenspaces of target_gate_mx.  This is done by Ubest = dot(Utgt,
    # inv(Uop)), but there are often degrees of freedom in Uop because of its
    # degeneracies.  Also, we want Ubest to be *real*, so we need to ensure
    # the conjugacy structure of Utgt and Uop match.
    assert(_np.linalg.norm(gate_mx.imag) < 1e-8)
    assert(_np.linalg.norm(target_gate_mx.imag) < 1e-8)

    def get_eigenspace_pairs(mx, TOL=1e-6):
        # Group eigenvalues/eigenvectors of `mx` into "eigenspace pairs",
        # keyed by a representative eigenvalue (real, or the positive-imag
        # member of a conjugate pair).
        evals, U = _np.linalg.eig(mx)  # so mx = U * evals * Uinv
        espace_pairs = {}; conj_pair_indices = []

        #Pass 1: real evals and positive-imaginary-element-of-conjugate pair evals
        # (these are the representatives of "eigenspace pairs")
        for i, ev in enumerate(evals):
            if ev.imag < -TOL:
                conj_pair_indices.append(i); continue  # save for pass2

            #see if ev is already in espace_pairs
            for k, v in espace_pairs.items():
                if abs(k - ev) < TOL:
                    espace_pairs[k]['indices'].append(i)
                    espace_pairs[k]['conj_pair_indices'].append(None)
                    break
            else:
                espace_pairs[ev] = {'indices': [i], 'conj_pair_indices': [None]}

        #Pass 2: negative-imaginary-part elements of evals that occur in conjugate pairs
        for i in conj_pair_indices:
            ev_pos = _np.conjugate(evals[i])
            for k, v in espace_pairs.items():  # ev_pos *should* be in espace_pairs
                if abs(k - ev_pos) < TOL:
                    #found the correct eigenspace-pair to add this eval & evec to,
                    # now figure out where to put this index based on conjugacy relationships,
                    # i.e. U[:,esp['indices'][i]] is always conjugate to U[:,esp['conj_pair_indices'][i]]
                    for jj, j in enumerate(espace_pairs[k]['indices']):
                        if espace_pairs[k]['conj_pair_indices'][jj] is None:  # an empty slot
                            espace_pairs[k]['conj_pair_indices'][jj] = i
                            U[:, i] = U[:, j].conj()
                            break
                    else:
                        raise ValueError("Nowhere to place a conjugate eigenvector %d-dim eigenbasis for %s!"
                                         % (len(espace_pairs[k]['indices']), str(k)))
                    break
            else:
                raise ValueError("Expected to find %s as an espace-pair representative in %s"
                                 % (str(ev_pos), str(espace_pairs.keys())))
        return evals, U, espace_pairs

    def standard_diag(mx, TOL=1e-6):
        # "Standard" diagonalization: eigenvalues sorted with conjugate
        # pairs adjacent, real eigenspaces given real eigenvectors.
        evals, U, espairs = get_eigenspace_pairs(mx)
        std_evals = []
        std_evecs = []
        sorted_rep_evals = sorted(list(espairs.keys()), key=lambda x: (x.real, x.imag))
        for ev in sorted_rep_evals:  # iterate in sorted order just for definitiveness
            info = espairs[ev]
            dim = len(info['indices'])  # dimension of this eigenspace (and its pair, if there is one)

            #Ensure real eigenvalue blocks have real eigenvectors
            if abs(ev.imag) < TOL:
                #find linear combinations of the eigenvectors that are real
                Usub = U[:, info['indices']]
                if _np.linalg.norm(Usub.imag) > TOL:
                    # Im part of Usub * combo = Usub.real*combo.imag + Usub.imag*combo.real
                    combo_real_imag = _mt.nullspace(_np.concatenate((Usub.imag, Usub.real), axis=1))
                    combos = combo_real_imag[0:dim, :] + 1j * combo_real_imag[dim:, :]
                    if combos.shape[1] != dim:
                        raise ValueError(("Can only find %d (< %d) *real* linear combinations of"
                                          " vectors in eigenspace for %s!") % (combos.shape[1], dim, str(ev)))
                    U[:, info['indices']] = _np.dot(Usub, combos)
                    assert(_np.linalg.norm(U[:, info['indices']].imag) < TOL)

                #Add real eigenvalues and vectors
                std_evals.extend([ev] * dim)
                std_evecs.extend([U[:, i] for i in info['indices']])
            else:  # complex eigenvalue case - should have conjugate pair info
                #Ensure blocks for conjugate-pairs of eigenvalues follow one after another and
                # corresponding eigenvectors (e.g. the first of each block) are conjugate pairs
                # (this is already done in the eigenspace construction)
                assert(len(info['conj_pair_indices']) == dim)
                std_evals.extend([ev] * dim)
                std_evals.extend([_np.conjugate(ev)] * dim)
                std_evecs.extend([U[:, i] for i in info['indices']])
                std_evecs.extend([U[:, i] for i in info['conj_pair_indices']])
        return _np.array(std_evals), _np.array(std_evecs).T

    #Create "gate_tilde" which has the eigenvectors of gate_mx around the matched eigenvalues
    # of target_gate_mx.  Doing this essentially decouples the problem of eigenvalue matching
    # from the rest of the task - after gate_tilde is created, it and target_gate_mx have
    # exactly the *same* eigenvalues.
    evals_tgt, Utgt = _np.linalg.eig(target_gate_mx)
    evals_gate, Uop = _np.linalg.eig(gate_mx)
    pairs = _mt.minweight_match_realmxeigs(evals_gate, evals_tgt)
    replace_evals = _np.array([evals_tgt[j] for _, j in pairs])
    gate_tilde = _np.dot(Uop, _np.dot(_np.diag(replace_evals), _np.linalg.inv(Uop)))

    #Create "standard diagonalizations" of gate_tilde and target_gate_mx, which
    # sort the eigenvalues and ensure eigenvectors occur in *corresponding* conjugate pairs
    evals_tgt, Utgt = standard_diag(target_gate_mx)
    evals_tilde, Uop = standard_diag(gate_tilde)
    assert(_np.allclose(evals_tgt, evals_tilde))

    #Update Utgt so that Utgt * inv_Uop is close to the identity
    kite = _mt.get_kite(evals_tgt)  # evals are grouped by standard_diag, so this works
    D_prior_to_proj = _np.dot(_np.linalg.inv(Utgt), Uop)
    #print("D prior to projection to ",kite," kite:"); _mt.print_mx(D_prior_to_proj)
    D = _mt.project_onto_kite(D_prior_to_proj, kite)
    start = 0
    for i, k in enumerate(kite):
        slc = slice(start, start + k)
        dstart = start + k
        for kk in kite[i + 1:]:
            if k == kk and _np.isclose(evals_tgt[start], evals_tgt[dstart].conj()):  # conjugate block!
                dslc = slice(dstart, dstart + kk)
                # enforce block conjugacy needed to retain Uproj conjugacy structure
                D[dslc, dslc] = D[slc, slc].conj()
                break
            dstart += kk
        start += k
    Utgt = _np.dot(Utgt, D)  # update Utgt

    Utrans = _np.dot(Utgt, _np.linalg.inv(Uop))
    assert(_np.linalg.norm(_np.imag(Utrans)) < 1e-7)
    Utrans = Utrans.real  # _np.real_if_close(Utrans, tol=1000)

    if returnAll:
        return Utrans, Uop, Utgt, evals_tgt
    else:
        return Utrans
def project_to_target_eigenspace(model, targetModel, EPS=1e-6):
    """
    Project each gate of `model` onto the eigenspace of the corresponding
    gate within `targetModel`, returning the resulting `Model`.

    Parameters
    ----------
    model, targetModel : Model
        The model being projected and the model specifying the "target"
        eigen-spaces, respectively.

    EPS : float, optional
        Small magnitude specifying how much to "nudge" the target gates
        before eigen-decomposing them, so that their spectra will have the
        same conjugacy structure as the gates of `model`.

    Returns
    -------
    Model
    """
    projected = targetModel.copy()
    # "full" parameterization lets us overwrite gate matrices directly below
    projected.set_all_parameterizations("full")

    for lbl, gate in model.operations.items():
        dense_gate = gate.todense()
        # We want to replace the eigenvalues of the target gate (and *only*
        # the eigenvalues) with those of `gate` - which is exactly what a
        # "best gauge transform" accomplishes (by definition).
        transform = get_a_best_case_gauge_transform(
            dense_gate, targetModel.operations[lbl].todense())
        projected.operations[lbl] = _np.dot(
            transform, _np.dot(dense_gate, _np.linalg.inv(transform)))

    return projected
def unitary_to_pauligate(U):
    """
    Get the linear operator on (vectorized) density matrices that
    corresponds to an n-qubit unitary operator acting on states.

    Parameters
    ----------
    U : numpy array
        A dxd array giving the action of the unitary on a state in the
        sigma-z basis, where d = 2 ** n-qubits.

    Returns
    -------
    numpy array
        The operator on density matrices that have been vectorized
        as d**2 vectors in the Pauli basis.
    """
    assert U.shape[0] == U.shape[1], '"Unitary" matrix is not square'
    # lift the unitary to a superoperator (std basis), then convert to Pauli
    std_superop = unitary_to_process_mx(U)
    return _bt.change_basis(std_superop, 'std', 'pp')
def is_valid_lindblad_paramtype(typ):
    """
    Whether `typ` is a recognized Lindblad-gate parameterization type.

    A *Lindblad type* is a parameter specification optionally followed by an
    evolution-type suffix.  The parameter spec can be "GLND" (general
    unconstrained Lindbladian), "CPTP" (cptp-constrained), or any/all of the
    letters "H" (Hamiltonian), "S" (Stochastic, CPTP), "s" (Stochastic),
    "A" (Affine), "D" (Depolarization, CPTP), "d" (Depolarization) joined
    with plus (+) signs.  "H" cannot appear alone, and "A" cannot appear
    without one of {"S","s","D","d"}.  The suffix can be absent
    (density-matrix), "terms" (state-vector terms) or "clifford terms"
    (stabilizer-state terms).  Examples: "H+S", "H+d+A",
    "CPTP clifford terms", "S+A terms".

    Returns
    -------
    bool
    """
    recognized_base_types = ("CPTP", "H+S", "S", "H+S+A", "S+A", "H+D", "D", "H+D+A", "D+A",
                             "GLND", "H+s", "s", "H+s+A", "s+A", "H+d", "d", "H+d+A", "d+A")
    try:
        base_type, _ = split_lindblad_paramtype(typ)
    except ValueError:
        # `typ` couldn't even be split into base + evotype
        return False
    return base_type in recognized_base_types
def split_lindblad_paramtype(typ):
    """
    Splits a Lindblad-gate parameterization type into
    a base-type (e.g. "H+S") and an evolution-type string.

    Parameters
    ----------
    typ : str
        The parameterization type, e.g. "H+S terms".

    Returns
    -------
    base_type : str
        The "base-parameterization" part of `typ`.

    evotype : str
        The evolution type corresponding to `typ`.

    Raises
    ------
    ValueError
        If `typ` is empty/whitespace or has an unrecognized
        evolution-type suffix.
    """
    parts = typ.split()
    if not parts:
        # Previously this raised IndexError (from `typ.split()[0]`), which
        # escaped callers such as is_valid_lindblad_paramtype that only
        # catch ValueError.
        raise ValueError("Cannot split empty parameterization type `typ`=%s" % repr(typ))

    bTyp = parts[0]  # "base" type
    evostr = " ".join(parts[1:])

    # Map the (possibly empty) suffix to its evolution-type name.
    evotype_map = {"": "densitymx", "terms": "svterm", "clifford terms": "cterm"}
    evotype = evotype_map.get(evostr)
    if evotype is None:
        raise ValueError("Unrecognized evotype in `typ`=%s" % typ)
    return bTyp, evotype
def eLabelToOutcome(povm_and_effect_lbl):
    """
    Extract the effect (outcome) label from a combined POVM-and-effect label.

    Maps POVM_ELbl:sslbls -> Elbl; the text after the final underscore of
    the label's name *is* the outcome.  `None` maps to the placeholder
    string "NONE".
    """
    if povm_and_effect_lbl is None:
        return "NONE"  # Dummy label for placeholding
    # _Label objects carry the combined name in .name; plain strings are
    # the combined name themselves.
    name = (povm_and_effect_lbl.name
            if isinstance(povm_and_effect_lbl, _Label)
            else povm_and_effect_lbl)
    return name[name.rindex('_') + 1:]
def eLabelToPOVM(povm_and_effect_lbl):
    """
    Extract the POVM name from a combined POVM-and-effect label.

    Maps POVM_ELbl:sslbls -> POVM; the text before the final underscore of
    the label's name is the POVM name.  `None` maps to the placeholder
    string "NONE".
    """
    if povm_and_effect_lbl is None:
        return "NONE"  # Dummy label for placeholding
    # _Label objects carry the combined name in .name; plain strings are
    # the combined name themselves.
    name = (povm_and_effect_lbl.name
            if isinstance(povm_and_effect_lbl, _Label)
            else povm_and_effect_lbl)
    return name[:name.rindex('_')]
| [
"numpy.sqrt",
"cvxpy.trace",
"numpy.array",
"numpy.isreal",
"numpy.linalg.norm",
"numpy.imag",
"scipy.linalg.logm",
"numpy.diag_indices",
"numpy.conjugate",
"numpy.asarray",
"numpy.tensordot",
"numpy.take",
"numpy.real",
"scipy.linalg.expm",
"numpy.dot",
"numpy.empty",
"numpy.concate... | [((1418, 1440), 'numpy.zeros', '_np.zeros', (['(N, N)', '"""d"""'], {}), "((N, N), 'd')\n", (1427, 1440), True, 'import numpy as _np\n'), ((1701, 1726), 'scipy.linalg.sqrtm', '_spl.sqrtm', (['A'], {'disp': '(False)'}), '(A, disp=False)\n', (1711, 1726), True, 'import scipy.linalg as _spl\n'), ((2617, 2634), 'numpy.linalg.eig', '_np.linalg.eig', (['A'], {}), '(A)\n', (2631, 2634), True, 'import numpy as _np\n'), ((2989, 3006), 'numpy.linalg.eig', '_np.linalg.eig', (['B'], {}), '(B)\n', (3003, 3006), True, 'import numpy as _np\n'), ((11070, 11097), 'numpy.identity', '_np.identity', (['smallDim', '"""d"""'], {}), "(smallDim, 'd')\n", (11082, 11097), True, 'import numpy as _np\n'), ((11822, 11860), 'cvxpy.Problem', '_cvxpy.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (11836, 11860), True, 'import cvxpy as _cvxpy\n'), ((22011, 22031), 'numpy.linalg.eig', '_np.linalg.eig', (['choi'], {}), '(choi)\n', (22025, 22031), True, 'import numpy as _np\n'), ((22123, 22165), 'numpy.argmax', '_np.argmax', (['[ev.real for ev in choi_evals]'], {}), '([ev.real for ev in choi_evals])\n', (22133, 22165), True, 'import numpy as _np\n'), ((25436, 25462), 'numpy.zeros', '_np.zeros', (['(model.dim, nV)'], {}), '((model.dim, nV))\n', (25445, 25462), True, 'import numpy as _np\n'), ((37303, 37321), 'numpy.linalg.eig', '_np.linalg.eig', (['dm'], {}), '(dm)\n', (37317, 37321), True, 'import numpy as _np\n'), ((38930, 38958), 'numpy.empty', '_np.empty', (['(d, d)', '"""complex"""'], {}), "((d, d), 'complex')\n", (38939, 38958), True, 'import numpy as _np\n'), ((41507, 41531), 'numpy.zeros', '_np.zeros', (['(d2, d2)', '"""d"""'], {}), "((d2, d2), 'd')\n", (41516, 41531), True, 'import numpy as _np\n'), ((41893, 41910), 'scipy.linalg.logm', '_spl.logm', (['errgen'], {}), '(errgen)\n', (41902, 41910), True, 'import scipy.linalg as _spl\n'), ((45848, 45864), 'numpy.real', '_np.real', (['errgen'], {}), '(errgen)\n', (45856, 45864), True, 'import numpy 
as _np\n'), ((50248, 50270), 'numpy.isclose', '_np.isclose', (['(d * d)', 'd2'], {}), '(d * d, d2)\n', (50259, 50270), True, 'import numpy as _np\n'), ((54600, 54622), 'numpy.isclose', '_np.isclose', (['(d * d)', 'd2'], {}), '(d * d, d2)\n', (54611, 54622), True, 'import numpy as _np\n'), ((70582, 70607), 'scipy.sparse.issparse', '_sps.issparse', (['errgen_std'], {}), '(errgen_std)\n', (70595, 70607), True, 'import scipy.sparse as _sps\n'), ((72104, 72126), 'numpy.isclose', '_np.isclose', (['(d * d)', 'd2'], {}), '(d * d, d2)\n', (72115, 72126), True, 'import numpy as _np\n'), ((81007, 81033), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (81031, 81033), True, 'import collections as _collections\n'), ((81050, 81076), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (81074, 81076), True, 'import collections as _collections\n'), ((101160, 101206), 'numpy.concatenate', '_np.concatenate', (['(hamParams, otherParams.flat)'], {}), '((hamParams, otherParams.flat))\n', (101175, 101206), True, 'import numpy as _np\n'), ((109526, 109554), 'numpy.zeros', '_np.zeros', (['(d, d)', '"""complex"""'], {}), "((d, d), 'complex')\n", (109535, 109554), True, 'import numpy as _np\n'), ((109647, 109668), 'scipy.linalg.expm', '_spl.expm', (['(-1.0j * ex)'], {}), '(-1.0j * ex)\n', (109656, 109668), True, 'import scipy.linalg as _spl\n'), ((126837, 126867), 'numpy.linalg.eig', '_np.linalg.eig', (['target_gate_mx'], {}), '(target_gate_mx)\n', (126851, 126867), True, 'import numpy as _np\n'), ((126890, 126913), 'numpy.linalg.eig', '_np.linalg.eig', (['gate_mx'], {}), '(gate_mx)\n', (126904, 126913), True, 'import numpy as _np\n'), ((1840, 1855), 'numpy.isnan', '_np.isnan', (['sqrt'], {}), '(sqrt)\n', (1849, 1855), True, 'import numpy as _np\n'), ((1936, 1953), 'numpy.linalg.eig', '_np.linalg.eig', (['A'], {}), '(A)\n', (1950, 1953), True, 'import numpy as _np\n'), ((2780, 2797), 'numpy.argmax', '_np.argmax', (['evals'], {}), '(evals)\n', 
(2790, 2797), True, 'import numpy as _np\n'), ((3153, 3170), 'numpy.argmax', '_np.argmax', (['evals'], {}), '(evals)\n', (3163, 3170), True, 'import numpy as _np\n'), ((3716, 3737), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['A'], {}), '(A)\n', (3734, 3737), True, 'import numpy as _np\n'), ((3746, 3896), 'warnings.warn', '_warnings.warn', (["('sqrtm(A) failure when computing fidelity - beware result. Maybe due to rank defficiency - eigenvalues of A are: %s'\n % evals)"], {}), "(\n 'sqrtm(A) failure when computing fidelity - beware result. Maybe due to rank defficiency - eigenvalues of A are: %s'\n % evals)\n", (3760, 3896), True, 'import warnings as _warnings\n'), ((8489, 8502), 'numpy.sqrt', '_np.sqrt', (['dim'], {}), '(dim)\n', (8497, 8502), True, 'import numpy as _np\n'), ((10304, 10329), 'cvxpy.Variable', '_cvxpy.Variable', (['dim', 'dim'], {}), '(dim, dim)\n', (10319, 10329), True, 'import cvxpy as _cvxpy\n'), ((10352, 10377), 'cvxpy.Variable', '_cvxpy.Variable', (['dim', 'dim'], {}), '(dim, dim)\n', (10367, 10377), True, 'import cvxpy as _cvxpy\n'), ((10404, 10439), 'cvxpy.Variable', '_cvxpy.Variable', (['smallDim', 'smallDim'], {}), '(smallDim, smallDim)\n', (10419, 10439), True, 'import cvxpy as _cvxpy\n'), ((10468, 10503), 'cvxpy.Variable', '_cvxpy.Variable', (['smallDim', 'smallDim'], {}), '(smallDim, smallDim)\n', (10483, 10503), True, 'import cvxpy as _cvxpy\n'), ((10532, 10567), 'cvxpy.Variable', '_cvxpy.Variable', (['smallDim', 'smallDim'], {}), '(smallDim, smallDim)\n', (10547, 10567), True, 'import cvxpy as _cvxpy\n'), ((10596, 10631), 'cvxpy.Variable', '_cvxpy.Variable', (['smallDim', 'smallDim'], {}), '(smallDim, smallDim)\n', (10611, 10631), True, 'import cvxpy as _cvxpy\n'), ((10668, 10701), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(dim, dim)'}), '(shape=(dim, dim))\n', (10683, 10701), True, 'import cvxpy as _cvxpy\n'), ((10724, 10757), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(dim, dim)'}), '(shape=(dim, dim))\n', 
(10739, 10757), True, 'import cvxpy as _cvxpy\n'), ((10784, 10827), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(smallDim, smallDim)'}), '(shape=(smallDim, smallDim))\n', (10799, 10827), True, 'import cvxpy as _cvxpy\n'), ((10856, 10899), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(smallDim, smallDim)'}), '(shape=(smallDim, smallDim))\n', (10871, 10899), True, 'import cvxpy as _cvxpy\n'), ((10928, 10971), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(smallDim, smallDim)'}), '(shape=(smallDim, smallDim))\n', (10943, 10971), True, 'import cvxpy as _cvxpy\n'), ((11000, 11043), 'cvxpy.Variable', '_cvxpy.Variable', ([], {'shape': '(smallDim, smallDim)'}), '(shape=(smallDim, smallDim))\n', (11015, 11043), True, 'import cvxpy as _cvxpy\n'), ((11131, 11162), 'cvxpy.trace', '_cvxpy.trace', (['(K.T * Y + L.T * Z)'], {}), '(K.T * Y + L.T * Z)\n', (11143, 11162), True, 'import cvxpy as _cvxpy\n'), ((20014, 20029), 'numpy.sum', '_np.sum', (['I_list'], {}), '(I_list)\n', (20021, 20029), True, 'import numpy as _np\n'), ((21165, 21185), 'numpy.identity', '_np.identity', (['d', '"""d"""'], {}), "(d, 'd')\n", (21177, 21185), True, 'import numpy as _np\n'), ((22746, 22761), 'numpy.isnan', '_np.isnan', (['maxF'], {}), '(maxF)\n', (22755, 22761), True, 'import numpy as _np\n'), ((25318, 25354), 'numpy.concatenate', '_np.concatenate', (['povmVectors'], {'axis': '(1)'}), '(povmVectors, axis=1)\n', (25333, 25354), True, 'import numpy as _np\n'), ((25761, 25798), 'numpy.dot', '_np.dot', (['Sk_embedding_in_std', 'povm_mx'], {}), '(Sk_embedding_in_std, povm_mx)\n', (25768, 25798), True, 'import numpy as _np\n'), ((28686, 28710), 'numpy.asarray', '_np.asarray', (['operationMx'], {}), '(operationMx)\n', (28697, 28710), True, 'import numpy as _np\n'), ((30769, 30799), 'numpy.log2', '_np.log2', (['operationMx.shape[0]'], {}), '(operationMx.shape[0])\n', (30777, 30799), True, 'import numpy as _np\n'), ((36610, 36630), 'numpy.conjugate', '_np.conjugate', (['psi.T'], {}), 
'(psi.T)\n', (36623, 36630), True, 'import numpy as _np\n'), ((37612, 37630), 'numpy.sqrt', '_np.sqrt', (['evals[k]'], {}), '(evals[k])\n', (37620, 37630), True, 'import numpy as _np\n'), ((38290, 38306), 'numpy.conjugate', '_np.conjugate', (['U'], {}), '(U)\n', (38303, 38306), True, 'import numpy as _np\n'), ((39005, 39027), 'numpy.zeros', '_np.zeros', (['(d, d)', '"""d"""'], {}), "((d, d), 'd')\n", (39014, 39027), True, 'import numpy as _np\n'), ((41649, 41668), 'numpy.isclose', '_np.isclose', (['b', '(0.0)'], {}), '(b, 0.0)\n', (41660, 41668), True, 'import numpy as _np\n'), ((41852, 41872), 'numpy.diag_indices', '_np.diag_indices', (['d2'], {}), '(d2)\n', (41868, 41872), True, 'import numpy as _np\n'), ((47751, 47763), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (47759, 47763), True, 'import numpy as _np\n'), ((50030, 50042), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (50038, 50042), True, 'import numpy as _np\n'), ((50922, 50958), 'numpy.linalg.norm', '_np.linalg.norm', (['lindbladMxs[i].flat'], {}), '(lindbladMxs[i].flat)\n', (50937, 50958), True, 'import numpy as _np\n'), ((54218, 54230), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (54226, 54230), True, 'import numpy as _np\n'), ((61733, 61758), 'scipy.sparse.issparse', '_sps.issparse', (['ham_mxs[0]'], {}), '(ham_mxs[0])\n', (61746, 61758), True, 'import scipy.sparse as _sps\n'), ((70962, 70974), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (70970, 70974), True, 'import numpy as _np\n'), ((71966, 71995), 'scipy.sparse.issparse', '_sps.issparse', (['hamBasisMxs[0]'], {}), '(hamBasisMxs[0])\n', (71979, 71995), True, 'import scipy.sparse as _sps\n'), ((80455, 80472), 'numpy.empty', '_np.empty', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (80464, 80472), True, 'import numpy as _np\n'), ((90881, 90910), 'numpy.zeros', '_np.zeros', (['(bsH - 1)', '"""complex"""'], {}), "(bsH - 1, 'complex')\n", (90890, 90910), True, 'import numpy as _np\n'), ((95601, 95618), 'numpy.empty', '_np.empty', 
(['(0)', '"""d"""'], {}), "(0, 'd')\n", (95610, 95618), True, 'import numpy as _np\n'), ((101002, 101019), 'numpy.empty', '_np.empty', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (101011, 101019), True, 'import numpy as _np\n'), ((101036, 101063), 'numpy.iscomplexobj', '_np.iscomplexobj', (['hamParams'], {}), '(hamParams)\n', (101052, 101063), True, 'import numpy as _np\n'), ((101105, 101134), 'numpy.iscomplexobj', '_np.iscomplexobj', (['otherParams'], {}), '(otherParams)\n', (101121, 101134), True, 'import numpy as _np\n'), ((119300, 119329), 'numpy.linalg.norm', '_np.linalg.norm', (['gate_mx.imag'], {}), '(gate_mx.imag)\n', (119315, 119329), True, 'import numpy as _np\n'), ((119349, 119385), 'numpy.linalg.norm', '_np.linalg.norm', (['target_gate_mx.imag'], {}), '(target_gate_mx.imag)\n', (119364, 119385), True, 'import numpy as _np\n'), ((124810, 124840), 'numpy.linalg.eig', '_np.linalg.eig', (['target_gate_mx'], {}), '(target_gate_mx)\n', (124824, 124840), True, 'import numpy as _np\n'), ((124867, 124890), 'numpy.linalg.eig', '_np.linalg.eig', (['gate_mx'], {}), '(gate_mx)\n', (124881, 124890), True, 'import numpy as _np\n'), ((124985, 125028), 'numpy.array', '_np.array', (['[evals_tgt[j] for _, j in pairs]'], {}), '([evals_tgt[j] for _, j in pairs])\n', (124994, 125028), True, 'import numpy as _np\n'), ((125523, 125559), 'numpy.allclose', '_np.allclose', (['evals_tgt', 'evals_tilde'], {}), '(evals_tgt, evals_tilde)\n', (125535, 125559), True, 'import numpy as _np\n'), ((126493, 126509), 'numpy.dot', '_np.dot', (['Utgt', 'D'], {}), '(Utgt, D)\n', (126500, 126509), True, 'import numpy as _np\n'), ((128289, 128307), 'numpy.dot', '_np.dot', (['E', 'coeffs'], {}), '(E, coeffs)\n', (128296, 128307), True, 'import numpy as _np\n'), ((129735, 129754), 'numpy.linalg.inv', '_np.linalg.inv', (['Uop'], {}), '(Uop)\n', (129749, 129754), True, 'import numpy as _np\n'), ((131233, 131255), 'numpy.linalg.inv', '_np.linalg.inv', (['Ugauge'], {}), '(Ugauge)\n', (131247, 131255), 
True, 'import numpy as _np\n'), ((5642, 5677), 'numpy.linalg.svd', '_np.linalg.svd', (['A'], {'compute_uv': '(False)'}), '(A, compute_uv=False)\n', (5656, 5677), True, 'import numpy as _np\n'), ((11496, 11538), 'cvxpy.bmat', '_cvxpy.bmat', (['[[sig0, -tau0], [tau0, sig0]]'], {}), '([[sig0, -tau0], [tau0, sig0]])\n', (11507, 11538), True, 'import cvxpy as _cvxpy\n'), ((11574, 11616), 'cvxpy.bmat', '_cvxpy.bmat', (['[[sig1, -tau1], [tau1, sig1]]'], {}), '([[sig1, -tau1], [tau1, sig1]])\n', (11585, 11616), True, 'import cvxpy as _cvxpy\n'), ((11750, 11768), 'cvxpy.trace', '_cvxpy.trace', (['sig0'], {}), '(sig0)\n', (11762, 11768), True, 'import cvxpy as _cvxpy\n'), ((11784, 11802), 'cvxpy.trace', '_cvxpy.trace', (['sig1'], {}), '(sig1)\n', (11796, 11802), True, 'import cvxpy as _cvxpy\n'), ((12196, 12268), 'warnings.warn', '_warnings.warn', (['"""CVXOPT failed (uknown err) - diamonddist returning -2!"""'], {}), "('CVXOPT failed (uknown err) - diamonddist returning -2!')\n", (12210, 12268), True, 'import warnings as _warnings\n'), ((14222, 14247), 'numpy.isclose', '_np.isclose', (['x[0, 0]', '(1.0)'], {}), '(x[0, 0], 1.0)\n', (14233, 14247), True, 'import numpy as _np\n'), ((14354, 14375), 'numpy.identity', '_np.identity', (['d2', '"""d"""'], {}), "(d2, 'd')\n", (14366, 14375), True, 'import numpy as _np\n'), ((15615, 15635), 'numpy.sqrt', '_np.sqrt', (['A.shape[0]'], {}), '(A.shape[0])\n', (15623, 15635), True, 'import numpy as _np\n'), ((21053, 21073), 'numpy.sqrt', '_np.sqrt', (['A.shape[0]'], {}), '(A.shape[0])\n', (21061, 21073), True, 'import numpy as _np\n'), ((22569, 22594), 'numpy.conjugate', '_np.conjugate', (['closestVec'], {}), '(closestVec)\n', (22582, 22594), True, 'import numpy as _np\n'), ((32555, 32600), 'numpy.take', '_np.take', (['op_evecs', 'unit_eval_indices'], {'axis': '(1)'}), '(op_evecs, unit_eval_indices, axis=1)\n', (32563, 32600), True, 'import numpy as _np\n'), ((32617, 32653), 'numpy.array', '_np.array', (['[[1], [0], [0], [0]]', 
'"""d"""'], {}), "([[1], [0], [0], [0]], 'd')\n", (32626, 32653), True, 'import numpy as _np\n'), ((32777, 32790), 'numpy.dot', '_np.dot', (['A', 'x'], {}), '(A, x)\n', (32784, 32790), True, 'import numpy as _np\n'), ((37238, 37250), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (37246, 37250), True, 'import numpy as _np\n'), ((38907, 38919), 'numpy.sqrt', '_np.sqrt', (['d2'], {}), '(d2)\n', (38915, 38919), True, 'import numpy as _np\n'), ((39209, 39231), 'numpy.zeros', '_np.zeros', (['(d, d)', '"""d"""'], {}), "((d, d), 'd')\n", (39218, 39231), True, 'import numpy as _np\n'), ((39397, 39414), 'numpy.dot', '_np.dot', (['UijU', 'Uj'], {}), '(UijU, Uj)\n', (39404, 39414), True, 'import numpy as _np\n'), ((39883, 39903), 'numpy.linalg.eig', '_np.linalg.eig', (['UiiU'], {}), '(UiiU)\n', (39897, 39903), True, 'import numpy as _np\n'), ((40070, 40089), 'numpy.linalg.norm', '_np.linalg.norm', (['Ui'], {}), '(Ui)\n', (40085, 40089), True, 'import numpy as _np\n'), ((41685, 41702), 'numpy.isclose', '_np.isclose', (['a', 'b'], {}), '(a, b)\n', (41696, 41702), True, 'import numpy as _np\n'), ((43526, 43547), 'numpy.real', '_np.real', (['(logG - logT)'], {}), '(logG - logT)\n', (43534, 43547), True, 'import numpy as _np\n'), ((43969, 43988), 'scipy.linalg.inv', '_spl.inv', (['target_op'], {}), '(target_op)\n', (43977, 43988), True, 'import scipy.linalg as _spl\n'), ((45621, 45637), 'numpy.imag', '_np.imag', (['errgen'], {}), '(errgen)\n', (45629, 45637), True, 'import numpy as _np\n'), ((50974, 50994), 'numpy.isclose', '_np.isclose', (['norm', '(0)'], {}), '(norm, 0)\n', (50985, 50994), True, 'import numpy as _np\n'), ((55686, 55702), 'numpy.isreal', '_np.isreal', (['proj'], {}), '(proj)\n', (55696, 55702), True, 'import numpy as _np\n'), ((61835, 61862), 'scipy.sparse.issparse', '_sps.issparse', (['other_mxs[0]'], {}), '(other_mxs[0])\n', (61848, 61862), True, 'import scipy.sparse as _sps\n'), ((62046, 62074), 'scipy.sparse.identity', '_sps.identity', (['d', '"""d"""', 
'"""csr"""'], {}), "(d, 'd', 'csr')\n", (62059, 62074), True, 'import scipy.sparse as _sps\n'), ((62514, 62558), 'numpy.empty', '_np.empty', (['(ham_nMxs - 1, d2, d2)', '"""complex"""'], {}), "((ham_nMxs - 1, d2, d2), 'complex')\n", (62523, 62558), True, 'import numpy as _np\n'), ((72023, 72054), 'scipy.sparse.issparse', '_sps.issparse', (['otherBasisMxs[0]'], {}), '(otherBasisMxs[0])\n', (72036, 72054), True, 'import scipy.sparse as _sps\n'), ((80521, 80538), 'numpy.empty', '_np.empty', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (80530, 80538), True, 'import numpy as _np\n'), ((80586, 80608), 'numpy.empty', '_np.empty', (['(0, 0)', '"""d"""'], {}), "((0, 0), 'd')\n", (80595, 80608), True, 'import numpy as _np\n'), ((91083, 91112), 'numpy.zeros', '_np.zeros', (['(bsO - 1)', '"""complex"""'], {}), "(bsO - 1, 'complex')\n", (91092, 91112), True, 'import numpy as _np\n'), ((95428, 95458), 'numpy.linalg.norm', '_np.linalg.norm', (['hamProjs.imag'], {}), '(hamProjs.imag)\n', (95443, 95458), True, 'import numpy as _np\n'), ((104151, 104191), 'numpy.zeros', '_np.zeros', (['(bsO - 1, bsO - 1)', '"""complex"""'], {}), "((bsO - 1, bsO - 1), 'complex')\n", (104160, 104191), True, 'import numpy as _np\n'), ((109627, 109638), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (109635, 109638), True, 'import numpy as _np\n'), ((112853, 112892), 'numpy.tensordot', '_np.tensordot', (['hamProj', 'hamGens', '(0, 0)'], {}), '(hamProj, hamGens, (0, 0))\n', (112866, 112892), True, 'import numpy as _np\n'), ((113253, 113292), 'numpy.tensordot', '_np.tensordot', (['stoProj', 'stoGens', '(0, 0)'], {}), '(stoProj, stoGens, (0, 0))\n', (113266, 113292), True, 'import numpy as _np\n'), ((115032, 115053), 'numpy.linalg.eig', '_np.linalg.eig', (['OProj'], {}), '(OProj)\n', (115046, 115053), True, 'import numpy as _np\n'), ((119526, 119544), 'numpy.linalg.eig', '_np.linalg.eig', (['mx'], {}), '(mx)\n', (119540, 119544), True, 'import numpy as _np\n'), ((125757, 125777), 'numpy.linalg.inv', 
'_np.linalg.inv', (['Utgt'], {}), '(Utgt)\n', (125771, 125777), True, 'import numpy as _np\n'), ((126557, 126576), 'numpy.linalg.inv', '_np.linalg.inv', (['Uop'], {}), '(Uop)\n', (126571, 126576), True, 'import numpy as _np\n'), ((127252, 127281), 'numpy.isclose', '_np.isclose', (['ev', 'evals_tgt[j]'], {}), '(ev, evals_tgt[j])\n', (127263, 127281), True, 'import numpy as _np\n'), ((128131, 128155), 'numpy.array', '_np.array', (['eigenspace[i]'], {}), '(eigenspace[i])\n', (128140, 128155), True, 'import numpy as _np\n'), ((129783, 129798), 'numpy.imag', '_np.imag', (['Ubest'], {}), '(Ubest)\n', (129791, 129798), True, 'import numpy as _np\n'), ((131290, 131318), 'numpy.dot', '_np.dot', (['gate_mx', 'Ugauge_inv'], {}), '(gate_mx, Ugauge_inv)\n', (131297, 131318), True, 'import numpy as _np\n'), ((2012, 2029), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (2026, 2029), True, 'import numpy as _np\n'), ((3665, 3686), 'numpy.dot', '_np.dot', (['sqrtA', 'sqrtA'], {}), '(sqrtA, sqrtA)\n', (3672, 3686), True, 'import numpy as _np\n'), ((5413, 5431), 'numpy.conjugate', '_np.conjugate', (['A.T'], {}), '(A.T)\n', (5426, 5431), True, 'import numpy as _np\n'), ((5526, 5547), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['A'], {}), '(A)\n', (5544, 5547), True, 'import numpy as _np\n'), ((24848, 24866), 'numpy.sqrt', '_np.sqrt', (['comp.dim'], {}), '(comp.dim)\n', (24856, 24866), True, 'import numpy as _np\n'), ((32735, 32750), 'numpy.dot', '_np.dot', (['A.T', 'b'], {}), '(A.T, b)\n', (32742, 32750), True, 'import numpy as _np\n'), ((32919, 32929), 'numpy.abs', '_np.abs', (['x'], {}), '(x)\n', (32926, 32929), True, 'import numpy as _np\n'), ((33952, 33988), 'numpy.array', '_np.array', (['[[1], [0], [0], [0]]', '"""d"""'], {}), "([[1], [0], [0], [0]], 'd')\n", (33961, 33988), True, 'import numpy as _np\n'), ((34687, 34736), 'numpy.argmax', '_np.argmax', (['[op_evals[i] for i in indsToConsider]'], {}), '([op_evals[i] for i in indsToConsider])\n', (34697, 34736), 
True, 'import numpy as _np\n'), ((35886, 35917), 'numpy.angle', '_np.angle', (['op_evals[iConjPair1]'], {}), '(op_evals[iConjPair1])\n', (35895, 35917), True, 'import numpy as _np\n'), ((39078, 39112), 'numpy.dot', '_np.dot', (['superop', 'densitymx_i.flat'], {}), '(superop, densitymx_i.flat)\n', (39085, 39112), True, 'import numpy as _np\n'), ((39938, 39952), 'numpy.abs', '_np.abs', (['evals'], {}), '(evals)\n', (39945, 39952), True, 'import numpy as _np\n'), ((44421, 44449), 'numpy.linalg.norm', '_np.linalg.norm', (['errgen.imag'], {}), '(errgen.imag)\n', (44436, 44449), True, 'import numpy as _np\n'), ((44469, 44545), 'warnings.warn', '_warnings.warn', (['"""Falling back to approximate log for logTiG error generator"""'], {}), "('Falling back to approximate log for logTiG error generator')\n", (44483, 44545), True, 'import warnings as _warnings\n'), ((44760, 44779), 'scipy.linalg.inv', '_spl.inv', (['target_op'], {}), '(target_op)\n', (44768, 44779), True, 'import scipy.linalg as _spl\n'), ((46668, 46688), 'scipy.linalg.logm', '_spl.logm', (['target_op'], {}), '(target_op)\n', (46677, 46688), True, 'import scipy.linalg as _spl\n'), ((46750, 46770), 'scipy.linalg.expm', '_spl.expm', (['error_gen'], {}), '(error_gen)\n', (46759, 46770), True, 'import scipy.linalg as _spl\n'), ((47838, 47849), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (47846, 47849), True, 'import numpy as _np\n'), ((51085, 51121), 'numpy.linalg.norm', '_np.linalg.norm', (['lindbladMxs[i].flat'], {}), '(lindbladMxs[i].flat)\n', (51100, 51121), True, 'import numpy as _np\n'), ((63368, 63414), 'numpy.empty', '_np.empty', (['(other_nMxs - 1, d2, d2)', '"""complex"""'], {}), "((other_nMxs - 1, d2, d2), 'complex')\n", (63377, 63414), True, 'import numpy as _np\n'), ((73312, 73328), 'numpy.dot', '_np.dot', (['Hdag', 'H'], {}), '(Hdag, H)\n', (73319, 73328), True, 'import numpy as _np\n'), ((73330, 73360), 'numpy.dot', '_np.dot', (['Hdag', 'errgen_std_flat'], {}), '(Hdag, errgen_std_flat)\n', 
(73337, 73360), True, 'import numpy as _np\n'), ((73799, 73822), 'numpy.zeros', '_np.zeros', (['(bsH - 1)', '"""d"""'], {}), "(bsH - 1, 'd')\n", (73808, 73822), True, 'import numpy as _np\n'), ((73941, 73964), 'scipy.sparse.issparse', '_sps.issparse', (['hamProjs'], {}), '(hamProjs)\n', (73954, 73964), True, 'import scipy.sparse as _sps\n'), ((74659, 74675), 'numpy.dot', '_np.dot', (['Odag', 'O'], {}), '(Odag, O)\n', (74666, 74675), True, 'import numpy as _np\n'), ((74677, 74707), 'numpy.dot', '_np.dot', (['Odag', 'errgen_std_flat'], {}), '(Odag, errgen_std_flat)\n', (74684, 74707), True, 'import numpy as _np\n'), ((76084, 76109), 'scipy.sparse.issparse', '_sps.issparse', (['otherProjs'], {}), '(otherProjs)\n', (76097, 76109), True, 'import scipy.sparse as _sps\n'), ((91180, 91214), 'numpy.zeros', '_np.zeros', (['(2, bsO - 1)', '"""complex"""'], {}), "((2, bsO - 1), 'complex')\n", (91189, 91214), True, 'import numpy as _np\n'), ((91254, 91294), 'numpy.zeros', '_np.zeros', (['(bsO - 1, bsO - 1)', '"""complex"""'], {}), "((bsO - 1, bsO - 1), 'complex')\n", (91263, 91294), True, 'import numpy as _np\n'), ((99011, 99045), 'numpy.empty', '_np.empty', (['(bsO - 1, bsO - 1)', '"""d"""'], {}), "((bsO - 1, bsO - 1), 'd')\n", (99020, 99045), True, 'import numpy as _np\n'), ((113895, 113930), 'numpy.tensordot', '_np.tensordot', (['HProj', 'HGens', '(0, 0)'], {}), '(HProj, HGens, (0, 0))\n', (113908, 113930), True, 'import numpy as _np\n'), ((113951, 113996), 'numpy.tensordot', '_np.tensordot', (['OProj', 'OGens', '((0, 1), (0, 1))'], {}), '(OProj, OGens, ((0, 1), (0, 1)))\n', (113964, 113996), True, 'import numpy as _np\n'), ((115434, 115469), 'numpy.tensordot', '_np.tensordot', (['HProj', 'HGens', '(0, 0)'], {}), '(HProj, HGens, (0, 0))\n', (115447, 115469), True, 'import numpy as _np\n'), ((115490, 115538), 'numpy.tensordot', '_np.tensordot', (['OProj_cp', 'OGens', '((0, 1), (0, 1))'], {}), '(OProj_cp, OGens, ((0, 1), (0, 1)))\n', (115503, 115538), True, 'import numpy as 
_np\n'), ((120581, 120604), 'numpy.conjugate', '_np.conjugate', (['evals[i]'], {}), '(evals[i])\n', (120594, 120604), True, 'import numpy as _np\n'), ((124418, 124438), 'numpy.array', '_np.array', (['std_evals'], {}), '(std_evals)\n', (124427, 124438), True, 'import numpy as _np\n'), ((125071, 125094), 'numpy.diag', '_np.diag', (['replace_evals'], {}), '(replace_evals)\n', (125079, 125094), True, 'import numpy as _np\n'), ((125096, 125115), 'numpy.linalg.inv', '_np.linalg.inv', (['Uop'], {}), '(Uop)\n', (125110, 125115), True, 'import numpy as _np\n'), ((126609, 126625), 'numpy.imag', '_np.imag', (['Utrans'], {}), '(Utrans)\n', (126617, 126625), True, 'import numpy as _np\n'), ((128690, 128716), 'numpy.conjugate', '_np.conjugate', (['evectors[j]'], {}), '(evectors[j])\n', (128703, 128716), True, 'import numpy as _np\n'), ((129161, 129180), 'numpy.diag', '_np.diag', (['evals_tgt'], {}), '(evals_tgt)\n', (129169, 129180), True, 'import numpy as _np\n'), ((129182, 129203), 'numpy.linalg.inv', '_np.linalg.inv', (['Uproj'], {}), '(Uproj)\n', (129196, 129203), True, 'import numpy as _np\n'), ((1997, 2009), 'numpy.sqrt', '_np.sqrt', (['ev'], {}), '(ev)\n', (2005, 2009), True, 'import numpy as _np\n'), ((2908, 2923), 'numpy.dot', '_np.dot', (['B', 'vec'], {}), '(B, vec)\n', (2915, 2923), True, 'import numpy as _np\n'), ((3281, 3296), 'numpy.dot', '_np.dot', (['A', 'vec'], {}), '(A, vec)\n', (3288, 3296), True, 'import numpy as _np\n'), ((7159, 7188), 'cvxpy.__version__.split', '_cvxpy.__version__.split', (['"""."""'], {}), "('.')\n", (7183, 7188), True, 'import cvxpy as _cvxpy\n'), ((12133, 12154), 'numpy.zeros', '_np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (12142, 12154), True, 'import numpy as _np\n'), ((12289, 12310), 'numpy.zeros', '_np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (12298, 12310), True, 'import numpy as _np\n'), ((14266, 14289), 'numpy.isclose', '_np.isclose', (['x[0, i]', '(0)'], {}), '(x[0, i], 0)\n', (14277, 14289), True, 'import numpy as 
_np\n'), ((21383, 21404), 'numpy.transpose', '_np.transpose', (['unital'], {}), '(unital)\n', (21396, 21404), True, 'import numpy as _np\n'), ((24948, 24967), 'numpy.sqrt', '_np.sqrt', (['model.dim'], {}), '(model.dim)\n', (24956, 24967), True, 'import numpy as _np\n'), ((32717, 32732), 'numpy.dot', '_np.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (32724, 32732), True, 'import numpy as _np\n'), ((33572, 33603), 'numpy.linalg.norm', '_np.linalg.norm', (['op_evecs[:, i]'], {}), '(op_evecs[:, i])\n', (33587, 33603), True, 'import numpy as _np\n'), ((35158, 35190), 'numpy.conjugate', '_np.conjugate', (['op_evals[inds[1]]'], {}), '(op_evals[inds[1]])\n', (35171, 35190), True, 'import numpy as _np\n'), ((39287, 39322), 'numpy.dot', '_np.dot', (['superop', 'densitymx_ij.flat'], {}), '(superop, densitymx_ij.flat)\n', (39294, 39322), True, 'import numpy as _np\n'), ((43429, 43443), 'numpy.imag', '_np.imag', (['logG'], {}), '(logG)\n', (43437, 43443), True, 'import numpy as _np\n'), ((43484, 43498), 'numpy.imag', '_np.imag', (['logT'], {}), '(logT)\n', (43492, 43498), True, 'import numpy as _np\n'), ((44052, 44080), 'numpy.dot', '_np.dot', (['target_op_inv', 'gate'], {}), '(target_op_inv, gate)\n', (44059, 44080), True, 'import numpy as _np\n'), ((44182, 44301), 'warnings.warn', '_warnings.warn', (['"""Near-identity matrix log failed; falling back to approximate log for logTiG error generator"""'], {}), "(\n 'Near-identity matrix log failed; falling back to approximate log for logTiG error generator'\n )\n", (44196, 44301), True, 'import warnings as _warnings\n'), ((44594, 44622), 'numpy.dot', '_np.dot', (['target_op_inv', 'gate'], {}), '(target_op_inv, gate)\n', (44601, 44622), True, 'import numpy as _np\n'), ((44672, 44698), 'numpy.zeros', '_np.zeros', (['gate.shape', '"""d"""'], {}), "(gate.shape, 'd')\n", (44681, 44698), True, 'import numpy as _np\n'), ((45231, 45259), 'numpy.linalg.norm', '_np.linalg.norm', (['errgen.imag'], {}), '(errgen.imag)\n', (45246, 45259), True, 
'import numpy as _np\n'), ((45279, 45355), 'warnings.warn', '_warnings.warn', (['"""Falling back to approximate log for logGTi error generator"""'], {}), "('Falling back to approximate log for logGTi error generator')\n", (45293, 45355), True, 'import warnings as _warnings\n'), ((46821, 46841), 'scipy.linalg.expm', '_spl.expm', (['error_gen'], {}), '(error_gen)\n', (46830, 46841), True, 'import scipy.linalg as _spl\n'), ((62849, 62869), 'numpy.isclose', '_np.isclose', (['norm', '(0)'], {}), '(norm, 0)\n', (62860, 62869), True, 'import numpy as _np\n'), ((64032, 64081), 'numpy.empty', '_np.empty', (['(2, other_nMxs - 1, d2, d2)', '"""complex"""'], {}), "((2, other_nMxs - 1, d2, d2), 'complex')\n", (64041, 64081), True, 'import numpy as _np\n'), ((64882, 64944), 'numpy.empty', '_np.empty', (['(other_nMxs - 1, other_nMxs - 1, d2, d2)', '"""complex"""'], {}), "((other_nMxs - 1, other_nMxs - 1, d2, d2), 'complex')\n", (64891, 64944), True, 'import numpy as _np\n'), ((73519, 73543), 'scipy.sparse.vstack', '_sps.vstack', (['rows', '"""csr"""'], {}), "(rows, 'csr')\n", (73530, 73543), True, 'import scipy.sparse as _sps\n'), ((75778, 75801), 'numpy.zeros', '_np.zeros', (['(bsO - 1)', '"""d"""'], {}), "(bsO - 1, 'd')\n", (75787, 75801), True, 'import numpy as _np\n'), ((95735, 95755), 'numpy.imag', '_np.imag', (['otherProjs'], {}), '(otherProjs)\n', (95743, 95755), True, 'import numpy as _np\n'), ((96808, 96833), 'numpy.sqrt', '_np.sqrt', (['otherProjs.real'], {}), '(otherProjs.real)\n', (96816, 96833), True, 'import numpy as _np\n'), ((99283, 99309), 'numpy.linalg.eig', '_np.linalg.eig', (['otherProjs'], {}), '(otherProjs)\n', (99297, 99309), True, 'import numpy as _np\n'), ((99331, 99348), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (99345, 99348), True, 'import numpy as _np\n'), ((104872, 104894), 'numpy.ones', '_np.ones', (['(bsO - 1)', '"""d"""'], {}), "(bsO - 1, 'd')\n", (104880, 104894), True, 'import numpy as _np\n'), ((105330, 105358), 'numpy.empty', 
'_np.empty', (['(2, bsO - 1)', '"""d"""'], {}), "((2, bsO - 1), 'd')\n", (105339, 105358), True, 'import numpy as _np\n'), ((107673, 107713), 'numpy.empty', '_np.empty', (['(bsO - 1, bsO - 1)', '"""complex"""'], {}), "((bsO - 1, bsO - 1), 'complex')\n", (107682, 107713), True, 'import numpy as _np\n'), ((115175, 115194), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (115183, 115194), True, 'import numpy as _np\n'), ((115196, 115213), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (115210, 115213), True, 'import numpy as _np\n'), ((124440, 124460), 'numpy.array', '_np.array', (['std_evecs'], {}), '(std_evecs)\n', (124449, 124460), True, 'import numpy as _np\n'), ((128230, 128246), 'numpy.dot', '_np.dot', (['Edag', 'E'], {}), '(Edag, E)\n', (128237, 128246), True, 'import numpy as _np\n'), ((128506, 128535), 'numpy.conjugate', '_np.conjugate', (['evals_gate[j2]'], {}), '(evals_gate[j2])\n', (128519, 128535), True, 'import numpy as _np\n'), ((2887, 2905), 'numpy.transpose', '_np.transpose', (['vec'], {}), '(vec)\n', (2900, 2905), True, 'import numpy as _np\n'), ((3260, 3278), 'numpy.transpose', '_np.transpose', (['vec'], {}), '(vec)\n', (3273, 3278), True, 'import numpy as _np\n'), ((3962, 3979), 'numpy.dot', '_np.dot', (['B', 'sqrtA'], {}), '(B, sqrtA)\n', (3969, 3979), True, 'import numpy as _np\n'), ((11206, 11230), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'sig0'], {}), '(ident, sig0)\n', (11217, 11230), True, 'import cvxpy as _cvxpy\n'), ((11281, 11305), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'sig1'], {}), '(ident, sig1)\n', (11292, 11305), True, 'import cvxpy as _cvxpy\n'), ((11349, 11373), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'tau0'], {}), '(ident, tau0)\n', (11360, 11373), True, 'import cvxpy as _cvxpy\n'), ((11378, 11402), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'sig0'], {}), '(ident, sig0)\n', (11389, 11402), True, 'import cvxpy as _cvxpy\n'), ((11423, 11447), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'tau1'], {}), '(ident, 
tau1)\n', (11434, 11447), True, 'import cvxpy as _cvxpy\n'), ((11454, 11478), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'sig1'], {}), '(ident, sig1)\n', (11465, 11478), True, 'import cvxpy as _cvxpy\n'), ((29428, 29446), 'numpy.conjugate', '_np.conjugate', (['ev2'], {}), '(ev2)\n', (29441, 29446), True, 'import numpy as _np\n'), ((31404, 31451), 'numpy.imag', '_np.imag', (['op_evals[conjpair_eval_indices[0][0]]'], {}), '(op_evals[conjpair_eval_indices[0][0]])\n', (31412, 31451), True, 'import numpy as _np\n'), ((31484, 31531), 'numpy.imag', '_np.imag', (['op_evals[conjpair_eval_indices[1][0]]'], {}), '(op_evals[conjpair_eval_indices[1][0]])\n', (31492, 31531), True, 'import numpy as _np\n'), ((31579, 31616), 'numpy.real', '_np.real', (['conjpair_eval_indices[0][0]'], {}), '(conjpair_eval_indices[0][0])\n', (31587, 31616), True, 'import numpy as _np\n'), ((31618, 31655), 'numpy.real', '_np.real', (['conjpair_eval_indices[1][0]'], {}), '(conjpair_eval_indices[1][0])\n', (31626, 31655), True, 'import numpy as _np\n'), ((31679, 31726), 'numpy.imag', '_np.imag', (['op_evals[conjpair_eval_indices[0][0]]'], {}), '(op_evals[conjpair_eval_indices[0][0]])\n', (31687, 31726), True, 'import numpy as _np\n'), ((44366, 44394), 'numpy.dot', '_np.dot', (['target_op_inv', 'gate'], {}), '(target_op_inv, gate)\n', (44373, 44394), True, 'import numpy as _np\n'), ((44843, 44871), 'numpy.dot', '_np.dot', (['gate', 'target_op_inv'], {}), '(gate, target_op_inv)\n', (44850, 44871), True, 'import numpy as _np\n'), ((45404, 45432), 'numpy.dot', '_np.dot', (['gate', 'target_op_inv'], {}), '(gate, target_op_inv)\n', (45411, 45432), True, 'import numpy as _np\n'), ((45482, 45508), 'numpy.zeros', '_np.zeros', (['gate.shape', '"""d"""'], {}), "(gate.shape, 'd')\n", (45491, 45508), True, 'import numpy as _np\n'), ((62344, 62355), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (62352, 62355), True, 'import numpy as _np\n'), ((63147, 63158), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (63155, 
63158), True, 'import numpy as _np\n'), ((63712, 63732), 'numpy.isclose', '_np.isclose', (['norm', '(0)'], {}), '(norm, 0)\n', (63723, 63732), True, 'import numpy as _np\n'), ((75163, 75187), 'scipy.sparse.vstack', '_sps.vstack', (['rows', '"""csr"""'], {}), "(rows, 'csr')\n", (75174, 75187), True, 'import scipy.sparse as _sps\n'), ((75398, 75422), 'scipy.sparse.vstack', '_sps.vstack', (['rows', '"""csr"""'], {}), "(rows, 'csr')\n", (75409, 75422), True, 'import scipy.sparse as _sps\n'), ((75865, 75893), 'numpy.zeros', '_np.zeros', (['(2, bsO - 1)', '"""d"""'], {}), "((2, bsO - 1), 'd')\n", (75874, 75893), True, 'import numpy as _np\n'), ((75929, 75963), 'numpy.zeros', '_np.zeros', (['(bsO - 1, bsO - 1)', '"""d"""'], {}), "((bsO - 1, bsO - 1), 'd')\n", (75938, 75963), True, 'import numpy as _np\n'), ((96423, 96442), 'numpy.real', '_np.real', (['otherProj'], {}), '(otherProj)\n', (96431, 96442), True, 'import numpy as _np\n'), ((97110, 97130), 'numpy.imag', '_np.imag', (['otherProjs'], {}), '(otherProjs)\n', (97118, 97130), True, 'import numpy as _np\n'), ((98319, 98368), 'numpy.concatenate', '_np.concatenate', (['(diagParams, otherProjs[1].real)'], {}), '((diagParams, otherProjs[1].real))\n', (98334, 98368), True, 'import numpy as _np\n'), ((99678, 99709), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['otherProjs'], {}), '(otherProjs)\n', (99697, 99709), True, 'import numpy as _np\n'), ((122781, 122807), 'numpy.linalg.norm', '_np.linalg.norm', (['Usub.imag'], {}), '(Usub.imag)\n', (122796, 122807), True, 'import numpy as _np\n'), ((123423, 123444), 'numpy.dot', '_np.dot', (['Usub', 'combos'], {}), '(Usub, combos)\n', (123430, 123444), True, 'import numpy as _np\n'), ((11236, 11260), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'tau0'], {}), '(ident, tau0)\n', (11247, 11260), True, 'import cvxpy as _cvxpy\n'), ((11313, 11337), 'cvxpy.kron', '_cvxpy.kron', (['ident', 'tau1'], {}), '(ident, tau1)\n', (11324, 11337), True, 'import cvxpy as _cvxpy\n'), ((31769, 31816), 
'numpy.imag', '_np.imag', (['op_evals[conjpair_eval_indices[1][0]]'], {}), '(op_evals[conjpair_eval_indices[1][0]])\n', (31777, 31816), True, 'import numpy as _np\n'), ((33241, 33277), 'numpy.vdot', '_np.vdot', (['fixedPtVec', 'op_evecs[:, i]'], {}), '(fixedPtVec, op_evecs[:, i])\n', (33249, 33277), True, 'import numpy as _np\n'), ((34065, 34100), 'numpy.linalg.norm', '_np.linalg.norm', (['(op_evecs[i] - idmx)'], {}), '(op_evecs[i] - idmx)\n', (34080, 34100), True, 'import numpy as _np\n'), ((45176, 45204), 'numpy.dot', '_np.dot', (['gate', 'target_op_inv'], {}), '(gate, target_op_inv)\n', (45183, 45204), True, 'import numpy as _np\n'), ((84189, 84223), 'scipy.sparse.identity', '_sps.identity', (['d', '"""complex"""', '"""csr"""'], {}), "(d, 'complex', 'csr')\n", (84202, 84223), True, 'import scipy.sparse as _sps\n'), ((84226, 84237), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (84234, 84237), True, 'import numpy as _np\n'), ((84271, 84297), 'numpy.identity', '_np.identity', (['d', '"""complex"""'], {}), "(d, 'complex')\n", (84283, 84297), True, 'import numpy as _np\n'), ((84300, 84311), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (84308, 84311), True, 'import numpy as _np\n'), ((96162, 96191), 'numpy.isclose', '_np.isclose', (['v', 'otherProjs[0]'], {}), '(v, otherProjs[0])\n', (96173, 96191), True, 'import numpy as _np\n'), ((99605, 99624), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (99613, 99624), True, 'import numpy as _np\n'), ((100108, 100139), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['otherProjs'], {}), '(otherProjs)\n', (100127, 100139), True, 'import numpy as _np\n'), ((122967, 123014), 'numpy.concatenate', '_np.concatenate', (['(Usub.imag, Usub.real)'], {'axis': '(1)'}), '((Usub.imag, Usub.real), axis=1)\n', (122982, 123014), True, 'import numpy as _np\n'), ((123476, 123519), 'numpy.linalg.norm', '_np.linalg.norm', (["U[:, info['indices']].imag"], {}), "(U[:, info['indices']].imag)\n", (123491, 123519), True, 'import 
numpy as _np\n'), ((33476, 33516), 'numpy.vdot', '_np.vdot', (['op_evecs[:, j]', 'op_evecs[:, i]'], {}), '(op_evecs[:, j], op_evecs[:, i])\n', (33484, 33516), True, 'import numpy as _np\n'), ((64508, 64528), 'numpy.isclose', '_np.isclose', (['norm', '(0)'], {}), '(norm, 0)\n', (64519, 64528), True, 'import numpy as _np\n'), ((65420, 65440), 'numpy.isclose', '_np.isclose', (['norm', '(0)'], {}), '(norm, 0)\n', (65431, 65440), True, 'import numpy as _np\n'), ((97541, 97573), 'numpy.isclose', '_np.isclose', (['v', 'otherProjs[0, 0]'], {}), '(v, otherProjs[0, 0])\n', (97552, 97573), True, 'import numpy as _np\n'), ((100261, 100280), 'numpy.imag', '_np.imag', (['Lmx[i, i]'], {}), '(Lmx[i, i])\n', (100269, 100280), True, 'import numpy as _np\n'), ((100697, 100723), 'numpy.imag', '_np.imag', (['otherProjs[i, i]'], {}), '(otherProjs[i, i])\n', (100705, 100723), True, 'import numpy as _np\n'), ((124216, 124233), 'numpy.conjugate', '_np.conjugate', (['ev'], {}), '(ev)\n', (124229, 124233), True, 'import numpy as _np\n'), ((97822, 97841), 'numpy.real', '_np.real', (['depolProj'], {}), '(depolProj)\n', (97830, 97841), True, 'import numpy as _np\n'), ((98224, 98250), 'numpy.real', '_np.real', (['otherProjs[0, :]'], {}), '(otherProjs[0, :])\n', (98232, 98250), True, 'import numpy as _np\n'), ((100036, 100055), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (100044, 100055), True, 'import numpy as _np\n')] |
import numpy
import scipy.stats
import scipy.optimize
have_sklearn = False
# noinspection PyBroadException
try:
import sklearn.linear_model
have_sklearn = True
except Exception:
pass
# methods to avoid calling statsmodels which seems to be incompatible with many
# versions of other packages we need:
# https://github.com/WinVector/pyvtreat/issues/14
def our_corr_score(*, y_true, y_pred):
    """Pearson correlation between y_true and y_pred.

    Returns a (correlation, significance) pair. Degenerate inputs are
    handled explicitly: fewer than two observations or a constant y_true
    yield (1, 1); a constant y_pred yields (0, 1). With fewer than three
    observations the significance is forced to 1.
    """
    # asarray is a no-op on arrays, so conversion can be unconditional.
    y_true = numpy.asarray(y_true)
    y_pred = numpy.asarray(y_pred)
    n = len(y_true)
    # Too few points, or a constant target: correlation is trivial.
    if n < 2 or numpy.min(y_true) >= numpy.max(y_true):
        return 1, 1
    # Constant predictions carry no information about the target.
    if numpy.min(y_pred) >= numpy.max(y_pred):
        return 0, 1
    r, sig = scipy.stats.pearsonr(y_true, y_pred)
    # With only two points the fit is trivially perfect; don't claim significance.
    if n < 3:
        sig = 1
    return r, sig
def est_deviance(*, y, est, epsilon=1.0e-5):
    """Binomial deviance of probability estimates ``est`` against 0/1 labels ``y``.

    Estimates are clipped into [epsilon, 1 - epsilon] so the logarithms
    below stay finite even for estimates of exactly 0 or 1.

    :param y: 0/1 outcomes (array-like)
    :param est: predicted probabilities of the positive class (array-like)
    :param epsilon: clipping margin away from 0 and 1
    :return: the deviance, -2 * sum(y*log(est) + (1-y)*log(1-est))
    """
    if not isinstance(y, numpy.ndarray):
        y = numpy.asarray(y)
    if not isinstance(est, numpy.ndarray):
        # Bug fix: the converted array was previously assigned to an
        # unused local ("x"), silently discarding the conversion.
        est = numpy.asarray(est)
    est = numpy.minimum(est, 1 - epsilon)
    est = numpy.maximum(est, epsilon)
    deviance = -2 * numpy.sum(
        y * numpy.log(est) +
        (1 - y) * numpy.log(1 - est))
    return deviance
# assumes special cases of solve_logistic_regression already eliminated
def brute_force_solve_logistic(*, y, x, regularization=1.e-6):
    """Fit a one-variable logistic regression by direct BFGS minimization.

    Minimizes the deviance plus an L2 penalty on the two coefficients and
    returns the fitted probabilities. Assumes the degenerate cases have
    already been screened out by solve_logistic_regression().
    """
    y = y if isinstance(y, numpy.ndarray) else numpy.asarray(y)
    x = x if isinstance(x, numpy.ndarray) else numpy.asarray(x)

    def sigmoid_preds(coefs):
        # Standard logistic link: intercept + slope * x.
        return 1 / (1 + numpy.exp(-(coefs[0] + coefs[1] * x)))

    def penalized_deviance(coefs):
        return (
            est_deviance(y=y, est=sigmoid_preds(coefs))
            + regularization * (coefs[0] * coefs[0] + coefs[1] * coefs[1])
        )

    # Start the intercept at the log-odds of the base rate, slope near zero.
    base_rate = numpy.mean(y)
    start = [numpy.log(base_rate / (1 - base_rate)), 0.001]
    solution = scipy.optimize.fmin_bfgs(
        penalized_deviance,
        x0=start,
        gtol=1.0e-3,
        disp=0)
    return sigmoid_preds(solution)
# assumes special cases of solve_logistic_regression already eliminated
def sklearn_solve_logistic(*, y, x, regularization=1.e-6):
    """Fit a one-variable logistic regression with scikit-learn.

    Returns the fitted probabilities of the positive class. Assumes the
    degenerate cases have already been screened out by
    solve_logistic_regression().
    """
    y = y if isinstance(y, numpy.ndarray) else numpy.asarray(y)
    x = x if isinstance(x, numpy.ndarray) else numpy.asarray(x)
    # sklearn's C is the inverse of the regularization strength.
    model = sklearn.linear_model.LogisticRegression(
        penalty='l2',
        solver='lbfgs',
        fit_intercept=True,
        C=1/regularization)
    features = x.reshape((len(y), 1))
    model.fit(X=features, y=y)
    return model.predict_proba(X=features)[:, 1]
# x, y - numpy numeric vectors, y 0/1. solve for y- return predictions
def solve_logistic_regression(*, y, x):
    """Regress the 0/1 outcomes y on the single numeric predictor x.

    Returns the vector of fitted probabilities. Degenerate inputs are
    handled without fitting: a constant or too-short y is returned as-is,
    a constant x yields the base rate everywhere, and perfectly separable
    data yields exact 0/1 predictions. Otherwise a full logistic
    regression is run (scikit-learn when available, brute force BFGS
    otherwise).
    """
    y = y if isinstance(y, numpy.ndarray) else numpy.asarray(y)
    x = x if isinstance(x, numpy.ndarray) else numpy.asarray(x)
    n = len(y)
    # Constant or too-short outcome: nothing to fit.
    if n < 2 or numpy.min(y) >= numpy.max(y):
        return y.copy()
    # Constant predictor: best estimate is the base rate.
    if numpy.min(x) >= numpy.max(x):
        return numpy.asarray([numpy.mean(y)] * n)
    # Perfectly separable predictor: predict the classes exactly.
    positives = y > 0
    x_pos = x[positives]
    x_neg = x[numpy.logical_not(positives)]
    if (min(x_pos) > max(x_neg)) or (max(x_pos) < min(x_neg)):
        preds = numpy.zeros(n)
        preds[positives] = 1
        return preds
    # General case: fit a real logistic regression.
    if have_sklearn:
        fitted = sklearn_solve_logistic(y=y, x=x)
    else:
        fitted = brute_force_solve_logistic(y=y, x=x)
    return numpy.asarray(fitted)
# noinspection PyPep8Naming
def our_pseudo_R2(*, y_true, y_pred):
    """McFadden-style pseudo R^2 of y_pred as a predictor of 0/1 y_true.

    Returns (r2, significance). Degenerate cases mirror our_corr_score:
    fewer than two observations or a constant y_true yield (1, 1), and a
    constant y_pred yields (0, 1). The significance comes from a
    chi-squared test on the deviance reduction (only when n >= 3).
    """
    y_true = numpy.asarray(y_true)
    y_pred = numpy.asarray(y_pred)
    n = len(y_true)
    if n < 2 or numpy.min(y_true) >= numpy.max(y_true):
        return 1, 1
    if numpy.min(y_pred) >= numpy.max(y_pred):
        return 0, 1
    fitted = solve_logistic_regression(y=y_true, x=y_pred)
    model_deviance = est_deviance(y=y_true, est=fitted)
    # The null model predicts the base rate everywhere.
    null_deviance = est_deviance(y=y_true, est=numpy.zeros(n) + numpy.mean(y_true))
    r2 = 1 - model_deviance / null_deviance
    sig = 1
    if n >= 3:
        # Chi-squared test on the deviance reduction, as in:
        # https://github.com/WinVector/sigr/blob/master/R/ChiSqTest.R
        delta_deviance = null_deviance - model_deviance
        # df_null = n - 1, df_residual = n - 2, so one degree of freedom.
        delta_df = (n - 1) - (n - 2)
        sig = 1 - scipy.stats.chi2.cdf(x=delta_deviance, df=delta_df)
    return r2, sig
| [
"numpy.mean",
"numpy.minimum",
"numpy.logical_not",
"numpy.asarray",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.zeros",
"numpy.min",
"numpy.maximum"
] | [((1101, 1132), 'numpy.minimum', 'numpy.minimum', (['est', '(1 - epsilon)'], {}), '(est, 1 - epsilon)\n', (1114, 1132), False, 'import numpy\n'), ((1143, 1170), 'numpy.maximum', 'numpy.maximum', (['est', 'epsilon'], {}), '(est, epsilon)\n', (1156, 1170), False, 'import numpy\n'), ((3531, 3551), 'numpy.asarray', 'numpy.asarray', (['preds'], {}), '(preds)\n', (3544, 3551), False, 'import numpy\n'), ((507, 528), 'numpy.asarray', 'numpy.asarray', (['y_true'], {}), '(y_true)\n', (520, 528), False, 'import numpy\n'), ((592, 613), 'numpy.asarray', 'numpy.asarray', (['y_pred'], {}), '(y_pred)\n', (605, 613), False, 'import numpy\n'), ((675, 692), 'numpy.min', 'numpy.min', (['y_true'], {}), '(y_true)\n', (684, 692), False, 'import numpy\n'), ((696, 713), 'numpy.max', 'numpy.max', (['y_true'], {}), '(y_true)\n', (705, 713), False, 'import numpy\n'), ((742, 759), 'numpy.min', 'numpy.min', (['y_pred'], {}), '(y_pred)\n', (751, 759), False, 'import numpy\n'), ((763, 780), 'numpy.max', 'numpy.max', (['y_pred'], {}), '(y_pred)\n', (772, 780), False, 'import numpy\n'), ((1000, 1016), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (1013, 1016), False, 'import numpy\n'), ((1072, 1090), 'numpy.asarray', 'numpy.asarray', (['est'], {}), '(est)\n', (1085, 1090), False, 'import numpy\n'), ((1479, 1495), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (1492, 1495), False, 'import numpy\n'), ((1549, 1565), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (1562, 1565), False, 'import numpy\n'), ((2226, 2242), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (2239, 2242), False, 'import numpy\n'), ((2296, 2312), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (2309, 2312), False, 'import numpy\n'), ((2822, 2838), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (2835, 2838), False, 'import numpy\n'), ((2892, 2908), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (2905, 2908), False, 'import numpy\n'), ((3005, 3017), 'numpy.min', 
'numpy.min', (['x'], {}), '(x)\n', (3014, 3017), False, 'import numpy\n'), ((3021, 3033), 'numpy.max', 'numpy.max', (['x'], {}), '(x)\n', (3030, 3033), False, 'import numpy\n'), ((3188, 3220), 'numpy.logical_not', 'numpy.logical_not', (['big_y_indices'], {}), '(big_y_indices)\n', (3205, 3220), False, 'import numpy\n'), ((3289, 3303), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (3300, 3303), False, 'import numpy\n'), ((3683, 3704), 'numpy.asarray', 'numpy.asarray', (['y_true'], {}), '(y_true)\n', (3696, 3704), False, 'import numpy\n'), ((3768, 3789), 'numpy.asarray', 'numpy.asarray', (['y_pred'], {}), '(y_pred)\n', (3781, 3789), False, 'import numpy\n'), ((3851, 3868), 'numpy.min', 'numpy.min', (['y_true'], {}), '(y_true)\n', (3860, 3868), False, 'import numpy\n'), ((3872, 3889), 'numpy.max', 'numpy.max', (['y_true'], {}), '(y_true)\n', (3881, 3889), False, 'import numpy\n'), ((3918, 3935), 'numpy.min', 'numpy.min', (['y_pred'], {}), '(y_pred)\n', (3927, 3935), False, 'import numpy\n'), ((3939, 3956), 'numpy.max', 'numpy.max', (['y_pred'], {}), '(y_pred)\n', (3948, 3956), False, 'import numpy\n'), ((2943, 2955), 'numpy.min', 'numpy.min', (['y'], {}), '(y)\n', (2952, 2955), False, 'import numpy\n'), ((2959, 2971), 'numpy.max', 'numpy.max', (['y'], {}), '(y)\n', (2968, 2971), False, 'import numpy\n'), ((1614, 1649), 'numpy.exp', 'numpy.exp', (['(-(beta[0] + beta[1] * x))'], {}), '(-(beta[0] + beta[1] * x))\n', (1623, 1649), False, 'import numpy\n'), ((4132, 4146), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (4143, 4146), False, 'import numpy\n'), ((4149, 4167), 'numpy.mean', 'numpy.mean', (['y_true'], {}), '(y_true)\n', (4159, 4167), False, 'import numpy\n'), ((1214, 1228), 'numpy.log', 'numpy.log', (['est'], {}), '(est)\n', (1223, 1228), False, 'import numpy\n'), ((1249, 1267), 'numpy.log', 'numpy.log', (['(1 - est)'], {}), '(1 - est)\n', (1258, 1267), False, 'import numpy\n'), ((3065, 3078), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (3075, 
3078), False, 'import numpy\n'), ((1932, 1945), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (1942, 1945), False, 'import numpy\n'), ((1953, 1966), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (1963, 1966), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
This enables to parameterize a desired scenario to mock a multi-partner ML project.
"""
from datasets import dataset_mnist, dataset_cifar10, dataset_titanic
from sklearn.model_selection import train_test_split
import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
import uuid
import pandas as pd
from loguru import logger
import operator
import random
import utils
from dataset import Dataset
import constants
from partner import Partner
class Scenario:
    def __init__(self, params, experiment_path, scenario_id=1, n_repeat=1, is_dry_run=False):
        """Configure a multi-partner ML scenario from parsed .yml parameters.

        :param params: dict of scenario parameters read from the experiment
            config file; any key not in the known list raises an Exception
        :param experiment_path: base folder (path-like supporting '/') under
            which this scenario's results folder is created
        :param scenario_id: index of this scenario within the experiment
        :param n_repeat: index of the repetition of this scenario
        :param is_dry_run: when True, skip results folder creation and the
            descriptive logging at the end of initialization
        """
        # ---------------------------------------------------------------------
        # Initialization of the dataset defined in the config of the experiment
        # ---------------------------------------------------------------------
        # Raise Exception if unknown parameters in the .yml file
        params_known = ["dataset_name", "dataset_proportion"] # Dataset related
        params_known += ["methods", "multi_partner_learning_approach", "aggregation_weighting"] # federated learning related
        params_known += ["partners_count", "amounts_per_partner", "corrupted_datasets", "samples_split_option"] # Partners related
        params_known += ["gradient_updates_per_pass_count", "epoch_count", "minibatch_count", "is_early_stopping"] # Computation related
        params_known += ["evaluation_partner_numbers","sequential_weighting_ponderation"]
        params_known += ["is_quick_demo"]
        if not all([x in params_known for x in params]):
            # Log every offending key before failing, to ease config debugging
            for x in params:
                if not x in params_known:
                    logger.debug(f"Unrecognised parameter: {x}")
            raise Exception(f"Unrecognised parameters, check your .yml file")
        # Get and verify which dataset is configured
        supported_datasets_names = ["mnist", "cifar10", "titanic"]
        if "dataset_name" in params:
            dataset_name = params["dataset_name"]
            if dataset_name not in supported_datasets_names:
                raise Exception(f"Dataset named '{dataset_name}' is not supported (yet). You could add it!")
        else:
            dataset_name = "mnist" # default
        logger.debug(f"Dataset selected: {dataset_name}")
        # Reference the module corresponding to the dataset selected and initialize the Dataset object
        if dataset_name == "mnist":
            dataset_module = dataset_mnist
        elif dataset_name == "cifar10":
            dataset_module = dataset_cifar10
        elif dataset_name == "titanic":
            dataset_module = dataset_titanic
        else:
            raise Exception(f"Dataset named '{dataset_name}' is not supported (yet). You could add it!")
        # The proportion of the dataset the computation will used
        if "dataset_proportion" in params:
            self.dataset_proportion = params["dataset_proportion"]
            assert self.dataset_proportion > 0, "Error in the config file, dataset_proportion should be > 0"
            assert self.dataset_proportion <= 1, "Error in the config file, dataset_proportion should be <= 1"
        else:
            self.dataset_proportion = 1 # default
        self.dataset = Dataset(
            dataset_name,
            dataset_module.x_train,
            dataset_module.x_test,
            dataset_module.y_train,
            dataset_module.y_test,
            dataset_module.input_shape,
            dataset_module.num_classes,
            dataset_module.preprocess_dataset_labels,
            dataset_module.generate_new_model_for_dataset,
        )
        if self.dataset_proportion < 1:
            self.shorten_dataset_proportion()
        else:
            logger.debug(f"Computation use the full dataset for scenario #{scenario_id}")
        self.nb_samples_used = len(self.dataset.x_train)
        self.final_relative_nb_samples = []
        # The train set is split into a train set and a validation set (used in particular for early stopping)
        self.dataset.train_val_split()
        # --------------------------------------
        # Definition of collaborative scenarios
        # --------------------------------------
        # List of all partners defined in the scenario
        self.partners_list = []
        # partners mock different partners in a collaborative data science project
        # For defining the number of partners
        self.partners_count = params["partners_count"]
        # For configuring the respective sizes of the partners' datasets
        # Should the partners receive an equivalent amount of samples each or receive different amounts?
        # Define the percentages of samples per partner
        # Sum has to equal 1 and number of items has to equal partners_count
        self.amounts_per_partner = params["amounts_per_partner"]
        # For configuring if data samples are split between partners randomly or in a stratified way...
        # ... so that they cover distinct areas of the samples space
        if "samples_split_option" in params:
            (self.samples_split_type, self.samples_split_description) = params["samples_split_option"]
        else:
            (self.samples_split_type, self.samples_split_description) = ("basic", "random") # default
        # For configuring if the data of the partners are corrupted or not (useful for testing contributivity measures)
        if "corrupted_datasets" in params:
            self.corrupted_datasets = params["corrupted_datasets"]
        else:
            self.corrupted_datasets = ["not_corrupted"] * self.partners_count # default
        # ---------------------------------------------------
        # Configuration of the distributed learning approach
        # ---------------------------------------------------
        self.mpl = None
        self.evaluation_partner_numbers = None
        self.sequential_weighting_ponderation = None
        # Multi-partner learning approach
        multi_partner_learning_approaches_list = [
            "fedavg",
            "seq-pure",
            "seq-with-final-agg",
            "seqavg",
            "qavg"
        ]
        if "multi_partner_learning_approach" in params:
            approach = params["multi_partner_learning_approach"]
            if approach in multi_partner_learning_approaches_list:
                self.multi_partner_learning_approach = approach
                if self.multi_partner_learning_approach == "qavg":
                    # for specifiying evaluation on subpart of ther dataset
                    # NOTE(review): self.partners_list is still [] here, so the
                    # fallback evaluates to 0 -- confirm intended default.
                    if "evaluation_partner_numbers" in params:
                        self.evaluation_partner_numbers = params['evaluation_partner_numbers']
                    else:
                        self.evaluation_partner_numbers = len(self.partners_list)
            else:
                raise Exception(f"Multi-partner learning approach '{approach}' is not a valid approach.")
        else:
            self.multi_partner_learning_approach = 'fedavg' # default
        # Define how federated learning aggregation steps are weighted. Toggle between 'uniform' and 'data_volume'
        # Default is 'uniform'
        if "aggregation_weighting" in params:
            self.aggregation_weighting = params["aggregation_weighting"]
            if self.aggregation_weighting == 'sequential':
                if 'sequential_weighting_ponderation' in params:
                    self.sequential_weighting_ponderation = params['sequential_weighting_ponderation']
                else:
                    self.sequential_weighting_ponderation = 0.5
        else:
            self.aggregation_weighting = "uniform" # default
        # Number of epochs, mini-batches and fit_batches in ML training
        if "epoch_count" in params:
            self.epoch_count = params["epoch_count"]
            assert self.epoch_count > 0, "Error: in the provided config file, epoch_count should be > 0"
        else:
            self.epoch_count = 40 # default
        if "minibatch_count" in params:
            self.minibatch_count = params["minibatch_count"]
            assert self.minibatch_count > 0, "Error: in the provided config file, minibatch_count should be > 0"
        else:
            self.minibatch_count = 20 # default
        if "gradient_updates_per_pass_count" in params:
            self.gradient_updates_per_pass_count = params["gradient_updates_per_pass_count"]
            assert self.gradient_updates_per_pass_count > 0, "Error: in the provided config file, gradient_updates_per_pass_count should be > 0"
        else:
            self.gradient_updates_per_pass_count = constants.DEFAULT_GRADIENT_UPDATES_PER_PASS_COUNT
        # Early stopping stops ML training when performance increase is not significant anymore
        # It is used to optimize the number of epochs and the execution time
        if "is_early_stopping" in params:
            self.is_early_stopping = params["is_early_stopping"]
        else:
            self.is_early_stopping = True # default
        # -----------------------------------------------------------------
        # Configuration of contributivity measurement methods to be tested
        # -----------------------------------------------------------------
        # List of contributivity measures selected and computed in the scenario
        self.contributivity_list = []
        # Contributivity methods
        contributivity_methods_list = [
            "Shapley values",
            "Independent scores",
            "TMCS",
            "ITMCS",
            "IS_lin_S",
            "IS_reg_S",
            "AIS_Kriging_S",
            "SMCS",
            "WR_SMC",
        ]
        self.methods = []
        if "methods" in params and params["methods"]:
            for method in params["methods"]:
                if method in contributivity_methods_list:
                    self.methods.append(method)
                else:
                    raise Exception(f"Contributivity method '{method}' is not in methods list.")
        # -------------
        # Miscellaneous
        # -------------
        # Scenario id and number of repetition
        self.scenario_id = scenario_id
        self.n_repeat = n_repeat
        if "is_quick_demo" in params:
            self.is_quick_demo = params["is_quick_demo"]
            if self.is_quick_demo and self.dataset_proportion < 1:
                raise Exception("Don't start a quick_demo without the full dataset")
        else:
            self.is_quick_demo = False # default
        # The quick demo parameters overwrites previously defined parameters to make the scenario faster to compute
        if "is_quick_demo" in params and params["is_quick_demo"]:
            # Use less data and/or less epochs to speed up the computations
            logger.info("Quick demo: limit number of data and number of epochs.")
            if (len(self.dataset.x_train) > 1000):
                # Subsample without replacement: 1000 train / 500 val / 500 test
                index_train = np.random.choice(self.dataset.x_train.shape[0], 1000, replace=False)
                index_val = np.random.choice(self.dataset.x_val.shape[0], 500, replace=False)
                index_test = np.random.choice(self.dataset.x_test.shape[0], 500, replace=False)
                self.dataset.x_train = self.dataset.x_train[index_train]
                self.dataset.y_train = self.dataset.y_train[index_train]
                self.dataset.x_val = self.dataset.x_val[index_val]
                self.dataset.y_val = self.dataset.y_val[index_val]
                self.dataset.x_test = self.dataset.x_test[index_test]
                self.dataset.y_test = self.dataset.y_test[index_test]
            self.epoch_count = 3
            self.minibatch_count = 2
        # -------
        # Outputs
        # -------
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d_%Hh%M")
        self.scenario_name = (
            "scenario_"
            + str(self.scenario_id)
            + "_"
            + "repeat"
            + "_"
            + str(self.n_repeat)
            + "_"
            + now_str
            + "_"
            + uuid.uuid4().hex[
                :3
            ] # This is to be sure 2 distinct scenarios do no have the same name
        )
        self.short_scenario_name = (
            str(self.partners_count)
            + " "
            + str(self.amounts_per_partner)
        )
        self.save_folder = experiment_path / self.scenario_name
        if not is_dry_run:
            self.save_folder.mkdir(parents=True, exist_ok=True)
        # ------------------------------------------------
        # Print the description of the scenario configured
        # ------------------------------------------------
        if not is_dry_run:
            # Describe scenario
            logger.info("### Description of data scenario configured:")
            logger.info(f" Number of partners defined: {self.partners_count}")
            logger.info(f" Data distribution scenario chosen: {self.samples_split_description}")
            logger.info(f" Multi-partner learning approach: {self.multi_partner_learning_approach}")
            logger.info(f" Weighting option: {self.aggregation_weighting}")
            logger.info(f" Iterations parameters: "
                        f"{self.epoch_count} epochs > "
                        f"{self.minibatch_count} mini-batches > "
                        f"{self.gradient_updates_per_pass_count} gradient updates per pass")
            # Describe data
            logger.info(f"### Data loaded: {self.dataset.name}")
            logger.info(f" {len(self.dataset.x_train)} train data with {len(self.dataset.y_train)} labels")
            logger.info(f" {len(self.dataset.x_val)} val data with {len(self.dataset.y_val)} labels")
            logger.info(f" {len(self.dataset.x_test)} test data with {len(self.dataset.y_test)} labels")
def append_contributivity(self, contributivity):
self.contributivity_list.append(contributivity)
def instantiate_scenario_partners(self):
"""Create the partners_list - self.partners_list should be []"""
if self.partners_list != []:
raise Exception("self.partners_list should be []")
self.partners_list = [Partner(i) for i in range(self.partners_count)]
    def split_data_fully_specified(self,is_logging_enabled=True):
        """Fully specified split: Populates the partners with trained and test data
        The following partition system is needed for each cluster:
            - nb_train : number of data used for training
            - nb_test : number of data used for testing
            - repartition : list of proportion for each class
            example :
            [
                [
                    1000,
                    500,
                    [ 0.1 ,0.6 ,0.3 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ]
                ]
            ]
        # Added following fields :
        partners:
        - train_data_size => initialised while parsing config.yml
        - test_data_size => initialised while parsing config.yml
        - label_repartition => initialised in this function, it is a dictionnary where :
            label_repartition[lab] = number of data with label lab in partner train and test dataset
        """
        x_train = self.dataset.x_train
        y_train = self.dataset.y_train
        x_test = self.dataset.x_test
        y_test = self.dataset.y_test
        partners_list = self.partners_list
        amounts_per_partner = self.amounts_per_partner
        sample_split_data = self.samples_split_description
        # Copy each partner's (train size, test size, class proportions) spec
        for p,partner_params in zip(partners_list,sample_split_data):
            train_size,test_size,repartition = partner_params[0],partner_params[1],partner_params[2]
            p.repartition_list = repartition
            p.train_data_size = train_size
            p.test_data_size = test_size
        labels = list(set(y_train))
        # Stratify the dataset into clusters per labels
        x_train_for_cluster, y_train_for_cluster = {}, {}
        x_test_for_cluster , y_test_for_cluster = {}, {}
        for label in labels:
            idx_in_full_trainset = np.where(y_train == label)
            x_train_for_cluster[label] = x_train[idx_in_full_trainset]
            y_train_for_cluster[label] = y_train[idx_in_full_trainset]
            idx_in_test_dataset = np.where(y_test == label)
            x_test_for_cluster[label] = x_test[idx_in_test_dataset]
            y_test_for_cluster[label] = y_test[idx_in_test_dataset]
        for p in partners_list:
            p.computed_accuracy_list = []
            p.refference_accuracy_list = []
            #Generation of cumulative histogram for random selection
            total = sum(p.repartition_list)
            # NOTE(review): the first entry is not divided by `total`, so the
            # cumulative list is only consistent when the repartition sums to
            # 1 -- confirm against the config contract.
            p_cumulative_list = [p.repartition_list[0]]
            for i in range(1,len(labels)):
                p_cumulative_list.append(p_cumulative_list[-1] + p.repartition_list[i]/total )
            p_index_train = {labels[i]:0 for i in range(len(labels))}
            p_index_test = {labels[i]:0 for i in range(len(labels))}
            # Draw a weighted-random label for every train/test sample slot
            for i in range(p.train_data_size):
                random_label = utils.get_random_index_from_weighted_list(p_cumulative_list)
                p_index_train[random_label] += 1
            for i in range(p.test_data_size):
                random_label = utils.get_random_index_from_weighted_list(p_cumulative_list)
                p_index_test[random_label] += 1
            # Sample, with replacement, the requested count from each cluster
            list_arrays_x_train, list_arrays_y_train = [], []
            for label,size in p_index_train.items():
                random_raw = np.random.choice(np.arange(len(x_train_for_cluster[label])), size=size, replace=True)
                list_arrays_x_train.append(x_train_for_cluster[label][random_raw])
                list_arrays_y_train.append(y_train_for_cluster[label][random_raw])
            list_arrays_x_test, list_arrays_y_test = [], []
            for label,size in p_index_test.items():
                # NOTE(review): the test subset is drawn from the *train*
                # clusters (x_train_for_cluster), while x_test_for_cluster is
                # built above but never used -- confirm this is intended.
                random_raw = np.random.choice(np.arange(len(x_train_for_cluster[label])), size=size, replace=True)
                list_arrays_x_test.append(x_train_for_cluster[label][random_raw])
                list_arrays_y_test.append(y_train_for_cluster[label][random_raw])
            p.x_train = np.concatenate(list_arrays_x_train)
            p.y_train = np.concatenate(list_arrays_y_train)
            p.x_test = np.concatenate(list_arrays_x_test)
            p.y_test = np.concatenate(list_arrays_y_test)
            # NOTE(review): the validation set duplicates the training draw
            # (same arrays concatenated again) -- confirm this is intended.
            p.x_val = np.concatenate(list_arrays_x_train)
            p.y_val = np.concatenate(list_arrays_y_train)
        # Count, per partner, how many train+test samples carry each label
        for p in partners_list:
            p_repartition = {}
            for label in labels:
                idx_train = np.where(p.y_train == label)
                idx_test = np.where(p.y_test == label)
                total = np.size(p.y_train[idx_train]) + np.size(p.y_test[idx_test])
                p_repartition[label] = total
            p.label_repartition = p_repartition
        if is_logging_enabled:
            logger.info("### Splitting data among partners:")
            logger.info(f"   Fully defined split performed.")
            # NOTE(review): self.final_relative_nb_samples is not filled by
            # this method (unlike split_data_advanced) and partner
            # .clusters_list is never set here -- the log lines below may
            # print an empty list / fail on the attribute; verify.
            logger.info(f"   Nb of samples split amongst partners: {self.nb_samples_used}")
            logger.info(f"   Partners' relative nb of samples: {[round(p, 2) for p in self.final_relative_nb_samples]} "
                        f"  (versus initially configured: {amounts_per_partner})")
            for partner in self.partners_list:
                logger.info(f"   Partner #{partner.id}: {len(partner.x_train)} "
                            f"samples with labels {partner.clusters_list}")
        return 0
    def split_data_advanced(self, is_logging_enabled=True):
        """Advanced split: Populates the partners with their train and test data (not pre-processed).

        Partners are configured (via samples_split_description) to draw from
        either 'shared' or 'specific' label clusters. Data amounts are scaled
        down by a resize factor when the configured proportions cannot be
        honored. Sets nb_samples_used and final_relative_nb_samples on self
        and x/y train/val/test plus clusters_list on each partner.
        Returns 0 on success.
        """
        x_train = self.dataset.x_train
        y_train = self.dataset.y_train
        partners_list = self.partners_list
        amounts_per_partner = self.amounts_per_partner
        advanced_split_description = self.samples_split_description
        # Compose the lists of partners with data samples from shared clusters and those with specific clusters
        for p in partners_list:
            p.cluster_count = int(advanced_split_description[p.id][0])
            p.cluster_split_option = advanced_split_description[p.id][1]
        partners_with_shared_clusters = [p for p in partners_list if p.cluster_split_option == 'shared']
        partners_with_specific_clusters = [p for p in partners_list if p.cluster_split_option == 'specific']
        # Serve the partners requesting the most clusters first
        partners_with_shared_clusters.sort(key=operator.attrgetter("cluster_count"), reverse=True)
        partners_with_specific_clusters.sort(key=operator.attrgetter("cluster_count"), reverse=True)
        # Compose the list of different labels in the dataset
        labels = list(set(y_train))
        # Fixed seed so the label-to-cluster assignment is reproducible
        random.seed(42)
        random.shuffle(labels)
        # Check coherence of the split option:
        nb_diff_labels = len(labels)
        specific_clusters_count = sum([p.cluster_count for p in partners_with_specific_clusters])
        if partners_with_shared_clusters:
            shared_clusters_count = max([p.cluster_count for p in partners_with_shared_clusters])
        else:
            shared_clusters_count = 0
        assert specific_clusters_count + shared_clusters_count <= nb_diff_labels, "Error: data samples from the initial dataset are split in clusters per data labels - Incompatibility between the split arguments and the dataset provided - Example: ['advanced', [[7, 'shared'], [6, 'shared'], [2, 'specific'], [1, 'specific']]] means 7 shared clusters and 2 + 1 = 3 specific clusters ==> This scenario can't work with a dataset with less than 10 labels"
        # Stratify the dataset into clusters per labels
        x_train_for_cluster, y_train_for_cluster, nb_samples_per_cluster = {}, {}, {}
        for label in labels:
            idx_in_full_trainset = np.where(y_train == label)
            x_train_for_cluster[label] = x_train[idx_in_full_trainset]
            y_train_for_cluster[label] = y_train[idx_in_full_trainset]
            nb_samples_per_cluster[label] = len(y_train_for_cluster[label])
        # For each partner compose the list of clusters from which they will draw data samples
        index = 0
        for p in partners_with_specific_clusters:
            p.clusters_list = labels[index:index + p.cluster_count]
            index += p.cluster_count
        shared_clusters = labels[index:index + shared_clusters_count]
        for p in partners_with_shared_clusters:
            p.clusters_list = random.sample(shared_clusters, k=p.cluster_count)
        # We need to enforce the relative data amounts configured.
        # It might not be possible to distribute all data samples, depending on...
        # ... the coherence of the relative data amounts and the split option.
        # We will compute a resize factor to determine the total nb of samples to be distributed per partner
        # For partners getting data samples from specific clusters...
        # ... compare the nb of available samples vs. the nb of samples initially configured
        resize_factor_specific = 1
        for p in partners_with_specific_clusters:
            nb_available_samples = sum([nb_samples_per_cluster[cl] for cl in p.clusters_list])
            nb_samples_requested = int(amounts_per_partner[p.id] * len(y_train))
            ratio = nb_available_samples / nb_samples_requested
            resize_factor_specific = min(resize_factor_specific, ratio)
        # For each partner getting data samples from shared clusters:
        # ... compute the nb of samples initially configured and resize it,
        # ... then sum per cluster how many samples are needed.
        # Then, find if a cluster is requested more samples than it has, and if yes by which factor
        resize_factor_shared = 1
        nb_samples_needed_per_cluster = dict.fromkeys(shared_clusters, 0)
        for p in partners_with_shared_clusters:
            initial_amount_resized = int(amounts_per_partner[p.id] * len(y_train) * resize_factor_specific)
            initial_amount_resized_per_cluster = int(initial_amount_resized / p.cluster_count)
            for cl in p.clusters_list:
                nb_samples_needed_per_cluster[cl] += initial_amount_resized_per_cluster
        # NOTE(review): a shared cluster sampled by no partner keeps a 0 count
        # here, which would divide by zero below -- confirm whether the
        # config guarantees every shared cluster is used.
        for cl in nb_samples_needed_per_cluster:
            resize_factor_shared = min(resize_factor_shared,
                                       nb_samples_per_cluster[cl] / nb_samples_needed_per_cluster[cl],
                                       )
        # Compute the final resize factor
        final_resize_factor = resize_factor_specific * resize_factor_shared
        # Size correctly each partner's subset. For each partner:
        for p in partners_list:
            p.final_nb_samples = int(amounts_per_partner[p.id] * len(y_train) * final_resize_factor)
            p.final_nb_samples_p_cluster = int(p.final_nb_samples / p.cluster_count)
        self.nb_samples_used = sum([p.final_nb_samples for p in partners_list])
        self.final_relative_nb_samples = [p.final_nb_samples / self.nb_samples_used for p in partners_list]
        # Partners receive their subsets
        shared_clusters_index = dict.fromkeys(shared_clusters, 0)
        for p in partners_list:
            list_arrays_x, list_arrays_y = [], []
            if p in partners_with_shared_clusters:
                # Shared clusters: consume successive, non-overlapping slices
                for cl in p.clusters_list:
                    idx = shared_clusters_index[cl]
                    list_arrays_x.append(x_train_for_cluster[cl][idx:idx + p.final_nb_samples_p_cluster])
                    list_arrays_y.append(y_train_for_cluster[cl][idx:idx + p.final_nb_samples_p_cluster])
                    shared_clusters_index[cl] += p.final_nb_samples_p_cluster
            elif p in partners_with_specific_clusters:
                # Specific clusters: the partner owns the cluster, take from the start
                for cl in p.clusters_list:
                    list_arrays_x.append(x_train_for_cluster[cl][:p.final_nb_samples_p_cluster])
                    list_arrays_y.append(y_train_for_cluster[cl][:p.final_nb_samples_p_cluster])
            p.x_train = np.concatenate(list_arrays_x)
            p.y_train = np.concatenate(list_arrays_y)
            # Create local validation and test datasets from the partner train data
            p.x_train, p.x_val, p.y_train, p.y_val = train_test_split(
                p.x_train, p.y_train, test_size=0.1, random_state=42
            )
            p.x_train, p.x_test, p.y_train, p.y_test = train_test_split(
                p.x_train, p.y_train, test_size=0.1, random_state=42
            )
        # Check coherence of number of mini-batches versus partner with small dataset
        assert self.minibatch_count <= min([len(p.x_train) for p in self.partners_list]), "Error: in the provided config file and the provided dataset, a partner doesn't have enough data samples to create the minibatches "
        if is_logging_enabled:
            logger.info("### Splitting data among partners:")
            logger.info(f"   Advanced split performed.")
            logger.info(f"   Nb of samples split amongst partners: {self.nb_samples_used}")
            logger.info(f"   Partners' relative nb of samples: {[round(p, 2) for p in self.final_relative_nb_samples]} "
                        f"  (versus initially configured: {amounts_per_partner})")
            for partner in self.partners_list:
                logger.info(f"   Partner #{partner.id}: {len(partner.x_train)} "
                            f"samples with labels {partner.clusters_list}")
        return 0
    def split_data(self, is_logging_enabled=True):
        """Populates the partners with their train and test data (not pre-processed).

        Slices self.dataset.(x|y)_train among self.partners_list according to
        self.amounts_per_partner and self.samples_split_description
        ("stratified" sorts samples by label before slicing; "random" shuffles
        the sample indices), then carves a local validation set and a local
        test set out of each partner's share.

        Args:
            is_logging_enabled: when True, log a summary of the performed split.

        Returns:
            0 on success.

        Raises:
            NameError: if samples_split_description is neither "stratified"
                nor "random".
            AssertionError: if the configured amounts are incoherent, or a
                partner ends up with fewer samples than minibatch_count.
        """
        # Fetch parameters of scenario
        x_train = self.dataset.x_train
        y_train = self.dataset.y_train
        # Check the percentages of samples per partner and control their coherence:
        # exactly one proportion per partner, and the proportions must sum to 1.
        assert len(self.amounts_per_partner) == self.partners_count, "Error: in the provided config file, amounts_per_partner list should have a size equals to partners_count"
        assert np.sum(self.amounts_per_partner) == 1, "Error: in the provided config file, amounts_per_partner argument: the sum of the proportions you provided isn't equal to 1"
        # Transform the configured proportions into the cumulative split indices
        # expected by np.split (one index per boundary between partners).
        if self.partners_count == 1:
            # np.split(arr, 1) returns the whole array as a single chunk.
            splitting_indices_train = 1
        else:
            splitting_indices = np.empty((self.partners_count - 1,))
            splitting_indices[0] = self.amounts_per_partner[0]
            for i in range(self.partners_count - 2):
                # Cumulative sum of the proportions (all partners but the last).
                splitting_indices[i + 1] = (
                    splitting_indices[i] + self.amounts_per_partner[i + 1]
                )
            splitting_indices_train = (splitting_indices * len(y_train)).astype(int)
        # Create a list of indexes of the samples
        train_idx = np.arange(len(y_train))
        # In the 'stratified' scenario we sort by labels; train_idx stays
        # 0..n-1, so each contiguous slice picks a run of similar labels.
        if self.samples_split_description == "stratified":
            # Sort by labels
            y_sorted_idx = y_train.argsort()
            y_train = y_train[y_sorted_idx]
            x_train = x_train[y_sorted_idx]
        # In the 'random' scenario we shuffle randomly the indexes
        elif self.samples_split_description == "random":
            np.random.seed(42)  # fixed seed: the split is reproducible across runs
            np.random.shuffle(train_idx)
        # If neither 'stratified' nor 'random', we raise an exception
        else:
            raise NameError(
                "This samples_split option ["
                + self.samples_split_description
                + "] is not recognized."
            )
        # Split the (possibly shuffled) index array between partners
        train_idx_idx_list = np.split(train_idx, splitting_indices_train)
        # Populate partners
        partner_idx = 0
        for train_idx in train_idx_idx_list:
            p = self.partners_list[partner_idx]
            # Finalize selection of train data
            x_partner_train = x_train[train_idx, :]
            y_partner_train = y_train[train_idx, ]
            # Populate the partner's train dataset
            p.x_train = x_partner_train
            p.y_train = y_partner_train
            # Create local validation and test datasets from the partner train data
            # (two successive 90/10 splits, fixed random_state for reproducibility)
            p.x_train, p.x_val, p.y_train, p.y_val = train_test_split(
                p.x_train, p.y_train, test_size=0.1, random_state=42
            )
            p.x_train, p.x_test, p.y_train, p.y_test = train_test_split(
                p.x_train, p.y_train, test_size=0.1, random_state=42
            )
            # Update other attributes from partner
            p.final_nb_samples = len(p.x_train)
            p.clusters_list = list(set(p.y_train))
            # Move on to the next partner
            partner_idx += 1
        # Check coherence of number of mini-batches versus smaller partner
        assert self.minibatch_count <= (min(self.amounts_per_partner) * len(x_train)), "Error: in the provided config file and dataset, a partner doesn't have enough data samples to create the minibatches"
        self.nb_samples_used = sum([len(p.x_train) for p in self.partners_list])
        self.final_relative_nb_samples = [p.final_nb_samples / self.nb_samples_used for p in self.partners_list]
        if is_logging_enabled:
            logger.info(f"### Splitting data among partners:")
            logger.info(f"   Simple split performed.")
            logger.info(f"   Nb of samples split amongst partners: {self.nb_samples_used}")
            for partner in self.partners_list:
                logger.info(f"   Partner #{partner.id}: "
                            f"{partner.final_nb_samples} samples "
                            f"with labels {partner.clusters_list}")
        return 0
def plot_data_distribution(self):
for i, partner in enumerate(self.partners_list):
plt.subplot(self.partners_count, 1, i + 1) # TODO share y axis
data_count = np.bincount(partner.y_train)
# Fill with 0
while len(data_count) < 10:
data_count = np.append(data_count, 0)
plt.bar(np.arange(0, 10), data_count)
plt.ylabel("partner " + str(partner.id))
plt.suptitle("Data distribution")
plt.xlabel("Digits")
if not os.path.exists(self.save_folder / 'graphs/'):
os.makedirs(self.save_folder / 'graphs/')
plt.savefig(self.save_folder / "graphs/data_distribution.png")
plt.close()
def compute_batch_sizes(self):
# For each partner we compute the batch size in multi-partner and single-partner setups
batch_size_min = 1
batch_size_max = constants.MAX_BATCH_SIZE
if self.partners_count == 1:
p = self.partners_list[0]
batch_size = int(len(p.x_train) / self.gradient_updates_per_pass_count)
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
else:
for p in self.partners_list:
batch_size = int(len(p.x_train) / (self.minibatch_count * self.gradient_updates_per_pass_count))
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
for p in self.partners_list:
logger.debug(f" Compute batch sizes, partner #{p.id}: {p.batch_size}")
def preprocess_scenarios_data(self):
"""Return scenario with central datasets (val, test) and distributed datasets (partners) pre-processed"""
logger.debug("## Pre-processing datasets of the scenario for keras CNN:")
# First, the scenario central dataset of the scenario
self.dataset.y_val = self.dataset.preprocess_dataset_labels(self.dataset.y_val)
logger.debug(" Central early stopping validation set: done.")
self.dataset.y_test = self.dataset.preprocess_dataset_labels(self.dataset.y_test)
logger.debug(" Central testset: done.")
# Then, datasets of each partner
for partner_index, partner in enumerate(self.partners_list):
# Pre-process labels (y) data
partner.y_train = self.dataset.preprocess_dataset_labels(partner.y_train)
partner.y_val = self.dataset.preprocess_dataset_labels(partner.y_val)
partner.y_test = self.dataset.preprocess_dataset_labels(partner.y_test)
# If a data corruption is configured, apply it
if self.corrupted_datasets[partner_index] == "corrupted":
logger.debug(f" ... Corrupting data (by offsetting labels) of partner #{partner.id}")
partner.corrupt_labels()
elif self.corrupted_datasets[partner_index] == "shuffled":
logger.debug(f" ... Corrupting data (by shuffling labels) of partner #{partner.id}")
partner.shuffle_labels()
elif self.corrupted_datasets[partner_index] == "not_corrupted":
pass
else:
logger.debug("Unexpected label of corruption, no corruption performed!")
logger.debug(f" Partner #{partner.id}: done.")
def to_dataframe(self):
df = pd.DataFrame()
dict_results = {}
# Scenario definition parameters
dict_results["scenario_name"] = self.scenario_name
dict_results["short_scenario_name"] = self.short_scenario_name
dict_results["dataset_name"] = self.dataset.name
dict_results["train_data_samples_count"] = len(self.dataset.x_train)
dict_results["test_data_samples_count"] = len(self.dataset.x_test)
dict_results["partners_count"] = self.partners_count
dict_results["dataset_fraction_per_partner"] = self.amounts_per_partner
dict_results["samples_split_description"] = self.samples_split_description
dict_results["nb_samples_used"] = self.nb_samples_used
dict_results["final_relative_nb_samples"] = self.final_relative_nb_samples
# Multi-partner learning approach parameters
dict_results["multi_partner_learning_approach"] = self.multi_partner_learning_approach
dict_results["aggregation_weighting"] = self.aggregation_weighting
dict_results["epoch_count"] = self.epoch_count
dict_results["minibatch_count"] = self.minibatch_count
dict_results["gradient_updates_per_pass_count"] = self.gradient_updates_per_pass_count
dict_results["is_early_stopping"] = self.is_early_stopping
dict_results["mpl_test_score"] = self.mpl.test_score
dict_results["mpl_nb_epochs_done"] = self.mpl.nb_epochs_done
dict_results["learning_computation_time_sec"] = self.mpl.learning_computation_time
if not self.contributivity_list:
df = df.append(dict_results, ignore_index=True)
for contrib in self.contributivity_list:
# Contributivity data
dict_results["contributivity_method"] = contrib.name
dict_results["contributivity_scores"] = contrib.contributivity_scores
dict_results["contributivity_stds"] = contrib.scores_std
dict_results["computation_time_sec"] = contrib.computation_time_sec
dict_results["first_characteristic_calls_count"] = contrib.first_charac_fct_calls_count
for i in range(self.partners_count):
# Partner-specific data
dict_results["partner_id"] = i
dict_results["dataset_fraction_of_partner"] = self.amounts_per_partner[i]
dict_results["contributivity_score"] = contrib.contributivity_scores[i]
dict_results["contributivity_std"] = contrib.scores_std[i]
df = df.append(dict_results, ignore_index=True)
return df
def shorten_dataset_proportion(self):
"""Truncate the dataset depending on self.dataset_proportion"""
if self.dataset_proportion == 1:
raise Exception("shorten_dataset_proportion shouldn't be called on this scenario, the user targets the full dataset")
x_train = self.dataset.x_train
y_train = self.dataset.y_train
logger.info(f"We don't use the full dataset: only {self.dataset_proportion*100}%")
skip_idx = int(round(len(x_train) * self.dataset_proportion))
train_idx = np.arange(len(x_train))
np.random.seed(42)
np.random.shuffle(train_idx)
self.dataset.x_train = x_train[train_idx[0:skip_idx]]
self.dataset.y_train = y_train[train_idx[0:skip_idx]]
| [
"numpy.clip",
"numpy.arange",
"utils.get_random_index_from_weighted_list",
"os.path.exists",
"partner.Partner",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"pandas.DataFrame",
"operator.attrgetter",
"random.... | [((2238, 2287), 'loguru.logger.debug', 'logger.debug', (['f"""Dataset selected: {dataset_name}"""'], {}), "(f'Dataset selected: {dataset_name}')\n", (2250, 2287), False, 'from loguru import logger\n'), ((3246, 3520), 'dataset.Dataset', 'Dataset', (['dataset_name', 'dataset_module.x_train', 'dataset_module.x_test', 'dataset_module.y_train', 'dataset_module.y_test', 'dataset_module.input_shape', 'dataset_module.num_classes', 'dataset_module.preprocess_dataset_labels', 'dataset_module.generate_new_model_for_dataset'], {}), '(dataset_name, dataset_module.x_train, dataset_module.x_test,\n dataset_module.y_train, dataset_module.y_test, dataset_module.\n input_shape, dataset_module.num_classes, dataset_module.\n preprocess_dataset_labels, dataset_module.generate_new_model_for_dataset)\n', (3253, 3520), False, 'from dataset import Dataset\n'), ((11892, 11915), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11913, 11915), False, 'import datetime\n'), ((21375, 21390), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (21386, 21390), False, 'import random\n'), ((21399, 21421), 'random.shuffle', 'random.shuffle', (['labels'], {}), '(labels)\n', (21413, 21421), False, 'import random\n'), ((30746, 30790), 'numpy.split', 'np.split', (['train_idx', 'splitting_indices_train'], {}), '(train_idx, splitting_indices_train)\n', (30754, 30790), True, 'import numpy as np\n'), ((33278, 33311), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Data distribution"""'], {}), "('Data distribution')\n", (33290, 33311), True, 'import matplotlib.pyplot as plt\n'), ((33320, 33340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Digits"""'], {}), "('Digits')\n", (33330, 33340), True, 'import matplotlib.pyplot as plt\n'), ((33465, 33527), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_folder / 'graphs/data_distribution.png')"], {}), "(self.save_folder / 'graphs/data_distribution.png')\n", (33476, 33527), True, 'import matplotlib.pyplot as 
plt\n'), ((33536, 33547), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (33545, 33547), True, 'import matplotlib.pyplot as plt\n'), ((34536, 34609), 'loguru.logger.debug', 'logger.debug', (['"""## Pre-processing datasets of the scenario for keras CNN:"""'], {}), "('## Pre-processing datasets of the scenario for keras CNN:')\n", (34548, 34609), False, 'from loguru import logger\n'), ((34769, 34832), 'loguru.logger.debug', 'logger.debug', (['""" Central early stopping validation set: done."""'], {}), "(' Central early stopping validation set: done.')\n", (34781, 34832), False, 'from loguru import logger\n'), ((34931, 34972), 'loguru.logger.debug', 'logger.debug', (['""" Central testset: done."""'], {}), "(' Central testset: done.')\n", (34943, 34972), False, 'from loguru import logger\n'), ((36178, 36192), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (36190, 36192), True, 'import pandas as pd\n'), ((39128, 39217), 'loguru.logger.info', 'logger.info', (['f"""We don\'t use the full dataset: only {self.dataset_proportion * 100}%"""'], {}), '(\n f"We don\'t use the full dataset: only {self.dataset_proportion * 100}%")\n', (39139, 39217), False, 'from loguru import logger\n'), ((39335, 39353), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (39349, 39353), True, 'import numpy as np\n'), ((39362, 39390), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (39379, 39390), True, 'import numpy as np\n'), ((3739, 3816), 'loguru.logger.debug', 'logger.debug', (['f"""Computation use the full dataset for scenario #{scenario_id}"""'], {}), "(f'Computation use the full dataset for scenario #{scenario_id}')\n", (3751, 3816), False, 'from loguru import logger\n'), ((10922, 10991), 'loguru.logger.info', 'logger.info', (['"""Quick demo: limit number of data and number of epochs."""'], {}), "('Quick demo: limit number of data and number of epochs.')\n", (10933, 10991), False, 'from loguru import logger\n'), ((12962, 
13021), 'loguru.logger.info', 'logger.info', (['"""### Description of data scenario configured:"""'], {}), "('### Description of data scenario configured:')\n", (12973, 13021), False, 'from loguru import logger\n'), ((13034, 13102), 'loguru.logger.info', 'logger.info', (['f""" Number of partners defined: {self.partners_count}"""'], {}), "(f' Number of partners defined: {self.partners_count}')\n", (13045, 13102), False, 'from loguru import logger\n'), ((13115, 13206), 'loguru.logger.info', 'logger.info', (['f""" Data distribution scenario chosen: {self.samples_split_description}"""'], {}), "(\n f' Data distribution scenario chosen: {self.samples_split_description}')\n", (13126, 13206), False, 'from loguru import logger\n'), ((13214, 13314), 'loguru.logger.info', 'logger.info', (['f""" Multi-partner learning approach: {self.multi_partner_learning_approach}"""'], {}), "(\n f' Multi-partner learning approach: {self.multi_partner_learning_approach}'\n )\n", (13225, 13314), False, 'from loguru import logger\n'), ((13317, 13382), 'loguru.logger.info', 'logger.info', (['f""" Weighting option: {self.aggregation_weighting}"""'], {}), "(f' Weighting option: {self.aggregation_weighting}')\n", (13328, 13382), False, 'from loguru import logger\n'), ((13395, 13577), 'loguru.logger.info', 'logger.info', (['f""" Iterations parameters: {self.epoch_count} epochs > {self.minibatch_count} mini-batches > {self.gradient_updates_per_pass_count} gradient updates per pass"""'], {}), "(\n f' Iterations parameters: {self.epoch_count} epochs > {self.minibatch_count} mini-batches > {self.gradient_updates_per_pass_count} gradient updates per pass'\n )\n", (13406, 13577), False, 'from loguru import logger\n'), ((13693, 13745), 'loguru.logger.info', 'logger.info', (['f"""### Data loaded: {self.dataset.name}"""'], {}), "(f'### Data loaded: {self.dataset.name}')\n", (13704, 13745), False, 'from loguru import logger\n'), ((14429, 14439), 'partner.Partner', 'Partner', (['i'], {}), '(i)\n', (14436, 
14439), False, 'from partner import Partner\n'), ((16379, 16405), 'numpy.where', 'np.where', (['(y_train == label)'], {}), '(y_train == label)\n', (16387, 16405), True, 'import numpy as np\n'), ((16596, 16621), 'numpy.where', 'np.where', (['(y_test == label)'], {}), '(y_test == label)\n', (16604, 16621), True, 'import numpy as np\n'), ((18730, 18765), 'numpy.concatenate', 'np.concatenate', (['list_arrays_x_train'], {}), '(list_arrays_x_train)\n', (18744, 18765), True, 'import numpy as np\n'), ((18790, 18825), 'numpy.concatenate', 'np.concatenate', (['list_arrays_y_train'], {}), '(list_arrays_y_train)\n', (18804, 18825), True, 'import numpy as np\n'), ((18850, 18884), 'numpy.concatenate', 'np.concatenate', (['list_arrays_x_test'], {}), '(list_arrays_x_test)\n', (18864, 18884), True, 'import numpy as np\n'), ((18908, 18942), 'numpy.concatenate', 'np.concatenate', (['list_arrays_y_test'], {}), '(list_arrays_y_test)\n', (18922, 18942), True, 'import numpy as np\n'), ((18966, 19001), 'numpy.concatenate', 'np.concatenate', (['list_arrays_x_train'], {}), '(list_arrays_x_train)\n', (18980, 19001), True, 'import numpy as np\n'), ((19024, 19059), 'numpy.concatenate', 'np.concatenate', (['list_arrays_y_train'], {}), '(list_arrays_y_train)\n', (19038, 19059), True, 'import numpy as np\n'), ((19522, 19571), 'loguru.logger.info', 'logger.info', (['"""### Splitting data among partners:"""'], {}), "('### Splitting data among partners:')\n", (19533, 19571), False, 'from loguru import logger\n'), ((19584, 19633), 'loguru.logger.info', 'logger.info', (['f""" Fully defined split performed."""'], {}), "(f' Fully defined split performed.')\n", (19595, 19633), False, 'from loguru import logger\n'), ((19646, 19725), 'loguru.logger.info', 'logger.info', (['f""" Nb of samples split amongst partners: {self.nb_samples_used}"""'], {}), "(f' Nb of samples split amongst partners: {self.nb_samples_used}')\n", (19657, 19725), False, 'from loguru import logger\n'), ((22457, 22483), 'numpy.where', 
'np.where', (['(y_train == label)'], {}), '(y_train == label)\n', (22465, 22483), True, 'import numpy as np\n'), ((23120, 23169), 'random.sample', 'random.sample', (['shared_clusters'], {'k': 'p.cluster_count'}), '(shared_clusters, k=p.cluster_count)\n', (23133, 23169), False, 'import random\n'), ((26657, 26686), 'numpy.concatenate', 'np.concatenate', (['list_arrays_x'], {}), '(list_arrays_x)\n', (26671, 26686), True, 'import numpy as np\n'), ((26711, 26740), 'numpy.concatenate', 'np.concatenate', (['list_arrays_y'], {}), '(list_arrays_y)\n', (26725, 26740), True, 'import numpy as np\n'), ((26879, 26949), 'sklearn.model_selection.train_test_split', 'train_test_split', (['p.x_train', 'p.y_train'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(p.x_train, p.y_train, test_size=0.1, random_state=42)\n', (26895, 26949), False, 'from sklearn.model_selection import train_test_split\n'), ((27035, 27105), 'sklearn.model_selection.train_test_split', 'train_test_split', (['p.x_train', 'p.y_train'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(p.x_train, p.y_train, test_size=0.1, random_state=42)\n', (27051, 27105), False, 'from sklearn.model_selection import train_test_split\n'), ((27490, 27539), 'loguru.logger.info', 'logger.info', (['"""### Splitting data among partners:"""'], {}), "('### Splitting data among partners:')\n", (27501, 27539), False, 'from loguru import logger\n'), ((27552, 27596), 'loguru.logger.info', 'logger.info', (['f""" Advanced split performed."""'], {}), "(f' Advanced split performed.')\n", (27563, 27596), False, 'from loguru import logger\n'), ((27609, 27688), 'loguru.logger.info', 'logger.info', (['f""" Nb of samples split amongst partners: {self.nb_samples_used}"""'], {}), "(f' Nb of samples split amongst partners: {self.nb_samples_used}')\n", (27620, 27688), False, 'from loguru import logger\n'), ((28838, 28870), 'numpy.sum', 'np.sum', (['self.amounts_per_partner'], {}), '(self.amounts_per_partner)\n', (28844, 28870), True, 'import numpy 
as np\n'), ((29329, 29365), 'numpy.empty', 'np.empty', (['(self.partners_count - 1,)'], {}), '((self.partners_count - 1,))\n', (29337, 29365), True, 'import numpy as np\n'), ((31359, 31429), 'sklearn.model_selection.train_test_split', 'train_test_split', (['p.x_train', 'p.y_train'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(p.x_train, p.y_train, test_size=0.1, random_state=42)\n', (31375, 31429), False, 'from sklearn.model_selection import train_test_split\n'), ((31515, 31585), 'sklearn.model_selection.train_test_split', 'train_test_split', (['p.x_train', 'p.y_train'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(p.x_train, p.y_train, test_size=0.1, random_state=42)\n', (31531, 31585), False, 'from sklearn.model_selection import train_test_split\n'), ((32360, 32410), 'loguru.logger.info', 'logger.info', (['f"""### Splitting data among partners:"""'], {}), "(f'### Splitting data among partners:')\n", (32371, 32410), False, 'from loguru import logger\n'), ((32423, 32465), 'loguru.logger.info', 'logger.info', (['f""" Simple split performed."""'], {}), "(f' Simple split performed.')\n", (32434, 32465), False, 'from loguru import logger\n'), ((32478, 32557), 'loguru.logger.info', 'logger.info', (['f""" Nb of samples split amongst partners: {self.nb_samples_used}"""'], {}), "(f' Nb of samples split amongst partners: {self.nb_samples_used}')\n", (32489, 32557), False, 'from loguru import logger\n'), ((32926, 32968), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.partners_count', '(1)', '(i + 1)'], {}), '(self.partners_count, 1, i + 1)\n', (32937, 32968), True, 'import matplotlib.pyplot as plt\n'), ((33015, 33043), 'numpy.bincount', 'np.bincount', (['partner.y_train'], {}), '(partner.y_train)\n', (33026, 33043), True, 'import numpy as np\n'), ((33357, 33401), 'os.path.exists', 'os.path.exists', (["(self.save_folder / 'graphs/')"], {}), "(self.save_folder / 'graphs/')\n", (33371, 33401), False, 'import os\n'), ((33415, 33456), 'os.makedirs', 
'os.makedirs', (["(self.save_folder / 'graphs/')"], {}), "(self.save_folder / 'graphs/')\n", (33426, 33456), False, 'import os\n'), ((33945, 33996), 'numpy.clip', 'np.clip', (['batch_size', 'batch_size_min', 'batch_size_max'], {}), '(batch_size, batch_size_min, batch_size_max)\n', (33952, 33996), True, 'import numpy as np\n'), ((34298, 34370), 'loguru.logger.debug', 'logger.debug', (['f""" Compute batch sizes, partner #{p.id}: {p.batch_size}"""'], {}), "(f' Compute batch sizes, partner #{p.id}: {p.batch_size}')\n", (34310, 34370), False, 'from loguru import logger\n'), ((36086, 36134), 'loguru.logger.debug', 'logger.debug', (['f""" Partner #{partner.id}: done."""'], {}), "(f' Partner #{partner.id}: done.')\n", (36098, 36134), False, 'from loguru import logger\n'), ((11073, 11141), 'numpy.random.choice', 'np.random.choice', (['self.dataset.x_train.shape[0]', '(1000)'], {'replace': '(False)'}), '(self.dataset.x_train.shape[0], 1000, replace=False)\n', (11089, 11141), True, 'import numpy as np\n'), ((11170, 11235), 'numpy.random.choice', 'np.random.choice', (['self.dataset.x_val.shape[0]', '(500)'], {'replace': '(False)'}), '(self.dataset.x_val.shape[0], 500, replace=False)\n', (11186, 11235), True, 'import numpy as np\n'), ((11265, 11331), 'numpy.random.choice', 'np.random.choice', (['self.dataset.x_test.shape[0]', '(500)'], {'replace': '(False)'}), '(self.dataset.x_test.shape[0], 500, replace=False)\n', (11281, 11331), True, 'import numpy as np\n'), ((17477, 17537), 'utils.get_random_index_from_weighted_list', 'utils.get_random_index_from_weighted_list', (['p_cumulative_list'], {}), '(p_cumulative_list)\n', (17518, 17537), False, 'import utils\n'), ((17683, 17743), 'utils.get_random_index_from_weighted_list', 'utils.get_random_index_from_weighted_list', (['p_cumulative_list'], {}), '(p_cumulative_list)\n', (17724, 17743), False, 'import utils\n'), ((19203, 19231), 'numpy.where', 'np.where', (['(p.y_train == label)'], {}), '(p.y_train == label)\n', (19211, 19231), 
True, 'import numpy as np\n'), ((19260, 19287), 'numpy.where', 'np.where', (['(p.y_test == label)'], {}), '(p.y_test == label)\n', (19268, 19287), True, 'import numpy as np\n'), ((21115, 21151), 'operator.attrgetter', 'operator.attrgetter', (['"""cluster_count"""'], {}), "('cluster_count')\n", (21134, 21151), False, 'import operator\n'), ((21216, 21252), 'operator.attrgetter', 'operator.attrgetter', (['"""cluster_count"""'], {}), "('cluster_count')\n", (21235, 21252), False, 'import operator\n'), ((30280, 30298), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (30294, 30298), True, 'import numpy as np\n'), ((30311, 30339), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (30328, 30339), True, 'import numpy as np\n'), ((32621, 32743), 'loguru.logger.info', 'logger.info', (['f""" Partner #{partner.id}: {partner.final_nb_samples} samples with labels {partner.clusters_list}"""'], {}), "(\n f' Partner #{partner.id}: {partner.final_nb_samples} samples with labels {partner.clusters_list}'\n )\n", (32632, 32743), False, 'from loguru import logger\n'), ((33140, 33164), 'numpy.append', 'np.append', (['data_count', '(0)'], {}), '(data_count, 0)\n', (33149, 33164), True, 'import numpy as np\n'), ((33186, 33202), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (33195, 33202), True, 'import numpy as np\n'), ((34196, 34247), 'numpy.clip', 'np.clip', (['batch_size', 'batch_size_min', 'batch_size_max'], {}), '(batch_size, batch_size_min, batch_size_max)\n', (34203, 34247), True, 'import numpy as np\n'), ((35525, 35617), 'loguru.logger.debug', 'logger.debug', (['f""" ... Corrupting data (by offsetting labels) of partner #{partner.id}"""'], {}), "(\n f' ... 
Corrupting data (by offsetting labels) of partner #{partner.id}')\n", (35537, 35617), False, 'from loguru import logger\n'), ((1669, 1713), 'loguru.logger.debug', 'logger.debug', (['f"""Unrecognised parameter: {x}"""'], {}), "(f'Unrecognised parameter: {x}')\n", (1681, 1713), False, 'from loguru import logger\n'), ((12260, 12272), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12270, 12272), False, 'import uuid\n'), ((19314, 19343), 'numpy.size', 'np.size', (['p.y_train[idx_train]'], {}), '(p.y_train[idx_train])\n', (19321, 19343), True, 'import numpy as np\n'), ((19346, 19373), 'numpy.size', 'np.size', (['p.y_test[idx_test]'], {}), '(p.y_test[idx_test])\n', (19353, 19373), True, 'import numpy as np\n'), ((35741, 35832), 'loguru.logger.debug', 'logger.debug', (['f""" ... Corrupting data (by shuffling labels) of partner #{partner.id}"""'], {}), "(\n f' ... Corrupting data (by shuffling labels) of partner #{partner.id}')\n", (35753, 35832), False, 'from loguru import logger\n'), ((36000, 36072), 'loguru.logger.debug', 'logger.debug', (['"""Unexpected label of corruption, no corruption performed!"""'], {}), "('Unexpected label of corruption, no corruption performed!')\n", (36012, 36072), False, 'from loguru import logger\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gensim.downloader as api
from gensim.models.word2vec import Word2Vec
import numpy as np
from sklearn.linear_model import LinearRegression
import pickle
# Align each corpus-specific word2vec model onto the pretrained Google News
# embedding space via an unregularized linear map, then persist the aligned
# vectors and the transformation matrix.
w2v = api.load('word2vec-google-news-300')
models = ['Guardian_Pre', 'Guardian_Post', 'Daily Mail_Pre', 'Daily Mail_Post']
for model in models:
    mod = Word2Vec.load("./word2vec/{}.model".format(model))
    # Words present in both vocabularies anchor the transformation.
    transformation_words = set(mod.wv.index2word) & set(w2v.wv.index2word)
    # Training inputs: each shared word's local vector, repeated 25 times...
    X = np.concatenate([np.tile(mod[word], (25, 1)) for word in transformation_words])
    # ...paired with that word's 25 nearest neighbours in the target space.
    target_rows = []
    for word in transformation_words:
        neighbours = [pair[0] for pair in w2v.similar_by_word(word, 25)]
        target_rows.extend(np.array([w2v[neighbour] for neighbour in neighbours]))
    Y = np.array(target_rows)
    # Fit the linear alignment map (no intercept: pure matrix transform).
    regression = LinearRegression(fit_intercept=False)
    regression.fit(X, Y)
    aligned_vectors = regression.predict(mod.wv.vectors)
    outlist = [mod.wv.index2word, aligned_vectors, regression.coef_]
    with open('./alignement/{}.pkl'.format(model), 'wb') as f:
        pickle.dump(outlist, f)
| [
"numpy.tile",
"pickle.dump",
"gensim.downloader.load",
"numpy.array",
"sklearn.linear_model.LinearRegression"
] | [((214, 250), 'gensim.downloader.load', 'api.load', (['"""word2vec-google-news-300"""'], {}), "('word2vec-google-news-300')\n", (222, 250), True, 'import gensim.downloader as api\n'), ((858, 873), 'numpy.array', 'np.array', (['ylist'], {}), '(ylist)\n', (866, 873), True, 'import numpy as np\n'), ((884, 921), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (900, 921), False, 'from sklearn.linear_model import LinearRegression\n'), ((1133, 1156), 'pickle.dump', 'pickle.dump', (['outlist', 'f'], {}), '(outlist, f)\n', (1144, 1156), False, 'import pickle\n'), ((622, 649), 'numpy.tile', 'np.tile', (['mod[word]', '(25, 1)'], {}), '(mod[word], (25, 1))\n', (629, 649), True, 'import numpy as np\n'), ((818, 848), 'numpy.array', 'np.array', (['[w2v[n] for n in nn]'], {}), '([w2v[n] for n in nn])\n', (826, 848), True, 'import numpy as np\n')] |
import sys
sys.path.append("../..")
from bempp import lib as blib
#from bempp import visualization as vis
import numpy as np
import tempfile
import os
import subprocess
import math
def evalBoundaryData(point):
    """Constant boundary source: value 1 at every boundary point (argument unused)."""
    return 1
def evalNullData(point):
    """Zero boundary data: value 0 at every boundary point (argument unused)."""
    return 0
def Keijzer(n):
    """Boundary mismatch coefficient for relative refractive index ``n``.

    Combines the critical angle asin(1/n) and the normal-incidence Fresnel
    reflectance R0 = ((n-1)/(n+1))^2 into a single scalar. Presumably this is
    Keijzer's boundary-condition factor used in diffusion-type models (the
    caller uses it to scale 1/(2*alpha*kappa)) — verify against the reference.
    Requires n >= 1 so that asin(1/n) is defined.
    """
    critical_angle = math.asin(1.0 / n)
    cos_c = math.fabs(math.cos(critical_angle))
    r0 = ((n - 1.0) * (n - 1.0)) / ((n + 1.0) * (n + 1.0))
    numerator = 2.0 / (1.0 - r0) - 1.0 + cos_c * cos_c * cos_c
    return numerator / (1.0 - cos_c * cos_c)
# Define physical parameters.
# NOTE(review): names follow the usual diffusion-model conventions (mua =
# absorption, mus = scattering, kappa = diffusion coefficient) — confirm
# against the project's documentation.
c = .3
freq = 100e6
omega = 2*np.pi*freq*1E-12
refind = 1.4
alpha = Keijzer(refind)
# Outer region: complex wavenumber w1 = sqrt(mua/kappa + i*omega/(c*kappa))
# for the modified Helmholtz operators below.
mua1 = .01
mus1 = 1.
kappa1 = 1./(3.*(mua1+mus1))
w1 = np.sqrt(mua1/kappa1+1j*omega/(c*kappa1))
# Inner region: same construction with the inner-region coefficients.
mua2 = .02
mus2 = .5
kappa2 = 1./(3.*(mua2+mus2))
w2 = np.sqrt(mua2/kappa2+1j*omega/(c*kappa2))
# We consider two spheres. One has radius r1, the other radius r2<r1. We want to look at
# the low-rank interaction between the spheres. Gmsh is used to create the meshes.
# element_size2 is scaled by r2/r1 so both meshes have comparable resolution.
r1 = 25.
r2 = 7.5
element_size1 = 1.
element_size2 = element_size1*r2/r1
gmsh_command = "gmsh"
sphere_definition = "../../../examples/meshes/sphere.txt"
sphere_def = open(sphere_definition,'r').read()
# Construct two Gmsh .geo files with the required radius/element-size
# parameters prepended to the shared sphere template.
s1_geo, s1_geo_name = tempfile.mkstemp(suffix='.geo',dir=os.getcwd(),text=True)
s2_geo, s2_geo_name = tempfile.mkstemp(suffix='.geo',dir=os.getcwd(),text=True)
s1_msh_name = os.path.splitext(s1_geo_name)[0]+".msh"
s2_msh_name = os.path.splitext(s2_geo_name)[0]+".msh"
s1_geo_f = os.fdopen(s1_geo,"w")
s2_geo_f = os.fdopen(s2_geo,"w")
s1_geo_f.write("rad = "+str(r1)+";\nlc = "+str(element_size1)+";\n"+sphere_def)
s2_geo_f.write("rad = "+str(r2)+";\nlc = "+str(element_size2)+";\n"+sphere_def)
s1_geo_f.close()
s2_geo_f.close()
# Use Gmsh to create the surface meshes ("-2" = 2D meshing of the geometry).
subprocess.check_call(gmsh_command+" -2 "+s1_geo_name,shell=True)
subprocess.check_call(gmsh_command+" -2 "+s2_geo_name,shell=True)
# Read the meshes into BEM++ grid objects.
sphere1 = blib.createGridFactory().importGmshGrid("triangular",s1_msh_name)
sphere2 = blib.createGridFactory().importGmshGrid("triangular",s2_msh_name)
# Clean up the temporary files
os.remove(s1_geo_name)
os.remove(s2_geo_name)
os.remove(s1_msh_name)
os.remove(s2_msh_name)
# Create the assembly context: quadrature accuracy + ACA acceleration.
accuracy_options = blib.createAccuracyOptions()
# 1 orders higher than default accuracy for regular integrals
accuracy_options.doubleRegular.setRelativeQuadratureOrder(1)
# 0 orders higher than default accuracy for singular integrals
accuracy_options.doubleSingular.setRelativeQuadratureOrder(0)
strategy = blib.createNumericalQuadratureStrategy("float64", "complex128", accuracy_options)
options = blib.createAssemblyOptions()
aca_options = blib.createAcaOptions()
aca_options.eps=1E-6
options.switchToAca(aca_options)
context = blib.createContext(strategy, options)
# Piecewise-linear continuous function spaces on each sphere surface.
sphere1_plc = blib.createPiecewiseLinearContinuousScalarSpace(context,sphere1)
sphere2_plc = blib.createPiecewiseLinearContinuousScalarSpace(context,sphere2)
# Boundary operators. Naming: slpXY/dlpXY = single/double layer potential
# mapping functions on sphere Y to sphere X; suffix _w1/_w2 marks which
# wavenumber is used when both occur on the same sphere pair.
slp11 = blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator(context,sphere1_plc,sphere1_plc,sphere1_plc,w1)
dlp11 = blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator(context,sphere1_plc,sphere1_plc,sphere1_plc,w1)
id11 = blib.createIdentityOperator(context,sphere1_plc,sphere1_plc,sphere1_plc)
slp22_w1 = blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator(context,sphere2_plc,sphere2_plc,sphere2_plc,w1)
dlp22_w1 = blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator(context,sphere2_plc,sphere2_plc,sphere2_plc,w1)
id22 = blib.createIdentityOperator(context,sphere2_plc,sphere2_plc,sphere2_plc)
slp22_w2 = blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator(context,sphere2_plc,sphere2_plc,sphere2_plc,w2)
dlp22_w2 = blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator(context,sphere2_plc,sphere2_plc,sphere2_plc,w2)
slp12 = blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator(context,sphere2_plc,sphere1_plc,sphere1_plc,w1)
dlp12 = blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator(context,sphere2_plc,sphere1_plc,sphere1_plc,w1)
slp21 = blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator(context,sphere1_plc,sphere2_plc,sphere2_plc,w1)
dlp21 = blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator(context,sphere1_plc,sphere2_plc,sphere2_plc,w1)
# Assemble the 3x3 blocked left-hand-side operator of the coupled system.
# scale encodes the boundary (Robin-type) condition via alpha = Keijzer(refind).
scale = 1.0/(2.0*alpha*kappa1)
lhs_k11 = 0.5*id11 + dlp11 + scale*slp11
lhs_k12 = -1.0*dlp12
lhs_k13 = -(1.0/kappa1)*slp12
lhs_k21 = dlp21 + scale*slp21
lhs_k22 = 0.5*id22 - dlp22_w1
lhs_k23 = -(1.0/kappa1)*slp22_w1
# lhs_k31 -- empty (block (2,0) is structurally zero)
lhs_k32 = 0.5*id22 + dlp22_w2
lhs_k33 = (1.0/kappa2) * slp22_w2
structure = blib.createBlockedOperatorStructure(context)
structure.setBlock(0, 0, lhs_k11)
structure.setBlock(0, 1, lhs_k12)
structure.setBlock(0, 2, lhs_k13)
structure.setBlock(1, 0, lhs_k21)
structure.setBlock(1, 1, lhs_k22)
structure.setBlock(1, 2, lhs_k23)
# structure.setBlock(2, 0, ...); -- empty
structure.setBlock(2, 1, lhs_k32)
structure.setBlock(2, 2, lhs_k33)
lhsOp = blib.createBlockedBoundaryOperator(context,structure)
# Right-hand side: the constant unit boundary source on sphere 1 mapped
# through the scaled single-layer operators, plus zero data on sphere 2.
rhs1 = scale*slp11
rhs2 = scale*slp21
boundaryData1 = rhs1 * blib.createGridFunction(
        context, sphere1_plc, sphere1_plc, evalBoundaryData)
boundaryData2 = rhs2 * blib.createGridFunction(
        context, sphere1_plc, sphere1_plc, evalBoundaryData)
boundaryData3 = blib.createGridFunction(
        context, sphere2_plc, sphere2_plc, evalNullData)
rhs = [boundaryData1, boundaryData2, boundaryData3]
solver = blib.createDefaultIterativeSolver(lhsOp)
params = blib.defaultGmresParameterList(1e-8)
solver.initializeSolver(params)
solution = solver.solve(rhs)
u0 = solution.gridFunction(0)
u1 = solution.gridFunction(1)
v1 = solution.gridFunction(2)
# write out VTK files
u0.exportToVtk("vertex_data", "u0", "u0")
u1.exportToVtk("vertex_data", "u1", "u1")
v1.exportToVtk("vertex_data", "v1", "v1")
| [
"numpy.sqrt",
"bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator",
"math.cos",
"bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator",
"bempp.lib.createGridFactory",
"bempp.lib.createContext",
"sys.path.append",
"bempp.lib.createAccuracyOptions",
"os.remove",
"bempp.lib.crea... | [((12, 36), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (27, 36), False, 'import sys\n'), ((689, 741), 'numpy.sqrt', 'np.sqrt', (['(mua1 / kappa1 + 1.0j * omega / (c * kappa1))'], {}), '(mua1 / kappa1 + 1.0j * omega / (c * kappa1))\n', (696, 741), True, 'import numpy as np\n'), ((807, 859), 'numpy.sqrt', 'np.sqrt', (['(mua2 / kappa2 + 1.0j * omega / (c * kappa2))'], {}), '(mua2 / kappa2 + 1.0j * omega / (c * kappa2))\n', (814, 859), True, 'import numpy as np\n'), ((1585, 1607), 'os.fdopen', 'os.fdopen', (['s1_geo', '"""w"""'], {}), "(s1_geo, 'w')\n", (1594, 1607), False, 'import os\n'), ((1619, 1641), 'os.fdopen', 'os.fdopen', (['s2_geo', '"""w"""'], {}), "(s2_geo, 'w')\n", (1628, 1641), False, 'import os\n'), ((1875, 1945), 'subprocess.check_call', 'subprocess.check_call', (["(gmsh_command + ' -2 ' + s1_geo_name)"], {'shell': '(True)'}), "(gmsh_command + ' -2 ' + s1_geo_name, shell=True)\n", (1896, 1945), False, 'import subprocess\n'), ((1942, 2012), 'subprocess.check_call', 'subprocess.check_call', (["(gmsh_command + ' -2 ' + s2_geo_name)"], {'shell': '(True)'}), "(gmsh_command + ' -2 ' + s2_geo_name, shell=True)\n", (1963, 2012), False, 'import subprocess\n'), ((2241, 2263), 'os.remove', 'os.remove', (['s1_geo_name'], {}), '(s1_geo_name)\n', (2250, 2263), False, 'import os\n'), ((2265, 2287), 'os.remove', 'os.remove', (['s2_geo_name'], {}), '(s2_geo_name)\n', (2274, 2287), False, 'import os\n'), ((2289, 2311), 'os.remove', 'os.remove', (['s1_msh_name'], {}), '(s1_msh_name)\n', (2298, 2311), False, 'import os\n'), ((2313, 2335), 'os.remove', 'os.remove', (['s2_msh_name'], {}), '(s2_msh_name)\n', (2322, 2335), False, 'import os\n'), ((2378, 2406), 'bempp.lib.createAccuracyOptions', 'blib.createAccuracyOptions', ([], {}), '()\n', (2404, 2406), True, 'from bempp import lib as blib\n'), ((2670, 2755), 'bempp.lib.createNumericalQuadratureStrategy', 'blib.createNumericalQuadratureStrategy', (['"""float64"""', 
'"""complex128"""', 'accuracy_options'], {}), "('float64', 'complex128',\n accuracy_options)\n", (2708, 2755), True, 'from bempp import lib as blib\n'), ((2763, 2791), 'bempp.lib.createAssemblyOptions', 'blib.createAssemblyOptions', ([], {}), '()\n', (2789, 2791), True, 'from bempp import lib as blib\n'), ((2807, 2830), 'bempp.lib.createAcaOptions', 'blib.createAcaOptions', ([], {}), '()\n', (2828, 2830), True, 'from bempp import lib as blib\n'), ((2898, 2935), 'bempp.lib.createContext', 'blib.createContext', (['strategy', 'options'], {}), '(strategy, options)\n', (2916, 2935), True, 'from bempp import lib as blib\n'), ((2978, 3043), 'bempp.lib.createPiecewiseLinearContinuousScalarSpace', 'blib.createPiecewiseLinearContinuousScalarSpace', (['context', 'sphere1'], {}), '(context, sphere1)\n', (3025, 3043), True, 'from bempp import lib as blib\n'), ((3058, 3123), 'bempp.lib.createPiecewiseLinearContinuousScalarSpace', 'blib.createPiecewiseLinearContinuousScalarSpace', (['context', 'sphere2'], {}), '(context, sphere2)\n', (3105, 3123), True, 'from bempp import lib as blib\n'), ((3164, 3277), 'bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', (['context', 'sphere1_plc', 'sphere1_plc', 'sphere1_plc', 'w1'], {}), '(context,\n sphere1_plc, sphere1_plc, sphere1_plc, w1)\n', (3221, 3277), True, 'from bempp import lib as blib\n'), ((3279, 3392), 'bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', (['context', 'sphere1_plc', 'sphere1_plc', 'sphere1_plc', 'w1'], {}), '(context,\n sphere1_plc, sphere1_plc, sphere1_plc, w1)\n', (3336, 3392), True, 'from bempp import lib as blib\n'), ((3394, 3469), 'bempp.lib.createIdentityOperator', 'blib.createIdentityOperator', (['context', 'sphere1_plc', 'sphere1_plc', 'sphere1_plc'], {}), '(context, sphere1_plc, sphere1_plc, sphere1_plc)\n', (3421, 3469), True, 'from bempp import lib as 
blib\n'), ((3481, 3594), 'bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere2_plc', 'sphere2_plc', 'w1'], {}), '(context,\n sphere2_plc, sphere2_plc, sphere2_plc, w1)\n', (3538, 3594), True, 'from bempp import lib as blib\n'), ((3599, 3712), 'bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere2_plc', 'sphere2_plc', 'w1'], {}), '(context,\n sphere2_plc, sphere2_plc, sphere2_plc, w1)\n', (3656, 3712), True, 'from bempp import lib as blib\n'), ((3714, 3789), 'bempp.lib.createIdentityOperator', 'blib.createIdentityOperator', (['context', 'sphere2_plc', 'sphere2_plc', 'sphere2_plc'], {}), '(context, sphere2_plc, sphere2_plc, sphere2_plc)\n', (3741, 3789), True, 'from bempp import lib as blib\n'), ((3801, 3914), 'bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere2_plc', 'sphere2_plc', 'w2'], {}), '(context,\n sphere2_plc, sphere2_plc, sphere2_plc, w2)\n', (3858, 3914), True, 'from bempp import lib as blib\n'), ((3919, 4032), 'bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere2_plc', 'sphere2_plc', 'w2'], {}), '(context,\n sphere2_plc, sphere2_plc, sphere2_plc, w2)\n', (3976, 4032), True, 'from bempp import lib as blib\n'), ((4036, 4149), 'bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere1_plc', 'sphere1_plc', 'w1'], {}), '(context,\n sphere2_plc, sphere1_plc, sphere1_plc, w1)\n', (4093, 4149), True, 'from bempp import lib as blib\n'), ((4151, 4264), 'bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', 
'blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', (['context', 'sphere2_plc', 'sphere1_plc', 'sphere1_plc', 'w1'], {}), '(context,\n sphere2_plc, sphere1_plc, sphere1_plc, w1)\n', (4208, 4264), True, 'from bempp import lib as blib\n'), ((4268, 4381), 'bempp.lib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dSingleLayerBoundaryOperator', (['context', 'sphere1_plc', 'sphere2_plc', 'sphere2_plc', 'w1'], {}), '(context,\n sphere1_plc, sphere2_plc, sphere2_plc, w1)\n', (4325, 4381), True, 'from bempp import lib as blib\n'), ((4383, 4496), 'bempp.lib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', 'blib.createModifiedHelmholtz3dDoubleLayerBoundaryOperator', (['context', 'sphere1_plc', 'sphere2_plc', 'sphere2_plc', 'w1'], {}), '(context,\n sphere1_plc, sphere2_plc, sphere2_plc, w1)\n', (4440, 4496), True, 'from bempp import lib as blib\n'), ((4817, 4861), 'bempp.lib.createBlockedOperatorStructure', 'blib.createBlockedOperatorStructure', (['context'], {}), '(context)\n', (4852, 4861), True, 'from bempp import lib as blib\n'), ((5196, 5250), 'bempp.lib.createBlockedBoundaryOperator', 'blib.createBlockedBoundaryOperator', (['context', 'structure'], {}), '(context, structure)\n', (5230, 5250), True, 'from bempp import lib as blib\n'), ((5525, 5597), 'bempp.lib.createGridFunction', 'blib.createGridFunction', (['context', 'sphere2_plc', 'sphere2_plc', 'evalNullData'], {}), '(context, sphere2_plc, sphere2_plc, evalNullData)\n', (5548, 5597), True, 'from bempp import lib as blib\n'), ((5671, 5711), 'bempp.lib.createDefaultIterativeSolver', 'blib.createDefaultIterativeSolver', (['lhsOp'], {}), '(lhsOp)\n', (5704, 5711), True, 'from bempp import lib as blib\n'), ((5722, 5759), 'bempp.lib.defaultGmresParameterList', 'blib.defaultGmresParameterList', (['(1e-08)'], {}), '(1e-08)\n', (5752, 5759), True, 'from bempp import lib as blib\n'), ((310, 328), 'math.asin', 'math.asin', (['(1.0 / n)'], {}), '(1.0 / n)\n', (319, 328), 
False, 'import math\n'), ((5318, 5394), 'bempp.lib.createGridFunction', 'blib.createGridFunction', (['context', 'sphere1_plc', 'sphere1_plc', 'evalBoundaryData'], {}), '(context, sphere1_plc, sphere1_plc, evalBoundaryData)\n', (5341, 5394), True, 'from bempp import lib as blib\n'), ((5425, 5501), 'bempp.lib.createGridFunction', 'blib.createGridFunction', (['context', 'sphere1_plc', 'sphere1_plc', 'evalBoundaryData'], {}), '(context, sphere1_plc, sphere1_plc, evalBoundaryData)\n', (5448, 5501), True, 'from bempp import lib as blib\n'), ((350, 362), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (358, 362), False, 'import math\n'), ((1357, 1368), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1366, 1368), False, 'import os\n'), ((1438, 1449), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1447, 1449), False, 'import os\n'), ((1476, 1505), 'os.path.splitext', 'os.path.splitext', (['s1_geo_name'], {}), '(s1_geo_name)\n', (1492, 1505), False, 'import os\n'), ((1531, 1560), 'os.path.splitext', 'os.path.splitext', (['s2_geo_name'], {}), '(s2_geo_name)\n', (1547, 1560), False, 'import os\n'), ((2061, 2085), 'bempp.lib.createGridFactory', 'blib.createGridFactory', ([], {}), '()\n', (2083, 2085), True, 'from bempp import lib as blib\n'), ((2138, 2162), 'bempp.lib.createGridFactory', 'blib.createGridFactory', ([], {}), '()\n', (2160, 2162), True, 'from bempp import lib as blib\n')] |
import numpy as np
import os
from bolero.behavior_search import BlackBoxSearch
from bolero.representation import ConstantBehavior
from bolero.optimizer import NoOptimizer
from bolero.utils.testing import assert_pickle
from nose.tools import assert_false, assert_true, assert_raises_regexp
from numpy.testing import assert_array_equal
def test_black_box_search_requires_optimizer():
    """BlackBoxSearch.init must reject objects that are not Optimizers."""
    class FakeOptimizer(object):
        pass

    search = BlackBoxSearch(ConstantBehavior(), FakeOptimizer())
    assert_raises_regexp(
        TypeError, "expects instance of Optimizer", search.init, 5, 5)
def test_black_box_search_from_dicts():
    """Behavior and optimizer may be given as configuration dictionaries."""
    behavior_config = {"type": "bolero.representation.ConstantBehavior"}
    optimizer_config = {"type": "bolero.optimizer.NoOptimizer"}
    search = BlackBoxSearch(behavior_config, optimizer_config)
    search.init(5, 5)
    # The optimizer's initial parameters must be taken from the behavior.
    assert_array_equal(search.behavior.get_params(),
                       search.optimizer.initial_params)
def test_black_box_search_protocol():
    """Exercise the full behavior-search protocol once end to end."""
    n_in, n_out = 5, 5
    search = BlackBoxSearch(ConstantBehavior(), NoOptimizer())
    search.init(n_in, n_out)
    assert_false(search.is_behavior_learning_done())
    behavior = search.get_next_behavior()
    behavior.set_inputs(np.zeros(n_in))
    out_buffer = np.empty(n_out)
    behavior.get_outputs(out_buffer)
    search.set_evaluation_feedback(np.array([0.0]))
def test_save_black_box_search():
    """BlackBoxSearch can be pickled and its results written to disk."""
    search = BlackBoxSearch(ConstantBehavior(), NoOptimizer())
    search.init(5, 5)
    assert_pickle("BlackBoxSearch", search)
    result_dir = "." + os.sep
    search.write_results(result_dir)
    search.get_behavior_from_results(result_dir)
    pickle_file = result_dir + "BlackBoxSearch.pickle"
    assert_true(os.path.exists(pickle_file))
    # Clean up the artifact so repeated runs stay independent.
    if os.path.exists(pickle_file):
        os.remove(pickle_file)
| [
"bolero.optimizer.NoOptimizer",
"os.path.exists",
"nose.tools.assert_raises_regexp",
"bolero.behavior_search.BlackBoxSearch",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"bolero.representation.ConstantBehavior",
"bolero.utils.testing.assert_pickle",
"os.remove"
] | [((508, 587), 'nose.tools.assert_raises_regexp', 'assert_raises_regexp', (['TypeError', '"""expects instance of Optimizer"""', 'bs.init', '(5)', '(5)'], {}), "(TypeError, 'expects instance of Optimizer', bs.init, 5, 5)\n", (528, 587), False, 'from nose.tools import assert_false, assert_true, assert_raises_regexp\n'), ((776, 800), 'bolero.behavior_search.BlackBoxSearch', 'BlackBoxSearch', (['beh', 'opt'], {}), '(beh, opt)\n', (790, 800), False, 'from bolero.behavior_search import BlackBoxSearch\n'), ((1236, 1254), 'numpy.zeros', 'np.zeros', (['n_inputs'], {}), '(n_inputs)\n', (1244, 1254), True, 'import numpy as np\n'), ((1296, 1315), 'numpy.empty', 'np.empty', (['n_outputs'], {}), '(n_outputs)\n', (1304, 1315), True, 'import numpy as np\n'), ((1512, 1547), 'bolero.utils.testing.assert_pickle', 'assert_pickle', (['"""BlackBoxSearch"""', 'bs'], {}), "('BlackBoxSearch', bs)\n", (1525, 1547), False, 'from bolero.utils.testing import assert_pickle\n'), ((1734, 1758), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1748, 1758), False, 'import os\n'), ((461, 479), 'bolero.representation.ConstantBehavior', 'ConstantBehavior', ([], {}), '()\n', (477, 479), False, 'from bolero.representation import ConstantBehavior\n'), ((1071, 1089), 'bolero.representation.ConstantBehavior', 'ConstantBehavior', ([], {}), '()\n', (1087, 1089), False, 'from bolero.representation import ConstantBehavior\n'), ((1091, 1104), 'bolero.optimizer.NoOptimizer', 'NoOptimizer', ([], {}), '()\n', (1102, 1104), False, 'from bolero.optimizer import NoOptimizer\n'), ((1377, 1392), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1385, 1392), True, 'import numpy as np\n'), ((1454, 1472), 'bolero.representation.ConstantBehavior', 'ConstantBehavior', ([], {}), '()\n', (1470, 1472), False, 'from bolero.representation import ConstantBehavior\n'), ((1474, 1487), 'bolero.optimizer.NoOptimizer', 'NoOptimizer', ([], {}), '()\n', (1485, 1487), False, 'from bolero.optimizer 
import NoOptimizer\n'), ((1701, 1725), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1715, 1725), False, 'import os\n'), ((1768, 1787), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1777, 1787), False, 'import os\n')] |
import time
import shutil
import dlib
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import transforms
import dnnlib
import legacy
from configs import GENERATOR_CONFIGS
from dlib_utils.face_alignment import image_align
from dlib_utils.landmarks_detector import LandmarksDetector
from torch_utils.misc import copy_params_and_buffers
from pivot_tuning_inversion.utils.ImagesDataset import ImagesDataset, ImageLatentsDataset
from pivot_tuning_inversion.training.coaches.multi_id_coach import MultiIDCoach
class FaceLandmarksDetector:
    """Dlib landmarks detector wrapper.

    Detects 68 face landmarks with dlib and returns the face-aligned
    image; if no face is found, falls back to the unmodified source image.

    Parameters
    ----------
    model_path : str
        Path to the dlib shape-predictor weights.
    tmp_dir : str
        Directory for the temporary source/aligned image files.
    """
    def __init__(
        self,
        model_path='pretrained/shape_predictor_68_face_landmarks.dat',
        tmp_dir='tmp'
    ):
        self.detector = LandmarksDetector(model_path)
        # Timestamp makes the temporary file names unique per instance.
        self.timestamp = int(time.time())
        self.tmp_src = f'{tmp_dir}/{self.timestamp}_src.png'
        self.tmp_align = f'{tmp_dir}/{self.timestamp}_align.png'

    def __call__(self, imgpath):
        """Return an aligned (or fallback) RGB PIL image for ``imgpath``."""
        shutil.copy(imgpath, self.tmp_src)
        try:
            face_landmarks = list(self.detector.get_landmarks(self.tmp_src))[0]
            assert isinstance(face_landmarks, list)
            assert len(face_landmarks) == 68
            image_align(self.tmp_src, self.tmp_align, face_landmarks)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. No face detected (IndexError from [0]) or an
            # unexpected landmark shape: fall back to the raw image.
            im = PIL.Image.open(self.tmp_src)
            im.save(self.tmp_align)
        return PIL.Image.open(self.tmp_align).convert('RGB')
class VGGFeatExtractor():
    """Wrapper around the pretrained TorchScript VGG16 feature network."""

    def __init__(self, device):
        self.device = device
        self.url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
        # Download the scripted module once; keep it in eval mode on `device`.
        with dnnlib.util.open_url(self.url) as fh:
            scripted = torch.jit.load(fh)
        self.module = scripted.eval().to(device)

    def __call__(self, img):  # PIL image in
        """Return the (1, 1000) VGG16 feature vector for a PIL image."""
        batch = self._preprocess(img, self.device)
        return self.module(batch)

    def _preprocess(self, img, device):
        """Resize to 256x256 and convert a PIL image to a uint8 NCHW tensor."""
        resized = img.resize((256, 256), PIL.Image.LANCZOS)
        pixels = np.array(resized, dtype=np.uint8)
        batch = torch.tensor(pixels.transpose([2, 0, 1])).unsqueeze(dim=0)
        return batch.to(device)
class Generator():
    """StyleGAN2 generator wrapper.

    Loads pretrained weights from a network pickle into a freshly built
    generator and exposes latent-space (w) and style-space (s) mappings.
    """
    def __init__(self, ckpt, device):
        # Load the pretrained 'G_ema' network, then copy its parameters into
        # a generator constructed from the project's config for this resolution.
        with dnnlib.util.open_url(ckpt) as f:
            old_G = legacy.load_network_pkl(f)['G_ema'].requires_grad_(False).to(device)
        resolution = old_G.img_resolution
        generator_config = GENERATOR_CONFIGS(resolution=resolution)
        self.G_kwargs = generator_config.G_kwargs
        self.common_kwargs = generator_config.common_kwargs
        self.G = dnnlib.util.construct_class_by_name(**self.G_kwargs, **self.common_kwargs).eval().requires_grad_(False).to(device)
        copy_params_and_buffers(old_G, self.G, require_all=False)
        del old_G
        # Enumerate the style (affine) layer names: conv0/conv1/torgb for every
        # synthesis block from 4x4 up to the model resolution.
        G = self.G
        self.style_layers = [
            f'G.synthesis.b{feat_size}.{layer}.affine'
            for feat_size in [pow(2,x) for x in range(2, int(np.log2(resolution))+1)]
            for layer in ['conv0', 'conv1', 'torgb']]
        # Drop the first generated name (b4.conv0) — presumably the 4x4 block
        # has no conv0 layer; confirm against the synthesis network definition.
        del(self.style_layers[0])
        # Resolve each dotted layer name to its affine module.
        # NOTE(review): eval over locals() is fragile; it relies on the local
        # alias `G` bound above matching the 'G.' prefix in the names.
        scope = locals()
        self.to_stylespace = {layer:eval(layer, scope) for layer in self.style_layers}
        # Map each style layer name to the index of the w vector it consumes.
        w_idx_lst = generator_config.w_idx_lst
        assert len(self.style_layers) == len(w_idx_lst)
        self.to_w_idx = {self.style_layers[i]:w_idx_lst[i] for i in range(len(self.style_layers))}
    def mapping(self, z, truncation_psi=0.7, truncation_cutoff=None, skip_w_avg_update=False):
        '''random z -> latent w

        Runs the mapping network (class label fixed to None) with the given
        truncation settings.
        '''
        return self.G.mapping(
            z,
            None,
            truncation_psi=truncation_psi,
            truncation_cutoff=truncation_cutoff,
            skip_w_avg_update=skip_w_avg_update
        )
    def mapping_stylespace(self, latent):
        '''latent w -> style s

        Applies each affine layer to the w vector at its assigned index,
        returning a dict keyed by style-layer name.

        resolution | w_idx  | # conv    | # torgb   | indices
        4          | 0      | 1         | 1         | 0-1
        8          | 1      | 2         | 1         | 1-3
        16         | 3      | 2         | 1         | 3-5
        32         | 5      | 2         | 1         | 5-7
        64         | 7      | 2         | 1         | 7-9
        128        | 9      | 2         | 1         | 9-11
        256        | 11     | 2         | 1         | 11-13 # for 256 resolution
        512        | 13     | 2         | 1         | 13-15 # for 512 resolution
        1024       | 15     | 2         | 1         | 15-17 # for 1024 resolution
        '''
        styles = dict()
        for layer in self.style_layers:
            module = self.to_stylespace.get(layer)
            w_idx = self.to_w_idx.get(layer)
            # latent is (batch, num_ws, dim); pick the w this layer consumes.
            styles[layer] = module(latent.unbind(dim=1)[w_idx])
        return styles
    def synthesis_from_stylespace(self, latent, styles):
        '''style s -> generated image
        modulated conv2d, synthesis layer.weight, noise
        forward after styles = affine(w)
        '''
        return self.G.synthesis(latent, styles=styles, noise_mode='const')
    def synthesis(self, latent):
        '''latent w -> generated image
        '''
        return self.G.synthesis(latent, noise_mode='const')
class e4eEncoder:
    """e4e encoder wrapper: PIL face images -> latent codes w."""

    def __init__(self, device):
        self.device = device

    def __call__(self, target_pils):
        # Standard [-1, 1] normalization expected by the e4e encoder.
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        dataset = ImagesDataset(target_pils, self.device, preprocess)
        loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
        coach = MultiIDCoach(loader, device=self.device)
        # Invert the images one by one, then stack the per-image latents.
        per_image = [coach.get_e4e_inversion(image) for _fname, image in loader]
        return torch.cat(per_image)
class PivotTuning:
    '''pivot tuning inversion

    Fine-tunes the generator around fixed pivot codes for the target
    images and returns the tuned generator.

    mode
        - 'w' : use latent (w) pivot
        - 's' : use style (s) pivot
    '''
    def __init__(self, device, G, mode='w'):
        # Only the two pivot kinds documented above are supported.
        assert mode in ['w', 's']
        self.device = device
        self.G = G
        self.mode = mode
        self.resolution = G.img_resolution
    def __call__(self, latent, target_pils):
        # Pair each target image with its inverted pivot latent; images are
        # normalized to [-1, 1] and resized to the generator resolution.
        dataset = ImageLatentsDataset(
            target_pils,
            latent,
            self.device,
            transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])],),
            self.resolution,
        )
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
        coach = MultiIDCoach(
            dataloader,
            device=self.device,
            generator=self.G,
            mode=self.mode
        )
        # run coach by self.mode
        new_G = coach.train_from_latent()
        return new_G
| [
"dlib_utils.landmarks_detector.LandmarksDetector",
"legacy.load_network_pkl",
"dlib_utils.face_alignment.image_align",
"dnnlib.util.open_url",
"torch.jit.load",
"pivot_tuning_inversion.training.coaches.multi_id_coach.MultiIDCoach",
"numpy.array",
"torch_utils.misc.copy_params_and_buffers",
"configs.... | [((789, 818), 'dlib_utils.landmarks_detector.LandmarksDetector', 'LandmarksDetector', (['model_path'], {}), '(model_path)\n', (806, 818), False, 'from dlib_utils.landmarks_detector import LandmarksDetector\n'), ((1029, 1063), 'shutil.copy', 'shutil.copy', (['imgpath', 'self.tmp_src'], {}), '(imgpath, self.tmp_src)\n', (1040, 1063), False, 'import shutil\n'), ((2085, 2114), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (2093, 2114), True, 'import numpy as np\n'), ((2519, 2559), 'configs.GENERATOR_CONFIGS', 'GENERATOR_CONFIGS', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (2536, 2559), False, 'from configs import GENERATOR_CONFIGS\n'), ((2811, 2868), 'torch_utils.misc.copy_params_and_buffers', 'copy_params_and_buffers', (['old_G', 'self.G'], {'require_all': '(False)'}), '(old_G, self.G, require_all=False)\n', (2834, 2868), False, 'from torch_utils.misc import copy_params_and_buffers\n'), ((5665, 5730), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset, batch_size=1, shuffle=False)\n', (5692, 5730), False, 'import torch\n'), ((5752, 5796), 'pivot_tuning_inversion.training.coaches.multi_id_coach.MultiIDCoach', 'MultiIDCoach', (['dataloader'], {'device': 'self.device'}), '(dataloader, device=self.device)\n', (5764, 5796), False, 'from pivot_tuning_inversion.training.coaches.multi_id_coach import MultiIDCoach\n'), ((5939, 5957), 'torch.cat', 'torch.cat', (['latents'], {}), '(latents)\n', (5948, 5957), False, 'import torch\n'), ((6711, 6776), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset, batch_size=1, shuffle=False)\n', (6738, 6776), False, 'import torch\n'), ((6793, 6871), 'pivot_tuning_inversion.training.coaches.multi_id_coach.MultiIDCoach', 'MultiIDCoach', (['dataloader'], {'device': 'self.device', 'generator': 'self.G', 
'mode': 'self.mode'}), '(dataloader, device=self.device, generator=self.G, mode=self.mode)\n', (6805, 6871), False, 'from pivot_tuning_inversion.training.coaches.multi_id_coach import MultiIDCoach\n'), ((848, 859), 'time.time', 'time.time', ([], {}), '()\n', (857, 859), False, 'import time\n'), ((1266, 1323), 'dlib_utils.face_alignment.image_align', 'image_align', (['self.tmp_src', 'self.tmp_align', 'face_landmarks'], {}), '(self.tmp_src, self.tmp_align, face_landmarks)\n', (1277, 1323), False, 'from dlib_utils.face_alignment import image_align\n'), ((1727, 1757), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['self.url'], {}), '(self.url)\n', (1747, 1757), False, 'import dnnlib\n'), ((2328, 2354), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['ckpt'], {}), '(ckpt)\n', (2348, 2354), False, 'import dnnlib\n'), ((5537, 5558), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5556, 5558), False, 'from torchvision.transforms import transforms\n'), ((5576, 5630), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['[0.5, 0.5, 0.5]', '[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n', (5596, 5630), False, 'from torchvision.transforms import transforms\n'), ((6553, 6574), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6572, 6574), False, 'from torchvision.transforms import transforms\n'), ((6592, 6646), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['[0.5, 0.5, 0.5]', '[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n', (6612, 6646), False, 'from torchvision.transforms import transforms\n'), ((1790, 1807), 'torch.jit.load', 'torch.jit.load', (['f'], {}), '(f)\n', (1804, 1807), False, 'import torch\n'), ((2381, 2407), 'legacy.load_network_pkl', 'legacy.load_network_pkl', (['f'], {}), '(f)\n', (2404, 2407), False, 'import legacy\n'), ((2688, 2762), 'dnnlib.util.construct_class_by_name', 
'dnnlib.util.construct_class_by_name', ([], {}), '(**self.G_kwargs, **self.common_kwargs)\n', (2723, 2762), False, 'import dnnlib\n'), ((3053, 3072), 'numpy.log2', 'np.log2', (['resolution'], {}), '(resolution)\n', (3060, 3072), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
def singlewords():
    """Segment the black pixels of 'binary.png' into connected components.

    Hand-rolled 8-connected component labeling: every black pixel gets a
    unique label, repeated neighbor-merge passes propagate the minimum
    label through each component, single-pixel components are absorbed
    into a neighbor, and finally each surviving component is written to
    ./words/<i>.png plus a combined grayscale map in total.png.
    """
    name = "binary.png"
    img = cv2.imread(name)
    shape = img.shape
    print ("shape: ", shape)
    #new_img = img.copy()
    # position_array holds one label per pixel in channel 0; 0 = background.
    position_array = np.zeros((shape))
    count = 1
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            if (img[x, y, 0] == 0):
                position_array[x, y, 0] = count
                count += 1
    # Pass 1 (column-major): merge each labeled pixel with its right, lower
    # and lower-right neighbors, keeping the minimum label of the group.
    for y in range(0, shape[1]):
        for x in range(0, shape[0]):
            if (position_array[x, y, 0] != 0):
                tmp_list = []
                if (y+1 <= shape[1]-1):
                    if (position_array[x, y+1, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x, y+1, 0])
                        if (x+1 <= shape[0]-1):
                            if (position_array[x+1, y, 0] != 0):
                                tmp_list.append(position_array[x+1, y, 0])
                                if (position_array[x+1, y+1, 0] != 0):
                                    tmp_list.append(position_array[x+1, y+1, 0])
                                    min_index = min(tmp_list)
                                    position_array[x, y, 0] = position_array[x, y+1, 0] = \
                                    position_array[x+1, y, 0] = position_array[x+1, y+1, 0] = min_index
                                else:
                                    min_index = min(tmp_list)
                                    position_array[x, y, 0] = position_array[x, y+1, 0] = \
                                    position_array[x+1, y, 0] = min_index
                            else:
                                min_index = min(tmp_list)
                                position_array[x, y, 0] = position_array[x, y+1, 0] = min_index
                        else:
                            min_index = min(tmp_list)
                            position_array[x, y, 0] = position_array[x, y+1, 0] = min_index
                if (x+1 <= shape[0]-1):
                    tmp_list = []
                    if (position_array[x+1, y, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x+1, y, 0])
                        if (y+1 <= shape[1]-1):
                            if (position_array[x, y+1, 0] != 0):
                                tmp_list.append(position_array[x, y+1, 0])
                                if (position_array[x+1, y+1, 0] != 0):
                                    tmp_list.append(position_array[x+1, y+1, 0])
                                    min_index = min(tmp_list)
                                    position_array[x, y, 0] = position_array[x, y+1, 0] = \
                                    position_array[x+1, y, 0] = position_array[x+1, y+1, 0] = min_index
                                else:
                                    min_index = min(tmp_list)
                                    position_array[x, y, 0] = position_array[x+1, y, 0] = \
                                    position_array[x, y+1, 0] = min_index
                            else:
                                min_index = min(tmp_list)
                                position_array[x, y, 0] = position_array[x+1, y, 0] = min_index
                        else:
                            min_index = min(tmp_list)
                            position_array[x, y, 0] = position_array[x+1, y, 0] = min_index
    # Pass 2: forward sweeps (right and down neighbors) in both scan orders
    # to keep propagating the minimum label through each component.
    for y in range(0, shape[1]):
        for x in range(0, shape[0]):
            if (position_array[x, y, 0] != 0):
                tmp_list = []
                if (y+1 <= shape[1]-1):
                    if (position_array[x, y+1, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x, y+1, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x, y+1, 0] = min_index
                if (x+1 <= shape[0]-1):
                    tmp_list = []
                    if (position_array[x+1, y, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x+1, y, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x+1, y, 0] = min_index
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            if (position_array[x, y, 0] != 0):
                tmp_list = []
                if (x+1 <= shape[0]-1):
                    if (position_array[x+1, y, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x+1, y, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x+1, y, 0] = min_index
                if (y+1 <= shape[1]-1):
                    tmp_list = []
                    if (position_array[x, y+1, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x, y+1, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x, y+1, 0] = min_index
    # Pass 3: backward sweeps (left and up neighbors) in both scan orders.
    for y in range(0, shape[1]):
        for x in range(0, shape[0]):
            if (position_array[x, y, 0] != 0):
                tmp_list = []
                if (y-1 >= 0):
                    if (position_array[x, y-1, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x, y-1, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x, y-1, 0] = min_index
                if (x-1 >= 0):
                    tmp_list = []
                    if (position_array[x-1, y, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x-1, y, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x-1, y, 0] = min_index
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            if (position_array[x, y, 0] != 0):
                tmp_list = []
                if (x-1 >= 0):
                    if (position_array[x-1, y, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x-1, y, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x-1, y, 0] = min_index
                if (y-1 >= 0):
                    tmp_list = []
                    if (position_array[x, y-1, 0] != 0):
                        tmp_list.append(position_array[x, y, 0])
                        tmp_list.append(position_array[x, y-1, 0])
                        min_index = min(tmp_list)
                        position_array[x, y, 0] = position_array[x, y-1, 0] = min_index
    # Count pixels per surviving label.
    total_list = []
    count_list = []
    for y in range(0, shape[1]):
        for x in range(0, shape[0]):
            if (position_array[x, y, 0] != 0):
                tmp_index = position_array[x, y, 0]
                if (tmp_index not in total_list):
                    total_list.append(tmp_index)
                    count_list.append(1)
                else:
                    list_index = total_list.index(tmp_index)
                    count_list[list_index] += 1
    # Absorb single-pixel labels into a non-zero right/down neighbor label.
    # NOTE(review): x+1 / y+1 here are not bounds-checked — a lone pixel on
    # the last row or column would index out of range; confirm inputs
    # never place an isolated pixel on the image border.
    for i in range(0, len(total_list)):
        if (count_list[i] <= 1):
            for x in range(0, shape[0]):
                for y in range(0, shape[1]):
                    if (position_array[x, y, 0] == total_list[i]):
                        tmp_list = []
                        tmp_list.append(position_array[x+1, y, 0])
                        tmp_list.append(position_array[x, y+1, 0])
                        min_index = min(tmp_list)
                        if (min_index == 0):
                            tmp_list.remove(0)
                            min_index = min(tmp_list)
                        position_array[x, y, 0] = min_index
    # Re-collect the final set of labels (0 = background is excluded).
    total_list = []
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            tmp_index = position_array[x, y, 0]
            if (tmp_index not in total_list):
                total_list.append(tmp_index)
    total_list.remove(0)
    print ("total list: ", total_list)
    print ("len of total list: ", len(total_list))
    # Write one red mask image per component into ./words/ and a combined
    # grayscale map (component i drawn with intensity i*5) into total.png.
    count = 0
    total_img = np.zeros((shape))
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            for j in range(0, 3):
                total_img[x, y, j] = 255
    for i in range(0, len(total_list)):
        new_img = np.zeros((shape))
        for x in range(0, shape[0]):
            for y in range(0, shape[1]):
                if (position_array[x, y, 0] == total_list[i]):
                    new_img[x, y, 2] = 255
                    total_img[x, y, 0] = total_img[x, y, 1] = total_img[x, y, 2] = int(count)*5
                    #print ("pixel: ", int(count)*5)
                #else:
                #    total_img[x, y, 0] = total_img[x, y, 1] = total_img[x, y, 2] = 255
        savename = "./words/" + str(count) + ".png"
        cv2.imwrite(savename, new_img)
        count += 1
    savename = "total.png"
    cv2.imwrite(savename, total_img)
def sortwords():
    """Order the segmented word images into reading order.

    For every image in ./words/, the foreground pixel (red channel == 255)
    minimising x + y — i.e. closest to the top-left corner — is taken as the
    word's anchor.  Words are split into two text rows by comparing the
    anchor's row with half the image height, each row is sorted
    left-to-right, and the images are copied to ./sort_painting/ numbered in
    reading order.
    """
    name_dict = {}
    shape = None
    for filename in os.listdir("./words/"):
        name = "./words/" + filename
        img = cv2.imread(name)
        shape = img.shape
        pix_sums = []
        pix_x = []
        pix_y = []
        for x in range(0, shape[0]):
            for y in range(0, shape[1]):
                if img[x, y, 2] == 255:
                    pix_sums.append(int(x) + int(y))
                    pix_x.append(x)
                    pix_y.append(y)
        # anchor: foreground pixel with the smallest x + y
        min_total = min(pix_sums)
        min_index = pix_sums.index(min_total)
        name_dict[name] = [min_total, pix_x[min_index], pix_y[min_index]]
    # Split words into two text rows by the anchor's vertical position.
    # Fix: dict views are not indexable in Python 3, so iterate items()
    # instead of name_dict.keys()[i] / name_dict.values()[i].
    first_row = []
    second_row = []
    for name, anchor in name_dict.items():
        if anchor[1] < int(shape[0] / 2):
            first_row.append((name, anchor))
        else:
            second_row.append((name, anchor))
    # Sort each row by anchor (lexicographic: x+y, then x, then y).
    sort_first_row = sorted(first_row, key=lambda d: d[1])
    sort_second_row = sorted(second_row, key=lambda d: d[1])
    path = "./sort_painting"
    try:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
    except OSError:
        print("dir exist")
    # first row, numbered 0..len-1
    for i in range(0, len(sort_first_row)):
        img = cv2.imread(sort_first_row[i][0])
        savename = "./sort_painting/" + str(i) + ".png"
        cv2.imwrite(savename, img)
    # second row continues the numbering
    count = len(sort_first_row)
    for j in range(0, len(sort_second_row)):
        img = cv2.imread(sort_second_row[j][0])
        savename = "./sort_painting/" + str(j + count) + ".png"
        cv2.imwrite(savename, img)
# Script entry point: segment the source painting into individual word
# images (./words/), then copy them into reading order (./sort_painting/).
if __name__ == "__main__":
    singlewords()
    sortwords()
    print ("finished")
| [
"cv2.imwrite",
"os.path.exists",
"os.listdir",
"numpy.zeros",
"os.mkdir",
"cv2.imread"
] | [((96, 112), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (106, 112), False, 'import cv2\n'), ((211, 226), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (219, 226), True, 'import numpy as np\n'), ((8769, 8784), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (8777, 8784), True, 'import numpy as np\n'), ((9605, 9637), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'total_img'], {}), '(savename, total_img)\n', (9616, 9637), False, 'import cv2\n'), ((9713, 9735), 'os.listdir', 'os.listdir', (['"""./words/"""'], {}), "('./words/')\n", (9723, 9735), False, 'import os\n'), ((9002, 9017), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (9010, 9017), True, 'import numpy as np\n'), ((9524, 9554), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'new_img'], {}), '(savename, new_img)\n', (9535, 9554), False, 'import cv2\n'), ((9919, 9935), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (9929, 9935), False, 'import cv2\n'), ((11677, 11710), 'os.path.exists', 'os.path.exists', (['"""./sort_painting"""'], {}), "('./sort_painting')\n", (11691, 11710), False, 'import os\n'), ((11752, 11766), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (11760, 11766), False, 'import os\n'), ((11910, 11930), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (11920, 11930), False, 'import cv2\n'), ((11995, 12021), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'img'], {}), '(savename, img)\n', (12006, 12021), False, 'import cv2\n'), ((12155, 12175), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (12165, 12175), False, 'import cv2\n'), ((12246, 12272), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'img'], {}), '(savename, img)\n', (12257, 12272), False, 'import cv2\n')] |
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from .. import evaluator, metrics
from ..configuration import *
from .doc2vec_train_doc_prediction import doc2vec_prediction_model
from .doc2vec_train_doc_prediction import DocPredictionDataset
class DocPredictionEval(evaluator.Evaluator):
    """Evaluation loop for the doc2vec document-classification model.

    Builds the prediction graph in inference mode, accumulates the softmax
    cross-entropy loss and single-label metrics over the dataset, prints a
    confusion matrix at the end of every pass and keeps a copy of the
    checkpoint with the lowest per-sample loss seen so far.
    """
    def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
        config = tf.ConfigProto()
        # grow GPU memory on demand instead of pre-allocating all of it
        config.gpu_options.allow_growth = True
        super(DocPredictionEval, self).__init__(checkpoints_dir=log_dir,
                                                output_path=os.path.join(log_dir,
                                                                          dataset.type),
                                                dataset=dataset,
                                                singular_monitored_session_config=config,
                                                infinite_loop=True)
        # lowest per-sample loss observed so far; -1 means "none yet"
        self.best_loss = -1
    def model(self, input_vectors, input_gene, input_variation, output_label, batch_size,
              embedding_size=EMBEDDINGS_SIZE,
              output_classes=9):
        """Build the evaluation graph; loss/metrics are accumulated in
        instance attributes read by step() and end()."""
        logits, targets = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
                                                 output_label, batch_size,
                                                 is_training=False, embedding_size=embedding_size,
                                                 output_classes=output_classes)
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
        self.global_step = training_util.get_or_create_global_step()
        # one global step per evaluated batch
        global_step_increase = tf.assign_add(self.global_step, 1)
        self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                            trainable=False)
        self.accumulated_loss = tf.assign_add(self.accumulated_loss, tf.reduce_sum(loss))
        self.prediction = tf.nn.softmax(logits)
        self.metrics = metrics.single_label(self.prediction, targets, moving_average=False)
        steps = tf.cast(global_step_increase, dtype=tf.float32)
        # summary reports the running mean loss per sample
        tf.summary.scalar('loss', self.accumulated_loss / (steps * batch_size))
        return None
    def create_graph(self, dataset_tensor, batch_size):
        # unpack the dataset tuple and delegate graph construction to model()
        input_vectors, input_gene, input_variation, output_label = dataset_tensor
        self.batch_size = batch_size
        return self.model(input_vectors, input_gene, input_variation, output_label, batch_size)
    def step(self, session, graph_data, summary_op):
        # run one evaluation batch; keep the latest accumulated state so
        # end() can compute the final loss and confusion matrix
        self.num_steps, self.final_metrics, self.final_loss, summary = \
            session.run([self.global_step, self.metrics, self.accumulated_loss, summary_op])
        return summary
    def after_create_session(self, session, coord):
        super(DocPredictionEval, self).after_create_session(session, coord)
    def end(self, session):
        """Report loss and confusion matrix; snapshot best checkpoint."""
        super(DocPredictionEval, self).end(session)
        cm = self.final_metrics['confusion_matrix']
        data_size = self.num_steps * self.batch_size
        loss = self.final_loss / data_size
        print('Loss: {}'.format(loss))
        print('Confusion matrix:')
        for r in cm:
            print('\t'.join([str(x) for x in r]))
        # keep the checkpoint that achieved the lowest per-sample loss
        if self.best_loss < 0 or loss < self.best_loss:
            self.best_loss = loss
            self.copy_checkpoint_as_best()
class DocPredictionInference(evaluator.Evaluator):
    """Single-pass inference over a dataset: prints one CSV line per
    document with the smoothed probability of each of the 9 classes."""
    def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
        config = tf.ConfigProto()
        # grow GPU memory on demand instead of pre-allocating all of it
        config.gpu_options.allow_growth = True
        super(DocPredictionInference, self).__init__(checkpoints_dir=log_dir,
                                                     output_path=os.path.join(log_dir,
                                                                               dataset.type),
                                                     dataset=dataset,
                                                     singular_monitored_session_config=config,
                                                     infinite_loop=False)
    def model(self, input_vectors, input_gene, input_variation, batch_size,
              embedding_size=EMBEDDINGS_SIZE, output_classes=9):
        """Build the inference graph and return the softmax prediction op."""
        self.global_step = training_util.get_or_create_global_step()
        logits, _ = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
                                              None, batch_size,
                                              is_training=False, embedding_size=embedding_size,
                                              output_classes=output_classes)
        global_step_increase = tf.assign_add(self.global_step, 1)
        # tie the prediction to the step increment so every session.run of
        # the prediction advances the counter exactly once
        with tf.control_dependencies([global_step_increase]):
            self.prediction = tf.nn.softmax(logits)
        return self.prediction
    def end(self, session):
        pass
    def create_graph(self, dataset_tensor, batch_size):
        # label tensor is ignored during inference
        input_vectors, input_gene, input_variation, _ = dataset_tensor
        return self.model(input_vectors, input_gene, input_variation, batch_size)
    def after_create_session(self, session, coord):
        super(DocPredictionInference, self).after_create_session(session, coord)
        # CSV header of the submission file, written once per session
        print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
    def step(self, session, graph_data, summary_op):
        step, predictions = session.run([self.global_step, self.prediction])
        predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # penalize less the mistakes
        # NOTE(review): `sum` shadows the builtin; kept as-is (doc-only edit)
        sum = np.sum(predictions)
        # renormalise the smoothed probabilities so they add up to 1
        predictions = [p / sum for p in predictions]
        print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
        return None
# CLI entry point.  Mode is selected by the first argument:
#   val   -> compute metrics on the validation split
#   test  -> print per-document class probabilities for the stage-2 test set
#   train -> compute metrics on the training split
if __name__ == '__main__':
    import logging
    logging.getLogger().setLevel(logging.INFO)
    if len(sys.argv) > 1 and sys.argv[1] == 'val':
        # get validation error
        evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='val'),
                                      log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
        evaluator.run()
    elif len(sys.argv) > 1 and sys.argv[1] == 'test':
        # run inference on the stage-2 test set, loading the checkpoint
        # selected during validation
        evaluator = DocPredictionInference(dataset=DocPredictionDataset(type='stage2_test'),
                                          log_dir=os.path.join(DIR_D2V_DOC_LOGDIR, 'val'))
        evaluator.run()
    elif len(sys.argv) > 1 and sys.argv[1] == 'train':
        # get training error
        evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='train'),
                                      log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
        evaluator.run()
| [
"logging.getLogger",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.Variable",
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.assign_add",
"tensorflow.nn.softmax",
"tensorflo... | [((427, 443), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (441, 443), True, 'import tensorflow as tf\n'), ((1526, 1596), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'targets', 'logits': 'logits'}), '(labels=targets, logits=logits)\n', (1565, 1596), True, 'import tensorflow as tf\n'), ((1624, 1665), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (1663, 1665), False, 'from tensorflow.python.training import training_util\n'), ((1697, 1731), 'tensorflow.assign_add', 'tf.assign_add', (['self.global_step', '(1)'], {}), '(self.global_step, 1)\n', (1710, 1731), True, 'import tensorflow as tf\n'), ((1764, 1840), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'dtype': 'tf.float32', 'name': '"""accumulated_loss"""', 'trainable': '(False)'}), "(0.0, dtype=tf.float32, name='accumulated_loss', trainable=False)\n", (1775, 1840), True, 'import tensorflow as tf\n'), ((2001, 2022), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2014, 2022), True, 'import tensorflow as tf\n'), ((2131, 2178), 'tensorflow.cast', 'tf.cast', (['global_step_increase'], {'dtype': 'tf.float32'}), '(global_step_increase, dtype=tf.float32)\n', (2138, 2178), True, 'import tensorflow as tf\n'), ((2187, 2258), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', '(self.accumulated_loss / (steps * batch_size))'], {}), "('loss', self.accumulated_loss / (steps * batch_size))\n", (2204, 2258), True, 'import tensorflow as tf\n'), ((3561, 3577), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3575, 3577), True, 'import tensorflow as tf\n'), ((4291, 4332), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (4330, 4332), False, 'from tensorflow.python.training import training_util\n'), ((4688, 4722), 
'tensorflow.assign_add', 'tf.assign_add', (['self.global_step', '(1)'], {}), '(self.global_step, 1)\n', (4701, 4722), True, 'import tensorflow as tf\n'), ((5603, 5622), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (5609, 5622), True, 'import numpy as np\n'), ((1954, 1973), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (1967, 1973), True, 'import tensorflow as tf\n'), ((4736, 4783), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[global_step_increase]'], {}), '([global_step_increase])\n', (4759, 4783), True, 'import tensorflow as tf\n'), ((4815, 4836), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (4828, 4836), True, 'import tensorflow as tf\n'), ((5838, 5857), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5855, 5857), False, 'import logging\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 11 14:33:43 2017
@author: Lorna
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 07:38:39 2017
@author: Lorna
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""
Data structure creation and initialisation
"""
# Global parameters
T_final=3600 # time duration
N_t=500 # number of time steps: the time step value dt is computed later
X_final=5000 # road length
N_x=100 # number of space steps: the space step value dx is computed later
rho0=0.2 #jam density in Greenshield flux function(number of vehicles/m)
v0=15 #free flow velocity(m/s)
# structures for visualization and computation of time/space steps
T,dt=np.linspace(0,T_final,num=N_t,endpoint=True,retstep=True)
X,dx=np.linspace(0,X_final,num=N_x,endpoint=True,retstep=True)
#set the range and sample number of time(T)–[0,3600]seconds in every 7.2 seconds
#set the range and sample number of distance(X)–[0,5000]meters in every 50 meters
print("dx = ",dx," dt = ",dt)
# structure for simulation: density as a function of space and time
rho = np.zeros((N_x,N_t))
# initialization of the density at time 0 with a continuous function
for x in range(int(N_x/2)):
rho[x][0] = rho0/3+rho0*x/N_x/3 # from rho0/3 to rho0/2
for x in range(int(N_x/2),N_x):
# rho[x][0] = rho0/3+rho0*x/N_x/3 # from rho0/2 to rho0*2/3
rho[x][0] = rho0/3+rho0*x/N_x/3
print("t = 0")
plt.plot(X,rho[:,0])
plt.show()#plot the density in the range of x at t=0
"""
Start the main simulation loop
Note that the naive Euler integration scheme is ALWAYS numerically unstable
Learn about Von Neumann stability analysis
And use the simple (stable) Lax scheme
But stability needs the Courant Friedrichs Levy condition to be verified
Trick: if unstable, decrease value of dt
"""
for t in range(N_t-1): # at timestep t, compute rho at t+1
for x in range(1,N_x-1):
#calculate density at t+1 on i based on density at t on i and i+1
dr = v0*dt/dx*(2*rho[x][t]/rho0-1)*(rho[x+1][t]-rho[x][t])
r = rho[x][t] + dr #this is Lax Scheme
rho[x][t+1] = r
# for x==N_x, we take the derivative backward
x = 0
dr = v0*dt/dx*(2*rho[x][t]/rho0-1)*(rho[x+1][t]-rho[x][t])
r = (rho[x][t]+rho[x+1][t])/2 + dr
rho[x][t+1] = r
x = N_x-1
dr = v0*dt/dx*(2*rho[x][t]/rho0-1)*(rho[x][t]-rho[x-1][t])
r = (rho[x][t]+rho[x-1][t])/2 + dr
rho[x][t+1] = r
if((t+1)%int(N_t/5)==int(N_t/5)-1):
print("t = ",7.2*t)
plt.plot(X,rho[:,t])
plt.show()#plot the density in the range of x at t=705.6,1425.6,2145.6,2865.6,3585.6
X,T = np.meshgrid(X,T)
Z = rho.reshape(X.shape)
fig=plt.figure()
ax=Axes3D(fig)
ax.plot_surface(X,T,Z, rstride=1, cstride=1, cmap='rainbow')
plt.show()#plot the 3d figure
| [
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((760, 821), 'numpy.linspace', 'np.linspace', (['(0)', 'T_final'], {'num': 'N_t', 'endpoint': '(True)', 'retstep': '(True)'}), '(0, T_final, num=N_t, endpoint=True, retstep=True)\n', (771, 821), True, 'import numpy as np\n'), ((823, 884), 'numpy.linspace', 'np.linspace', (['(0)', 'X_final'], {'num': 'N_x', 'endpoint': '(True)', 'retstep': '(True)'}), '(0, X_final, num=N_x, endpoint=True, retstep=True)\n', (834, 884), True, 'import numpy as np\n'), ((1150, 1170), 'numpy.zeros', 'np.zeros', (['(N_x, N_t)'], {}), '((N_x, N_t))\n', (1158, 1170), True, 'import numpy as np\n'), ((1474, 1496), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'rho[:, 0]'], {}), '(X, rho[:, 0])\n', (1482, 1496), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1503, 1505), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2705), 'numpy.meshgrid', 'np.meshgrid', (['X', 'T'], {}), '(X, T)\n', (2699, 2705), True, 'import numpy as np\n'), ((2735, 2747), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2745, 2747), True, 'import matplotlib.pyplot as plt\n'), ((2751, 2762), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2757, 2762), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2824, 2834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2832, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2581), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'rho[:, t]'], {}), '(X, rho[:, t])\n', (2567, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2588, 2598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2596, 2598), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmaction.utils import PreciseBNHook
class ExampleDataset(Dataset):
    """Single-item dataset that always yields a constant 1.0 tensor
    under the 'imgs' key."""

    def __init__(self):
        self.index = 0

    def __getitem__(self, idx):
        # the value is constant regardless of idx
        return dict(imgs=torch.tensor([1.0], dtype=torch.float32))

    def __len__(self):
        return 1
class BiggerDataset(ExampleDataset):
    """Fixed-size (12 item) dataset whose per-item values are configurable."""

    def __init__(self, fixed_values=range(0, 12)):
        # the supplied values must cover every index of the dataset
        assert len(self) == len(fixed_values)
        self.fixed_values = fixed_values

    def __getitem__(self, idx):
        value = self.fixed_values[idx]
        return dict(imgs=torch.tensor([value], dtype=torch.float32))

    def __len__(self):
        # deliberately larger than ExampleDataset
        return 12
class ExampleModel(nn.Module):
    """Tiny Linear -> BatchNorm1d model with a constant dummy train_step."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        self.bn = nn.BatchNorm1d(1)
        self.test_cfg = None

    def forward(self, imgs, return_loss=False):
        # linear projection followed by batch normalisation
        hidden = self.conv(imgs)
        return self.bn(hidden)

    @staticmethod
    def train_step(data_batch, optimizer, **kwargs):
        # constant outputs: the hook tests only exercise the interface
        return {
            'loss': 0.5,
            'log_vars': {'accuracy': 0.98},
            'num_samples': 1,
        }
class SingleBNModel(ExampleModel):
    """Variant of ExampleModel that applies only batch normalisation."""

    def __init__(self):
        super().__init__()
        self.bn = nn.BatchNorm1d(1)
        self.test_cfg = None

    def forward(self, imgs, return_loss=False):
        # no linear layer: the input goes straight through BN
        normalised = self.bn(imgs)
        return normalised
class GNExampleModel(ExampleModel):
    """Variant whose normalisation layer is GroupNorm instead of BatchNorm."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        # one group over one channel
        self.bn = nn.GroupNorm(1, 1)
        self.test_cfg = None
class NoBNExampleModel(ExampleModel):
    """Variant of ExampleModel with no normalisation layer at all."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        self.test_cfg = None

    def forward(self, imgs, return_loss=False):
        # plain linear projection, no BN involved
        return self.conv(imgs)
def test_precise_bn():
    """Exercise PreciseBNHook construction and a full training pass on
    several model variants (BN, GroupNorm, no norm), then verify the
    recomputed BN statistics match the exact dataset mean/variance."""
    with pytest.raises(TypeError):
        # `data_loader` must be a Pytorch DataLoader
        test_dataset = ExampleModel()
        data_loader = DataLoader(
            test_dataset,
            batch_size=2,
            sampler=None,
            num_workers=0,
            shuffle=False)
        PreciseBNHook('data_loader')
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=2)
    model = ExampleModel()
    optimizer = build_optimizer(model, optimizer_cfg)
    data_loader = DataLoader(test_dataset, batch_size=2)
    precise_bn_loader = copy.deepcopy(data_loader)
    logger = get_logger('precise_bn')
    runner = EpochBasedRunner(
        model=model, batch_processor=None, optimizer=optimizer, logger=logger)
    with pytest.raises(AssertionError):
        # num_iters should be no larger than the total
        # number of iterations of the loader
        precise_bn_hook = PreciseBNHook(precise_bn_loader, num_iters=5)
        runner.register_hook(precise_bn_hook)
        runner.run([loader], [('train', 1)], 1)
    # test non-DDP model
    test_bigger_dataset = BiggerDataset()
    loader = DataLoader(test_bigger_dataset, batch_size=2)
    precise_bn_hook = PreciseBNHook(loader, num_iters=5)
    assert precise_bn_hook.num_iters == 5
    assert precise_bn_hook.interval == 1
    runner = EpochBasedRunner(
        model=model, batch_processor=None, optimizer=optimizer, logger=logger)
    runner.register_hook(precise_bn_hook)
    runner.run([loader], [('train', 1)], 1)
    # test model w/ gn layer (the hook must leave GroupNorm untouched)
    loader = DataLoader(test_bigger_dataset, batch_size=2)
    precise_bn_hook = PreciseBNHook(loader, num_iters=5)
    assert precise_bn_hook.num_iters == 5
    assert precise_bn_hook.interval == 1
    model = GNExampleModel()
    runner = EpochBasedRunner(
        model=model, batch_processor=None, optimizer=optimizer, logger=logger)
    runner.register_hook(precise_bn_hook)
    runner.run([loader], [('train', 1)], 1)
    # test model without bn layer (the hook must be a no-op)
    loader = DataLoader(test_bigger_dataset, batch_size=2)
    precise_bn_hook = PreciseBNHook(loader, num_iters=5)
    assert precise_bn_hook.num_iters == 5
    assert precise_bn_hook.interval == 1
    model = NoBNExampleModel()
    runner = EpochBasedRunner(
        model=model, batch_processor=None, optimizer=optimizer, logger=logger)
    runner.register_hook(precise_bn_hook)
    runner.run([loader], [('train', 1)], 1)
    # test how precise it is
    loader = DataLoader(test_bigger_dataset, batch_size=2)
    precise_bn_hook = PreciseBNHook(loader, num_iters=6) # run all
    assert precise_bn_hook.num_iters == 6
    assert precise_bn_hook.interval == 1
    model = SingleBNModel()
    runner = EpochBasedRunner(
        model=model, batch_processor=None, optimizer=optimizer, logger=logger)
    runner.register_hook(precise_bn_hook)
    runner.run([loader], [('train', 1)], 1)
    # recompute the exact per-batch mean/variance of the dataset
    imgs_list = list()
    for _, data in enumerate(loader):
        imgs_list.append(np.array(data['imgs']))
    mean = np.mean([np.mean(batch) for batch in imgs_list])
    # Bessel correction used in Pytorch, therefore ddof=1
    var = np.mean([np.var(batch, ddof=1) for batch in imgs_list])
    # the BN running statistics rewritten by the hook must equal them
    assert np.equal(mean, np.array(model.bn.running_mean))
    assert np.equal(var, np.array(model.bn.running_var))
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_ddp_model_precise_bn():
    """Run PreciseBNHook through one training epoch on a DDP-wrapped model."""
    # test DDP model
    test_bigger_dataset = BiggerDataset()
    loader = DataLoader(test_bigger_dataset, batch_size=2)
    precise_bn_hook = PreciseBNHook(loader, num_iters=5)
    assert precise_bn_hook.num_iters == 5
    assert precise_bn_hook.interval == 1
    model = ExampleModel()
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=True)
    # Bug fix: `optimizer` and `logger` were previously undefined names in
    # this function (NameError at runtime); build them as test_precise_bn does.
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    optimizer = build_optimizer(model, optimizer_cfg)
    logger = get_logger('precise_bn')
    runner = EpochBasedRunner(
        model=model,
        batch_processor=None,
        optimizer=optimizer,
        logger=logger)
    runner.register_hook(precise_bn_hook)
    runner.run([loader], [('train', 1)], 1)
| [
"mmcv.utils.get_logger",
"torch.nn.GroupNorm",
"numpy.mean",
"mmcv.runner.build_optimizer",
"torch.utils.data.DataLoader",
"mmaction.utils.PreciseBNHook",
"mmcv.runner.EpochBasedRunner",
"torch.nn.BatchNorm1d",
"numpy.array",
"torch.cuda.is_available",
"pytest.raises",
"torch.tensor",
"torch... | [((2701, 2739), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(2)'}), '(test_dataset, batch_size=2)\n', (2711, 2739), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2783, 2820), 'mmcv.runner.build_optimizer', 'build_optimizer', (['model', 'optimizer_cfg'], {}), '(model, optimizer_cfg)\n', (2798, 2820), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((2840, 2878), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(2)'}), '(test_dataset, batch_size=2)\n', (2850, 2878), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2903, 2929), 'copy.deepcopy', 'copy.deepcopy', (['data_loader'], {}), '(data_loader)\n', (2916, 2929), False, 'import copy\n'), ((2943, 2967), 'mmcv.utils.get_logger', 'get_logger', (['"""precise_bn"""'], {}), "('precise_bn')\n", (2953, 2967), False, 'from mmcv.utils import get_logger\n'), ((2981, 3072), 'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (2997, 3072), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((3433, 3478), 'torch.utils.data.DataLoader', 'DataLoader', (['test_bigger_dataset'], {'batch_size': '(2)'}), '(test_bigger_dataset, batch_size=2)\n', (3443, 3478), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3501, 3535), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['loader'], {'num_iters': '(5)'}), '(loader, num_iters=5)\n', (3514, 3535), False, 'from mmaction.utils import PreciseBNHook\n'), ((3632, 3723), 'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (3648, 3723), False, 'from mmcv.runner import 
EpochBasedRunner, build_optimizer\n'), ((3858, 3903), 'torch.utils.data.DataLoader', 'DataLoader', (['test_bigger_dataset'], {'batch_size': '(2)'}), '(test_bigger_dataset, batch_size=2)\n', (3868, 3903), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3926, 3960), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['loader'], {'num_iters': '(5)'}), '(loader, num_iters=5)\n', (3939, 3960), False, 'from mmaction.utils import PreciseBNHook\n'), ((4086, 4177), 'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (4102, 4177), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((4317, 4362), 'torch.utils.data.DataLoader', 'DataLoader', (['test_bigger_dataset'], {'batch_size': '(2)'}), '(test_bigger_dataset, batch_size=2)\n', (4327, 4362), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((4385, 4419), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['loader'], {'num_iters': '(5)'}), '(loader, num_iters=5)\n', (4398, 4419), False, 'from mmaction.utils import PreciseBNHook\n'), ((4547, 4638), 'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (4563, 4638), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((4773, 4818), 'torch.utils.data.DataLoader', 'DataLoader', (['test_bigger_dataset'], {'batch_size': '(2)'}), '(test_bigger_dataset, batch_size=2)\n', (4783, 4818), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((4841, 4875), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['loader'], {'num_iters': '(6)'}), '(loader, num_iters=6)\n', (4854, 4875), False, 'from mmaction.utils import PreciseBNHook\n'), ((5011, 5102), 
'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (5027, 5102), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((1122, 1137), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (1131, 1137), True, 'import torch.nn as nn\n'), ((1156, 1173), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (1170, 1173), True, 'import torch.nn as nn\n'), ((1652, 1669), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (1666, 1669), True, 'import torch.nn as nn\n'), ((1887, 1902), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (1896, 1902), True, 'import torch.nn as nn\n'), ((1921, 1939), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(1)', '(1)'], {}), '(1, 1)\n', (1933, 1939), True, 'import torch.nn as nn\n'), ((2081, 2096), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (2090, 2096), True, 'import torch.nn as nn\n'), ((2240, 2264), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2253, 2264), False, 'import pytest\n'), ((2379, 2466), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(2)', 'sampler': 'None', 'num_workers': '(0)', 'shuffle': '(False)'}), '(test_dataset, batch_size=2, sampler=None, num_workers=0, shuffle\n =False)\n', (2389, 2466), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2531, 2559), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['"""data_loader"""'], {}), "('data_loader')\n", (2544, 2559), False, 'from mmaction.utils import PreciseBNHook\n'), ((3088, 3117), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3101, 3117), False, 'import pytest\n'), ((3212, 3257), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['precise_bn_loader'], {'num_iters': '(5)'}), '(precise_bn_loader, 
num_iters=5)\n', (3225, 3257), False, 'from mmaction.utils import PreciseBNHook\n'), ((5514, 5545), 'numpy.array', 'np.array', (['model.bn.running_mean'], {}), '(model.bn.running_mean)\n', (5522, 5545), True, 'import numpy as np\n'), ((5572, 5602), 'numpy.array', 'np.array', (['model.bn.running_var'], {}), '(model.bn.running_var)\n', (5580, 5602), True, 'import numpy as np\n'), ((5826, 5871), 'torch.utils.data.DataLoader', 'DataLoader', (['test_bigger_dataset'], {'batch_size': '(2)'}), '(test_bigger_dataset, batch_size=2)\n', (5836, 5871), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5898, 5932), 'mmaction.utils.PreciseBNHook', 'PreciseBNHook', (['loader'], {'num_iters': '(5)'}), '(loader, num_iters=5)\n', (5911, 5932), False, 'from mmaction.utils import PreciseBNHook\n'), ((6273, 6364), 'mmcv.runner.EpochBasedRunner', 'EpochBasedRunner', ([], {'model': 'model', 'batch_processor': 'None', 'optimizer': 'optimizer', 'logger': 'logger'}), '(model=model, batch_processor=None, optimizer=optimizer,\n logger=logger)\n', (6289, 6364), False, 'from mmcv.runner import EpochBasedRunner, build_optimizer\n'), ((5280, 5302), 'numpy.array', 'np.array', (["data['imgs']"], {}), "(data['imgs'])\n", (5288, 5302), True, 'import numpy as np\n'), ((5324, 5338), 'numpy.mean', 'np.mean', (['batch'], {}), '(batch)\n', (5331, 5338), True, 'import numpy as np\n'), ((5441, 5462), 'numpy.var', 'np.var', (['batch'], {'ddof': '(1)'}), '(batch, ddof=1)\n', (5447, 5462), True, 'import numpy as np\n'), ((5642, 5667), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5665, 5667), False, 'import torch\n'), ((506, 546), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'dtype': 'torch.float32'}), '([1.0], dtype=torch.float32)\n', (518, 546), False, 'import torch\n'), ((864, 923), 'torch.tensor', 'torch.tensor', (['[self.fixed_values[idx]]'], {'dtype': 'torch.float32'}), '([self.fixed_values[idx]], dtype=torch.float32)\n', (876, 923), False, 'import torch\n'), 
((6148, 6175), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (6173, 6175), False, 'import torch\n')] |
import pandas as pd
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import accuracy_score, f1_score
graph_name = "ppi"
def build_dataframe(input_data: pd.DataFrame, col_name: str, preserve_int_col_name=False) -> pd.DataFrame:
    """
    Expand a ";"-separated string column into one integer column per token.

    For each row of `input_data`, the column named f"{col_name}s" (e.g.
    "features" holding "0;1;1;0") is split on ";" and each token becomes its
    own column, named f"{col_name}_{j}" or simply j when
    `preserve_int_col_name` is True.  The result is indexed by the original
    row index under the name "id".
    """
    records = []
    for row_id, row in input_data.iterrows():
        record = {"id": row_id}
        tokens = row[f"{col_name}s"].split(";")
        for position, token in enumerate(tokens):
            key = position if preserve_int_col_name else f"{col_name}_{position}"
            record[key] = int(float(token))
        records.append(record)
    return pd.DataFrame(records).set_index("id")
def build_vertices():
    """Load the train/test vertex tables for the current graph, tagging
    each frame with the split it belongs to ("train" or "test")."""
    train_path = f"./data/{graph_name}_train.csv"
    vertices_train = pd.read_csv(train_path, sep=",", index_col="id")
    vertices_train["dataset"] = "train"
    test_path = f"./data/{graph_name}_test.csv"
    vertices_test = pd.read_csv(test_path, sep=",", index_col="id")
    vertices_test["dataset"] = "test"
    return vertices_train, vertices_test
def build_graph():
    """
    Build the undirected graph from ./data/{graph_name}_e.csv (first line is
    a header and is skipped), with self-loops removed.

    Returns:
        nx.Graph: graph with integer node ids.
    """
    edges_path = f"./data/{graph_name}_e.csv"
    # Bug fix: use a context manager so the file handle is closed even if
    # parsing fails (the original opened the file and never closed it).
    with open(edges_path, "r") as data:
        next(data, None)  # skip the first line in the input file
        G = nx.parse_edgelist(data, delimiter=',', create_using=nx.Graph(),
                              nodetype=int)
    # Bug fix: Graph.selfloop_edges() was removed in networkx 2.4; the
    # module-level nx.selfloop_edges works on networkx >= 2.0. The list()
    # avoids mutating the graph while iterating the edge generator.
    G.remove_edges_from(list(nx.selfloop_edges(G)))  # removing self-loops
    return G
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """
    Convert a scipy sparse matrix to a torch sparse tensor (float32 values,
    int64 coordinate indices).
    """
    coo = sparse_mx.tocoo().astype(np.float32)
    row_col = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(torch.from_numpy(row_col),
                                    torch.from_numpy(coo.data),
                                    torch.Size(coo.shape))
def adjacency_matrix_GCN(G, theta=1):
    """
    Build the symmetrically normalized adjacency matrix used by GCN,
    D^(-1/2) (A + A^T + theta*I) D^(-1/2), as a torch sparse tensor.

    Args:
        G: networkx graph.
        theta: weight of the identity (self-loop) term.
    """
    # BUILDING ADJ MATRIX FOR GCN
    A = nx.to_scipy_sparse_matrix(G)
    A = A+A.T
    A += theta*sp.eye(A.shape[0])  # Â = A + theta*I
    A = sp.coo_matrix(A)  # sparse matrix in coordinate format
    rowsum = np.array(A.sum(1))  # degree vector D
    D = np.power(rowsum, -0.5).flatten()  # D = D^(-1/2)
    D[np.isinf(D)] = 0.
    D = sp.diags(D)
    # Bug fix: the normalized product was previously computed and its result
    # discarded, so the raw (unnormalized) adjacency was returned. Keep it.
    A = A.dot(D).transpose().dot(D).tocoo()
    return sparse_mx_to_torch_sparse_tensor(A)
def edge_list_SAGE():
    """Load the ppi edge list csv and return it as a transposed value array
    (shape: 2 x n_edges) for GraphSAGE."""
    edges = pd.read_csv('./data/ppi_e.csv')
    return edges.values.transpose()
def hamming_accuracy(prediction, true_values):
    """
    Multi-label classification metric: fraction of individual labels
    predicted correctly over all entries. Equivalent to traditional
    accuracy in a single-output scenario.
    """
    per_label_match = np.equal(prediction, true_values)
    return np.mean(np.sum(per_label_match) / float(true_values.size))
def get_score(prediction, true_values):
    """Print hamming accuracy, exact-match accuracy and macro/micro F1
    scores for the given predictions."""
    hamming = hamming_accuracy(prediction, true_values)
    exact = accuracy_score(prediction, true_values)
    macro_f1 = f1_score(y_true=true_values, y_pred=prediction, average="macro")
    micro_f1 = f1_score(y_true=true_values, y_pred=prediction, average="micro")
    print("\tHamming accuracy: {:.3f}".format(hamming))
    print("\tAccuracy, exact matches: {:.3f}".format(exact))
    print("\tMacro F1 Score: {:.3f}".format(macro_f1))
    print("\tMicro F1 Score: {:.3f}".format(micro_f1))
def bool_to_int(labels: list) -> list:
    """
    Return the positions at which *labels* equals 1.
    Used to create a valid Kaggle submission.
    E.g. [1, 0, 0, 1, 1] -> [0, 3, 4]
    """
    indices = []
    for position, flag in enumerate(labels):
        if flag == 1:
            indices.append(position)
    return indices
def get_results(filename,prediction,X_test_df):
    """Write a submission csv mapping each test id to the space-separated
    indices of its predicted labels."""
    labels_as_text = []
    for row in prediction:
        labels_as_text.append(" ".join(str(idx) for idx in bool_to_int(row)))
    submission = pd.DataFrame(labels_as_text, columns=["labels"], index=X_test_df.index)
    submission.to_csv(filename)
def a_third_law(labels,p):
    """
    For every label column that is active (== 1) in more than a third of the
    training examples, force that column to 1 in the prediction matrix *p*.
    *p* is mutated in place and also returned.
    """
    ones_per_column = np.count_nonzero(labels == 1, axis=0)
    frequent = ones_per_column > labels.shape[0] / 3
    p[:, frequent] = 1
    return p
def get_lmean(labels):
    """Return a (n_labels, 2) array whose rows are (column sum, column
    index) pairs for the binary label matrix *labels*."""
    lmean = np.zeros((labels.shape[1], 2))
    lmean[:, 0] = labels.sum(axis=0)
    lmean[:, 1] = np.arange(labels.shape[1])
    return lmean
def sort_lmean(lmean):
    """
    Sort the (count, index) pairs produced by get_lmean in descending order
    of count.

    Bug fix: the original reversed the ascending sort with a hard-coded
    index ``lmean[121 - i]``, which only works when lmean has exactly 122
    rows and raises (or returns garbage) for any other size. Reversing the
    sorted array works for any number of rows.
    """
    ascending = lmean[lmean[:, 0].argsort()]
    return ascending[::-1].copy()
"sklearn.metrics.f1_score",
"scipy.sparse.diags",
"pandas.read_csv",
"scipy.sparse.eye",
"numpy.power",
"networkx.Graph",
"torch.from_numpy",
"numpy.equal",
"numpy.zeros",
"networkx.parse_edgelist",
"networkx.to_scipy_sparse_matrix",
"numpy.vstack",
"scipy.sparse.coo_matrix",
"pandas.DataF... | [((830, 857), 'pandas.DataFrame', 'pd.DataFrame', (['vertices_dict'], {}), '(vertices_dict)\n', (842, 857), True, 'import pandas as pd\n'), ((1050, 1101), 'pandas.read_csv', 'pd.read_csv', (['vertices_path'], {'sep': '""","""', 'index_col': '"""id"""'}), "(vertices_path, sep=',', index_col='id')\n", (1061, 1101), True, 'import pandas as pd\n'), ((1270, 1321), 'pandas.read_csv', 'pd.read_csv', (['vertices_path'], {'sep': '""","""', 'index_col': '"""id"""'}), "(vertices_path, sep=',', index_col='id')\n", (1281, 1321), True, 'import pandas as pd\n'), ((1579, 1589), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1587, 1589), True, 'import networkx as nx\n'), ((1598, 1674), 'networkx.parse_edgelist', 'nx.parse_edgelist', (['Data'], {'delimiter': '""","""', 'create_using': 'Graphtype', 'nodetype': 'int'}), "(Data, delimiter=',', create_using=Graphtype, nodetype=int)\n", (1615, 1674), True, 'import networkx as nx\n'), ((2070, 2102), 'torch.from_numpy', 'torch.from_numpy', (['sparse_mx.data'], {}), '(sparse_mx.data)\n', (2086, 2102), False, 'import torch\n'), ((2115, 2142), 'torch.Size', 'torch.Size', (['sparse_mx.shape'], {}), '(sparse_mx.shape)\n', (2125, 2142), False, 'import torch\n'), ((2154, 2202), 'torch.sparse.FloatTensor', 'torch.sparse.FloatTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (2178, 2202), False, 'import torch\n'), ((2286, 2314), 'networkx.to_scipy_sparse_matrix', 'nx.to_scipy_sparse_matrix', (['G'], {}), '(G)\n', (2311, 2314), True, 'import networkx as nx\n'), ((2385, 2401), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['A'], {}), '(A)\n', (2398, 2401), True, 'import scipy.sparse as sp\n'), ((2566, 2577), 'scipy.sparse.diags', 'sp.diags', (['D'], {}), '(D)\n', (2574, 2577), True, 'import scipy.sparse as sp\n'), ((2733, 2764), 'pandas.read_csv', 'pd.read_csv', (['"""./data/ppi_e.csv"""'], {}), "('./data/ppi_e.csv')\n", (2744, 2764), True, 'import pandas as pd\n'), ((4015, 4078), 'pandas.DataFrame', 
'pd.DataFrame', (['y_pred'], {'columns': "['labels']", 'index': 'X_test_df.index'}), "(y_pred, columns=['labels'], index=X_test_df.index)\n", (4027, 4078), True, 'import pandas as pd\n'), ((4155, 4180), 'numpy.zeros', 'np.zeros', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (4163, 4180), True, 'import numpy as np\n'), ((4710, 4740), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], 2)'], {}), '((labels.shape[1], 2))\n', (4718, 4740), True, 'import numpy as np\n'), ((5018, 5039), 'numpy.zeros', 'np.zeros', (['lmean.shape'], {}), '(lmean.shape)\n', (5026, 5039), True, 'import numpy as np\n'), ((2344, 2362), 'scipy.sparse.eye', 'sp.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (2350, 2362), True, 'import scipy.sparse as sp\n'), ((2540, 2551), 'numpy.isinf', 'np.isinf', (['D'], {}), '(D)\n', (2548, 2551), True, 'import numpy as np\n'), ((2485, 2507), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (2493, 2507), True, 'import numpy as np\n'), ((3322, 3361), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['prediction', 'true_values'], {}), '(prediction, true_values)\n', (3336, 3361), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3408, 3472), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'true_values', 'y_pred': 'prediction', 'average': '"""macro"""'}), "(y_true=true_values, y_pred=prediction, average='macro')\n", (3416, 3472), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3519, 3583), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'true_values', 'y_pred': 'prediction', 'average': '"""micro"""'}), "(y_true=true_values, y_pred=prediction, average='micro')\n", (3527, 3583), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((1997, 2038), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (2006, 2038), True, 'import numpy as np\n'), ((3075, 3108), 'numpy.equal', 'np.equal', (['prediction', 'true_values'], 
{}), '(prediction, true_values)\n', (3083, 3108), True, 'import numpy as np\n')] |
from sys import argv
import Base
from Base import Net
from Base import Node, Board
import Base_Test
import numpy as np
import copy
import atexit
import matplotlib.pyplot as plt
import Base_Test
from netlist_parser import parse_file
def show_graph():
    """atexit hook: copy the best placement found back onto the original
    nodes, render it to BEST.svg, and show the cost-history plot. Blocks
    forever in a pause loop to keep the window open."""
    print('\n\r\n')
    for idx in range(len(min_cost_placement)):
        nodes_or[idx].pos = min_cost_placement[idx].pos
        print(nodes_or[idx])
    print("MIN COST %d" % min_cost)
    Base_Test.svg_draw_board(min_cost_placement, nets, BOX_W, BOX_H, "BEST.svg", str(min_cost),NODE_C_=NODE_C,NET_C_=NET_C)
    plt.plot(cost_list)
    plt.show()
    while True:
        plt.pause(1)
    return 0
# Report the best placement found so far even when the interpreter exits.
atexit.register(show_graph)
def round_pos(pos):
    """Round an (x, y) pair to the nearest grid cell, clamped to be
    non-negative."""
    rounded_x = max(round(pos[0]), 0)
    rounded_y = max(round(pos[1]), 0)
    return rounded_x, rounded_y
def get_xy_point(nd, connected_nodes):
    """Return the mean absolute per-axis offset of *nd* from its connected
    nodes, or nd's own position when it has no connections."""
    if not connected_nodes:
        return nd.get_x(), nd.get_y()
    x, y = nd.get_x(), nd.get_y()
    sum_dx = sum(abs(x - other.get_x()) for other in connected_nodes)
    sum_dy = sum(abs(y - other.get_y()) for other in connected_nodes)
    count = len(connected_nodes)
    return float(sum_dx) / count, float(sum_dy) / count
def get_force_vector(nd, connected_nodes):
    """Sum of (signed) displacement components from each connected node
    towards *nd*; (0, 0) when there are no connections."""
    x, y = nd.get_x(), nd.get_y()
    fx = sum(x - other.get_x() for other in connected_nodes)
    fy = sum(y - other.get_y() for other in connected_nodes)
    return fx, fy
def get_max_force_node(nodes):
    """Return the entry with the largest 'force_mag' (the first such entry
    on ties, matching a strict-greater scan)."""
    return max(nodes, key=lambda entry: entry["force_mag"])
def get_force_mag(coordinates):
    """Euclidean magnitude of a two-component force vector."""
    fx, fy = coordinates[0], coordinates[1]
    return (fx * fx + fy * fy) ** 0.5
# Module-level placement state, shared by show_graph() and the __main__ loop.
nodes = []  # all Node objects on the board
nets = []  # all Net objects connecting the nodes
cost_list = []  # total cost recorded after every iteration (plotted at exit)
NODE_C = 1000  # node count (overridden in __main__)
NET_C = 1000  # net count (overridden in __main__)
BOX_W = 100  # board width (overridden in __main__)
BOX_H = 100  # board height (overridden in __main__)
min_cost_placement = []  # best node placement seen so far
min_cost = 0  # cost of that placement
if __name__ == '__main__':
    debug = False
    # Parse CLI arguments, or fall back to a small default configuration.
    if len(argv) < 5:
        print("Usage: FD_Placement.py #node #nets width height error_margin #iterations")
        NODE_C = 1500
        NET_C = 2000
        BOX_W = 13
        BOX_H = 5
        ITR_COUNT = 1000
        ERR_CONSTRAIN = -10000000000000
    else:
        NODE_C = int(argv[1])
        NET_C = int(argv[2])
        BOX_W = int(argv[3])
        BOX_H = int(argv[4])
        ERR_CONSTRAIN = float(argv[5])
        ITR_COUNT = int(argv[6])
    locked_nodes = []
    # Build a random synthetic problem: NODE_C nodes and NET_C nets, each net
    # connecting 3-4 randomly chosen nodes. (These are replaced below once the
    # OrCAD netlist is parsed.)
    for i in range(NODE_C):
        nodes.append(Node((0, 0), i))
    for i in range(NET_C):
        nets.append(Net(i))
        itr = np.random.randint(3, 5)
        for j in range(itr):
            n_id = np.random.randint(NODE_C)
            if not nets[i].has(nodes[n_id]):
                nets[i].add_node(nodes[n_id])
    ################# BEGIN ORCAD NETLIST PARSING##################
    nets_or,nodes_or = parse_file("orcadNetlist.txt")
    # Work on deep copies; nets_or/nodes_or keep the originals so the final
    # placement can be copied back at the end (and in show_graph).
    nets = copy.deepcopy(nets_or)
    nodes = copy.deepcopy(nodes_or)
    i = 0
    # Re-index the copied nodes and remap their net references to positional
    # indices in nets_or.
    for n in nodes:
        n.id = i
        i += 1
        n_tmp = []
        for netIdOld in n.netIds:
            n_tmp.append(nets_or.index(netIdOld))
        n.netIds = copy.copy(n_tmp)
    i=0
    # Re-index the copied nets and remap their node lists to the copied nodes.
    for n in nets:
        n.id = i
        i+=1
        n_tmp = copy.deepcopy(n.nodeList)
        n.nodeList = []
        for node in n_tmp:
            n.nodeList.append(nodes[nodes_or.index(node)])
    NODE_C = len(nodes)
    NET_C = len(nets)
    ################# END ORCAD NETLIST PARSING##################
    # Lower bound on wire cost: every net needs at least len(net)-1 links.
    c = 0
    for net in nets:
        c += len(net) - 1
    print("LOWER BOUND:%d" % c)
    Base.random_place_board(nodes, BOX_W, BOX_H)
    cost = Base.get_total_cost(nets)
    Base_Test.svg_draw_board(nodes, nets, BOX_W, BOX_H, svg_name="Init_FD.svg", txt=str(cost),NODE_C_=NODE_C,NET_C_=NET_C)
    min_cost = cost
    last_cost = cost
    min_cost_placement = copy.copy(nodes)
    b = Board(nets=nets, nodes=nodes, width=BOX_W, height=BOX_H)
    if debug:
        print(b)
    forces = []
    cost_list = []
    # Force-directed placement main loop.
    for i in range(ITR_COUNT):
        # For every node compute the pull of its connected nodes and the
        # "zero position" towards which it should move.
        for node in nodes:
            connected_nodes = Base.get_connected_nodes(node, nets)
            forces.append({
                'force_mag': get_force_mag(get_force_vector(node, connected_nodes)),
                'node': node,
                'zero_pos': get_xy_point(node, connected_nodes)})
        # Relocate nodes one by one, most-stressed node first.
        while len(forces) > 0:
            max_force_node = get_max_force_node(forces)
            forces.remove(max_force_node)
            if debug:
                print("MAX_FORCE_NODE:%s : %f" % (max_force_node["node"], max_force_node["force_mag"]))
            # Try Moving it..
            dst_pos = Base.find_nearby_unlocked(round_pos(max_force_node["zero_pos"]), nodes, BOX_W, BOX_H)
            # Sanity check: warn if two nodes already share the target cell.
            mm = 0
            for n in nodes:
                if n.pos == dst_pos:
                    mm+=1
            if mm > 1:
                print("!!!!")
            if dst_pos != max_force_node["node"].pos:
                if dst_pos != False:
                    n2 = Base.find_node_at(dst_pos, nodes)
                    if type(n2) == type(False):
                        # Target cell is free: simply place the node there.
                        max_force_node["node"].pos = dst_pos
                        last_op = "Place"
                    else:
                        # Target cell is occupied: swap the two nodes.
                        Base.swap(max_force_node["node"], n2)
                        print("SWAPPED %s %s"%(max_force_node["node"],n2))
                        last_op = "SWAP"
                else:
                    if debug:
                        print("No place to move!")
            mm = 0
            for n in nodes:
                if n.pos == dst_pos:
                    mm+=1
            if mm > 1:
                print("!!!!")
            # Lock the node so it is not displaced again this iteration.
            max_force_node["node"].locked = True
        Base.unlock_all_nodes(nodes)
        current_cost = Base.get_total_cost(nets)
        cost_list.append(current_cost)
        if current_cost < min_cost:
            min_cost = current_cost
            min_cost_placement = copy.deepcopy(nodes)
            Base_Test.svg_draw_board(min_cost_placement, nets, BOX_W, BOX_H, "BEST_FD.svg", str(min_cost),NODE_C_=NODE_C,NET_C_=NET_C)
        if len(cost_list)%10 == 0:
            print("%d MIN:%d Current COST:%d\r" % (len(cost_list), min_cost, current_cost), end='\r')
        # Stop when the cost stagnates, meets the error margin, or hits the
        # theoretical lower bound.
        if current_cost == last_cost:
            break
        last_cost = current_cost
        if 100*float(min_cost - c)/c < ERR_CONSTRAIN:
            break
        if min_cost == c:
            break
    #print("%d MIN:%d Current COST:%d" % (len(cost_list),min_cost,current_cost))
    # Report the best placement found and plot the cost history.
    for i in range(len(min_cost_placement)):
        nodes_or[i].pos = min_cost_placement[i].pos
        print(nodes_or[i])
    print("MIN COST %d" % min_cost)
    plt.plot(cost_list)
    plt.show()
    while True:
        plt.pause(1)
| [
"Base.Net",
"Base.unlock_all_nodes",
"Base.get_total_cost",
"Base.Node",
"netlist_parser.parse_file",
"matplotlib.pyplot.plot",
"Base.random_place_board",
"Base.get_connected_nodes",
"Base.find_node_at",
"numpy.random.randint",
"Base.swap",
"copy.deepcopy",
"matplotlib.pyplot.pause",
"copy... | [((744, 771), 'atexit.register', 'atexit.register', (['show_graph'], {}), '(show_graph)\n', (759, 771), False, 'import atexit\n'), ((652, 671), 'matplotlib.pyplot.plot', 'plt.plot', (['cost_list'], {}), '(cost_list)\n', (660, 671), True, 'import matplotlib.pyplot as plt\n'), ((677, 687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3041), 'netlist_parser.parse_file', 'parse_file', (['"""orcadNetlist.txt"""'], {}), "('orcadNetlist.txt')\n", (3021, 3041), False, 'from netlist_parser import parse_file\n'), ((3054, 3076), 'copy.deepcopy', 'copy.deepcopy', (['nets_or'], {}), '(nets_or)\n', (3067, 3076), False, 'import copy\n'), ((3090, 3113), 'copy.deepcopy', 'copy.deepcopy', (['nodes_or'], {}), '(nodes_or)\n', (3103, 3113), False, 'import copy\n'), ((3759, 3803), 'Base.random_place_board', 'Base.random_place_board', (['nodes', 'BOX_W', 'BOX_H'], {}), '(nodes, BOX_W, BOX_H)\n', (3782, 3803), False, 'import Base\n'), ((3816, 3841), 'Base.get_total_cost', 'Base.get_total_cost', (['nets'], {}), '(nets)\n', (3835, 3841), False, 'import Base\n'), ((4035, 4051), 'copy.copy', 'copy.copy', (['nodes'], {}), '(nodes)\n', (4044, 4051), False, 'import copy\n'), ((4061, 4117), 'Base.Board', 'Board', ([], {'nets': 'nets', 'nodes': 'nodes', 'width': 'BOX_W', 'height': 'BOX_H'}), '(nets=nets, nodes=nodes, width=BOX_W, height=BOX_H)\n', (4066, 4117), False, 'from Base import Node, Board\n'), ((6987, 7006), 'matplotlib.pyplot.plot', 'plt.plot', (['cost_list'], {}), '(cost_list)\n', (6995, 7006), True, 'import matplotlib.pyplot as plt\n'), ((7012, 7022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7020, 7022), True, 'import matplotlib.pyplot as plt\n'), ((714, 726), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (723, 726), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2750), 'numpy.random.randint', 'np.random.randint', (['(3)', '(5)'], {}), '(3, 5)\n', (2744, 2750), 
True, 'import numpy as np\n'), ((3310, 3326), 'copy.copy', 'copy.copy', (['n_tmp'], {}), '(n_tmp)\n', (3319, 3326), False, 'import copy\n'), ((3407, 3432), 'copy.deepcopy', 'copy.deepcopy', (['n.nodeList'], {}), '(n.nodeList)\n', (3420, 3432), False, 'import copy\n'), ((5983, 6011), 'Base.unlock_all_nodes', 'Base.unlock_all_nodes', (['nodes'], {}), '(nodes)\n', (6004, 6011), False, 'import Base\n'), ((6036, 6061), 'Base.get_total_cost', 'Base.get_total_cost', (['nets'], {}), '(nets)\n', (6055, 6061), False, 'import Base\n'), ((7049, 7061), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (7058, 7061), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2651), 'Base.Node', 'Node', (['(0, 0)', 'i'], {}), '((0, 0), i)\n', (2640, 2651), False, 'from Base import Node, Board\n'), ((2704, 2710), 'Base.Net', 'Net', (['i'], {}), '(i)\n', (2707, 2710), False, 'from Base import Net\n'), ((2801, 2826), 'numpy.random.randint', 'np.random.randint', (['NODE_C'], {}), '(NODE_C)\n', (2818, 2826), True, 'import numpy as np\n'), ((4283, 4319), 'Base.get_connected_nodes', 'Base.get_connected_nodes', (['node', 'nets'], {}), '(node, nets)\n', (4307, 4319), False, 'import Base\n'), ((6210, 6230), 'copy.deepcopy', 'copy.deepcopy', (['nodes'], {}), '(nodes)\n', (6223, 6230), False, 'import copy\n'), ((5243, 5276), 'Base.find_node_at', 'Base.find_node_at', (['dst_pos', 'nodes'], {}), '(dst_pos, nodes)\n', (5260, 5276), False, 'import Base\n'), ((5483, 5520), 'Base.swap', 'Base.swap', (["max_force_node['node']", 'n2'], {}), "(max_force_node['node'], n2)\n", (5492, 5520), False, 'import Base\n')] |
"""Бивектор углового и линейного параметра"""
import numpy
import math
import zencad.util
class screw:
    """Geometric screw (bivector of an angular and a linear parameter).

    ``ang`` is the angular part and ``lin`` the linear part, both stored as
    ``zencad.util.vector3``.
    """
    __slots__ = ['ang', 'lin']

    def __init__(self, ang=(0, 0, 0), lin=(0, 0, 0)):
        self.ang = zencad.util.vector3(ang)
        self.lin = zencad.util.vector3(lin)

    def copy(self):
        """Return an independent copy of this screw."""
        return screw(ang=self.ang, lin=self.lin)

    def __add__(self, oth):
        return screw(self.ang + oth.ang, self.lin + oth.lin)

    def __sub__(self, oth):
        return screw(self.ang - oth.ang, self.lin - oth.lin)

    def __mul__(self, oth):
        return screw(self.ang * oth, self.lin * oth)

    def elementwise_mul(self, oth):
        """Component-wise product of two screws (all six components)."""
        r = self.to_array() * oth.to_array()
        return screw.from_array(r)

    def __neg__(self):
        return screw(-self.ang, -self.lin)

    def scale(self, oth):
        """Multiply both parts by the scalar *oth* (same as ``__mul__``)."""
        return screw(self.ang * oth, self.lin * oth)

    def __iadd__(self, oth):
        self.ang += oth.ang
        self.lin += oth.lin
        return self

    def carry(self, arm):
        """Carry the bivector to another application point, shifted BY THE
        ARM VECTOR (not by the radius vector of the force — the carry
        vector is opposite to that radius vector)."""
        return screw(
            ang=self.ang - arm.cross(self.lin),
            lin=self.lin)

    def kinematic_carry(self, arm):
        """Carry a velocity-like screw to a point offset by *arm*."""
        return screw(
            lin=self.lin + self.ang.cross(arm),
            ang=self.ang)

    def angular_carry(self, arm):
        """Alias of :meth:`kinematic_carry`."""
        return self.kinematic_carry(arm)

    def force_carry(self, arm):
        """Alias of :meth:`carry`."""
        return self.carry(arm)

    def dot(self, oth):
        """Sum of the products of all six matching components."""
        return (self.lin[0]*oth.lin[0]+self.lin[1]*oth.lin[1]+self.lin[2]*oth.lin[2] +
                self.ang[0]*oth.ang[0]+self.ang[1]*oth.ang[1]+self.ang[2]*oth.ang[2])

    def to_array(self):
        """Return the six components as a numpy array, linear part first.

        Bug fix: this method existed only as commented-out code, but
        ``elementwise_mul`` calls it, which raised AttributeError. The
        (lin, ang) order is the reverse of the (ang, lin) constructor
        order and matches ``from_array`` and ``npvec_lin_first``.
        """
        return self.npvec_lin_first()

    @staticmethod
    def from_trans(trans):
        """Build a screw from a zencad transform (translation part plus the
        rotation vector of its rotation part)."""
        lin = trans.translation()
        ang = trans.rotation().rotation_vector()
        return screw(lin=lin, ang=ang)

    def to_trans(self):
        """Convert back to a zencad transform: translate by ``lin``, then
        rotate around the normalized ``ang`` axis by its length."""
        trans0 = zencad.translate(*self.lin)
        rot_mul = self.ang.length()
        if rot_mul == 0:
            # Pure translation: no rotation axis is defined.
            return trans0
        else:
            rot_dim = self.ang.normalize()
            trans1 = zencad.rotate(rot_dim, rot_mul)
            return trans0 * trans1

    def npvec_lin_first(self):
        """Numpy array of the six components, linear part first."""
        return numpy.array([self.lin.x, self.lin.y, self.lin.z, self.ang.x, self.ang.y, self.ang.z])

    @staticmethod
    def from_array(a):
        """Inverse of ``npvec_lin_first``/``to_array``: a = (lin, ang)."""
        return screw(ang=(a[3], a[4], a[5]), lin=(a[0], a[1], a[2]))

    def __str__(self):
        return "(a:({},{},{}),l:({},{},{}))".format(*self.ang, *self.lin)

    def __repr__(self):
        return "screw(a:({},{},{}),l:({},{},{}))".format(*self.ang, *self.lin)

    def inverse_rotate_by(self, trans):
        """Rotate both parts by the inverse of *trans*'s rotation."""
        q = trans.rotation().inverse()
        return screw(ang=q.rotate(self.ang), lin=q.rotate(self.lin))

    def rotate_by(self, trans):
        """Apply the transform *trans* to both parts."""
        return screw(ang=trans(self.ang), lin=trans(self.lin))

    def rotate_by_quat(self, q):
        """Rotate both parts by the quaternion *q*."""
        return screw(ang=q.rotate(self.ang), lin=q.rotate(self.lin))
def screw_of_vector(vec, arm):
    """Build a screw whose linear part is *vec* and whose angular part is
    arm x vec."""
    moment = arm.cross(vec)
    return screw(lin=vec, ang=moment)
def second_kinematic_carry(iacc, ispd, arm):
    """Carry the acceleration screw *iacc* to a point offset by *arm*,
    adding the ang x (ang x arm) term contributed by the speed screw
    *ispd*."""
    spin_term = ispd.ang.cross(ispd.ang.cross(arm))
    carried_lin = iacc.lin + iacc.ang.cross(arm) + spin_term
    return screw(lin=carried_lin, ang=iacc.ang)
| [
"numpy.array"
] | [((2786, 2875), 'numpy.array', 'numpy.array', (['[self.lin.x, self.lin.y, self.lin.z, self.ang.x, self.ang.y, self.ang.z]'], {}), '([self.lin.x, self.lin.y, self.lin.z, self.ang.x, self.ang.y,\n self.ang.z])\n', (2797, 2875), False, 'import numpy\n')] |
"""
Deprecated file: Just keeping it, separate loading into numpy arrays is needed in future
"""
from utils import generate_file_name_from_labels
from constants import DATA_PATH, label_dict, folder_labels
from obspy import read
import os
import warnings
import numpy as np
def load_data(file_name, training_folder, folder_type="trimmed_data"):
    """
    Load three-component (BHZ/BHE/BHN) sac traces listed in a label file.

    Args:
        file_name (str): text file describing the data. Lines are assumed
            to be of the form: Time_stamp network station component label,
            and to reference BHZ components only; the BHE/BHN siblings are
            derived from the BHZ name and assumed to share its label.
        training_folder (str): folder under DATA_PATH containing the data.
        folder_type (str): kind of data to load (processed_data, raw_data,
            audio, plots). Default = trimmed_data.

    Returns (np arrays): X (training data), Y (training labels) and
        X_names (file name associated with each entry of X).
    """
    fl_map = generate_file_name_from_labels(file_name)
    X, Y, X_names = [], [], []
    train_path = DATA_PATH / training_folder
    for folder, files in fl_map.items():
        folder_path = train_path / folder / folder_type
        for file in files:
            # file is a list [file_name, label]; derive the paths of all
            # three components (BHZ name is used as-is).
            component_paths = [
                str(folder_path / (file[0].replace('BHZ', comp) + '.sac'))
                for comp in ('BHZ', 'BHE', 'BHN')
            ]
            if all(os.path.exists(path) for path in component_paths):
                streams = [read(path) for path in component_paths]
                X.append([stream[0].data for stream in streams])
                X_names.append(file[0])
                Y.append(label_dict[file[1]])
            else:
                # Warn users if some file is not found
                warnings.warn("File not found: {}".format(file[0]))
    return np.array(X), np.array(Y, dtype='int64'), X_names
def load_data_from_folder(training_folder, folder_type):
    """
    Load three-component (BHZ/BHE/BHN) sac traces directly from a folder.
    Assumes the following folder structure:
        Training Folder name
            - Data Folder 1
                - positive
                - negative
                - etc
            - Data Folder 2
                - ...

    Args:
        training_folder (str): name of the parent training folder.
        folder_type (str): type of examples to load (positive, negative, ...).

    Returns (np arrays): X (training data), Y (training labels) and
        X_names (base file name associated with each entry of X).
    """
    X, Y, X_names = [], [], []
    train_path = DATA_PATH / training_folder
    for folder in os.listdir(train_path):
        folder_path = train_path / folder
        if os.path.isdir(folder_path):
            # Each earthquake data has a different folder, so loop through this inner folder
            for inner_folder in os.listdir(folder_path):
                if folder_type == inner_folder:
                    # Collect unique base names (strip the trailing
                    # "BH?.SAC" — 7 characters — from each file name).
                    # Bug fix: the original appended the base name once per
                    # component file, so every example was loaded (and its
                    # label counted) three times.
                    files = []
                    seen = set()
                    for file in os.listdir(folder_path / inner_folder):
                        if '.SAC' in file or '.sac' in file:
                            base = file[:-7]
                            if base not in seen:
                                seen.add(base)
                                files.append(base)
                    # Now, load three component data for each unique file name
                    for file in files:
                        file_path_z = str(folder_path / inner_folder / (file + 'BHZ' + '.SAC'))
                        file_path_e = str(folder_path / inner_folder / (file + 'BHE' + '.SAC'))
                        file_path_n = str(folder_path / inner_folder / (file + 'BHN' + '.SAC'))
                        if os.path.exists(file_path_z) and os.path.exists(
                                file_path_e) and os.path.exists(
                                file_path_n):
                            st1 = read(file_path_z)
                            st2 = read(file_path_e)
                            st3 = read(file_path_n)
                            X.append([st1[0].data, st2[0].data, st3[0].data])
                            X_names.append(file)
                            Y.append(folder_labels[folder_type])
    return np.array(X), np.array(Y, dtype='int64'), X_names
if __name__ == '__main__':
    # Smoke test: load the positive examples and print the per-trace shapes.
    X, Y, X_names = load_data_from_folder(training_folder='Training_Set_Prem',
                                          folder_type='positive')
    # X, Y, X_names = load_data('../../data/V_golden.txt', training_folder='Training_Set_Vivian')
    print(X.shape)
    for i in range(X.shape[0]):
        # Print each component's shape; presumably traces can differ in
        # length across examples — TODO confirm.
        print(X[i][0].shape[-1], X[i][1].shape, X[i][2].shape)
    # X = np.expand_dims(X, axis=2)
    # print(X.shape)
| [
"obspy.read",
"utils.generate_file_name_from_labels",
"os.path.exists",
"os.listdir",
"numpy.array",
"os.path.isdir"
] | [((1020, 1061), 'utils.generate_file_name_from_labels', 'generate_file_name_from_labels', (['file_name'], {}), '(file_name)\n', (1050, 1061), False, 'from utils import generate_file_name_from_labels\n'), ((3240, 3262), 'os.listdir', 'os.listdir', (['train_path'], {}), '(train_path)\n', (3250, 3262), False, 'import os\n'), ((2408, 2419), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2416, 2419), True, 'import numpy as np\n'), ((2421, 2447), 'numpy.array', 'np.array', (['Y'], {'dtype': '"""int64"""'}), "(Y, dtype='int64')\n", (2429, 2447), True, 'import numpy as np\n'), ((3317, 3343), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (3330, 3343), False, 'import os\n'), ((4987, 4998), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4995, 4998), True, 'import numpy as np\n'), ((5000, 5026), 'numpy.array', 'np.array', (['Y'], {'dtype': '"""int64"""'}), "(Y, dtype='int64')\n", (5008, 5026), True, 'import numpy as np\n'), ((3470, 3493), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (3480, 3493), False, 'import os\n'), ((1869, 1896), 'os.path.exists', 'os.path.exists', (['file_path_z'], {}), '(file_path_z)\n', (1883, 1896), False, 'import os\n'), ((1901, 1928), 'os.path.exists', 'os.path.exists', (['file_path_e'], {}), '(file_path_e)\n', (1915, 1928), False, 'import os\n'), ((1933, 1960), 'os.path.exists', 'os.path.exists', (['file_path_n'], {}), '(file_path_n)\n', (1947, 1960), False, 'import os\n'), ((2005, 2022), 'obspy.read', 'read', (['file_path_z'], {}), '(file_path_z)\n', (2009, 2022), False, 'from obspy import read\n'), ((2045, 2062), 'obspy.read', 'read', (['file_path_e'], {}), '(file_path_e)\n', (2049, 2062), False, 'from obspy import read\n'), ((2085, 2102), 'obspy.read', 'read', (['file_path_n'], {}), '(file_path_n)\n', (2089, 2102), False, 'from obspy import read\n'), ((3683, 3721), 'os.listdir', 'os.listdir', (['(folder_path / inner_folder)'], {}), '(folder_path / inner_folder)\n', (3693, 3721), 
False, 'import os\n'), ((4468, 4495), 'os.path.exists', 'os.path.exists', (['file_path_z'], {}), '(file_path_z)\n', (4482, 4495), False, 'import os\n'), ((4500, 4527), 'os.path.exists', 'os.path.exists', (['file_path_e'], {}), '(file_path_e)\n', (4514, 4527), False, 'import os\n'), ((4565, 4592), 'os.path.exists', 'os.path.exists', (['file_path_n'], {}), '(file_path_n)\n', (4579, 4592), False, 'import os\n'), ((4661, 4678), 'obspy.read', 'read', (['file_path_z'], {}), '(file_path_z)\n', (4665, 4678), False, 'from obspy import read\n'), ((4713, 4730), 'obspy.read', 'read', (['file_path_e'], {}), '(file_path_e)\n', (4717, 4730), False, 'from obspy import read\n'), ((4765, 4782), 'obspy.read', 'read', (['file_path_n'], {}), '(file_path_n)\n', (4769, 4782), False, 'from obspy import read\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import pickle as pickle
import glob
import os
# Locate and load the pickled predictions; the file stores a dict with a
# "predictions" entry. NOTE(review): the open() handle is never closed.
print(glob.glob(os.path.expanduser("~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl")))
predictions = pickle.load(open(glob.glob(os.path.expanduser("~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl"))[0]))["predictions"]
print(len(predictions))

# x-axis values and the initial curve (first "systole" prediction).
p = np.linspace(0.0, 600.0, 600)
print(predictions[0]["systole"].shape)
pp = predictions[0]["systole"][0]

fig = plt.figure()
mngr = plt.get_current_fig_manager()
# to put it into the upper left corner for example:
mngr.window.setGeometry(50, 100, 600, 300)

# Keep the Line2D handle so the animation callbacks can update its y-data.
im1 = fig.gca().plot(p, pp)
def init():
    """Reset the plotted line to the first systole curve before the
    animation starts."""
    first_curve = predictions[0]["systole"][0]
    im1[0].set_ydata(first_curve)
def animate(i):
    """Frame callback: show the i-th systole curve and update the title."""
    curve = predictions[0]["systole"][i]
    fig.suptitle("power %f" % float(i))
    im1[0].set_ydata(curve)
    return im1
# Keep a reference to the animation object so it is not garbage-collected.
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=50)
#anim.save('my_animation.mp4')
plt.show() | [
"matplotlib.animation.FuncAnimation",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_current_fig_manager",
"os.path.expanduser",
"matplotlib.pyplot.show"
] | [((419, 447), 'numpy.linspace', 'np.linspace', (['(0.0)', '(600.0)', '(600)'], {}), '(0.0, 600.0, 600)\n', (430, 447), True, 'import numpy as np\n'), ((532, 544), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (542, 544), True, 'import matplotlib.pyplot as plt\n'), ((553, 582), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (580, 582), True, 'import matplotlib.pyplot as plt\n'), ((940, 1018), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'frames': '(100)', 'interval': '(50)'}), '(fig, animate, init_func=init, frames=100, interval=50)\n', (963, 1018), False, 'from matplotlib import animation\n'), ((1052, 1062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1060, 1062), True, 'import matplotlib.pyplot as plt\n'), ((154, 241), 'os.path.expanduser', 'os.path.expanduser', (['"""~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl"""'], {}), "(\n '~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl')\n", (172, 241), False, 'import os\n'), ((281, 368), 'os.path.expanduser', 'os.path.expanduser', (['"""~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl"""'], {}), "(\n '~/storage/metadata/kaggle-heart/predictions/j7_jeroen_ch.pkl')\n", (299, 368), False, 'import os\n')] |
#!/usr/bin/env python
import os
import numpy as np
from scipy.interpolate import interp1d
from pyPanair.preprocess import wgs_creator
from pyPanair.utilities import bspline
def main(x1, x2, y1, y2, y3, aoas=(7.42), target_dir=""):
    """Create a LaWGS file for a twisted rectangular wing (ADODG reference
    case 3) and the matching panair .aux file.

    Parameters
    ----------
    x1, x2 : float
        Normalized spanwise positions of the two inner b-spline control
        points of the twist distribution.
    y1, y2, y3 : float
        Twist values at the inner control points and at the tip.
    aoas : angles of attack forwarded to create_aux as ``alpha``.
        NOTE(review): the default ``(7.42)`` is a plain float, not a
        one-element tuple — confirm whether ``(7.42,)`` was intended.
    target_dir : str
        Directory where ADODG_case3.wgs / ADODG_case3.aux are written.
    """
    wgs = wgs_creator.LaWGS("ADODG_case3")
    # Discretization: chordwise, spanwise and wingtip point counts.
    n_wing_x = 30
    n_wing_y = 30
    n_wing_z = 8
    chord_wing = 1.
    halfspan_wing = chord_wing * 3  # aspect-ratio-6 rectangular wing (half-span 3c)
    # define twist distribution as a cubic b-spline through four control points
    cv = np.array(((0, 0),
               (x1, y1),
               (x2, y2),
               (1, y3)))
    twist_func = bspline(cv, degree=3, periodic=False)
    twist_xy = twist_func(np.linspace(0, 1, 150))
    # Interpolate twist over the physical span coordinate.
    twist_dist = interp1d(twist_xy[:,0] * halfspan_wing, twist_xy[:,1])
    # create wing: rotate a NACA0010 section by the local twist at each station
    base_airfoil = wgs_creator.naca4digit("0010", num=n_wing_x, chord=chord_wing)
    wing = list()
    span_pos = np.linspace(0, halfspan_wing, n_wing_y)
    for y in span_pos:
        rot_foil = base_airfoil.roty(base_airfoil[0], twist_dist(y))
        rot_foil.shift((0, y, 0), inplace=True)
        wing.append(rot_foil)
    wing = wgs_creator.Network(wing)
    wgs.append_network("wing", wing, 1)
    # create wingtip by revolving the upper half-section through 180 degrees
    degs = np.linspace(0, -180, n_wing_z)
    wingtipu, _ = base_airfoil.shift((0, halfspan_wing, 0)).split_half()
    wingtip = [wingtipu.rotx(wingtipu[0], d) for d in degs]
    wingtip = wgs_creator.Network(wingtip)
    # Apply the tip twist to the whole wingtip network.
    wingtip = wingtip.roty(wingtipu[0], twist_dist(halfspan_wing))
    wgs.append_network("wingtip", wingtip, 1)
    # add wake trailing 50 chord lengths downstream
    wake_length = chord_wing * 50.
    wingwake = wing.make_wake(3, wake_length)
    wgs.append_network("wingwake", wingwake, 18)
    wgs.create_wgs(os.path.join(target_dir, "ADODG_case3.wgs"))
    # Reference quantities for the .aux file (moments about quarter-chord).
    span = halfspan_wing * 2
    sref = chord_wing * span
    xref = chord_wing * 0.25
    aux_name = os.path.join(target_dir, "ADODG_case3.aux")
    wgs.create_aux(filename=aux_name, alpha=aoas, mach=0.5, cbar=chord_wing, span=span, sref=sref,
                   xref=xref, zref=0.)
    # wgs.create_stl("ADODG3.stl")
# wgs.create_stl("ADODG3.stl")
if __name__ == '__main__':
    # NOTE(review): main() requires five positional arguments
    # (x1, x2, y1, y2, y3); calling it with no arguments raises TypeError.
    # Supply the twist control-point values here before running.
    main()
| [
"pyPanair.preprocess.wgs_creator.Network",
"pyPanair.preprocess.wgs_creator.naca4digit",
"os.path.join",
"scipy.interpolate.interp1d",
"pyPanair.utilities.bspline",
"pyPanair.preprocess.wgs_creator.LaWGS",
"numpy.array",
"numpy.linspace"
] | [((335, 367), 'pyPanair.preprocess.wgs_creator.LaWGS', 'wgs_creator.LaWGS', (['"""ADODG_case3"""'], {}), "('ADODG_case3')\n", (352, 367), False, 'from pyPanair.preprocess import wgs_creator\n'), ((526, 573), 'numpy.array', 'np.array', (['((0, 0), (x1, y1), (x2, y2), (1, y3))'], {}), '(((0, 0), (x1, y1), (x2, y2), (1, y3)))\n', (534, 573), True, 'import numpy as np\n'), ((652, 689), 'pyPanair.utilities.bspline', 'bspline', (['cv'], {'degree': '(3)', 'periodic': '(False)'}), '(cv, degree=3, periodic=False)\n', (659, 689), False, 'from pyPanair.utilities import bspline\n'), ((759, 815), 'scipy.interpolate.interp1d', 'interp1d', (['(twist_xy[:, 0] * halfspan_wing)', 'twist_xy[:, 1]'], {}), '(twist_xy[:, 0] * halfspan_wing, twist_xy[:, 1])\n', (767, 815), False, 'from scipy.interpolate import interp1d\n'), ((855, 917), 'pyPanair.preprocess.wgs_creator.naca4digit', 'wgs_creator.naca4digit', (['"""0010"""'], {'num': 'n_wing_x', 'chord': 'chord_wing'}), "('0010', num=n_wing_x, chord=chord_wing)\n", (877, 917), False, 'from pyPanair.preprocess import wgs_creator\n'), ((953, 992), 'numpy.linspace', 'np.linspace', (['(0)', 'halfspan_wing', 'n_wing_y'], {}), '(0, halfspan_wing, n_wing_y)\n', (964, 992), True, 'import numpy as np\n'), ((1179, 1204), 'pyPanair.preprocess.wgs_creator.Network', 'wgs_creator.Network', (['wing'], {}), '(wing)\n', (1198, 1204), False, 'from pyPanair.preprocess import wgs_creator\n'), ((1282, 1312), 'numpy.linspace', 'np.linspace', (['(0)', '(-180)', 'n_wing_z'], {}), '(0, -180, n_wing_z)\n', (1293, 1312), True, 'import numpy as np\n'), ((1463, 1491), 'pyPanair.preprocess.wgs_creator.Network', 'wgs_creator.Network', (['wingtip'], {}), '(wingtip)\n', (1482, 1491), False, 'from pyPanair.preprocess import wgs_creator\n'), ((1933, 1976), 'os.path.join', 'os.path.join', (['target_dir', '"""ADODG_case3.aux"""'], {}), "(target_dir, 'ADODG_case3.aux')\n", (1945, 1976), False, 'import os\n'), ((717, 739), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 
'(150)'], {}), '(0, 1, 150)\n', (728, 739), True, 'import numpy as np\n'), ((1780, 1823), 'os.path.join', 'os.path.join', (['target_dir', '"""ADODG_case3.wgs"""'], {}), "(target_dir, 'ADODG_case3.wgs')\n", (1792, 1823), False, 'import os\n')] |
import tensorflow as tf
import numpy as np
from models.autoencoder_models import stacked_denoising_autoencoder
from utils import datasets, utilities
# #################### #
# Flags definition #
# #################### #
# Command-line configuration for the stacked denoising autoencoder script.
flags = tf.app.flags
FLAGS = flags.FLAGS
# Global configuration
flags.DEFINE_string('dataset', 'mnist', 'Which dataset to use. ["mnist", "cifar10", "custom"]')
flags.DEFINE_string('train_dataset', '', 'Path to train set .npy file.')
flags.DEFINE_string('train_labels', '', 'Path to train labels .npy file.')
flags.DEFINE_string('valid_dataset', '', 'Path to valid set .npy file.')
flags.DEFINE_string('valid_labels', '', 'Path to valid labels .npy file.')
flags.DEFINE_string('test_dataset', '', 'Path to test set .npy file.')
flags.DEFINE_string('test_labels', '', 'Path to test labels .npy file.')
flags.DEFINE_string('cifar_dir', '', 'Path to the cifar 10 dataset directory.')
flags.DEFINE_boolean('do_pretrain', True, 'Whether or not doing unsupervised pretraining.')
flags.DEFINE_string('save_predictions', '', 'Path to a .npy file to save predictions of the model.')
flags.DEFINE_string('save_layers_output', '', 'Path to a .npy file to save output from all the layers of the model.')
flags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')
flags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')
flags.DEFINE_string('model_name', 'sdae', 'Name for the model.')
# Supervised fine tuning parameters
flags.DEFINE_string('finetune_loss_func', 'cross_entropy', 'Last Layer Loss function.["cross_entropy", "mean_squared"]')
flags.DEFINE_integer('finetune_num_epochs', 30, 'Number of epochs for the fine-tuning phase.')
flags.DEFINE_float('finetune_learning_rate', 0.001, 'Learning rate for the fine-tuning phase.')
flags.DEFINE_string('finetune_act_func', 'relu', 'Activation function for the fine-tuning phase.'
                                                 '["sigmoid, "tanh", "relu"]')
flags.DEFINE_float('dropout', 1, 'Dropout parameter.')
flags.DEFINE_string('finetune_opt', 'gradient_descent', '["gradient_descent", "ada_grad", "momentum"]')
flags.DEFINE_integer('finetune_batch_size', 20, 'Size of each mini-batch for the fine-tuning phase.')
flags.DEFINE_integer('verbose', 0, 'Level of verbosity. 0 - silent, 1 - print accuracy.')
flags.DEFINE_string('main_dir', 'sdae/', 'Directory to store data relative to the algorithm.')
flags.DEFINE_string('corr_type', 'none', 'Type of input corruption. ["none", "masking", "salt_and_pepper"]')
flags.DEFINE_float('corr_frac', 0.0, 'Fraction of the input to corrupt.')
# Autoencoder layers specific parameters.
# Each flag below is a comma-separated list with one value per autoencoder
# layer; a trailing comma produces an empty last element that the parsing
# below (the `if _` filter) discards.
flags.DEFINE_string('layers', '256,', 'Comma-separated values for the layers in the sdae.')
flags.DEFINE_string('xavier_init', '1,', 'Value for the constant in xavier weights initialization.')
flags.DEFINE_string('enc_act_func', 'sigmoid,', 'Activation function for the encoder. ["sigmoid", "tanh"]')
flags.DEFINE_string('dec_act_func', 'none,', 'Activation function for the decoder. ["sigmoid", "tanh", "none"]')
flags.DEFINE_string('loss_func', 'mean_squared,', 'Loss function. ["mean_squared" or "cross_entropy"]')
flags.DEFINE_string('opt', 'gradient_descent,', '["gradient_descent", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('learning_rate', '0.01,', 'Initial learning rate.')
flags.DEFINE_string('momentum', '0.5,', 'Momentum parameter.')
flags.DEFINE_string('num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('batch_size', '10,', 'Size of each mini-batch.')
# Conversion of Autoencoder layers parameters from string to their specific type.
# Empty strings (from trailing commas) are filtered out by the `if _` test.
layers = [int(_) for _ in FLAGS.layers.split(',') if _]
xavier_init = [int(_) for _ in FLAGS.xavier_init.split(',') if _]
enc_act_func = [_ for _ in FLAGS.enc_act_func.split(',') if _]
dec_act_func = [_ for _ in FLAGS.dec_act_func.split(',') if _]
opt = [_ for _ in FLAGS.opt.split(',') if _]
loss_func = [_ for _ in FLAGS.loss_func.split(',') if _]
learning_rate = [float(_) for _ in FLAGS.learning_rate.split(',') if _]
momentum = [float(_) for _ in FLAGS.momentum.split(',') if _]
num_epochs = [int(_) for _ in FLAGS.num_epochs.split(',') if _]
batch_size = [int(_) for _ in FLAGS.batch_size.split(',') if _]
# Parameters normalization: if a parameter is not specified, it must be made of the same length of the others
dae_params = {'layers': layers, 'xavier_init': xavier_init, 'enc_act_func': enc_act_func,
              'dec_act_func': dec_act_func, 'loss_func': loss_func, 'learning_rate': learning_rate,
              'opt': opt,
              'momentum': momentum, 'num_epochs': num_epochs, 'batch_size': batch_size}
for p in dae_params:
    if len(dae_params[p]) != len(layers):
        # The current parameter is not specified by the user, should default it for all the layers
        # by repeating its first value once per layer.
        dae_params[p] = [dae_params[p][0] for _ in layers]
# Parameters validation.
# NOTE(review): `assert` is stripped when Python runs with -O; these checks are
# only effective in normal (non-optimized) runs.
assert 0. <= FLAGS.corr_frac <= 1.
assert FLAGS.corr_type in ['masking', 'salt_and_pepper', 'none']
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert len(layers) > 0
assert all([af in ['sigmoid', 'tanh'] for af in enc_act_func])
assert all([af in ['sigmoid', 'tanh', 'none'] for af in dec_act_func])
assert all([lf in ['cross_entropy', 'mean_squared'] for lf in loss_func])
assert FLAGS.finetune_opt in ['gradient_descent', 'ada_grad', 'momentum', 'adam']
if __name__ == '__main__':
    # Seed NumPy/TensorFlow RNGs for reproducibility (seed < 0 is passed through
    # to the utility unchanged -- see utilities.random_seed_np_tf).
    utilities.random_seed_np_tf(FLAGS.seed)
    if FLAGS.dataset == 'mnist':
        # ################# #
        #   MNIST Dataset   #
        # ################# #
        trX, trY, vlX, vlY, teX, teY = datasets.load_mnist_dataset(mode='supervised')
    elif FLAGS.dataset == 'cifar10':
        # ################### #
        #   Cifar10 Dataset   #
        # ################### #
        trX, trY, teX, teY = datasets.load_cifar10_dataset(FLAGS.cifar_dir, mode='supervised')
        # Validation set is the first half of the test set
        vlX = teX[:5000]
        vlY = teY[:5000]
    elif FLAGS.dataset == 'custom':
        # ################## #
        #   Custom Dataset   #
        # ################## #
        def load_from_np(dataset_path):
            # Returns None when no path was supplied on the command line.
            if dataset_path != '':
                return np.load(dataset_path)
            else:
                return None
        trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(FLAGS.train_labels)
        vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(FLAGS.valid_labels)
        teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)
    else:
        # Unreachable in practice: FLAGS.dataset is validated by the asserts above.
        trX = None
        trY = None
        vlX = None
        vlY = None
        teX = None
        teY = None
    # Create the object (the `sdae = None` pre-assignment is immediately overwritten).
    sdae = None
    sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
        do_pretrain=FLAGS.do_pretrain, model_name=FLAGS.model_name,
        layers=dae_params['layers'], finetune_loss_func=FLAGS.finetune_loss_func,
        finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
        finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size, dropout=FLAGS.dropout,
        enc_act_func=dae_params['enc_act_func'], dec_act_func=dae_params['dec_act_func'],
        xavier_init=dae_params['xavier_init'], corr_type=FLAGS.corr_type, corr_frac=FLAGS.corr_frac,
        dataset=FLAGS.dataset, loss_func=dae_params['loss_func'], main_dir=FLAGS.main_dir, opt=dae_params['opt'],
        learning_rate=dae_params['learning_rate'], momentum=dae_params['momentum'], verbose=FLAGS.verbose,
        num_epochs=dae_params['num_epochs'], batch_size=dae_params['batch_size'],
        finetune_act_func=FLAGS.finetune_act_func)
    # Fit the model (unsupervised pretraining).
    # NOTE(review): encoded_X / encoded_vX are never used afterwards.
    if FLAGS.do_pretrain:
        encoded_X, encoded_vX = sdae.pretrain(trX, vlX)
    # Supervised finetuning
    sdae.build_model(trX.shape[1], trY.shape[1])
    sdae.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)
    # Compute the accuracy of the model
    print('Test set accuracy: {}'.format(sdae.compute_accuracy(teX, teY)))
    # Save the predictions of the model
    if FLAGS.save_predictions:
        print('Saving the predictions for the test set...')
        np.save(FLAGS.save_predictions, sdae.predict(teX))
    # Save output from each layer of the model
    if FLAGS.save_layers_output:
        print('Saving the output of each layer for the test set')
        out = sdae.get_layers_output(teX)
        for i, o in enumerate(out):
            np.save(FLAGS.save_layers_output + '-layer-' + str(i + 1), o)
| [
"utils.datasets.load_cifar10_dataset",
"utils.datasets.load_mnist_dataset",
"numpy.load",
"models.autoencoder_models.stacked_denoising_autoencoder.StackedDenoisingAutoencoder",
"utils.utilities.random_seed_np_tf"
] | [((5453, 5492), 'utils.utilities.random_seed_np_tf', 'utilities.random_seed_np_tf', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (5480, 5492), False, 'from utils import datasets, utilities\n'), ((6781, 7733), 'models.autoencoder_models.stacked_denoising_autoencoder.StackedDenoisingAutoencoder', 'stacked_denoising_autoencoder.StackedDenoisingAutoencoder', ([], {'do_pretrain': 'FLAGS.do_pretrain', 'model_name': 'FLAGS.model_name', 'layers': "dae_params['layers']", 'finetune_loss_func': 'FLAGS.finetune_loss_func', 'finetune_learning_rate': 'FLAGS.finetune_learning_rate', 'finetune_num_epochs': 'FLAGS.finetune_num_epochs', 'finetune_opt': 'FLAGS.finetune_opt', 'finetune_batch_size': 'FLAGS.finetune_batch_size', 'dropout': 'FLAGS.dropout', 'enc_act_func': "dae_params['enc_act_func']", 'dec_act_func': "dae_params['dec_act_func']", 'xavier_init': "dae_params['xavier_init']", 'corr_type': 'FLAGS.corr_type', 'corr_frac': 'FLAGS.corr_frac', 'dataset': 'FLAGS.dataset', 'loss_func': "dae_params['loss_func']", 'main_dir': 'FLAGS.main_dir', 'opt': "dae_params['opt']", 'learning_rate': "dae_params['learning_rate']", 'momentum': "dae_params['momentum']", 'verbose': 'FLAGS.verbose', 'num_epochs': "dae_params['num_epochs']", 'batch_size': "dae_params['batch_size']", 'finetune_act_func': 'FLAGS.finetune_act_func'}), "(do_pretrain=FLAGS\n .do_pretrain, model_name=FLAGS.model_name, layers=dae_params['layers'],\n finetune_loss_func=FLAGS.finetune_loss_func, finetune_learning_rate=\n FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.\n finetune_num_epochs, finetune_opt=FLAGS.finetune_opt,\n finetune_batch_size=FLAGS.finetune_batch_size, dropout=FLAGS.dropout,\n enc_act_func=dae_params['enc_act_func'], dec_act_func=dae_params[\n 'dec_act_func'], xavier_init=dae_params['xavier_init'], corr_type=FLAGS\n .corr_type, corr_frac=FLAGS.corr_frac, dataset=FLAGS.dataset, loss_func\n =dae_params['loss_func'], main_dir=FLAGS.main_dir, opt=dae_params['opt'\n ], 
learning_rate=dae_params['learning_rate'], momentum=dae_params[\n 'momentum'], verbose=FLAGS.verbose, num_epochs=dae_params['num_epochs'],\n batch_size=dae_params['batch_size'], finetune_act_func=FLAGS.\n finetune_act_func)\n", (6838, 7733), False, 'from models.autoencoder_models import stacked_denoising_autoencoder\n'), ((5658, 5704), 'utils.datasets.load_mnist_dataset', 'datasets.load_mnist_dataset', ([], {'mode': '"""supervised"""'}), "(mode='supervised')\n", (5685, 5704), False, 'from utils import datasets, utilities\n'), ((5870, 5935), 'utils.datasets.load_cifar10_dataset', 'datasets.load_cifar10_dataset', (['FLAGS.cifar_dir'], {'mode': '"""supervised"""'}), "(FLAGS.cifar_dir, mode='supervised')\n", (5899, 5935), False, 'from utils import datasets, utilities\n'), ((6275, 6296), 'numpy.load', 'np.load', (['dataset_path'], {}), '(dataset_path)\n', (6282, 6296), True, 'import numpy as np\n')] |
"""This module provides a generalized implementation of UNet.
See the `UNet` class docstring for more information.
"""
import attr
from typing import List, Optional, Text
from sleap.nn.architectures import encoder_decoder
from sleap.nn.config import UNetConfig
import numpy as np
import tensorflow as tf
@attr.s(auto_attribs=True)
class PoolingBlock(encoder_decoder.EncoderBlock):
    """Encoder block that only performs max pooling.

    UNet takes its skip connection sources before pooling, so regular encoder
    blocks must end with a convolution rather than a pooling layer. Appending
    this block to the encoder stack keeps the number of down blocks equal to
    the number of pooling steps.

    Attributes:
        pool: If True, apply max pooling at the end of the block.
        pooling_stride: Stride of the max pooling operation. With a stride of
            1 the output stays at the same stride (== 1/scale) as the input.
    """

    pool: bool = True
    pooling_stride: int = 2

    def make_block(self, x_in: tf.Tensor, prefix: Text = "conv_block") -> tf.Tensor:
        """Apply the (optional) pooling operation to an input tensor."""
        if not self.pool:
            return x_in
        pooling_layer = tf.keras.layers.MaxPool2D(
            pool_size=2,
            strides=self.pooling_stride,
            padding="same",
            name=f"{prefix}_last_pool",
        )
        return pooling_layer(x_in)
@attr.s(auto_attribs=True)
class UNet(encoder_decoder.EncoderDecoder):
    """UNet encoder-decoder architecture for fully convolutional networks.

    This is the canonical architecture described in `Ronneberger et al., 2015
    <https://arxiv.org/abs/1505.04597>`_.

    The default configuration with 4 down/up blocks and 64 base filters has ~34.5M
    parameters.

    Attributes:
        filters: Base number of filters in the first encoder block. More filters will
            increase the representational capacity of the network at the cost of memory
            and runtime.
        filters_rate: Factor to increase the number of filters by in each block.
        kernel_size: Size of convolutional kernels (== height == width).
        stem_kernel_size: Size of convolutional kernels in stem blocks.
        stem_blocks: If >0, will create additional "down" blocks for initial
            downsampling. These will be configured identically to the down blocks below.
        convs_per_block: Number of convolutions in each block. More convolutions per
            block will increase the representational capacity of the network at the cost
            of memory and runtime.
        down_blocks: Number of blocks with pooling in the encoder. More down blocks will
            increase the effective maximum receptive field.
        up_blocks: Number of blocks with upsampling in the decoder. If this is equal to
            `down_blocks`, the output of this network will be at the same stride (scale)
            as the input.
        middle_block: If True, add an additional block at the end of the encoder.
        up_interpolate: If True, use bilinear interpolation instead of transposed
            convolutions for upsampling. Interpolation is faster but transposed
            convolutions may be able to learn richer or more complex upsampling to
            recover details from higher scales. If using transposed convolutions, the
            number of filters are determined by `filters` and `filters_rate` to
            progressively decrease the number of filters at each step.
        block_contraction: If True, reduces the number of filters at the end of middle
            and decoder blocks. This has the effect of introducing an additional
            bottleneck before each upsampling step. The original implementation does not
            do this, but the CARE implementation does.

    Note:
        This bears some differences with other implementations, particularly with
        respect to the skip connection source tensors in the encoder. In the original,
        the skip connection is formed from the output of the convolutions in each
        encoder block, not the pooling step. This results in skip connections starting
        at the first stride level as well as subsequent ones.
    """

    filters: int = 64
    filters_rate: float = 2
    kernel_size: int = 3
    stem_kernel_size: int = 3
    convs_per_block: int = 2
    stem_blocks: int = 0
    down_blocks: int = 4
    middle_block: bool = True
    up_blocks: int = 4
    up_interpolate: bool = False
    block_contraction: bool = False

    @property
    def stem_stack(self) -> Optional[List[encoder_decoder.SimpleConvBlock]]:
        """Define the downsampling stem."""
        if self.stem_blocks == 0:
            return None
        blocks = []
        for block in range(self.stem_blocks):
            # Filters grow geometrically with depth: filters * rate**block.
            block_filters = int(self.filters * (self.filters_rate ** block))
            blocks.append(
                encoder_decoder.SimpleConvBlock(
                    pool=(block > 0),
                    pool_before_convs=True,
                    pooling_stride=2,
                    num_convs=self.convs_per_block,
                    filters=block_filters,
                    kernel_size=self.stem_kernel_size,
                    use_bias=True,
                    batch_norm=False,
                    activation="relu",
                )
            )
        # Always finish with a pooling block to account for pooling before convs.
        blocks.append(PoolingBlock(pool=True, pooling_stride=2))
        return blocks

    @property
    def encoder_stack(self) -> List[encoder_decoder.SimpleConvBlock]:
        """Define the encoder stack."""
        blocks = []
        for block in range(self.down_blocks):
            # Continue the geometric filter growth where the stem left off.
            block_filters = int(
                self.filters * (self.filters_rate ** (block + self.stem_blocks))
            )
            blocks.append(
                encoder_decoder.SimpleConvBlock(
                    pool=(block > 0),
                    pool_before_convs=True,
                    pooling_stride=2,
                    num_convs=self.convs_per_block,
                    filters=block_filters,
                    kernel_size=self.kernel_size,
                    use_bias=True,
                    batch_norm=False,
                    activation="relu",
                )
            )
        # Always finish with a pooling block to account for pooling before convs.
        blocks.append(PoolingBlock(pool=True, pooling_stride=2))
        # Create a middle block (like the CARE implementation).
        if self.middle_block:
            if self.convs_per_block > 1:
                # First convs are one exponent higher than the last encoder block.
                block_filters = int(
                    self.filters
                    * (self.filters_rate ** (self.down_blocks + self.stem_blocks))
                )
                blocks.append(
                    encoder_decoder.SimpleConvBlock(
                        pool=False,
                        pool_before_convs=False,
                        pooling_stride=2,
                        num_convs=self.convs_per_block - 1,
                        filters=block_filters,
                        kernel_size=self.kernel_size,
                        use_bias=True,
                        batch_norm=False,
                        activation="relu",
                        block_prefix="_middle_expand"
                    )
                )
            if self.block_contraction:
                # Contract the channels with an exponent lower than the last encoder block.
                block_filters = int(
                    self.filters
                    * (self.filters_rate ** (self.down_blocks + self.stem_blocks - 1))
                )
            else:
                # Keep the block output filters the same.
                block_filters = int(
                    self.filters
                    * (self.filters_rate ** (self.down_blocks + self.stem_blocks))
                )
            blocks.append(
                encoder_decoder.SimpleConvBlock(
                    pool=False,
                    pool_before_convs=False,
                    pooling_stride=2,
                    num_convs=1,
                    filters=block_filters,
                    kernel_size=self.kernel_size,
                    use_bias=True,
                    batch_norm=False,
                    activation="relu",
                    block_prefix="_middle_contract"
                )
            )
        return blocks

    @property
    def decoder_stack(self) -> List[encoder_decoder.SimpleUpsamplingBlock]:
        """Define the decoder stack."""
        blocks = []
        for block in range(self.up_blocks):
            # Filters shrink geometrically, mirroring the encoder.
            block_filters_in = int(
                self.filters
                * (self.filters_rate ** (self.down_blocks + self.stem_blocks - 1 - block))
            )
            if self.block_contraction:
                block_filters_out = int(
                    self.filters
                    * (self.filters_rate ** (self.down_blocks + self.stem_blocks - 2 - block))
                )
            else:
                block_filters_out = block_filters_in
            blocks.append(
                encoder_decoder.SimpleUpsamplingBlock(
                    upsampling_stride=2,
                    transposed_conv=(not self.up_interpolate),
                    transposed_conv_filters=block_filters_in,
                    transposed_conv_kernel_size=self.kernel_size,
                    transposed_conv_batch_norm=False,
                    interp_method="bilinear",
                    skip_connection=True,
                    skip_add=False,
                    refine_convs=self.convs_per_block,
                    refine_convs_first_filters=block_filters_in,
                    refine_convs_filters=block_filters_out,
                    refine_convs_kernel_size=self.kernel_size,
                    refine_convs_batch_norm=False,
                )
            )
        return blocks

    @classmethod
    def from_config(cls, config: UNetConfig) -> "UNet":
        """Create a model from a set of configuration parameters.

        Args:
            config: An `UNetConfig` instance with the desired parameters.

        Returns:
            An instance of this class with the specified configuration.
        """
        stem_blocks = 0
        if config.stem_stride is not None:
            # Strides are powers of two, so log2 gives the block count.
            stem_blocks = np.log2(config.stem_stride).astype(int)
        down_blocks = np.log2(config.max_stride).astype(int) - stem_blocks
        up_blocks = np.log2(config.max_stride / config.output_stride).astype(int)
        return cls(
            filters=config.filters,
            filters_rate=config.filters_rate,
            kernel_size=3,
            stem_kernel_size=7,
            convs_per_block=2,
            stem_blocks=stem_blocks,
            down_blocks=down_blocks,
            middle_block=config.middle_block,
            up_blocks=up_blocks,
            up_interpolate=config.up_interpolate,
            stacks=config.stacks,
        )
| [
"attr.s",
"sleap.nn.architectures.encoder_decoder.SimpleConvBlock",
"sleap.nn.architectures.encoder_decoder.SimpleUpsamplingBlock",
"numpy.log2",
"tensorflow.keras.layers.MaxPool2D"
] | [((309, 334), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (315, 334), False, 'import attr\n'), ((1419, 1444), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (1425, 1444), False, 'import attr\n'), ((1205, 1321), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': 'self.pooling_stride', 'padding': '"""same"""', 'name': 'f"""{prefix}_last_pool"""'}), "(pool_size=2, strides=self.pooling_stride, padding\n ='same', name=f'{prefix}_last_pool')\n", (1230, 1321), True, 'import tensorflow as tf\n'), ((4937, 5180), 'sleap.nn.architectures.encoder_decoder.SimpleConvBlock', 'encoder_decoder.SimpleConvBlock', ([], {'pool': '(block > 0)', 'pool_before_convs': '(True)', 'pooling_stride': '(2)', 'num_convs': 'self.convs_per_block', 'filters': 'block_filters', 'kernel_size': 'self.stem_kernel_size', 'use_bias': '(True)', 'batch_norm': '(False)', 'activation': '"""relu"""'}), "(pool=block > 0, pool_before_convs=True,\n pooling_stride=2, num_convs=self.convs_per_block, filters=block_filters,\n kernel_size=self.stem_kernel_size, use_bias=True, batch_norm=False,\n activation='relu')\n", (4968, 5180), False, 'from sleap.nn.architectures import encoder_decoder\n'), ((5917, 6155), 'sleap.nn.architectures.encoder_decoder.SimpleConvBlock', 'encoder_decoder.SimpleConvBlock', ([], {'pool': '(block > 0)', 'pool_before_convs': '(True)', 'pooling_stride': '(2)', 'num_convs': 'self.convs_per_block', 'filters': 'block_filters', 'kernel_size': 'self.kernel_size', 'use_bias': '(True)', 'batch_norm': '(False)', 'activation': '"""relu"""'}), "(pool=block > 0, pool_before_convs=True,\n pooling_stride=2, num_convs=self.convs_per_block, filters=block_filters,\n kernel_size=self.kernel_size, use_bias=True, batch_norm=False,\n activation='relu')\n", (5948, 6155), False, 'from sleap.nn.architectures import encoder_decoder\n'), ((8084, 8334), 
'sleap.nn.architectures.encoder_decoder.SimpleConvBlock', 'encoder_decoder.SimpleConvBlock', ([], {'pool': '(False)', 'pool_before_convs': '(False)', 'pooling_stride': '(2)', 'num_convs': '(1)', 'filters': 'block_filters', 'kernel_size': 'self.kernel_size', 'use_bias': '(True)', 'batch_norm': '(False)', 'activation': '"""relu"""', 'block_prefix': '"""_middle_contract"""'}), "(pool=False, pool_before_convs=False,\n pooling_stride=2, num_convs=1, filters=block_filters, kernel_size=self.\n kernel_size, use_bias=True, batch_norm=False, activation='relu',\n block_prefix='_middle_contract')\n", (8115, 8334), False, 'from sleap.nn.architectures import encoder_decoder\n'), ((9282, 9791), 'sleap.nn.architectures.encoder_decoder.SimpleUpsamplingBlock', 'encoder_decoder.SimpleUpsamplingBlock', ([], {'upsampling_stride': '(2)', 'transposed_conv': '(not self.up_interpolate)', 'transposed_conv_filters': 'block_filters_in', 'transposed_conv_kernel_size': 'self.kernel_size', 'transposed_conv_batch_norm': '(False)', 'interp_method': '"""bilinear"""', 'skip_connection': '(True)', 'skip_add': '(False)', 'refine_convs': 'self.convs_per_block', 'refine_convs_first_filters': 'block_filters_in', 'refine_convs_filters': 'block_filters_out', 'refine_convs_kernel_size': 'self.kernel_size', 'refine_convs_batch_norm': '(False)'}), "(upsampling_stride=2, transposed_conv=\n not self.up_interpolate, transposed_conv_filters=block_filters_in,\n transposed_conv_kernel_size=self.kernel_size,\n transposed_conv_batch_norm=False, interp_method='bilinear',\n skip_connection=True, skip_add=False, refine_convs=self.convs_per_block,\n refine_convs_first_filters=block_filters_in, refine_convs_filters=\n block_filters_out, refine_convs_kernel_size=self.kernel_size,\n refine_convs_batch_norm=False)\n", (9319, 9791), False, 'from sleap.nn.architectures import encoder_decoder\n'), ((10638, 10687), 'numpy.log2', 'np.log2', (['(config.max_stride / config.output_stride)'], {}), '(config.max_stride / 
config.output_stride)\n', (10645, 10687), True, 'import numpy as np\n'), ((6948, 7220), 'sleap.nn.architectures.encoder_decoder.SimpleConvBlock', 'encoder_decoder.SimpleConvBlock', ([], {'pool': '(False)', 'pool_before_convs': '(False)', 'pooling_stride': '(2)', 'num_convs': '(self.convs_per_block - 1)', 'filters': 'block_filters', 'kernel_size': 'self.kernel_size', 'use_bias': '(True)', 'batch_norm': '(False)', 'activation': '"""relu"""', 'block_prefix': '"""_middle_expand"""'}), "(pool=False, pool_before_convs=False,\n pooling_stride=2, num_convs=self.convs_per_block - 1, filters=\n block_filters, kernel_size=self.kernel_size, use_bias=True, batch_norm=\n False, activation='relu', block_prefix='_middle_expand')\n", (6979, 7220), False, 'from sleap.nn.architectures import encoder_decoder\n'), ((10503, 10530), 'numpy.log2', 'np.log2', (['config.stem_stride'], {}), '(config.stem_stride)\n', (10510, 10530), True, 'import numpy as np\n'), ((10565, 10591), 'numpy.log2', 'np.log2', (['config.max_stride'], {}), '(config.max_stride)\n', (10572, 10591), True, 'import numpy as np\n')] |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
'''
#+DESCRIPTION: online segmentation
#+FROM: github.com/durant35/SqueezeSeg
#+DATE: 2018-08-08-Wed
#+AUTHOR: <NAME> (<EMAIL>)
'''
import sys
import os.path
import numpy as np
from PIL import Image
import tensorflow as tf
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Image as ImageMsg
from std_msgs.msg import Header
from std_msgs.msg import Int8
sys.path.append("/home/dyros-vehicle/gitrepo/ims_ros/catkin_ws_kinetic/src/squeezeseg_cpp_preprocessing/script/squeezeseg")
sys.path.append("./squeezeseg")
from config import *
from nets import SqueezeSeg
from utils.util import *
from utils.clock import Clock
from imdb import kitti # ed: header added
def _make_point_field(num_field):
    """Build the PointField layout for the published PointCloud2 message.

    Args:
        num_field: 4 for an (x, y, z, intensity) cloud, anything else adds a
            trailing 'label' field as well.

    Returns:
        A list of ``sensor_msgs.msg.PointField`` describing the point layout.
    """
    def _field(name, offset, datatype):
        # One float/int channel with `count` 1 at the given byte offset.
        pf = pc2.PointField()
        # Plain str() instead of np.str(): the np.str alias was deprecated in
        # NumPy 1.20 and removed in 1.24 (np.str was just the builtin str).
        pf.name = str(name)
        pf.offset = np.uint32(offset)
        pf.datatype = np.uint8(datatype)
        pf.count = np.uint32(1)
        return pf

    # x/y/z/intensity are float32 (datatype 7); intensity starts at byte 16,
    # matching the original layout (bytes 12..15 are padding).
    fields = [
        _field('x', 0, 7),
        _field('y', 4, 7),
        _field('z', 8, 7),
        _field('intensity', 16, 7),
    ]
    if num_field == 4:
        return fields
    # NOTE(review): datatype 4 is UINT16 in the PointField constants -- kept
    # as in the original, but verify it matches how `label` is packed.
    fields.append(_field('label', 20, 4))
    return fields
class SegmentNode():
    """LiDAR point cloud segment ros node.

    Subscribes to a pre-projected point cloud, runs the SqueezeSeg TensorFlow
    model on it, and republishes the cloud with a per-point class label.
    """
    def __init__(self,
                 sub_topic, pub_topic, FLAGS):
        """Build the model, restore the checkpoint, and wire up ROS topics.

        Args:
            sub_topic: NOTE(review): currently ignored -- the subscription
                topic is hard-coded to "/ss_filtered" below.
            pub_topic: Topic on which the labeled point cloud is published.
            FLAGS: Object carrying at least a `checkpoint` path to restore.
        """
        # os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
        self._mc = kitti_squeezeSeg_config()
        self._mc.LOAD_PRETRAINED_MODEL = False
        self._mc.BATCH_SIZE = 1         # TODO(bichen): fix this hard-coded batch size.
        self._model = SqueezeSeg(self._mc)
        self._saver = tf.train.Saver(self._model.model_params)

        # allow_soft_placement lets TF fall back to CPU for unsupported ops.
        self._session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self._saver.restore(self._session, FLAGS.checkpoint)

        self._sub = rospy.Subscriber("/ss_filtered", PointCloud2, self.point_cloud_callback, queue_size=1)
        self._pub = rospy.Publisher(pub_topic, PointCloud2, queue_size=1)

        # Blocks until node shutdown -- the constructor does not return.
        rospy.spin()

    def point_cloud_callback(self, cloud_msg):
        """Run SqueezeSeg on one incoming cloud and publish the labeled result.

        :param cloud_msg: sensor_msgs/PointCloud2 whose points carry
            (x, y, z, intensity, d) fields and reshape to a 64x512 range image.
        :return: None; the segmented cloud is published on self._pub.
        """
        clock = Clock()
        # rospy.logwarn("subscribed. width: %d, height: %u, point_step: %d, row_step: %d",
        #               cloud_msg.width, cloud_msg.height, cloud_msg.point_step, cloud_msg.row_step)
        pc = pc2.read_points(cloud_msg, skip_nans=False, field_names=("x", "y", "z","intensity","d"))

        # to convert pc into numpy.ndarray format
        np_p = np.array(list(pc))
        # print("shape : {}".format(np_p.shape))

        # get depth map; assumes the incoming cloud is a dense 64x512 grid of
        # 5-channel points -- a mismatched size will raise here.
        lidar = np_p.reshape(64,512,5)
        # print("{}".format(lidar.shape))
        lidar_f = lidar.astype(np.float32)

        # to perform prediction: mask keeps points whose 5th channel
        # (presumably range/depth "d") is positive.
        lidar_mask = np.reshape(
            (lidar[:, :, 4] > 0),
            [self._mc.ZENITH_LEVEL, self._mc.AZIMUTH_LEVEL, 1]
        )
        # Normalize inputs with the training-time statistics.
        lidar_f = (lidar_f - self._mc.INPUT_MEAN) / self._mc.INPUT_STD
        pred_cls = self._session.run(
            self._model.pred_cls,
            feed_dict={
                self._model.lidar_input: [lidar_f],
                self._model.keep_prob: 1.0,
                self._model.lidar_mask: [lidar_mask]
            }
        )
        label = pred_cls[0]

        ## point cloud for SqueezeSeg segments: flatten each channel and stack
        ## into a (5, N) array of (x, y, z, intensity, label) columns.
        x = lidar[:, :, 0].reshape(-1)
        y = lidar[:, :, 1].reshape(-1)
        z = lidar[:, :, 2].reshape(-1)
        i = lidar[:, :, 3].reshape(-1)
        label = label.reshape(-1)
        cloud = np.stack((x, y, z, i, label))

        header = Header()
        header.stamp = rospy.Time()
        header.frame_id = "velodyne_link"

        # point cloud segments
        # 4 PointFields as channel description
        msg_segment = pc2.create_cloud(header=header,
                                         fields=_make_point_field(cloud.shape[0]),
                                         points=cloud.T)

        # ed: /squeeze_seg/points publish
        self._pub.publish(msg_segment)
        rospy.loginfo("Point cloud processed. Took %.6f ms.", clock.takeRealTime())
| [
"numpy.uint8",
"rospy.Publisher",
"numpy.reshape",
"tensorflow.train.Saver",
"numpy.stack",
"utils.clock.Clock",
"numpy.uint32",
"std_msgs.msg.Header",
"rospy.spin",
"rospy.Time",
"sensor_msgs.point_cloud2.PointField",
"tensorflow.ConfigProto",
"rospy.Subscriber",
"sys.path.append",
"sen... | [((500, 633), 'sys.path.append', 'sys.path.append', (['"""/home/dyros-vehicle/gitrepo/ims_ros/catkin_ws_kinetic/src/squeezeseg_cpp_preprocessing/script/squeezeseg"""'], {}), "(\n '/home/dyros-vehicle/gitrepo/ims_ros/catkin_ws_kinetic/src/squeezeseg_cpp_preprocessing/script/squeezeseg'\n )\n", (515, 633), False, 'import sys\n'), ((624, 655), 'sys.path.append', 'sys.path.append', (['"""./squeezeseg"""'], {}), "('./squeezeseg')\n", (639, 655), False, 'import sys\n'), ((852, 868), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (866, 868), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((888, 899), 'numpy.str', 'np.str', (['"""x"""'], {}), "('x')\n", (894, 899), True, 'import numpy as np\n'), ((921, 933), 'numpy.uint32', 'np.uint32', (['(0)'], {}), '(0)\n', (930, 933), True, 'import numpy as np\n'), ((957, 968), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (965, 968), True, 'import numpy as np\n'), ((989, 1001), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (998, 1001), True, 'import numpy as np\n'), ((1017, 1033), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (1031, 1033), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((1053, 1064), 'numpy.str', 'np.str', (['"""y"""'], {}), "('y')\n", (1059, 1064), True, 'import numpy as np\n'), ((1086, 1098), 'numpy.uint32', 'np.uint32', (['(4)'], {}), '(4)\n', (1095, 1098), True, 'import numpy as np\n'), ((1122, 1133), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (1130, 1133), True, 'import numpy as np\n'), ((1154, 1166), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (1163, 1166), True, 'import numpy as np\n'), ((1182, 1198), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (1196, 1198), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((1218, 1229), 'numpy.str', 'np.str', (['"""z"""'], {}), "('z')\n", (1224, 1229), True, 'import numpy as np\n'), ((1251, 1263), 'numpy.uint32', 'np.uint32', (['(8)'], {}), 
'(8)\n', (1260, 1263), True, 'import numpy as np\n'), ((1287, 1298), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (1295, 1298), True, 'import numpy as np\n'), ((1319, 1331), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (1328, 1331), True, 'import numpy as np\n'), ((1347, 1363), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (1361, 1363), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((1383, 1402), 'numpy.str', 'np.str', (['"""intensity"""'], {}), "('intensity')\n", (1389, 1402), True, 'import numpy as np\n'), ((1424, 1437), 'numpy.uint32', 'np.uint32', (['(16)'], {}), '(16)\n', (1433, 1437), True, 'import numpy as np\n'), ((1461, 1472), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (1469, 1472), True, 'import numpy as np\n'), ((1493, 1505), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (1502, 1505), True, 'import numpy as np\n'), ((1597, 1613), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (1611, 1613), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((1633, 1648), 'numpy.str', 'np.str', (['"""label"""'], {}), "('label')\n", (1639, 1648), True, 'import numpy as np\n'), ((1670, 1683), 'numpy.uint32', 'np.uint32', (['(20)'], {}), '(20)\n', (1679, 1683), True, 'import numpy as np\n'), ((1707, 1718), 'numpy.uint8', 'np.uint8', (['(4)'], {}), '(4)\n', (1715, 1718), True, 'import numpy as np\n'), ((1739, 1751), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (1748, 1751), True, 'import numpy as np\n'), ((2209, 2229), 'nets.SqueezeSeg', 'SqueezeSeg', (['self._mc'], {}), '(self._mc)\n', (2219, 2229), False, 'from nets import SqueezeSeg\n'), ((2252, 2292), 'tensorflow.train.Saver', 'tf.train.Saver', (['self._model.model_params'], {}), '(self._model.model_params)\n', (2266, 2292), True, 'import tensorflow as tf\n'), ((2461, 2551), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/ss_filtered"""', 'PointCloud2', 'self.point_cloud_callback'], {'queue_size': '(1)'}), "('/ss_filtered', 
PointCloud2, self.point_cloud_callback,\n queue_size=1)\n", (2477, 2551), False, 'import rospy\n'), ((2568, 2621), 'rospy.Publisher', 'rospy.Publisher', (['pub_topic', 'PointCloud2'], {'queue_size': '(1)'}), '(pub_topic, PointCloud2, queue_size=1)\n', (2583, 2621), False, 'import rospy\n'), ((2631, 2643), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2641, 2643), False, 'import rospy\n'), ((2776, 2783), 'utils.clock.Clock', 'Clock', ([], {}), '()\n', (2781, 2783), False, 'from utils.clock import Clock\n'), ((2990, 3084), 'sensor_msgs.point_cloud2.read_points', 'pc2.read_points', (['cloud_msg'], {'skip_nans': '(False)', 'field_names': "('x', 'y', 'z', 'intensity', 'd')"}), "(cloud_msg, skip_nans=False, field_names=('x', 'y', 'z',\n 'intensity', 'd'))\n", (3005, 3084), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((3417, 3504), 'numpy.reshape', 'np.reshape', (['(lidar[:, :, 4] > 0)', '[self._mc.ZENITH_LEVEL, self._mc.AZIMUTH_LEVEL, 1]'], {}), '(lidar[:, :, 4] > 0, [self._mc.ZENITH_LEVEL, self._mc.\n AZIMUTH_LEVEL, 1])\n', (3427, 3504), True, 'import numpy as np\n'), ((4158, 4187), 'numpy.stack', 'np.stack', (['(x, y, z, i, label)'], {}), '((x, y, z, i, label))\n', (4166, 4187), True, 'import numpy as np\n'), ((4207, 4215), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (4213, 4215), False, 'from std_msgs.msg import Header\n'), ((4239, 4251), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (4249, 4251), False, 'import rospy\n'), ((2336, 2377), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2350, 2377), True, 'import tensorflow as tf\n')] |
'''
Created on 12.11.2017
@author: Felix
'''
import Covariance as Covariance
import Optimization as Optimization
import pandas as pd
import numpy as np
class Portfolio():
    """A portfolio of securities supporting covariance estimation,
    minimum-risk optimization and efficient-frontier construction.
    """

    def __init__(self):
        """Create an empty portfolio with the default Excel output file."""
        print('creating portfolio')
        self.securityList = []
        self.obj_covariance = []
        self.excelOuputFile = 'efficientFrontier.xls'

    def addSecurity2Portfolio(self, security):
        """Add a single Security to the portfolio; other types are rejected
        with a printed warning (no exception is raised)."""
        # Name-based type check keeps this module decoupled from the
        # Security class itself.
        if type(security).__name__ != 'Security':
            print('addSecurity only takes securities as input!')
            return
        self.securityList.append(security)

    def determineCovarianceMatrix(self, startDate, endDate, fluctuationmode):
        """Compute and cache the drift vector and covariance matrix
        (self.driftDF / self.covarianceDF) of all securities in the
        portfolio. Does nothing but print a warning when the portfolio
        is empty."""
        if len(self.securityList) > 0:
            covariance = Covariance.Covariance(self.securityList, fluctuationmode, startDate, endDate)
            self.driftDF, self.covarianceDF = covariance.getPooledCovariance()
            print(' determined covariance matrix of securities in portfolio')
        else:
            print('no securities in portfolio. cant determine anything')

    def getRiskoptimum(self, startDate, endDate, minimumReturn, fluctuationmode):
        """Return (weights, annualized mu, annualized sigma) for the
        minimum-variance portfolio with expected return >= minimumReturn."""
        # Compute the covariance matrix lazily if it is not cached yet.
        if not (hasattr(self, 'covarianceDF') and hasattr(self, 'driftDF')):
            self.determineCovarianceMatrix(startDate, endDate, fluctuationmode)
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
        opti = Optimization.Optimization(self.driftDF.values.flatten(),
                                         self.covarianceDF.values, minimumReturn)
        result, mu, var = opti.doOptimization()
        # Annualize from daily figures (365.25 days per year).
        if fluctuationmode == 'geometric':
            mu = np.exp(mu * 365.25) - 1
            sigma = np.exp(np.sqrt(var * 365.25)) - 1
        else:
            # NOTE(review): subtracting 1 in the arithmetic branch mirrors the
            # geometric formula but looks questionable -- confirm intent.
            mu = (mu * 365.25) - 1
            sigma = np.sqrt(var * 365.25) - 1
        return result, mu, sigma

    def getEfficientFrontier(self, startDate, endDate, fluctuationmode):
        """Sweep minimum returns between the smallest and largest drift and
        store the optimal weights plus (mu, var) per step in
        self.efficientFrontier.

        Only useful solutions are produced (the optimizer constrains
        return(solution) > requestedReturn, not equality), so this does not
        trace the full "Markowitz bullet".
        """
        self.determineCovarianceMatrix(startDate, endDate, fluctuationmode)
        # Define the sweep range over the observed drifts.
        drift = self.driftDF.values  # as_matrix() was removed in pandas 1.0
        maximum = drift.max()
        minimum = drift.min()
        nsteps = 100
        stepsize = (maximum - minimum) / float(nsteps)
        output = []
        for i in range(nsteps):
            minimumReturn = (stepsize * i + minimum)
            result, mu, var = self.getRiskoptimum(startDate, endDate, minimumReturn, fluctuationmode)
            resAsList = list(result)
            resAsList.append(mu)
            resAsList.append(var)
            output.append(resAsList)
        columns = [etf.ID for etf in self.securityList]
        columns.append('mu')
        columns.append('var')
        self.efficientFrontier = pd.DataFrame(data=output, index=range(nsteps), columns=columns)

    def dumpEfficientFrontiertoExcel(self):
        """Write the efficient frontier to self.excelOuputFile."""
        # The context manager saves and closes the workbook;
        # ExcelWriter.save() was removed in pandas 2.0.
        with pd.ExcelWriter(self.excelOuputFile) as writer:
            self.efficientFrontier.to_excel(writer, "efficientFrontier")
| [
"numpy.exp",
"pandas.ExcelWriter",
"Covariance.Covariance",
"numpy.sqrt"
] | [((3168, 3203), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['self.excelOuputFile'], {}), '(self.excelOuputFile)\n', (3182, 3203), True, 'import pandas as pd\n'), ((870, 947), 'Covariance.Covariance', 'Covariance.Covariance', (['self.securityList', 'fluctuationmode', 'startDate', 'endDate'], {}), '(self.securityList, fluctuationmode, startDate, endDate)\n', (891, 947), True, 'import Covariance as Covariance\n'), ((1746, 1765), 'numpy.exp', 'np.exp', (['(mu * 365.25)'], {}), '(mu * 365.25)\n', (1752, 1765), True, 'import numpy as np\n'), ((1901, 1922), 'numpy.sqrt', 'np.sqrt', (['(var * 365.25)'], {}), '(var * 365.25)\n', (1908, 1922), True, 'import numpy as np\n'), ((1794, 1815), 'numpy.sqrt', 'np.sqrt', (['(var * 365.25)'], {}), '(var * 365.25)\n', (1801, 1815), True, 'import numpy as np\n')] |
import inspect
import os
import sys
import warnings
from collections import OrderedDict
from typing import Callable, Union, Iterable
import requests
import numpy as np
from numpy.random.mtrand import RandomState
from matrx.agents.agent_brain import AgentBrain
from matrx.agents.capabilities.capability import SenseCapability
from matrx.agents.human_agent_brain import HumanAgentBrain
from matrx.grid_world import GridWorld
from matrx.logger.logger import GridWorldLogger
from matrx.objects.agent_body import AgentBody
from matrx.objects.env_object import EnvObject
from matrx.utils import utils
from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability
from matrx.objects.simple_objects import Wall, Door, AreaTile, SmokeTile
from matrx.sim_goals.sim_goal import LimitedTimeGoal, SimulationGoal
# addons
from matrx.API import api
from matrx_visualizer import visualization_server
class WorldBuilder:
def __init__(self, shape, tick_duration=0.5, random_seed=1, simulation_goal=1000, run_matrx_api=True,
run_matrx_visualizer=False, visualization_bg_clr="#C2C2C2", visualization_bg_img=None,
verbose=False):
"""
A builder to create one or more worlds.
With the constructor you can set a number of general properties and from the resulting instance you can call
numerous methods to add new objects and/or agents.
Parameters
----------
shape : tuple
Denotes the width and height of the world you create.
tick_duration : float, optional
The duration of a single 'tick' or loop in the game-loop of the world you create. Defaults to 0.5.
random_seed : int, optional
The master random seed on which all objects, agents and worlds are seeded. Should be a positive non-zero
integer. Defaults 1.0.
simulation_goal : int, SimulationGoal, list of SimulationGoal, optional
The goal or goals of the world, either a single SimulationGoal, a list of such or a positive non-zero
integer to denote the maximum number of 'ticks' the world(s) have to run. Defaults to 1000.
run_matrx_api : bool, optional
Whether to run the MATRX API
run_matrx_visualizer : bool, optional
Whether to run the default MATRX visualizer, this requires the API to be run
visualization_bg_clr : str, optional
The color of the world when visualized using MATRX' own visualisation server. A string representation of
hexadecimal color. Defaults to "#C2C2C2" (light grey).
visualization_bg_img : str, optional
An optional background image of the world when visualized using MATRX' own visualisation server. A string
of the path to the image file. Defaults to None (no image).
verbose : bool, optional
Whether the subsequent created world should be verbose or not. Defaults to False.
Raises
------
ValueError
On an incorrect argument. The exception specifies further what argument and what is erroneous about it.
Examples
--------
This creates a WorldBuilder that creates world of a certain size (here 10 by 10);
>>> from matrx.world_builder import WorldBuilder
>>> WorldBuilder(shape=(10, 10))
To create a WorldBuilder with a black background, a tick duration as fast as possible and with a different
master random seed;
>>> from matrx.world_builder import WorldBuilder
>>> WorldBuilder(shape=(10, 10), random_seed=42, tick_duration=-1, visualization_bg_clr="#000000")
"""
# Check if shape is of correct type and length
if not isinstance(shape, list) and not isinstance(shape, tuple) and len(shape) != 2:
raise ValueError(f"The given grid shape {shape} is not of type List, Tuple or of length two.")
# convert int to float
if isinstance(tick_duration, int):
tick_duration = float(tick_duration)
# Check that tick duration is of float and a positive number.
if not isinstance(tick_duration, float) and tick_duration >= 0.0:
raise ValueError(f"The given tick_duration {tick_duration} should be a Float and larger or equal than 0.0.")
# Check that the random seed is a positive non-zero integer
if not isinstance(random_seed, int) and random_seed > 0:
raise ValueError(f"The given random_seed {random_seed} should be an Int and bigger or equal to 1.")
# Check if the simulation_goal is a SimulationGoal, an int or a list or tuple of SimulationGoal
if not isinstance(simulation_goal, SimulationGoal) and not isinstance(simulation_goal, int) \
and not (isinstance(simulation_goal, Iterable) and (sum(1 for _ in simulation_goal)) > 0):
raise ValueError(f"The given simulation_goal {simulation_goal} should be of type {SimulationGoal.__name__} "
f"or a list/tuple of {SimulationGoal.__name__}, or it should be an int denoting the max"
f"number of ticks the world should run (negative for infinite).")
# Check the background color
if not isinstance(visualization_bg_clr, str) and len(visualization_bg_clr) != 7 and \
visualization_bg_clr[0] is not "#":
raise ValueError(f"The given visualization_bg_clr {visualization_bg_clr} should be a Str of length 7 with"
f"an initial '#'' (a hexidecimal color string).")
# Check if the background image is a path
if visualization_bg_img is not None and not isinstance(visualization_bg_img, str):
raise ValueError(f"The given visualization_bg_img {visualization_bg_img} should be of type str denoting a path"
f" to an image.")
if not isinstance(run_matrx_visualizer, bool):
raise ValueError(f"The given value {run_matrx_visualizer} for run_matrx_visualizer is invalid, should be "
f"of type bool ")
if not isinstance(run_matrx_api, bool):
raise ValueError(f"The given value {run_matrx_api} for run_matrx_api is invalid, should be "
f"of type bool.")
if not run_matrx_api and run_matrx_visualizer:
raise ValueError(f"Run_matrx_api is set to False while run_matrx_visualizer is set to True. The MATRX "
f"visualizer requires the API to work, so this is not possible.")
# Set our random number generator
self.rng = np.random.RandomState(random_seed)
# Set our settings place holders
self.agent_settings = []
self.object_settings = []
# Set our logger place holders
self.loggers = []
# initialize an API variables
self.run_matrx_api = run_matrx_api
self.api_info = { "run_matrx_api": run_matrx_api,
"api_thread": False }
# initialize the visualization variables
self.run_matrx_visualizer = run_matrx_visualizer
self.matrx_visualizer_thread = False
# Whether the world factory and evrything else should print stuff
self.verbose = verbose
# If simulation goal is an integer, we create a LimitedTimeGoal with that number of ticks
if isinstance(simulation_goal, int):
simulation_goal = LimitedTimeGoal(max_nr_ticks=simulation_goal)
# Set our world settings
self.world_settings = self.__set_world_settings(shape=shape,
tick_duration=tick_duration,
simulation_goal=simulation_goal,
visualization_bg_clr=visualization_bg_clr,
visualization_bg_img=visualization_bg_img,
verbose=self.verbose,
rnd_seed=random_seed)
# Keep track of the number of worlds we created
self.worlds_created = 0
# Based on our verbosity and debug level, we set a warning scheme
if verbose:
warnings.simplefilter("always")
else: # use the default (print all warnings once per location [module and line number])
warnings.simplefilter("default")
def worlds(self, nr_of_worlds: int = 100):
"""
Returns a Generator of GridWorld instance for the specified number of worlds.
Parameters
----------
nr_of_worlds
The number of worlds the Generator contains. Defaults to 10.
Yields
------
GridWorld
A GridWorld, where all random properties and prospects are sampled using the given master seed.
Raises
------
ValueError
The nr_of_worlds should be a postive non-zero integer.
"""
if not isinstance(nr_of_worlds, int) and nr_of_worlds <= 0:
raise ValueError(f"The given nr_of_worlds {nr_of_worlds} should be of type Int and larger or equal to 1.")
while self.worlds_created < nr_of_worlds:
yield self.get_world()
def get_world(self):
"""
Creates a single GridWorld instance based on the current state of this WorldFactor instance.
The returned GridWorld can be started with world.run().
Returns
-------
world: GridWorld
A GridWorld instance.
See Also
--------
"""
#TODO Refer to GridWorld.run()
self.worlds_created += 1
world = self.__create_world()
self.__reset_random()
return world
def __set_world_settings(self, shape, tick_duration, simulation_goal, rnd_seed,
visualization_bg_clr, visualization_bg_img, verbose):
if rnd_seed is None:
rnd_seed = self.rng.randint(0, 1000000)
world_settings = {"shape": shape,
"tick_duration": tick_duration,
"simulation_goal": simulation_goal,
"rnd_seed": rnd_seed,
"visualization_bg_clr": visualization_bg_clr,
"visualization_bg_img": visualization_bg_img,
"verbose": verbose}
return world_settings
def add_logger(self, logger_class, log_strategy=None, save_path=None, file_name=None,
file_extension=None, delimiter=None, **kwargs):
if issubclass(logger_class, GridWorldLogger):
set_params = {'log_strategy': log_strategy, 'save_path': save_path, 'file_name': file_name,
'file_extension': file_extension, 'delimiter': delimiter}
# Add all kwarg
set_params = {**set_params, **kwargs}
# Get the variables this logger class needs, and ignore the rest
accepted_parameters = {}
class_signature = inspect.signature(logger_class.__init__)
class_params = class_signature.parameters
for param in class_params.values():
if param.name in set_params.keys():
if set_params[param.name] is not None:
accepted_parameters[param.name] = set_params[param.name]
else:
accepted_parameters[param.name] = param.default
# Append the class and its parameters to the list of loggers
self.loggers.append((logger_class, accepted_parameters))
else:
raise Exception(f"The logger is not of type, nor inherits from, {GridWorldLogger.__name__}.")
def add_agent(self, location: Union[tuple, list], agent_brain: AgentBrain, name,
customizable_properties: Union[tuple, list] = None, sense_capability: SenseCapability = None,
is_traversable: bool = True, team: str = None, possible_actions: list = None, is_movable: bool = None,
visualize_size: float = None, visualize_shape: Union[float, str] = None, visualize_colour: str = None,
visualize_depth: int = None, visualize_opacity: float = None,
**custom_properties):
"""The helper method within a WorldFactory instance to add a single agent.
This method makes sure that when this
factory generates a GridWorld instance, it contains an AgentBody connected to the given AgentBrain.
All keyword parameters default to None. Which means that their values are obtained from the
"scenarios/defaults.json" file under the segment AgentBody.
Parameters
----------
location
The location (x,y) of the to be added agent.
agent_brain
The AgentBrain instance that will control the agent.
name
The name of the agent, should be unique to allow the visualisation to have a single web page per agent. If
the name is already used, an exception is thrown.
customizable_properties: optional
A list or tuple of names of properties for this agent that can be altered or customized. Either by the agent
itself or by other agents or objects. If a property value gets changed that is not in this list than an
exception is thrown.
sense_capability: optional
The SenseCapability object belonging this this agent's AgentBody. Used by the GridWorld to pre-filter
objects and agents from this agent's states when querying for actions. Defaults to a SenseCapability that
sees all object types within the entire world.
is_traversable: optional
Denotes whether other agents and object can move over this agent. It also throws an exception when this is
set to False and another object/agent with this set to False is added to the same location.
team: optional
The team name. Used to group agents together. Defaults to this agent's name + "_team" to signify it
forms its own team.
possible_actions: optional
A list or tuple of the names of the Action classes this agent can perform. With this you can limit the
actions this agent can perform.
is_movable: optional
Whether this agent can be moved by other agents (currently this only happens with the DropObjectAction and
PickUpAction).
visualize_size: optional
The size of this agent in its visualisation. A value of 1.0 denotes the full grid location square, whereas
a value of 0.5 denotes half, and 0.0 an infinitesimal small size.
visualize_shape: optional
The shape of this agent in its visualisation. Depending on the value it obtains this shape: 0 = a square,
1 = a triangle, 2 = a circle or when "img" the image from `image_filename` is used.
visualize_colour: optional
The colour of this agent in its visualisation. Should be a string hexadecimal colour value.
visualize_depth: optional
The visualisation depth of this agent in its visualisation. It denotes the 'layer' on which it is
visualized. A larger value is more on 'top'.
visualize_opacity: optional
The opacity of this agent in its visualization. A value of 1.0 means full opacity and 0.0 no opacity.
custom_properties: optional
Any additional given keyword arguments will be encapsulated in this dictionary. These will be added to the
AgentBody as custom_properties which can be perceived by other agents and objects or which can be used or
altered (if allowed to by the customizable_properties list) by the AgentBrain or others.
Returns
-------
None
...
Raises
------
AttributeError
When the given agent name is already added to this WorldFactory instance.
"""
# Check if location and agent are of correct type
if not isinstance(location, list) and not isinstance(location, tuple) and len(location) != 2:
raise ValueError(f"The given location {location} while adding the agent with name {name} is not a list, "
f"tuple or of length two.")
if not isinstance(agent_brain, AgentBrain):
raise ValueError(f"The given agent_brain while adding agent with name {name} is not of type "
f"{AgentBrain.__name__} but of type {agent_brain.__class__.__name__}.")
# Check if the agent name is unique
for existingAgent in self.agent_settings:
if existingAgent["mandatory_properties"]["name"] == name:
raise ValueError(f"An agent with the name {name} was already added. Agent names should be unique.",
name)
# Load the defaults for any variable that is not defined
# Obtain any defaults from the defaults.json file if not set already.
if is_traversable is None:
is_traversable = get_default_value(class_name="AgentBody", property_name="is_traversable")
if visualize_size is None:
visualize_size = get_default_value(class_name="AgentBody", property_name="visualize_size")
if visualize_shape is None:
visualize_shape = get_default_value(class_name="AgentBody", property_name="visualize_shape")
if visualize_colour is None:
visualize_colour = get_default_value(class_name="AgentBody", property_name="visualize_colour")
if visualize_opacity is None:
visualize_opacity = get_default_value(class_name="AgentBody", property_name="visualize_opacity")
if visualize_depth is None:
visualize_depth = get_default_value(class_name="AgentBody", property_name="visualize_depth")
if possible_actions is None:
possible_actions = get_default_value(class_name="AgentBody", property_name="possible_actions")
if is_movable is None:
is_movable = get_default_value(class_name="AgentBody", property_name="is_movable")
# If default variables are not given, assign them (most empty, except of sense_capability that defaults to all
# objects with infinite range).
if custom_properties is None:
custom_properties = {}
if sense_capability is None:
sense_capability = create_sense_capability([], []) # Create sense capability that perceives all
if customizable_properties is None:
customizable_properties = []
# Check if the agent is not of HumanAgent, if so; use the add_human_agent method
inh_path = get_inheritence_path(agent_brain.__class__)
if 'HumanAgent' in inh_path:
Exception(f"You are adding an agent that is or inherits from HumanAgent with the name {name}. Use "
f"factory.add_human_agent to add such agents.")
# Define a settings dictionary with all we need to register and add an agent to the GridWorld
agent_setting = {"agent": agent_brain,
"custom_properties": custom_properties,
"customizable_properties": customizable_properties,
"sense_capability": sense_capability,
"mandatory_properties": {
"name": name,
"is_movable": is_movable,
"is_traversable": is_traversable,
"possible_actions": possible_actions,
"is_human_agent": False, # is you want a human agent, use factory.add_human_agent()
"visualize_size": visualize_size,
"visualize_shape": visualize_shape,
"visualize_colour": visualize_colour,
"visualize_opacity": visualize_opacity,
"visualize_depth": visualize_depth,
"location": location,
"team": team}
}
self.agent_settings.append(agent_setting)
def add_team(self, agent_brains: Union[list, tuple], locations: Union[list, tuple], team_name,
custom_properties=None, sense_capability=None,
customizable_properties=None, is_traversable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_opacity=None):
"""Adds a group of agents as a single team (meaning that their 'team' property all have the given team name).
All parameters except for the `locations` and `agent_brain` defaults to `None`. Which means that their values
are obtained from the "scenarios/defaults.json" file under the segment AgentBody.
Parameters
----------
agent_brains
The list or tuple of AgentBrain that will control each agent in the team. Should be of the same size as
`locations`.
locations
The list or tuple of locations in the form of [x, y] at which coordinates each agent starts in the team.
Should be of the same size as `locations`.
team_name
The
custom_properties
..
sense_capability
..
customizable_properties
..
is_traversable
..
visualize_size
..
visualize_shape
..
visualize_colour
..
visualize_opacity
..
Returns
-------
None
..
"""
self.add_multiple_agents(agent_brains, locations, custom_properties=custom_properties,
sense_capabilities=sense_capability, customizable_properties=customizable_properties,
is_traversable=is_traversable,
teams=team_name, visualize_sizes=visualize_size, visualize_shapes=visualize_shape,
visualize_colours=visualize_colour, visualize_opacities=visualize_opacity)
def add_multiple_agents(self, agents, locations, custom_properties=None,
sense_capabilities=None, customizable_properties=None,
is_traversable=None,
teams=None, visualize_sizes=None, visualize_shapes=None,
visualize_colours=None, visualize_opacities=None, visualize_depths=None):
"""
Parameters
----------
agents
locations
custom_properties
sense_capabilities
customizable_properties
is_traversable
teams
visualize_sizes
visualize_shapes
visualize_colours
visualize_opacities
visualize_depths
Returns
-------
"""
# If any of the lists are not given, fill them with None and if they are a single value of its expected type we
# copy it in a list. A none value causes the default value to be loaded.
if custom_properties is None:
custom_properties = [{} for _ in range(len(agents))]
elif isinstance(custom_properties, dict):
custom_properties = [custom_properties for _ in range(len(agents))]
if sense_capabilities is None:
sense_capabilities = [None for _ in range(len(agents))]
elif isinstance(sense_capabilities, SenseCapability):
sense_capabilities = [sense_capabilities for _ in range(len(agents))]
if customizable_properties is None:
customizable_properties = [None for _ in range(len(agents))]
elif not any(isinstance(el, list) for el in customizable_properties):
customizable_properties = [customizable_properties for _ in range(len(agents))]
if is_traversable is None:
is_traversable = [None for _ in range(len(agents))]
elif isinstance(is_traversable, bool):
is_traversable = [is_traversable for _ in range(len(agents))]
if teams is None:
teams = [None for _ in range(len(agents))]
elif isinstance(teams, str):
teams = [teams for _ in range(len(agents))]
if visualize_sizes is None:
visualize_sizes = [None for _ in range(len(agents))]
elif isinstance(visualize_sizes, int):
visualize_sizes = [visualize_sizes for _ in range(len(agents))]
if visualize_shapes is None:
visualize_shapes = [None for _ in range(len(agents))]
elif isinstance(visualize_shapes, int):
visualize_shapes = [visualize_shapes for _ in range(len(agents))]
if visualize_colours is None:
visualize_colours = [None for _ in range(len(agents))]
elif isinstance(visualize_colours, str):
visualize_colours = [visualize_colours for _ in range(len(agents))]
if visualize_opacities is None:
visualize_opacities = [None for _ in range(len(agents))]
elif isinstance(visualize_opacities, int):
visualize_opacities = [visualize_opacities for _ in range(len(agents))]
if visualize_depths is None:
visualize_depths = [None for _ in range(len(agents))]
elif isinstance(visualize_depths, int):
visualize_depths = [visualize_depths for _ in range(len(agents))]
# Loop through all agents and add them
for idx, agent in enumerate(agents):
self.add_agent(locations[idx], agent,
sense_capability=sense_capabilities[idx],
customizable_properties=customizable_properties[idx],
is_traversable=is_traversable[idx],
team=teams[idx],
visualize_size=visualize_sizes[idx],
visualize_shape=visualize_shapes[idx],
visualize_colour=visualize_colours[idx],
visualize_depth=visualize_depths[idx],
visualize_opacity=visualize_opacities[idx],
**custom_properties[idx])
def add_agent_prospect(self, location, agent, probability, name="Agent", customizable_properties=None,
sense_capability=None,
is_traversable=None, team=None, possible_actions=None,
is_movable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_opacity=None,
visualize_depth=None, **custom_properties):
# Add agent as normal
self.add_agent(location, agent, name, customizable_properties, sense_capability,
is_traversable, team, possible_actions, is_movable,
visualize_size, visualize_shape, visualize_colour, visualize_depth,
visualize_opacity, **custom_properties)
# Get the last settings (which we just added) and add the probability
self.agent_settings[-1]['probability'] = probability
def add_object(self, location, name, callable_class=None, customizable_properties=None,
is_traversable=None, is_movable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None,
visualize_opacity=None, **custom_properties):
if callable_class is None:
callable_class = EnvObject
# Check if location and agent are of correct type
assert isinstance(location, list) or isinstance(location, tuple)
assert isinstance(callable_class, Callable)
# Load default parameters if not passed
if is_movable is None:
is_movable = get_default_value(class_name="EnvObject", property_name="is_movable")
# If default variables are not given, assign them (most empty, except of sense_capability that defaults to all
# objects with infinite range).
if custom_properties is None:
custom_properties = {}
if customizable_properties is None:
customizable_properties = []
# Define a settings dictionary with all we need to register and add an agent to the GridWorld
object_setting = {"callable_class": callable_class,
"custom_properties": custom_properties,
"customizable_properties": customizable_properties,
"mandatory_properties": {
"name": name,
"is_traversable": is_traversable,
"visualize_size": visualize_size,
"visualize_shape": visualize_shape,
"visualize_colour": visualize_colour,
"visualize_depth": visualize_depth,
"visualize_opacity": visualize_opacity,
"is_movable": is_movable,
"location": location}
}
self.object_settings.append(object_setting)
def add_object_prospect(self, location, name, probability, callable_class=None, customizable_properties=None,
is_traversable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None,
visualize_opacity=None, **custom_properties):
# Add object as normal
self.add_object(location, name, callable_class, customizable_properties,
is_traversable,
visualize_size, visualize_shape, visualize_colour, visualize_depth,
visualize_opacity, **custom_properties)
# Get the last settings (which we just added) and add the probability
self.object_settings[-1]['probability'] = probability
def add_multiple_objects(self, locations, names=None, callable_classes=None, custom_properties=None,
customizable_properties=None, is_traversable=None, visualize_sizes=None,
visualize_shapes=None, visualize_colours=None, visualize_depths=None,
visualize_opacities=None, is_movable=None):
# If any of the lists are not given, fill them with None and if they are a single value of its expected type we
# copy it in a list. A none value causes the default value to be loaded.
if is_movable is None:
is_movable = [None for _ in range(len(locations))]
elif isinstance(is_movable, bool):
is_movable = [is_movable for _ in range(len(locations))]
if callable_classes is None:
callable_classes = [EnvObject for _ in range(len(locations))]
elif isinstance(callable_classes, Callable):
callable_classes = [callable_classes for _ in range(len(locations))]
if names is None:
names = [callable_class.__name__ for callable_class in callable_classes]
elif isinstance(names, str):
names = [names for _ in range(len(locations))]
if custom_properties is None:
custom_properties = [{} for _ in range(len(locations))]
elif isinstance(custom_properties, dict):
custom_properties = [custom_properties for _ in range(len(locations))]
if customizable_properties is None:
customizable_properties = [None for _ in range(len(locations))]
elif not any(isinstance(el, list) for el in customizable_properties):
customizable_properties = [customizable_properties for _ in range(len(locations))]
if is_traversable is None:
is_traversable = [None for _ in range(len(locations))]
elif isinstance(is_traversable, bool):
is_traversable = [is_traversable for _ in range(len(locations))]
if visualize_sizes is None:
visualize_sizes = [None for _ in range(len(locations))]
elif isinstance(visualize_sizes, int):
visualize_sizes = [visualize_sizes for _ in range(len(locations))]
if visualize_shapes is None:
visualize_shapes = [None for _ in range(len(locations))]
elif isinstance(visualize_shapes, int):
visualize_shapes = [visualize_shapes for _ in range(len(locations))]
if visualize_colours is None:
visualize_colours = [None for _ in range(len(locations))]
elif isinstance(visualize_colours, str):
visualize_colours = [visualize_colours for _ in range(len(locations))]
if visualize_opacities is None:
visualize_opacities = [None for _ in range(len(locations))]
elif isinstance(visualize_opacities, int):
visualize_opacities = [visualize_opacities for _ in range(len(locations))]
if visualize_depths is None:
visualize_depths = [None for _ in range(len(locations))]
elif isinstance(visualize_depths, str):
visualize_depths = [visualize_depths for _ in range(len(locations))]
# Loop through all agents and add them
for idx in range(len(locations)):
self.add_object(location=locations[idx], name=names[idx], callable_class=callable_classes[idx],
customizable_properties=customizable_properties[idx],
is_traversable=is_traversable[idx], is_movable=is_movable[idx],
visualize_size=visualize_sizes[idx], visualize_shape=visualize_shapes[idx],
visualize_colour=visualize_colours[idx], visualize_depth=visualize_depths[idx],
visualize_opacity=visualize_opacities[idx], **custom_properties[idx])
def add_human_agent(self, location, agent, name="HumanAgent", customizable_properties=None, sense_capability=None,
is_traversable=None, team=None, possible_actions=None,
is_movable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None,
visualize_opacity=None, key_action_map=None, **custom_properties):
# Check if location and agent are of correct type
assert isinstance(location, list) or isinstance(location, tuple)
assert isinstance(agent, HumanAgentBrain)
for existingAgent in self.agent_settings:
if existingAgent["mandatory_properties"]["name"] == name:
raise Exception(f"A human agent with the name {name} was already added. Agent names should be unique.",
name)
# Load the defaults for any variable that is not defined
# Obtain any defaults from the defaults.json file if not set already.
if is_traversable is None:
is_traversable = get_default_value(class_name="AgentBody", property_name="is_traversable")
if visualize_size is None:
visualize_size = get_default_value(class_name="AgentBody", property_name="visualize_size")
if visualize_shape is None:
visualize_shape = get_default_value(class_name="AgentBody", property_name="visualize_shape")
if visualize_colour is None:
visualize_colour = get_default_value(class_name="AgentBody", property_name="visualize_colour")
if visualize_opacity is None:
visualize_opacity = get_default_value(class_name="AgentBody", property_name="visualize_opacity")
if visualize_depth is None:
visualize_depth = get_default_value(class_name="AgentBody", property_name="visualize_depth")
if possible_actions is None:
possible_actions = get_default_value(class_name="AgentBody", property_name="possible_actions")
if is_movable is None:
is_movable = get_default_value(class_name="AgentBody", property_name="is_movable")
# If default variables are not given, assign them (most empty, except of sense_capability that defaults to all
# objects with infinite range).
if custom_properties is None:
custom_properties = {}
if sense_capability is None:
sense_capability = create_sense_capability([], []) # Create sense capability that perceives all
if customizable_properties is None:
customizable_properties = []
# Check if the agent is of HumanAgent, if not; use the add_agent method
inh_path = get_inheritence_path(agent.__class__)
if 'HumanAgent' not in inh_path:
Exception(f"You are adding an agent that does not inherit from HumanAgent with the name {name}. Use "
f"factory.add_agent to add autonomous agents.")
# Append the user input map to the custom properties
custom_properties["key_action_map"] = key_action_map
# Define a settings dictionary with all we need to register and add an agent to the GridWorld
hu_ag_setting = {"agent": agent,
"custom_properties": custom_properties,
"customizable_properties": customizable_properties,
"sense_capability": sense_capability,
"mandatory_properties": {
"name": name,
"is_movable": is_movable,
"is_traversable": is_traversable,
"possible_actions": possible_actions,
"is_human_agent": True,
"visualize_size": visualize_size,
"visualize_shape": visualize_shape,
"visualize_colour": visualize_colour,
"visualize_opacity": visualize_opacity,
"visualize_depth": visualize_depth,
"location": location,
"team": team}
}
self.agent_settings.append(hu_ag_setting)
def add_area(self, top_left_location, width, height, name, customizable_properties=None, visualize_colour=None,
visualize_opacity=None, **custom_properties):
# Check if width and height are large enough to make an actual room (with content)
if width < 1 or height < 1:
raise Exception(f"While adding area {name}; The width {width} and/or height {height} should both be larger"
f" than 0.")
# Get all locations in the rectangle
locs = self.__list_area_locs(top_left_location, width, height)
# Add all area objects
self.add_multiple_objects(locations=locs, callable_classes=AreaTile,
customizable_properties=customizable_properties, visualize_colours=visualize_colour,
visualize_opacities=visualize_opacity, **custom_properties)
    def add_smoke_area(self, top_left_location, width, height, name, visualize_colour=None,
                       smoke_thickness_multiplier=1.0, visualize_depth=None, **custom_properties):
        """ Register a rectangular smoke area built from SmokeTile objects.

        A white-noise field over the rectangle determines each tile's opacity,
        so the smoke looks patchy; `smoke_thickness_multiplier` scales the
        overall opacity, with the result clipped to [0, 1].
        """
        # Check if width and height are large enough to make an actual room (with content)
        if width < 1 or height < 1:
            raise Exception(f"While adding area {name}; The width {width} and/or height {height} should both be larger"
                            f" than 0.")
        # Get all locations in the rectangle
        min_x = top_left_location[0]
        max_x = top_left_location[0] + width
        min_y = top_left_location[1]
        max_y = top_left_location[1] + height
        # Noise field over the rectangle; seeded rng keeps it reproducible per world.
        noise_grid = utils._white_noise(min_x, max_x, min_y, max_y, rng=self.rng)
        for x in range(noise_grid.shape[0]):
            for y in range(noise_grid.shape[1]):
                # get noise point
                noise = noise_grid[x, y]
                # convert from [-1,1] range to [0,1] range, and flip
                opacity = 1 - ((noise + 1.0) / 2.0)
                opacity = np.clip(opacity * smoke_thickness_multiplier, 0, 1)
                # NOTE(review): tiles are placed at the raw grid indices [x, y]
                # rather than offset by top_left_location — verify whether
                # utils._white_noise indexes absolute coordinates; otherwise a
                # non-origin area would be placed in the wrong location.
                self.add_object(location=[x, y], name=name, callable_class=SmokeTile,
                                visualize_colour=visualize_colour, visualize_opacity=opacity,
                                visualize_depth=visualize_depth, **custom_properties)
def __list_area_locs(self, top_left_location, width, height):
"""
Provided an area with the top_left_location, width and height,
generate a list containing all coordinates in that area
"""
# Get all locations in the rectangle
locs = []
min_x = top_left_location[0]
max_x = top_left_location[0] + width
min_y = top_left_location[1]
max_y = top_left_location[1] + height
for x in range(min_x, max_x):
for y in range(min_y, max_y):
locs.append((x, y))
return locs
def add_line(self, start, end, name, callable_class=None, customizable_properties=None,
is_traversable=None, is_movable=None,
visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None,
visualize_opacity=None, **custom_properties):
# Get the coordinates on the given line
line_coords = _get_line_coords(start, end)
# Construct the names
names = [name for _ in line_coords]
# Add the actual properties
self.add_multiple_objects(locations=line_coords, names=names, callable_classes=callable_class,
custom_properties=custom_properties, customizable_properties=customizable_properties,
is_traversable=is_traversable, visualize_sizes=visualize_size,
visualize_shapes=visualize_shape, visualize_colours=visualize_colour,
visualize_opacities=visualize_opacity, visualize_depths=visualize_depth,
is_movable=is_movable)
    def add_room(self, top_left_location, width, height, name, door_locations=None, with_area_tiles=False,
                 doors_open=False,
                 wall_custom_properties=None, wall_customizable_properties=None,
                 area_custom_properties=None, area_customizable_properties=None,
                 area_visualize_colour=None, area_visualize_opacity=None):
        """ Register a rectangular room: walls on the perimeter, optional doors
        and an optional interior area.

        Each entry of `door_locations` must lie on the room's walls; the wall
        there is replaced by a Door object (opened per `doors_open`). When
        `with_area_tiles` is True — or any area_* argument is given — the
        interior is filled with an area named "<name>_area".

        Raises
        ------
        Exception
            If width/height are too small for a room with content, or a door
            location is not on a wall.
        """
        # Check if width and height are large enough to make an actual room (with content)
        if width <= 2 or height <= 2:
            raise Exception(f"While adding room {name}; The width {width} and/or height {height} should both be larger"
                            f" than 2.")
        # Check if the with_area boolean is True when any area properties are given
        if with_area_tiles is False and (
                area_custom_properties is not None or
                area_customizable_properties is not None or
                area_visualize_colour is not None or
                area_visualize_opacity is not None):
            warnings.warn(f"While adding room {name}: The boolean with_area_tiles is set to {with_area_tiles} while "
                          f"also providing specific area statements. Treating with_area_tiles as True.")
            with_area_tiles = True
        # Subtract 1 from both width and height, since the top left already counts as a size of 1,1
        width -= 1
        height -= 1
        # Set corner coordinates
        top_left = top_left_location
        top_right = (top_left_location[0] + width, top_left_location[1])
        bottom_left = (top_left_location[0], top_left_location[1] + height)
        bottom_right = (top_left_location[0] + width, top_left_location[1] + height)
        # Get all edge coordinates
        top = _get_line_coords(top_left, top_right)
        right = _get_line_coords(top_right, bottom_right)
        bottom = _get_line_coords(bottom_left, bottom_right)
        left = _get_line_coords(top_left, bottom_left)
        # Combine in one and remove duplicates (the corners appear in two edges each)
        all_ = top
        all_.extend(right)
        all_.extend(bottom)
        all_.extend(left)
        all_ = list(set(all_))
        # Check if all door locations are at wall locations, if so remove those wall locations
        door_locations = [] if door_locations is None else door_locations
        for door_loc in door_locations:
            if door_loc in all_:
                all_.remove(door_loc)
            else:
                raise Exception(f"While adding room {name}, the requested door location {door_loc} is not in a wall.")
        # Add all walls
        names = [f"{name} - wall@{loc}" for loc in all_]
        self.add_multiple_objects(locations=all_, names=names, callable_classes=Wall,
                                  custom_properties=wall_custom_properties,
                                  customizable_properties=wall_customizable_properties)
        # Add all doors
        for door_loc in door_locations:
            self.add_object(location=door_loc, name=f"{name} - door@{door_loc}", callable_class=Door,
                            is_open=doors_open)
        # Add all area tiles if required (interior shrinks by one on each side)
        if with_area_tiles:
            area_top_left = (top_left[0] + 1, top_left[1] + 1)
            area_width = width - 1
            area_height = height - 1
            # If properties happens to be none, set it to empty dict
            if area_custom_properties is None:
                area_custom_properties = {}
            self.add_area(top_left_location=area_top_left, width=area_width, height=area_height, name=f"{name}_area",
                          visualize_colour=area_visualize_colour, visualize_opacity=area_visualize_opacity,
                          customizable_properties=area_customizable_properties, **area_custom_properties)
def __create_world(self):
# Create the world
world = self.__create_grid_world()
# Create all objects first
objs = []
for obj_settings in self.object_settings:
env_object = self.__create_env_object(obj_settings)
if env_object is not None:
objs.append(env_object)
# Then create all agents
avatars = []
for agent_settings in self.agent_settings:
agent, agent_avatar = self.__create_agent_avatar(agent_settings)
if agent_avatar is not None:
avatars.append((agent, agent_avatar))
# Register all objects (including checks)
for env_object in objs:
world._register_env_object(env_object)
# Register all agents (including checks)
for agent, agent_avatar in avatars:
world._register_agent(agent, agent_avatar)
# Register all teams and who is in them
world._register_teams()
# Add all loggers if any
for logger_class, arguments in self.loggers:
logger = logger_class(**arguments)
logger._set_world_nr(self.worlds_created)
world._register_logger(logger)
# Return the (successful/stable) world
return world
def __create_grid_world(self):
args = self.world_settings
# create a world ID in the shape of "world_" + world number + seeded random int
args['world_ID'] = f"world_{self.worlds_created}"
world = GridWorld(**args)
return world
def __create_env_object(self, settings):
# First we check if this settings represent a probabilistic object, because then we expect settings to contain
# a probability setting.
if 'probability' in settings.keys():
prob = settings['probability']
p = self.rng.rand()
if p > prob:
return None
callable_class = settings['callable_class']
custom_props = settings['custom_properties']
customizable_props = settings['customizable_properties']
mandatory_props = settings['mandatory_properties']
if callable_class == EnvObject: # If it is a 'normal' EnvObject we do not treat it differently
# Collect all arguments in a dictionary
args = {'location': mandatory_props['location'],
'name': mandatory_props['name'],
'class_callable': callable_class,
'customizable_properties': customizable_props,
'is_traversable': mandatory_props['is_traversable'],
'visualize_size': mandatory_props['visualize_size'],
'visualize_shape': mandatory_props['visualize_shape'],
'visualize_colour': mandatory_props['visualize_colour'],
'visualize_opacity': mandatory_props['visualize_opacity'],
'visualize_depth': mandatory_props['visualize_depth'],
**custom_props}
else: # else we need to check what this object's constructor requires and obtain those properties only
# Get all variables required by constructor
argspecs = inspect.getfullargspec(callable_class)
args = argspecs.args # does not give *args or **kwargs names
defaults = argspecs.defaults # defaults (if any) of the last n elements in args
varkw = argspecs.varkw # **kwargs names
argspecsv2 = inspect.getfullargspec(callable_class)
# Now assign the default values to kwargs dictionary
args = OrderedDict({arg: "not_set" for arg in reversed(args[1:])})
if defaults is not None:
for idx, default in enumerate(reversed(defaults)):
k = list(args.keys())[idx]
args[k] = default
# Check if all arguments are present (fails if a required argument without a default value is not given)
for arg, default in args.items():
if arg not in custom_props.keys() and arg not in mandatory_props.keys() and default == "not_set":
raise Exception(f"Cannot create environment object of type {callable_class.__name__} with name "
f"{mandatory_props['name']}, as its constructor requires the argument named {arg} "
f"which is not given as a property.")
elif arg in custom_props.keys() and custom_props[arg] is not None:
# an argument is present in custom_props, which overrides constructor defaults
args[arg] = custom_props[arg]
elif arg in mandatory_props.keys() and mandatory_props[arg] is not None:
# an argument is present in mandatory_props, which overrides constructor defaults
args[arg] = mandatory_props[arg]
# We provide a warning if some custom properties are given which are not used for this class
kwargs = [prop_name for prop_name in custom_props.keys() if prop_name not in args.keys()]
if varkw is None and len(kwargs) > 0:
warnings.warn(f"The following properties are not used in the creation of environment object of type "
f"{callable_class.__name__} with name {mandatory_props['name']}; {kwargs}, because "
f"the class does nto have a **kwargs argument in the constructor.")
# if a **kwargs argument was defined in the object constructor, pass all custom properties to the object
elif varkw is not None and len(kwargs) > 0:
for arg in kwargs:
args[arg] = custom_props[arg]
args = self.__instantiate_random_properties(args)
env_object = callable_class(**args)
return env_object
    def __create_agent_avatar(self, settings):
        """ Create the AgentBody avatar for a registered agent.

        Returns the tuple (agent, avatar); the avatar is None when the
        settings contain a 'probability' entry and this world's random draw
        rejects the agent. Full agent initialisation happens later, when the
        pair is registered with the world.
        """
        agent = settings['agent']
        # First we check if this settings represent a probabilistic object, because then we expect settings to contain
        # a probability setting.
        if 'probability' in settings.keys():
            prob = settings['probability']
            p = self.rng.rand()
            if p > prob:
                return agent, None
        sense_capability = settings['sense_capability']
        custom_props = settings['custom_properties']
        customizable_props = settings['customizable_properties']
        mandatory_props = settings['mandatory_properties']
        # Wire the agent brain's callbacks into the avatar so the GridWorld
        # can drive the agent without knowing its concrete brain class.
        args = {**mandatory_props,
                'isAgent': True,
                'sense_capability': sense_capability,
                'class_callable': agent.__class__,
                'callback_agent_get_action': agent._get_action,
                'callback_agent_set_action_result': agent._set_action_result,
                'callback_agent_observe': agent.filter_observations,
                'callback_agent_log': agent._get_log_data,
                'callback_agent_get_messages': agent._get_messages,
                'callback_agent_set_messages': agent._set_messages,
                'callback_agent_initialize': agent.initialize,
                'customizable_properties': customizable_props,
                **custom_props}
        # Parse arguments and create the AgentAvatar
        args = self.__instantiate_random_properties(args)
        avatar = AgentBody(**args)
        # We return the agent and avatar (as we will complete the initialisation of the agent when we register it)
        return agent, avatar
def __instantiate_random_properties(self, args):
# Checks if all given arguments in the dictionary are not None, and if they are of RandomProperty or
# RandomLocation, their (random) value is retrieved.
for k, v in args.items():
if isinstance(v, RandomProperty):
args[k] = v._get_property(self.rng)
return args
    def __reset_random(self):
        """ Placeholder: reset random-property bookkeeping after world creation.

        Intended to clear the duplicate-tracking of RandomProperty (and
        RandomLocation) instances so values can be drawn again for the next
        world; not implemented yet.
        """
        # TODO resets all RandomProperty and RandomLocation, is called after creating a world so all duplicates can be
        # TODO selected again.
        pass
    def startup(self):
        """ Start any world-overarching MATRX scripts, such as, if requested, the API or MATRX visualization.

        The API thread handle is stored in `self.api_info["api_thread"]` and
        the visualizer thread in `self.matrx_visualizer_thread`, so `stop()`
        can shut them down and join them later.

        Returns
        -------
        """
        if self.run_matrx_api:
            self.api_info["api_thread"] = api.run_api(self.verbose)
        if self.run_matrx_visualizer:
            self.matrx_visualizer_thread = visualization_server.run_matrx_visualizer(self.verbose)
def stop(self):
""" Stop any world-overarching MATRX scripts, such as, if started, the API or MATRX visualization.
Returns
-------
"""
if self.run_matrx_api:
print("Shutting down Matrx API")
r = requests.get("http://localhost:" + str(api.port) + "/shutdown_API")
self.api_info["api_thread"].join()
if self.run_matrx_visualizer:
print("Shutting down Matrx visualizer")
r = requests.get("http://localhost:" + str(visualization_server.port) + "/shutdown_visualizer")
self.matrx_visualizer_thread.join()
class RandomProperty:
    """ A property whose concrete value is sampled anew for every created world.

    Holds candidate `values` with an associated probability `distribution`
    (uniform by default; normalized when it does not sum to 1). When
    `allow_duplicates` is False, values drawn earlier are excluded from
    subsequent draws until `reset()` is called.
    """

    def __init__(self, values, distribution=None, allow_duplicates=True):
        # If distribution is None, it is uniform (equal probability to all values).
        if distribution is None:
            # Bug fix: this used `range(values)`, which raises a TypeError for
            # list-valued `values`; it must iterate once per value.
            distribution = [1 / len(values) for _ in range(len(values))]
        # Normalize distribution if it does not already sum to one.
        if sum(distribution) != 1.0:
            total = sum(distribution)
            distribution = [el / total for el in distribution]
        # Every value needs exactly one probability.
        assert len(distribution) == len(values)
        # Assign values and distribution
        self.values = values
        self.distribution = distribution
        self.allow_duplicates = allow_duplicates
        # Tracks values drawn so far; used to prevent duplicates when asked.
        self.selected_values = set()

    def _get_property(self, rng: RandomState, size=None):
        """ Draw a value using `rng`, honouring the duplicate policy. """
        vals = self.values.copy()
        if not self.allow_duplicates:
            # Exclude anything drawn before (until reset() is called).
            # NOTE(review): removing values shortens `vals` while `p` keeps the
            # original length, which rng.choice rejects — confirm intended use.
            for it in self.selected_values:
                vals.remove(it)
        choice = rng.choice(vals, p=self.distribution, size=size, replace=self.allow_duplicates)
        self.selected_values.add(choice)
        return choice

    def reset(self):
        """ Forget previously drawn values so all values can be drawn again. """
        self.selected_values = set()
| [
"numpy.clip",
"matrx.utils.utils._get_line_coords",
"matrx.utils.utils._white_noise",
"inspect.signature",
"inspect.getfullargspec",
"matrx.API.api.run_api",
"matrx.utils.utils.get_inheritence_path",
"matrx.objects.agent_body.AgentBody",
"matrx.grid_world.GridWorld",
"matrx.utils.utils.create_sens... | [((6726, 6760), 'numpy.random.RandomState', 'np.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (6747, 6760), True, 'import numpy as np\n'), ((19054, 19097), 'matrx.utils.utils.get_inheritence_path', 'get_inheritence_path', (['agent_brain.__class__'], {}), '(agent_brain.__class__)\n', (19074, 19097), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((37098, 37135), 'matrx.utils.utils.get_inheritence_path', 'get_inheritence_path', (['agent.__class__'], {}), '(agent.__class__)\n', (37118, 37135), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((40286, 40346), 'matrx.utils.utils._white_noise', 'utils._white_noise', (['min_x', 'max_x', 'min_y', 'max_y'], {'rng': 'self.rng'}), '(min_x, max_x, min_y, max_y, rng=self.rng)\n', (40304, 40346), False, 'from matrx.utils import utils\n'), ((42000, 42028), 'matrx.utils.utils._get_line_coords', '_get_line_coords', (['start', 'end'], {}), '(start, end)\n', (42016, 42028), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((44500, 44537), 'matrx.utils.utils._get_line_coords', '_get_line_coords', (['top_left', 'top_right'], {}), '(top_left, top_right)\n', (44516, 44537), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((44554, 44595), 'matrx.utils.utils._get_line_coords', '_get_line_coords', (['top_right', 'bottom_right'], {}), '(top_right, bottom_right)\n', (44570, 44595), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((44613, 44656), 'matrx.utils.utils._get_line_coords', '_get_line_coords', (['bottom_left', 'bottom_right'], {}), '(bottom_left, bottom_right)\n', (44629, 44656), False, 'from 
matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((44672, 44711), 'matrx.utils.utils._get_line_coords', '_get_line_coords', (['top_left', 'bottom_left'], {}), '(top_left, bottom_left)\n', (44688, 44711), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((48082, 48099), 'matrx.grid_world.GridWorld', 'GridWorld', ([], {}), '(**args)\n', (48091, 48099), False, 'from matrx.grid_world import GridWorld\n'), ((54014, 54031), 'matrx.objects.agent_body.AgentBody', 'AgentBody', ([], {}), '(**args)\n', (54023, 54031), False, 'from matrx.objects.agent_body import AgentBody\n'), ((7558, 7603), 'matrx.sim_goals.sim_goal.LimitedTimeGoal', 'LimitedTimeGoal', ([], {'max_nr_ticks': 'simulation_goal'}), '(max_nr_ticks=simulation_goal)\n', (7573, 7603), False, 'from matrx.sim_goals.sim_goal import LimitedTimeGoal, SimulationGoal\n'), ((8430, 8461), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (8451, 8461), False, 'import warnings\n'), ((8571, 8603), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (8592, 8603), False, 'import warnings\n'), ((11259, 11299), 'inspect.signature', 'inspect.signature', (['logger_class.__init__'], {}), '(logger_class.__init__)\n', (11276, 11299), False, 'import inspect\n'), ((17426, 17499), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""is_traversable"""'}), "(class_name='AgentBody', property_name='is_traversable')\n", (17443, 17499), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((17564, 17637), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_size"""'}), "(class_name='AgentBody', 
property_name='visualize_size')\n", (17581, 17637), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((17704, 17778), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_shape"""'}), "(class_name='AgentBody', property_name='visualize_shape')\n", (17721, 17778), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((17847, 17922), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_colour"""'}), "(class_name='AgentBody', property_name='visualize_colour')\n", (17864, 17922), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((17993, 18069), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_opacity"""'}), "(class_name='AgentBody', property_name='visualize_opacity')\n", (18010, 18069), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((18136, 18210), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_depth"""'}), "(class_name='AgentBody', property_name='visualize_depth')\n", (18153, 18210), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((18279, 18354), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""possible_actions"""'}), "(class_name='AgentBody', property_name='possible_actions')\n", (18296, 18354), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, 
create_sense_capability\n'), ((18411, 18480), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""is_movable"""'}), "(class_name='AgentBody', property_name='is_movable')\n", (18428, 18480), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((18782, 18813), 'matrx.utils.utils.create_sense_capability', 'create_sense_capability', (['[]', '[]'], {}), '([], [])\n', (18805, 18813), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((28308, 28377), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""EnvObject"""', 'property_name': '"""is_movable"""'}), "(class_name='EnvObject', property_name='is_movable')\n", (28325, 28377), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((35479, 35552), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""is_traversable"""'}), "(class_name='AgentBody', property_name='is_traversable')\n", (35496, 35552), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((35617, 35690), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_size"""'}), "(class_name='AgentBody', property_name='visualize_size')\n", (35634, 35690), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((35757, 35831), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_shape"""'}), "(class_name='AgentBody', property_name='visualize_shape')\n", (35774, 35831), False, 'from matrx.utils.utils 
import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((35900, 35975), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_colour"""'}), "(class_name='AgentBody', property_name='visualize_colour')\n", (35917, 35975), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((36046, 36122), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_opacity"""'}), "(class_name='AgentBody', property_name='visualize_opacity')\n", (36063, 36122), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((36189, 36263), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""visualize_depth"""'}), "(class_name='AgentBody', property_name='visualize_depth')\n", (36206, 36263), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((36332, 36407), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""possible_actions"""'}), "(class_name='AgentBody', property_name='possible_actions')\n", (36349, 36407), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((36464, 36533), 'matrx.utils.utils.get_default_value', 'get_default_value', ([], {'class_name': '"""AgentBody"""', 'property_name': '"""is_movable"""'}), "(class_name='AgentBody', property_name='is_movable')\n", (36481, 36533), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((36835, 36866), 'matrx.utils.utils.create_sense_capability', 
'create_sense_capability', (['[]', '[]'], {}), '([], [])\n', (36858, 36866), False, 'from matrx.utils.utils import get_inheritence_path, get_default_value, _get_line_coords, create_sense_capability\n'), ((43759, 43949), 'warnings.warn', 'warnings.warn', (['f"""While adding room {name}: The boolean with_area_tiles is set to {with_area_tiles} while also providing specific area statements. Treating with_area_tiles as True."""'], {}), "(\n f'While adding room {name}: The boolean with_area_tiles is set to {with_area_tiles} while also providing specific area statements. Treating with_area_tiles as True.'\n )\n", (43772, 43949), False, 'import warnings\n'), ((49795, 49833), 'inspect.getfullargspec', 'inspect.getfullargspec', (['callable_class'], {}), '(callable_class)\n', (49817, 49833), False, 'import inspect\n'), ((50079, 50117), 'inspect.getfullargspec', 'inspect.getfullargspec', (['callable_class'], {}), '(callable_class)\n', (50101, 50117), False, 'import inspect\n'), ((55000, 55025), 'matrx.API.api.run_api', 'api.run_api', (['self.verbose'], {}), '(self.verbose)\n', (55011, 55025), False, 'from matrx.API import api\n'), ((55108, 55163), 'matrx_visualizer.visualization_server.run_matrx_visualizer', 'visualization_server.run_matrx_visualizer', (['self.verbose'], {}), '(self.verbose)\n', (55149, 55163), False, 'from matrx_visualizer import visualization_server\n'), ((40665, 40716), 'numpy.clip', 'np.clip', (['(opacity * smoke_thickness_multiplier)', '(0)', '(1)'], {}), '(opacity * smoke_thickness_multiplier, 0, 1)\n', (40672, 40716), True, 'import numpy as np\n'), ((51791, 52047), 'warnings.warn', 'warnings.warn', (['f"""The following properties are not used in the creation of environment object of type {callable_class.__name__} with name {mandatory_props[\'name\']}; {kwargs}, because the class does nto have a **kwargs argument in the constructor."""'], {}), '(\n f"The following properties are not used in the creation of environment object of type 
{callable_class.__name__} with name {mandatory_props[\'name\']}; {kwargs}, because the class does nto have a **kwargs argument in the constructor."\n )\n', (51804, 52047), False, 'import warnings\n')] |
import os
import sys
import time
import numpy
# Benchmark: numpy dot products over progressively larger strides.
# The arrays stay fixed at 20M doubles; striding by s touches 20M/s elements,
# so the useful FLOPs halve at every step while the memory span stays the
# same -- exposing the cost of non-contiguous access.
a = numpy.full(20000000, 1.0, dtype=numpy.float64)
b = numpy.full(20000000, 1.0, dtype=numpy.float64)


def _timed_strided_dot(stride, flops_label):
    """Time 2000 dot products of every `stride`-th element of a and b."""
    start = time.time()
    for _ in range(2000):
        numpy.dot(a[::stride], b[::stride])
    elapsed = time.time() - start
    print("Time for dot product of 20M elements, stride {0}, {1}M FLOPS, (2000 iterations) = {2}"
          .format(stride, flops_label, elapsed))


# Bug fix: the stride 2..32 cases were stubs (`sum = ...` assigned Ellipsis
# and computed nothing, while also shadowing the builtin `sum`); each case now
# performs the strided dot product its printout reports.
for stride, flops_label in [(1, "40.0"), (2, "20.0"), (4, "10.0"),
                            (8, "5.0"), (16, "2.5"), (32, "1.25")]:
    _timed_strided_dot(stride, flops_label)
| [
"numpy.full",
"numpy.dot",
"time.time"
] | [((51, 97), 'numpy.full', 'numpy.full', (['(20000000)', '(1.0)'], {'dtype': 'numpy.float64'}), '(20000000, 1.0, dtype=numpy.float64)\n', (61, 97), False, 'import numpy\n'), ((104, 150), 'numpy.full', 'numpy.full', (['(20000000)', '(1.0)'], {'dtype': 'numpy.float64'}), '(20000000, 1.0, dtype=numpy.float64)\n', (114, 150), False, 'import numpy\n'), ((173, 184), 'time.time', 'time.time', ([], {}), '()\n', (182, 184), False, 'import time\n'), ((390, 401), 'time.time', 'time.time', ([], {}), '()\n', (399, 401), False, 'import time\n'), ((595, 606), 'time.time', 'time.time', ([], {}), '()\n', (604, 606), False, 'import time\n'), ((800, 811), 'time.time', 'time.time', ([], {}), '()\n', (809, 811), False, 'import time\n'), ((1006, 1017), 'time.time', 'time.time', ([], {}), '()\n', (1015, 1017), False, 'import time\n'), ((1212, 1223), 'time.time', 'time.time', ([], {}), '()\n', (1221, 1223), False, 'import time\n'), ((215, 230), 'numpy.dot', 'numpy.dot', (['a', 'b'], {}), '(a, b)\n', (224, 230), False, 'import numpy\n'), ((240, 251), 'time.time', 'time.time', ([], {}), '()\n', (249, 251), False, 'import time\n'), ((445, 456), 'time.time', 'time.time', ([], {}), '()\n', (454, 456), False, 'import time\n'), ((650, 661), 'time.time', 'time.time', ([], {}), '()\n', (659, 661), False, 'import time\n'), ((855, 866), 'time.time', 'time.time', ([], {}), '()\n', (864, 866), False, 'import time\n'), ((1061, 1072), 'time.time', 'time.time', ([], {}), '()\n', (1070, 1072), False, 'import time\n'), ((1267, 1278), 'time.time', 'time.time', ([], {}), '()\n', (1276, 1278), False, 'import time\n')] |
import sys
sys.path.append('..')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize
import projgrad
from scipy.stats.mstats import gmean
import matplotlib
colors = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']
black = matplotlib.rcParams['axes.labelcolor']
tcellcolor = '#0E1E97'
tcellcoloralt = '#0e7b97'
import plotting
from lib import *
from optparse import OptionParser
# Command-line switch: --talk renders larger figures suited for slide decks.
parser = OptionParser()
parser.add_option("-t", "--talk", action="store_true", dest="talk", default=False,
                  help="optimize figure for talks")
(options, args) = parser.parse_args()
talk = options.talk
# Select the matplotlib style sheet matching the output medium.
if talk:
    plt.style.use('../talk.mplstyle')
else:
    plt.style.use('../paper.mplstyle')
def fcompfull_dC(x, t, alpha, K, dC, delta=0.0):
    """Right-hand side of the coupled T cell / antigen ODE system.

    x = (T, C): T cell and antigen (pMHC input) numbers.
    dC: function called as dC(C, t) giving rhs for dC/dt.
    Returns [dT/dt, dC/dt].
    """
    T, C = x
    # B is the number of bound complexes: the smaller root of the
    # quadratic binding equilibrium between T and C with constant K.
    total = T + C + K
    disc = total ** 2 - 4 * T * C
    B = 0.5 * (total - disc ** 0.5)
    return [alpha * B - delta * T, dC(C, t)]
def make_dnu(x, xt, factor):
    """Build a piecewise-constant antigen-input schedule dC(C, t).

    Args:
        x: relative dose weights per interval; normalized to sum to one.
           A float copy is normalized -- the caller's array is left
           untouched (the previous in-place `x /= np.sum(x)` mutated the
           caller's array and raised on integer arrays).
        xt: interval breakpoints (ascending numpy array); weight x[i] is
            active on the half-open interval (xt[i], xt[i+1]].
        factor: total input scale; interval i injects x[i] * factor per day.

    Returns:
        dC(C, t): rate function; zero for t <= xt[0] or t > xt[-1].
    """
    x = np.asarray(x, dtype=float)
    x = x / np.sum(x)
    dC = lambda C, t: 0 if ((t > xt[-1]) or (t <= xt[0])) else x[xt.searchsorted(t)-1]*factor
    return dC
color_index = [0, 2, 1]
def plot_kinetics(Cfactor, T0, alpha, mu, K, delta, tau, axes=None, lspmhc=':', arrows=True):
    """Plot antigen input, pMHC number and T cell number over 7 days for
    three dosing protocols (Pulse, Constant, Exponential).

    Integrates the T cell / antigen ODE (fcompfull_dC) for each protocol
    via `odeint` (imported from lib) and draws the three trajectories on a
    3-row axes array. Returns the axes.

    NOTE(review): `tau` is accepted but never used in this function.
    # Cfactor presumably is the total antigen input per protocol -- confirm.
    """
    if axes is None:
        figsize = (5.5, 7.0) if talk else (2.75, 3.5)
        fig, axes = plt.subplots(figsize=figsize, nrows=3, sharex=True)
    ts = np.linspace(0, 7, 200)
    T = 4.0
    fold = 5.0
    # Exponential protocol: doses growing 5-fold per day over 4 days.
    dnu = make_dnu(fold**np.arange(1, 5), np.arange(5), Cfactor)
    # Each entry: (label, initial antigen C0, input rate dC(C, t)).
    C0dCs = [('Pulse', 0.0, lambda C, t: Cfactor*np.heaviside(-t+1, 0.5)),
            ('Constant', 0.0, lambda C, t: Cfactor*np.heaviside(-t+T, 0.5)/T),
            ('Exponential', 0.0, lambda C, t: dnu(C, t))]
    T6 = []
    for i, (name, C0, dC) in enumerate(C0dCs):
        color = colors[color_index[i]]
        axes[0].plot(ts, [dC(0.0, t)/1e6 for t in ts], c=color, ls=lspmhc, label=name)
        # Full antigen dynamics: decay -mu*C plus protocol input dC(C, t).
        xs = odeint(fcompfull_dC, [T0, C0], ts, args=(alpha, K, lambda C, t: -mu*C + dC(C, t), delta), max_step=0.001)
        # Record T cell count at day 6 for the annotation arrows below.
        T6.append(xs[ts.searchsorted(6), 0])
        axes[2].plot(ts, xs[:, 0], c=color)
        axes[1].plot(ts, xs[:, 1], ls=lspmhc, c=color)#, label='pMHCs')
    axes[2].set_xlabel('Time in days')
    axes[0].set_ylabel('Antigen input\nin $10^6$/day')
    axes[1].set_ylabel('pMHC\nnumber')
    axes[2].set_ylabel('T cell\nnumber')
    axes[2].set_yscale('log')
    axes[1].set_yscale('log')
    axes[1].set_ylim(70.0)
    axes[0].set_ylim(0.0, 3.0)
    if not talk:
        axes[0].legend()
    if arrows:
        # Annotate the day-6 fold change of Constant/Exponential vs Pulse.
        axes[2].annotate('', xy=(6.0, T6[1]), xytext=(6.0, T6[0]),
                    arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2", color=colors[2]))
        axes[2].annotate('', xy=(6.0, T6[2]), xytext=(6.0, T6[0]),
                    arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2", color=colors[1]))
        axes[2].annotate('%g'%round(T6[1]/T6[0]), xy=(5.75, np.exp(0.5*(np.log(T6[1])+np.log(T6[0])))), va='center', ha='right',
                color=colors[2])
        axes[2].annotate('%g'%round(T6[2]/T6[0]), xy=(6.25, np.exp(0.5*(np.log(T6[1])+np.log(T6[0])))), va='center', ha='left', color=colors[1])
    for ax in axes:
        ax.set_xlim(0, max(ts))
    return axes
def Tday6(x, xt, Cfactor=2e6, T0=1e2, alpha=2.47, mu=3.1,
          K=1e1, delta=0.23, T=4.0, tau=1.0):
    """Fold expansion of the T cell population at day 6 for schedule x.

    x gives relative dose weights on the intervals defined by xt; the
    trajectory is integrated with odeint (from lib) and the day-6 T cell
    count is returned relative to the initial count T0.
    NOTE(review): parameters `T` and `tau` are unused in the body.
    """
    time_grid = np.array(list(xt) + [6.0])
    antigen_rate = make_dnu(x, xt, Cfactor)
    rhs_C = lambda C, t: -mu*C + antigen_rate(C, t)
    trajectory = odeint(fcompfull_dC, [T0, 0.0], time_grid,
                        args=(alpha, K, rhs_C, delta),
                        max_step=0.01, nsteps=1e6)
    return trajectory[-1, 0]/T0
def objective(x, xt, **kwargs):
    """Objective for projgrad: negative day-6 expansion and its numerical
    gradient (finite differences with a relative step)."""
    value = -Tday6(x, xt, **kwargs)
    grad = scipy.optimize.approx_fprime(
        x, lambda xi: -Tday6(xi, xt, **kwargs), epsilon=(x*1e-4+1e-8))
    return value, grad
# Optimize a piecewise-constant dosing schedule with N intervals on days 0-4.
N = 4
xt = np.linspace(0, 4, N+1)
x0 = np.ones(N)/N  # uniform starting schedule (dose fractions sum to one)
def callback(f, x):
    """Progress hook for projgrad.minimize: print the current iterate
    (the objective value f is ignored)."""
    print(x)
# Projected-gradient optimization of the dose fractions on the simplex.
res = projgrad.minimize(objective, x0, args=(xt,), disp=True, nboundupdate=1, callback=callback, algo='slow', maxiters=20)
# Best geometric (exponential) schedule: scalar search over the per-day
# fold change a, with doses proportional to a**i.
N = 4
xt = np.linspace(0, 4, N+1)
exp = scipy.optimize.minimize_scalar(lambda a: -Tday6(a**np.arange(N)/np.sum(a**np.arange(N)), xt),
                                     method='brent', bracket=(0.1, 20.0))
print(exp)
# Figure 4A-C: input, pMHC and T cell kinetics for the three protocols.
fig, axes = plt.subplots(figsize=(1.75, 3.1), nrows=3, sharex=True)
plot_kinetics(Cfactor=2e6, T0=1e2, alpha=2.47, mu=3.1, K=1e1, delta=0.23, tau=0.5, axes=axes, lspmhc='-', arrows=False)
axes[2].set_xticks(np.arange(0, 8, 2))
axes[2].set_yticks(10**np.arange(2, 7, 2))
axes[1].set_yticks(10**np.arange(2, 7, 2))
plotting.label_axes(axes, xy=(-0.65, 0.95), labelstyle='%s', fontweight='bold')
fig.tight_layout(pad=0.1)
fig.savefig('fig4ABC%s.png'%('talk' if talk else ''), dpi=300)
fig.savefig('fig4ABC%s.svg'%('talk' if talk else ''))
# Figure 4D-E: optimal schedule vs the 5-fold/day experimental protocol,
# and the day-6 expansion as a function of the per-day fold change.
fig, axes = plt.subplots(figsize=(1.75, 2.9), nrows=2)
Cfactor = 2e6
T6s = []
ax = axes[0]
protocols = [(res.x, 'Optimal'), (5.0**np.arange(N), 'Experiment')]
colorsh = ['k', colors[1]]
for i, (x, label) in enumerate(protocols):
    x /= np.sum(x)
    T6 = Tday6(x, xt)
    print(T6)
    T6s.append(T6)
    # Step function of the schedule for plotting (-1 flags 'after last dose').
    dC = lambda C, t: -1 if t > xt[-1] else x[xt.searchsorted(t)-1]*(N/4.0)*Cfactor
    ts = np.linspace(1e-3, 6, 1000)
    ax.plot(ts, [dC(0, t) for t in ts], color=colorsh[i],
           label=label)
for T6 in T6s[1:]:
    print('fold expansion %g%%'%round((T6-T6s[0])/T6s[0]*100))
ax.legend(ncol=1, loc='upper left')
ax.set_xlim(0.0)
ax.set_xticks(np.arange(0, 9, 2))
ax.set_ylim(2e3, 7e6)
ax.set_yscale('log')
ax.set_xlabel('Time in days')
ax.set_ylabel('Antigen input\n in 1/day')
ax = axes[1]
# Sweep the per-day fold change; expansion normalized to the optimum -res.fun.
folds = np.logspace(-1.1, 1.1)
ax.plot(folds, [Tday6(fold**np.arange(N)/np.sum(fold**np.arange(N)), xt)/(-res.fun) for fold in folds],
       c=colors[5])
ax.plot([5.0], [Tday6(fold**np.arange(N)/np.sum(fold**np.arange(N)), xt)/(-res.fun) for fold in [5.0]],
       'o', c=colors[1])
ax.set_xscale('log')
ax.set_xticks([0.2, 1, 5.0])
ax.set_xlim(min(folds), max(folds))
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xlabel('Fold change per day')
ax.set_ylabel('Fold expansion\n rel. to optimal')
if not talk:
    plotting.label_axes(axes, labels='DE', xy=(-0.55, 1.0), labelstyle='%s', fontweight='bold')
fig.tight_layout(pad=0.1)
fig.savefig('fig4DE%s.png'%('talk' if talk else ''), dpi=300)
fig.savefig('fig4DE%s.svg'%('talk' if talk else ''))
| [
"numpy.ones",
"numpy.log",
"optparse.OptionParser",
"matplotlib.pyplot.style.use",
"numpy.heaviside",
"plotting.label_axes",
"numpy.sum",
"numpy.linspace",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"projgrad.minimize",
"numpy.logspace",
"sys.path.append",
"matplotlib.pyplot.subpl... | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((440, 454), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (452, 454), False, 'from optparse import OptionParser\n'), ((3822, 3846), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(N + 1)'], {}), '(0, 4, N + 1)\n', (3833, 3846), True, 'import numpy as np\n'), ((3902, 4022), 'projgrad.minimize', 'projgrad.minimize', (['objective', 'x0'], {'args': '(xt,)', 'disp': '(True)', 'nboundupdate': '(1)', 'callback': 'callback', 'algo': '"""slow"""', 'maxiters': '(20)'}), "(objective, x0, args=(xt,), disp=True, nboundupdate=1,\n callback=callback, algo='slow', maxiters=20)\n", (3919, 4022), False, 'import projgrad\n'), ((4031, 4055), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(N + 1)'], {}), '(0, 4, N + 1)\n', (4042, 4055), True, 'import numpy as np\n'), ((4252, 4307), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(1.75, 3.1)', 'nrows': '(3)', 'sharex': '(True)'}), '(figsize=(1.75, 3.1), nrows=3, sharex=True)\n', (4264, 4307), True, 'import matplotlib.pyplot as plt\n'), ((4553, 4632), 'plotting.label_axes', 'plotting.label_axes', (['axes'], {'xy': '(-0.65, 0.95)', 'labelstyle': '"""%s"""', 'fontweight': '"""bold"""'}), "(axes, xy=(-0.65, 0.95), labelstyle='%s', fontweight='bold')\n", (4572, 4632), False, 'import plotting\n'), ((4791, 4833), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(1.75, 2.9)', 'nrows': '(2)'}), '(figsize=(1.75, 2.9), nrows=2)\n', (4803, 4833), True, 'import matplotlib.pyplot as plt\n'), ((5592, 5614), 'numpy.logspace', 'np.logspace', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (5603, 5614), True, 'import numpy as np\n'), ((661, 694), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""../talk.mplstyle"""'], {}), "('../talk.mplstyle')\n", (674, 694), True, 'import matplotlib.pyplot as plt\n'), ((705, 739), 'matplotlib.pyplot.style.use', 
'plt.style.use', (['"""../paper.mplstyle"""'], {}), "('../paper.mplstyle')\n", (718, 739), True, 'import matplotlib.pyplot as plt\n'), ((987, 996), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (993, 996), True, 'import numpy as np\n'), ((1381, 1403), 'numpy.linspace', 'np.linspace', (['(0)', '(7)', '(200)'], {}), '(0, 7, 200)\n', (1392, 1403), True, 'import numpy as np\n'), ((3421, 3433), 'numpy.array', 'np.array', (['ts'], {}), '(ts)\n', (3429, 3433), True, 'import numpy as np\n'), ((3850, 3860), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (3857, 3860), True, 'import numpy as np\n'), ((4447, 4465), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(2)'], {}), '(0, 8, 2)\n', (4456, 4465), True, 'import numpy as np\n'), ((5017, 5026), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (5023, 5026), True, 'import numpy as np\n'), ((5175, 5202), 'numpy.linspace', 'np.linspace', (['(0.001)', '(6)', '(1000)'], {}), '(0.001, 6, 1000)\n', (5186, 5202), True, 'import numpy as np\n'), ((5435, 5453), 'numpy.arange', 'np.arange', (['(0)', '(9)', '(2)'], {}), '(0, 9, 2)\n', (5444, 5453), True, 'import numpy as np\n'), ((5991, 6026), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (6024, 6026), False, 'import matplotlib\n'), ((6133, 6228), 'plotting.label_axes', 'plotting.label_axes', (['axes'], {'labels': '"""DE"""', 'xy': '(-0.55, 1.0)', 'labelstyle': '"""%s"""', 'fontweight': '"""bold"""'}), "(axes, labels='DE', xy=(-0.55, 1.0), labelstyle='%s',\n fontweight='bold')\n", (6152, 6228), False, 'import plotting\n'), ((1320, 1371), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'nrows': '(3)', 'sharex': '(True)'}), '(figsize=figsize, nrows=3, sharex=True)\n', (1332, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1485), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1482, 1485), True, 'import numpy as np\n'), ((4490, 4508), 'numpy.arange', 'np.arange', (['(2)', '(7)', '(2)'], {}), '(2, 7, 2)\n', 
(4499, 4508), True, 'import numpy as np\n'), ((4533, 4551), 'numpy.arange', 'np.arange', (['(2)', '(7)', '(2)'], {}), '(2, 7, 2)\n', (4542, 4551), True, 'import numpy as np\n'), ((1456, 1471), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1465, 1471), True, 'import numpy as np\n'), ((4909, 4921), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4918, 4921), True, 'import numpy as np\n'), ((1545, 1570), 'numpy.heaviside', 'np.heaviside', (['(-t + 1)', '(0.5)'], {}), '(-t + 1, 0.5)\n', (1557, 1570), True, 'import numpy as np\n'), ((1623, 1648), 'numpy.heaviside', 'np.heaviside', (['(-t + T)', '(0.5)'], {}), '(-t + T, 0.5)\n', (1635, 1648), True, 'import numpy as np\n'), ((4111, 4123), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4120, 4123), True, 'import numpy as np\n'), ((5643, 5655), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5652, 5655), True, 'import numpy as np\n'), ((5768, 5780), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5777, 5780), True, 'import numpy as np\n'), ((4134, 4146), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4143, 4146), True, 'import numpy as np\n'), ((5669, 5681), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5678, 5681), True, 'import numpy as np\n'), ((5794, 5806), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5803, 5806), True, 'import numpy as np\n'), ((2947, 2960), 'numpy.log', 'np.log', (['T6[1]'], {}), '(T6[1])\n', (2953, 2960), True, 'import numpy as np\n'), ((2961, 2974), 'numpy.log', 'np.log', (['T6[0]'], {}), '(T6[0])\n', (2967, 2974), True, 'import numpy as np\n'), ((3118, 3131), 'numpy.log', 'np.log', (['T6[1]'], {}), '(T6[1])\n', (3124, 3131), True, 'import numpy as np\n'), ((3132, 3145), 'numpy.log', 'np.log', (['T6[0]'], {}), '(T6[0])\n', (3138, 3145), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Tests for qiskit.Result"""
import unittest
from numpy import array_equal
import qiskit
from qiskit.wrapper import execute, register, available_backends
from .common import QiskitTestCase, requires_qe_access
class TestQiskitResult(QiskitTestCase):
    """Exercise the public API of qiskit.Result."""

    def setUp(self):
        qreg = qiskit.QuantumRegister(1)
        creg = qiskit.ClassicalRegister(1)
        self._qc1 = qiskit.QuantumCircuit(qreg, creg, name='qc1')
        self._qc2 = qiskit.QuantumCircuit(qreg, creg, name='qc2')
        self._qc1.measure(qreg[0], creg[0])
        self._qc2.x(qreg[0])
        self._qc2.measure(qreg[0], creg[0])
        self.backend = 'local_qasm_simulator'
        self._result1 = execute(self._qc1, self.backend).result()
        self._result2 = execute(self._qc2, self.backend).result()

    def test_local_result_fields(self):
        """Test components of a result from a local simulator."""
        result = self._result1
        self.assertIn('qasm_simulator', result.backend_name)
        self.assertIsInstance(result.job_id, str)
        self.assertEqual(result.status, 'COMPLETED')
        self.assertEqual(result.circuit_statuses(), ['DONE'])

    @requires_qe_access
    def test_remote_result_fields(self, qe_token, qe_url):
        """Test components of a result from a remote simulator."""
        register(qe_token, qe_url)
        backend_name = available_backends({'local': False, 'simulator': True})[0]
        remote_result = execute(self._qc1, backend_name).result()
        self.assertEqual(remote_result.backend_name, backend_name)
        self.assertIsInstance(remote_result.job_id, str)
        self.assertEqual(remote_result.status, 'COMPLETED')
        self.assertEqual(remote_result.circuit_statuses(), ['DONE'])

    def test_qubitpol(self):
        """Test the results of the qubitpol function in Results.

        Do two 2Q circuits: on 1st do nothing, and on 2nd do X on the first qubit.
        """
        qreg = qiskit.QuantumRegister(2)
        creg = qiskit.ClassicalRegister(2)
        circuit_idle = qiskit.QuantumCircuit(qreg, creg)
        circuit_x = qiskit.QuantumCircuit(qreg, creg)
        circuit_x.x(qreg[0])
        circuit_idle.measure(qreg, creg)
        circuit_x.measure(qreg, creg)
        circuits = [circuit_idle, circuit_x]
        xvals_dict = {circuits[0].name: 0, circuits[1].name: 1}
        result = execute(circuits, self.backend).result()
        yvals, xvals = result.get_qubitpol_vs_xval(2, xvals_dict=xvals_dict)
        # Qubit 0 flips polarization in the second circuit, qubit 1 never does.
        self.assertTrue(array_equal(yvals, [[-1, -1], [1, -1]]))
        self.assertTrue(array_equal(xvals, [0, 1]))

    def test_average_data(self):
        """Test average_data."""
        qreg = qiskit.QuantumRegister(2)
        creg = qiskit.ClassicalRegister(2)
        bell = qiskit.QuantumCircuit(qreg, creg, name="qc")
        bell.h(qreg[0])
        bell.cx(qreg[0], qreg[1])
        bell.measure(qreg[0], creg[0])
        bell.measure(qreg[1], creg[1])
        shots = 10000
        result = execute(bell, self.backend, shots=shots).result()
        # <ZZ>, <ZI> and <IZ> expectation values of a Bell state.
        mean_zz = result.average_data("qc", {"00": 1, "11": 1, "01": -1, "10": -1})
        mean_zi = result.average_data("qc", {"00": 1, "11": -1, "01": 1, "10": -1})
        mean_iz = result.average_data("qc", {"00": 1, "11": -1, "01": -1, "10": 1})
        self.assertAlmostEqual(mean_zz, 1, places=1)
        self.assertAlmostEqual(mean_zi, 0, places=1)
        self.assertAlmostEqual(mean_iz, 0, places=1)

    def test_extend_result(self):
        """Test extending a Result instance is possible."""
        result_a, result_b = self._result1, self._result2
        counts1 = result_a.get_counts(self._qc1.name)
        counts2 = result_b.get_counts(self._qc2.name)
        result_a += result_b  # extend a result
        self.assertEqual(
            [result_a.get_counts(self._qc1.name),
             result_b.get_counts(self._qc2.name)],
            [counts1, counts2])

    def test_combine_results(self):
        """Test combining results in a new Result instance is possible."""
        result_a, result_b = self._result1, self._result2
        counts1 = result_a.get_counts(self._qc1.name)
        counts2 = result_b.get_counts(self._qc2.name)
        combined = result_a + result_b  # combine results
        self.assertEqual(
            [combined.get_counts(self._qc1.name),
             combined.get_counts(self._qc2.name)],
            [counts1, counts2])
        self.assertIsNot(combined, result_a)
        self.assertIsNot(combined, result_b)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"qiskit.wrapper.available_backends",
"qiskit.ClassicalRegister",
"qiskit.wrapper.register",
"numpy.array_equal",
"qiskit.wrapper.execute",
"unittest.main",
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister"
] | [((4808, 4834), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4821, 4834), False, 'import unittest\n'), ((518, 543), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['(1)'], {}), '(1)\n', (540, 543), False, 'import qiskit\n'), ((557, 584), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['(1)'], {}), '(1)\n', (581, 584), False, 'import qiskit\n'), ((605, 646), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['qr', 'cr'], {'name': '"""qc1"""'}), "(qr, cr, name='qc1')\n", (626, 646), False, 'import qiskit\n'), ((667, 708), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['qr', 'cr'], {'name': '"""qc2"""'}), "(qr, cr, name='qc2')\n", (688, 708), False, 'import qiskit\n'), ((1515, 1541), 'qiskit.wrapper.register', 'register', (['qe_token', 'qe_url'], {}), '(qe_token, qe_url)\n', (1523, 1541), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((2153, 2178), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['(2)'], {}), '(2)\n', (2175, 2178), False, 'import qiskit\n'), ((2192, 2219), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['(2)'], {}), '(2)\n', (2216, 2219), False, 'import qiskit\n'), ((2234, 2263), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (2255, 2263), False, 'import qiskit\n'), ((2278, 2307), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (2299, 2307), False, 'import qiskit\n'), ((2811, 2836), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['(2)'], {}), '(2)\n', (2833, 2836), False, 'import qiskit\n'), ((2850, 2877), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['(2)'], {}), '(2)\n', (2874, 2877), False, 'import qiskit\n'), ((2891, 2931), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['qr', 'cr'], {'name': '"""qc"""'}), "(qr, cr, name='qc')\n", (2912, 2931), False, 'import qiskit\n'), ((1567, 1622), 'qiskit.wrapper.available_backends', 'available_backends', 
(["{'local': False, 'simulator': True}"], {}), "({'local': False, 'simulator': True})\n", (1585, 1622), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((2638, 2677), 'numpy.array_equal', 'array_equal', (['yvals', '[[-1, -1], [1, -1]]'], {}), '(yvals, [[-1, -1], [1, -1]])\n', (2649, 2677), False, 'from numpy import array_equal\n'), ((2703, 2729), 'numpy.array_equal', 'array_equal', (['xvals', '[0, 1]'], {}), '(xvals, [0, 1])\n', (2714, 2729), False, 'from numpy import array_equal\n'), ((887, 919), 'qiskit.wrapper.execute', 'execute', (['self._qc1', 'self.backend'], {}), '(self._qc1, self.backend)\n', (894, 919), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((953, 985), 'qiskit.wrapper.execute', 'execute', (['self._qc2', 'self.backend'], {}), '(self._qc2, self.backend)\n', (960, 985), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((1650, 1684), 'qiskit.wrapper.execute', 'execute', (['self._qc1', 'remote_backend'], {}), '(self._qc1, remote_backend)\n', (1657, 1684), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((2496, 2527), 'qiskit.wrapper.execute', 'execute', (['circuits', 'self.backend'], {}), '(circuits, self.backend)\n', (2503, 2527), False, 'from qiskit.wrapper import execute, register, available_backends\n'), ((3085, 3123), 'qiskit.wrapper.execute', 'execute', (['qc', 'self.backend'], {'shots': 'shots'}), '(qc, self.backend, shots=shots)\n', (3092, 3123), False, 'from qiskit.wrapper import execute, register, available_backends\n')] |
# -*- coding: utf-8 -*-
"""Augmentation methods.
- Author: Curt-Park
- Email: <EMAIL>
- Reference:
https://arxiv.org/pdf/1805.09501.pdf
https://github.com/kakaobrain/fast-autoaugment/
"""
from abc import ABC
from itertools import chain
import random
from typing import List, Tuple
from PIL.Image import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from src.augmentation.transforms import transforms_info
from src.utils import get_rand_bbox_coord, to_onehot
class Augmentation(ABC):
    """Base class shared by all augmentation policies.

    Holds the transform registry and the number of discrete magnitude
    levels used to scale each transform's strength.
    """

    def __init__(self, n_level: int = 10) -> None:
        """Initialize."""
        self.transforms_info = transforms_info()
        self.n_level = n_level

    def _apply_augment(self, img: Image, name: str, level: int) -> Image:
        """Apply the named transform at the given magnitude level.

        Args:
            img (Image): an image to augment
            level (int): magnitude of augmentation in [0, n_level)

        returns:
            Image: an augmented image
        """
        assert 0 <= level < self.n_level
        augment_fn, low, high = self.transforms_info[name]
        # Linearly map the discrete level onto the transform's [low, high) range.
        magnitude = level * (high - low) / self.n_level + low
        return augment_fn(img.copy(), magnitude)
class SequentialAugmentation(Augmentation):
    """Apply a fixed list of (name, probability, level) policies in order."""

    def __init__(
        self,
        policies: List[Tuple[str, float, int]],
        n_level: int = 10,
    ) -> None:
        """Initialize."""
        super(SequentialAugmentation, self).__init__(n_level)
        self.policies = policies

    def __call__(self, img: Image) -> Image:
        """Run augmentations."""
        for name, pr, level in self.policies:
            # Each policy fires independently with probability pr.
            if random.random() <= pr:
                img = self._apply_augment(img, name, level)
        return img
class AutoAugmentation(Augmentation):
    """Auto augmentation class.

    References:
        https://arxiv.org/pdf/1805.09501.pdf
    """

    def __init__(
        self,
        policies: List[List[Tuple[str, float, int]]],
        n_select: int = 1,
        n_level: int = 10,
    ) -> None:
        """Initialize."""
        super(AutoAugmentation, self).__init__(n_level)
        self.policies = policies
        self.n_select = n_select

    def __call__(self, img: Image) -> Image:
        """Run augmentations."""
        # Draw n_select sub-policies, then apply their ops one after another.
        sampled = random.sample(self.policies, k=self.n_select)
        for name, pr, level in chain.from_iterable(sampled):
            if random.random() <= pr:
                img = self._apply_augment(img, name, level)
        return img
class RandAugmentation(Augmentation):
    """Random augmentation class.

    Applies `n_select` transforms chosen uniformly at random, each at a
    fixed magnitude `level`; when `level` is not a valid int level, a fresh
    random magnitude is drawn per transform instead.

    References:
        RandAugment: Practical automated data augmentation with a reduced search space
        (https://arxiv.org/abs/1909.13719)
    """

    def __init__(
        self,
        transforms: List[str],
        n_select: int = 2,
        level: int = 14,
        n_level: int = 31,
    ) -> None:
        """Initialize."""
        super(RandAugmentation, self).__init__(n_level)
        self.n_select = n_select
        # Keep the magnitude only when it is a valid level in [0, n_level);
        # otherwise mark it as unset (None) for random sampling in __call__.
        self.level = level if isinstance(level, int) and 0 <= level < n_level else None
        self.transforms = transforms

    def __call__(self, img: Image) -> Image:
        """Run augmentations."""
        chosen_transforms = random.sample(self.transforms, k=self.n_select)
        for transf in chosen_transforms:
            # Compare against None explicitly: the previous truthiness test
            # (`if self.level`) wrongly discarded the valid magnitude 0 and
            # silently fell back to a random level.
            level = self.level if self.level is not None else random.randint(0, self.n_level - 1)
            img = self._apply_augment(img, transf, level)
        return img
class CutMix(Dataset):
    """A Dataset class for CutMix.

    Wraps `dataset` and, with probability `prob`, pastes a random box from
    another random sample into each image, mixing the one-hot labels by
    the box-area ratio.

    References:
        https://github.com/ildoonet/cutmix
    """

    def __init__(
        self, dataset: Dataset, num_classes: int, beta: float = 1.0, prob: float = 0.5
    ) -> None:
        # beta: parameter of the Beta distribution used for the box size.
        # prob: probability of applying CutMix to a sample.
        self.dataset = dataset
        self.num_classes = num_classes
        self.beta = beta
        self.prob = prob

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert image and label to a cutmix image and label.

        Combine two training samples by cutting and pasting two images along a random box.
        The ground truth label is also "mixed" via the combination ratio.
        The combination ratio is sampled from a beta distribution.
        """
        # NOTE(review): assumes dataset yields (CHW tensor, int label) -- confirm.
        img, label = self.dataset[index]  # label: int
        label = torch.tensor([label], dtype=torch.long)
        label_onehot = to_onehot(label, self.num_classes)
        # sampling the length ratio of random box to the image
        len_ratio = np.sqrt(np.random.beta(self.beta, self.beta))
        # Skip mixing with probability 1 - prob, or when the box is negligible.
        if random.random() > self.prob or len_ratio < 1e-3:
            return img, label_onehot.squeeze_(0)
        # Spatial extent taken from the last two tensor dimensions.
        w, h = img.size()[-2], img.size()[-1]
        (x0, y0), (x1, y1) = get_rand_bbox_coord(w, h, len_ratio)
        # compute the combination ratio
        comb_ratio = (x1 - x0) * (y1 - y0) / (w * h)
        rand_ind = np.random.randint(len(self))
        rand_img, rand_label = self.dataset[rand_ind]
        rand_label = torch.tensor([rand_label], dtype=torch.long)
        # In-place paste: mutates the tensor returned by the wrapped dataset.
        img[:, x0:x1, y0:y1] = rand_img[:, x0:x1, y0:y1]
        label_onehot = (1 - comb_ratio) * label_onehot + comb_ratio * to_onehot(
            rand_label, self.num_classes
        )
        return img, label_onehot.squeeze_(0)

    def __len__(self) -> int:
        return len(self.dataset)
| [
"random.sample",
"src.augmentation.transforms.transforms_info",
"src.utils.to_onehot",
"numpy.random.beta",
"itertools.chain.from_iterable",
"torch.tensor",
"random.random",
"src.utils.get_rand_bbox_coord",
"random.randint"
] | [((694, 711), 'src.augmentation.transforms.transforms_info', 'transforms_info', ([], {}), '()\n', (709, 711), False, 'from src.augmentation.transforms import transforms_info\n'), ((2385, 2430), 'random.sample', 'random.sample', (['self.policies'], {'k': 'self.n_select'}), '(self.policies, k=self.n_select)\n', (2398, 2430), False, 'import random\n'), ((2462, 2498), 'itertools.chain.from_iterable', 'chain.from_iterable', (['chosen_policies'], {}), '(chosen_policies)\n', (2481, 2498), False, 'from itertools import chain\n'), ((3368, 3415), 'random.sample', 'random.sample', (['self.transforms'], {'k': 'self.n_select'}), '(self.transforms, k=self.n_select)\n', (3381, 3415), False, 'import random\n'), ((4447, 4486), 'torch.tensor', 'torch.tensor', (['[label]'], {'dtype': 'torch.long'}), '([label], dtype=torch.long)\n', (4459, 4486), False, 'import torch\n'), ((4510, 4544), 'src.utils.to_onehot', 'to_onehot', (['label', 'self.num_classes'], {}), '(label, self.num_classes)\n', (4519, 4544), False, 'from src.utils import get_rand_bbox_coord, to_onehot\n'), ((4860, 4896), 'src.utils.get_rand_bbox_coord', 'get_rand_bbox_coord', (['w', 'h', 'len_ratio'], {}), '(w, h, len_ratio)\n', (4879, 4896), False, 'from src.utils import get_rand_bbox_coord, to_onehot\n'), ((5114, 5158), 'torch.tensor', 'torch.tensor', (['[rand_label]'], {'dtype': 'torch.long'}), '([rand_label], dtype=torch.long)\n', (5126, 5158), False, 'import torch\n'), ((4636, 4672), 'numpy.random.beta', 'np.random.beta', (['self.beta', 'self.beta'], {}), '(self.beta, self.beta)\n', (4650, 4672), True, 'import numpy as np\n'), ((1711, 1726), 'random.random', 'random.random', ([], {}), '()\n', (1724, 1726), False, 'import random\n'), ((2515, 2530), 'random.random', 'random.random', ([], {}), '()\n', (2528, 2530), False, 'import random\n'), ((3507, 3542), 'random.randint', 'random.randint', (['(0)', '(self.n_level - 1)'], {}), '(0, self.n_level - 1)\n', (3521, 3542), False, 'import random\n'), ((4686, 4701), 
'random.random', 'random.random', ([], {}), '()\n', (4699, 4701), False, 'import random\n'), ((5286, 5325), 'src.utils.to_onehot', 'to_onehot', (['rand_label', 'self.num_classes'], {}), '(rand_label, self.num_classes)\n', (5295, 5325), False, 'from src.utils import get_rand_bbox_coord, to_onehot\n')] |
# Author: <NAME>, https://users.soe.ucsc.edu/~cicekm/
from .InputEstimatorABC import InputEstimatorABC
from .FaceDetectors import CVFaceDetector
from .LandmarkDetectors import LandmarkDetector
from ...Paths import CV2Res10SSD_frozen_face_model_path
from abc import ABC, abstractmethod
import numpy as np, math
from pykalman import KalmanFilter
import cv2
class HeadPoseEstimatorABC(InputEstimatorABC):
    """Template for estimators that derive a 3D head pose from a frame.

    Subclasses supply calculateHeadPose/_calculateHeadPoseWithAnnotations;
    the InputEstimatorABC interface is implemented by delegation.
    """

    def __init__(self, faceDetector = None, landmarkDetector = None, poseCalculator = None, *args, **kwargs):
        # Latest estimated pose (3-vector); updated by subclasses.
        self._headPose3D = np.zeros((3,))
        self._faceDetector = faceDetector
        self._landmarkDetector = landmarkDetector
        self._poseCalculator = poseCalculator

    @abstractmethod
    def calculateHeadPose(self, frame):
        raise NotImplementedError

    @abstractmethod
    def _calculateHeadPoseWithAnnotations(self, frame):
        raise NotImplementedError

    def estimateInputValues(self, frame):
        """Delegate to the subclass pose computation."""
        return self.calculateHeadPose(frame)

    def estimateInputValuesWithAnnotations(self, frame):
        """Delegate to the annotated pose computation."""
        return self._calculateHeadPoseWithAnnotations(frame)

    @property
    def inputValues(self):
        return self._headPose3D

    def returns3D(self):
        # The head pose is a 3-vector, so the estimate is 3D.
        return True
# The code is derived from the following repository:
# https://github.com/yinguobing/head-pose-estimation
class PoseCalculatorABC(ABC):
    """Turns facial landmarks into a head pose and can project a 3D
    annotation box back onto the image for visualization."""

    @staticmethod
    def _get_points(rear_w, rear_h, rear_depth, front_w, front_h, front_depth):
        """Return the 10 corners of the annotation box as a float32 (10, 3)
        array: a closed rear rectangle followed by a closed front one."""
        corner_signs = [(-1, -1), (-1, 1), (1, 1), (1, -1), (-1, -1)]
        point_3d = [(sx * rear_w, sy * rear_h, rear_depth) for sx, sy in corner_signs]
        point_3d += [(sx * front_w, sy * front_h, front_depth) for sx, sy in corner_signs]
        return np.array(point_3d, dtype='float32').reshape(-1, 3)

    @staticmethod
    def _get_3d_points(rear_size = 7.5, rear_depth = 0, front_size = 10.0, front_depth = 10.0):
        """Annotation box with square rear and front faces. (Sizes are
        passed negated, as before; the corner set is unchanged by symmetry.)"""
        return PoseCalculatorABC._get_points(-rear_size, -rear_size, rear_depth, -front_size, -front_size, front_depth)

    def __init__(self, *args, **kwargs):
        self._pose = np.zeros((3,))
        self._front_depth = 100
        # Box used for drawing. (The original built a default box first and
        # immediately overwrote it; that dead assignment is removed.)
        self._rectCorners3D = self._get_3d_points(rear_size = 50, rear_depth = 0,
                            front_size = 50, front_depth = self._front_depth)
        self._projectionPoints = None
        super().__init__(*args, **kwargs)

    @abstractmethod
    def calculatePose(self, shape):
        """Compute the pose from landmark coordinates; subclass-specific."""
        raise NotImplementedError

    def calculateProjectionPoints(self, shape, recalculatePose = False):
        """Project the 3D annotation box into image coordinates.

        Uses the current rotation/translation vectors (recomputed first when
        recalculatePose is True). Relies on _rotation_vector,
        _translation_vector, _camera_matrix and _dist_coeffs, which are
        provided by subclasses.
        """
        if recalculatePose:
            self.calculatePose(shape)
        if not (self._rotation_vector is None or self._translation_vector is None):
            point_2d, _ = cv2.projectPoints(self._rectCorners3D,
                        self._rotation_vector, self._translation_vector,
                        self._camera_matrix, self._dist_coeffs)
            self._projectionPoints = np.int32(point_2d.reshape(-1, 2))
        return self._projectionPoints

    @property
    def pose(self):
        """Latest computed pose vector."""
        return self._pose
class YinsKalmanFilteredHeadPoseCalculator(PoseCalculatorABC):
    """Pose calculator: PnP fit of 68 landmarks to a 3D face model, with a
    per-component Kalman filter smoothing the 6-DOF pose over time."""

    @staticmethod
    def _getCameraMatrix(size):
        """Build the camera intrinsics matrix for a frame of `size`
        (width, height), rescaling a fixed 3840-px-wide calibration."""
        scale = 3840/size[0]
        # Calibration constants (focal length and principal point) measured
        # at 3840-px width; both are rescaled to the actual frame width.
        focal_length = [2667.497359647048143, 2667.497359647048143]
        focal_length = [l/scale for l in focal_length]
        camera_center = (1991.766193951624246, 1046.480313913574491)
        camera_center = [l/scale for l in camera_center]
        camera_matrix = np.array(
            [[focal_length[0], 0, camera_center[0]],
            [0, focal_length[1], camera_center[1]],
            [0, 0, 1]], dtype="double")
        return camera_matrix

    @staticmethod
    def __get_full_model_points(filename):
        """Get all 68 3D model points from file"""
        raw_values = []
        with open(filename) as file:
            for line in file:
                raw_values.append(line)
        # File stores x, y, z blocks sequentially; reshape to (68, 3).
        model_points = np.array(raw_values, dtype=np.float32)
        model_points = np.reshape(model_points, (3, -1)).T
        return model_points

    def __get_kalman_filter(self):
        """Create one scalar KalmanFilter per pose component (3 translation,
        3 rotation), plus the running mean/covariance state."""
        w, h = 1920, 1080
        # Initial filtered means and covariances per component; rotation
        # components start with a tighter (0.1 degree) covariance.
        self._mf = [0, 0, 0, 0, 0, 0]
        self._cf = [0.001, 0.001, 0.001, math.pi/1800, math.pi/1800, math.pi/1800]
        #self._cf = 6*[[0.001, 0.001, 0.001, math.pi/1800, math.pi/1800, math.pi/1800]]
        self._kf = []
        for m, c in zip(self._mf, self._cf):
            self._kf.append(KalmanFilter(initial_state_mean=m, initial_state_covariance=c))
        #self._mf = [[m] for m in self._mf]
        return self._kf

    def __init__(self, face_model_path = None, inputFramesize = (1920, 1080), *args, **kwargs):
        """Load the 3D face model and set up intrinsics and Kalman filters.

        face_model_path: text file with the 68-point 3D face model; falls
        back to the bundled CV2Res10SSD model when None.
        """
        super().__init__(*args, **kwargs)
        if face_model_path == None:
            face_model_path = CV2Res10SSD_frozen_face_model_path
        self._faceModelPoints = self.__get_full_model_points(face_model_path)
        self._inputFramesize = inputFramesize
        self._front_depth = 100
        self._rectCorners3D = self._get_3d_points(rear_size = 80, rear_depth = 0,
                            front_size = 10, front_depth = self._front_depth)
        # Camera internals
        self._camera_matrix = self._getCameraMatrix(inputFramesize)
        # Lens distortion coefficients from the same calibration.
        self._dist_coeffs = np.array([[0.2562583722261407293], [-0.5884400171468063823],
                                    [0.001658348839202715592], [-0.0006434617243149612104]
                                    ,[0.3660073010818283845]])
        # Initial extrinsic guess used (and updated) by solvePnP.
        self._rotation_vector = np.array([[-0.0], [0.0], [-0.0]])
        self._translation_vector = np.array([[0.0], [0.0], [550.0]])
        self._kf = self.__get_kalman_filter()

    def solve_pose_by_68_points(self, image_points):
        """Fit rotation/translation to the 68 2D landmarks via PnP, warm-
        starting from (and storing back to) the previous solution."""
        image_points = image_points.astype('float32')
        (_, rotation_vector, translation_vector) = \
            cv2.solvePnP(self._faceModelPoints, image_points,
                        self._camera_matrix, self._dist_coeffs,
                        rvec=self._rotation_vector,
                        tvec=self._translation_vector, useExtrinsicGuess=True)
        self._rotation_vector = rotation_vector
        self._translation_vector = translation_vector
        return (rotation_vector, translation_vector)

    def calculatePose(self, shape):
        """PnP-fit the landmarks, then Kalman-smooth each of the six pose
        components; returns the filtered [tx, ty, tz, rx, ry, rz] pose."""
        pose = self.solve_pose_by_68_points(shape)
        self._pose = np.concatenate((self._translation_vector,
                                        self._rotation_vector), 0)
        # Update each scalar filter with the raw pose component.
        for i, kf in enumerate(self._kf):
            self._mf[i], self._cf[i] = self._kf[i].filter_update(self._mf[i],
                                                                self._cf[i],
                                                                self._pose[i])
        # Overwrite the pose with the filtered means and split it back into
        # translation (first 3) and rotation (last 3) for the next PnP warm start.
        self._pose[:] = self._mf
        self._rotation_vector = self._pose[3:]
        self._translation_vector = self._pose[:3]
        return self._pose
class PoseEstimator(HeadPoseEstimatorABC):
def __init__(self, faceDetector = None, landmarkDetector = None,
poseCalculator = None, face_landmark_path = None,
inputFramesize = (1920, 1080), *args, **kwargs):
if landmarkDetector == None:
if faceDetector == None:
faceDetector = CVFaceDetector(squaringFaceBox = True)
landmarkDetector = LandmarkDetector(faceDetector)
if poseCalculator == None:
poseCalculator = YinsKalmanFilteredHeadPoseCalculator(inputFramesize = inputFramesize)
self._headPose3D = np.zeros((3,))
super().__init__(faceDetector, landmarkDetector, poseCalculator, *args, **kwargs)
def calculateHeadPose(self, frame):
self._landmarks = self._landmarkDetector.detectFacialLandmarks(frame)
if len(self._landmarks) == 0:
return self._headPose3D
else:
self._headPose3D = self._poseCalculator.calculatePose(self._landmarks)
return self._headPose3D
def _calculateHeadPoseWithAnnotations(self, frame):
self._headPose3D = self.calculateHeadPose(frame)
self._pPoints = self._poseCalculator.calculateProjectionPoints(self._landmarks)
return self._headPose3D, self._pPoints, self._landmarks
@property
def headPose(self):
return self._headPose3D
@property
def poseCalculator(self):
return self._poseCalculator
class MuratcansHeadGazeCalculator(YinsKalmanFilteredHeadPoseCalculator):
def __init__(self, face_model_path = None, inputFramesize = (1920, 1080), *args, **kwargs):
super().__init__(face_model_path, inputFramesize, *args, **kwargs)
self._front_depth = 700
self._rectCorners3D = self._get_3d_points(rear_size = 40, rear_depth = 0,
front_size = 40,
front_depth = self._front_depth)
self._objectPointsVec = [self._faceModelPoints]
self._imagePointsVec = []
def calibrateCamera(self, imagePoints):
ip = imagePoints.astype('float32')
#print(imagePoints)
self._imagePointsVec.append(ip)
n = 7
if len(self._imagePointsVec) < n+1:
return
self._imagePointsVec.pop(0)
flags=(cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_PRINCIPAL_POINT + cv2.SOLVEPNP_ITERATIVE)
retval, cameraMatrix, distCoeffs, rvecs, tvecs = \
cv2.calibrateCamera(self._objectPointsVec, self._imagePointsVec,
(1920, 1080), self._camera_matrix,
self._dist_coeffs, flags=flags)
self._camera_matrix, self._dist_coeffs = cameraMatrix, distCoeffs
self._rotation_vector, self._translation_vector = rvecs[0], tvecs[0]
def calculateProjectionPointsAsGaze(self, shape, recalculatePose = False):
if recalculatePose:
self.calculatePose(shape)
if not (self._rotation_vector is None or self._translation_vector is None):
self._front_depth = np.linalg.norm(self._translation_vector)
self._rectCorners3D = \
self._get_3d_points(rear_size = 0, rear_depth = 55, front_size = 0,
front_depth = 35*self._front_depth)
rv = self._rotation_vector.copy()
rv *= -1
rv[0] -= 0.2
point_2d, _ = cv2.projectPoints(self._rectCorners3D, rv,
self._translation_vector,
self._camera_matrix,
self._dist_coeffs)
self._projectionPoints = np.int32(point_2d.reshape(-1, 2))
return self._projectionPoints
def calculateHeadGazeProjection(self):
output = 5*(self.get3DNose()[-1] -
self.get3DScreen()[3])
output[0] *= -1
return output[:-1]
def calculateHeadGazeWithProjectionPoints(self, shape):
self._pose = self.calculatePose(shape)
self._projectionPoints = self.calculateProjectionPointsAsGaze(shape)
output = self.calculateHeadGazeProjection()
return output, self._projectionPoints
def calculateReverseHeadGazeWithProjectionPoints(self, shape):
pose = self.solve_pose_by_68_points(shape)
output = self.calculateHeadGazeProjection()
output[0] = self._inputFramesize[0]-output[0]
self._rotation_vector[0, 0] *= -1
self._rotation_vector[1, 0] *= -1
#self._translation_vector[0, 0] *= -1
rv = np.array([math.degrees(t[0]) for t in self._rotation_vector])
self._pose = np.concatenate((self._translation_vector[:,0], rv), 0)
self._projectionPoints = self.calculateProjectionPointsAsGaze(shape)
return output, self._projectionPoints
def translateTo3D(self, points):
rotation_mat, _ = cv2.Rodrigues(self._rotation_vector)
project_mat = cv2.hconcat((rotation_mat, self._translation_vector))
project_mat = np.concatenate((project_mat, np.zeros((1, 4))), 0)
project_mat[-1, -1] = 1
points_ = np.concatenate((points, np.ones((points.shape[0], 1))), 1)
points3d = np.matmul(project_mat, points_.T).T[:, :-1]
return points3d
def updatePose(self, pose):
self._pose = pose
self._translation_vector = pose[:3].reshape((3, 1))
self._rotation_vector = np.array([t for t in pose[3:]])
self._rotation_vector = self._rotation_vector.reshape((3, 1))
self._front_depth = self._translation_vector[2, 0]
def get3DNose(self):
nose = self._get_3d_points(rear_size = 0,
rear_depth = 0, front_size = 0,
front_depth = self._front_depth)
nose = self.translateTo3D(nose)
p1, p2 = nose[0], nose[-1]; dist = p1 - p2
norm = np.linalg.norm(dist); unit = dist/norm
nose[-int(nose.shape[0]/2):] = p2 - (p2[-1]/unit[-1]) * unit
return nose
def get3DScreen(self):
t_vec = np.array([[0], [162], [0.0]])
return t_vec.T + self._get_points(192, 107, 0, 192, 107, 0)
def calculate3DScreen(self):
translation_vector = np.array([[0], [0], [0.0]])
rotation_vector = np.array([[-0.0], [0.0], [-0.0]])
corners3D = self._get_points(192, 107, 700, 192, 107, 267)
point_2d, _ = cv2.projectPoints(corners3D, rotation_vector,
translation_vector, self._camera_matrix,
self._dist_coeffs)
projectionPoints = np.int32(point_2d.reshape(-1, 2))
return projectionPoints
def calculate3DLandmarks(self):
face = self._faceModelPoints.copy()
return self.translateTo3D(face)
def calculateAll3DPoints(self):
landmarks3d = self.calculate3DLandmarks()
screen = self.get3DScreen()
nose = self.get3DNose()
all3DPoints = np.concatenate((screen, landmarks3d, nose))
return all3DPoints
def calculate3DProjection(self, points):
translation_vector = np.array([[0], [0], [0.0]])
rotation_vector = np.array([[-0.0], [0.0], [-0.0]])
point_2d, _ = cv2.projectPoints(points, rotation_vector,
translation_vector, self._camera_matrix,
self._dist_coeffs)
projectionPoints = np.int32(point_2d.reshape(-1, 2))
return projectionPoints
def calculate3DScreenProjection(self):
screen = self.get3DScreen()
return self.calculate3DProjection(screen)
def calculate3DLandmarksProjection(self):
landmarks3d = self.calculate3DLandmarks()
return self.calculate3DProjection(landmarks3d)
def calculate3DNoseProjection(self):
nose = self.get3DNose()
return self.calculate3DProjection(nose)
def calculateAll3DProjections(self):
screenProj = self.calculate3DScreenProjection()
landmarksProj = self.calculate3DLandmarksProjection()
noseProj = self.calculate3DNoseProjection()
return screenProj, landmarksProj, noseProj
class HeadGazer(PoseEstimator):
def __init__(self, faceDetector = None, landmarkDetector = None,
poseCalculator = None, face_landmark_path = None,
inputFramesize = (1280, 720), *args, **kwargs):
if poseCalculator == None:
poseCalculator = \
MuratcansHeadGazeCalculator(inputFramesize = inputFramesize)
self._pPoints = np.zeros((1, 2))
self._gazingFrameSize = inputFramesize
self._halfFrameHeight = inputFramesize[1]/2
super().__init__(faceDetector, landmarkDetector, poseCalculator,
face_landmark_path, inputFramesize, *args, **kwargs)
def calculateHeadPose(self, frame):
self._landmarks = self._landmarkDetector.detectFacialLandmarks(frame)
if len(self._landmarks) == 0:
return self._headPose3D
else:
self._headPose3D = \
self._poseCalculator.calculatePose(self._landmarks)
return self._headPose3D
def calculateHeadGaze(self, frame):
self._landmarks = self._landmarkDetector.detectFacialLandmarks(frame)
if len(self._landmarks) != 0:
self._halfFrameHeight = frame.shape[0]/2
self._headPose3D, self._pPoints = self._poseCalculator\
.calculateHeadGazeWithProjectionPoints(self._landmarks)
return self._headPose3D
def _calculateHeadPoseWithAnnotations(self, frame):
self._headPose3D = self.calculateHeadGaze(frame)
return self._headPose3D, self._pPoints, self._landmarks
def calculateHeadPoseWithAnnotations(self, frame, landmarks = None):
if landmarks is None:
self._headPose3D = self.calculateHeadGaze(frame)
else:
self._landmarks = landmarks
if len(self._landmarks) != 0:
self._halfFrameHeight = frame.shape[0]/2
g = self._poseCalculator\
.calculateHeadGazeWithProjectionPoints(self._landmarks)
self._headPose3D, self._pPoints = g
return self._headPose3D, self._pPoints, self._landmarks
def estimateReverseInputValuesWithAnnotations(self, frame):
self._landmarks = self._landmarkDetector.detectFacialLandmarks(frame)
if len(self._landmarks) != 0:
self._halfFrameHeight = frame.shape[0]/2
g = self._poseCalculator\
.calculateReverseHeadGazeWithProjectionPoints(self._landmarks)
self._headPose3D, self._pPoints = g
return self._headPose3D, self._pPoints, self._landmarks
def getHeadPose(self):
return self._poseCalculator.pose
def get3DNoseTip(self):
return self._poseCalculator.get3DNose()[-1]
def getGazingFrameDimensions(self):
#return int(1920), int(self._halfFrameHeight + 1080)
#print(self._gazingFrameSize)
return self._gazingFrameSize
| [
"numpy.reshape",
"numpy.ones",
"cv2.projectPoints",
"numpy.linalg.norm",
"math.degrees",
"numpy.array",
"numpy.zeros",
"cv2.solvePnP",
"cv2.Rodrigues",
"numpy.matmul",
"numpy.concatenate",
"cv2.calibrateCamera",
"pykalman.KalmanFilter",
"cv2.hconcat"
] | [((680, 694), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (688, 694), True, 'import numpy as np, math\n'), ((2491, 2505), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2499, 2505), True, 'import numpy as np, math\n'), ((4014, 4135), 'numpy.array', 'np.array', (['[[focal_length[0], 0, camera_center[0]], [0, focal_length[1], camera_center\n [1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[focal_length[0], 0, camera_center[0]], [0, focal_length[1],\n camera_center[1]], [0, 0, 1]], dtype='double')\n", (4022, 4135), True, 'import numpy as np, math\n'), ((4467, 4505), 'numpy.array', 'np.array', (['raw_values'], {'dtype': 'np.float32'}), '(raw_values, dtype=np.float32)\n', (4475, 4505), True, 'import numpy as np, math\n'), ((5799, 5933), 'numpy.array', 'np.array', (['[[0.25625837222614073], [-0.5884400171468064], [0.0016583488392027156], [-\n 0.0006434617243149612], [0.3660073010818284]]'], {}), '([[0.25625837222614073], [-0.5884400171468064], [\n 0.0016583488392027156], [-0.0006434617243149612], [0.3660073010818284]])\n', (5807, 5933), True, 'import numpy as np, math\n'), ((6051, 6084), 'numpy.array', 'np.array', (['[[-0.0], [0.0], [-0.0]]'], {}), '([[-0.0], [0.0], [-0.0]])\n', (6059, 6084), True, 'import numpy as np, math\n'), ((6121, 6154), 'numpy.array', 'np.array', (['[[0.0], [0.0], [550.0]]'], {}), '([[0.0], [0.0], [550.0]])\n', (6129, 6154), True, 'import numpy as np, math\n'), ((6375, 6557), 'cv2.solvePnP', 'cv2.solvePnP', (['self._faceModelPoints', 'image_points', 'self._camera_matrix', 'self._dist_coeffs'], {'rvec': 'self._rotation_vector', 'tvec': 'self._translation_vector', 'useExtrinsicGuess': '(True)'}), '(self._faceModelPoints, image_points, self._camera_matrix, self\n ._dist_coeffs, rvec=self._rotation_vector, tvec=self.\n _translation_vector, useExtrinsicGuess=True)\n', (6387, 6557), False, 'import cv2\n'), ((6887, 6955), 'numpy.concatenate', 'np.concatenate', (['(self._translation_vector, self._rotation_vector)', '(0)'], {}), 
'((self._translation_vector, self._rotation_vector), 0)\n', (6901, 6955), True, 'import numpy as np, math\n'), ((8044, 8058), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (8052, 8058), True, 'import numpy as np, math\n'), ((9969, 10105), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['self._objectPointsVec', 'self._imagePointsVec', '(1920, 1080)', 'self._camera_matrix', 'self._dist_coeffs'], {'flags': 'flags'}), '(self._objectPointsVec, self._imagePointsVec, (1920, \n 1080), self._camera_matrix, self._dist_coeffs, flags=flags)\n', (9988, 10105), False, 'import cv2\n'), ((12205, 12260), 'numpy.concatenate', 'np.concatenate', (['(self._translation_vector[:, 0], rv)', '(0)'], {}), '((self._translation_vector[:, 0], rv), 0)\n', (12219, 12260), True, 'import numpy as np, math\n'), ((12447, 12483), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self._rotation_vector'], {}), '(self._rotation_vector)\n', (12460, 12483), False, 'import cv2\n'), ((12506, 12559), 'cv2.hconcat', 'cv2.hconcat', (['(rotation_mat, self._translation_vector)'], {}), '((rotation_mat, self._translation_vector))\n', (12517, 12559), False, 'import cv2\n'), ((12984, 13015), 'numpy.array', 'np.array', (['[t for t in pose[3:]]'], {}), '([t for t in pose[3:]])\n', (12992, 13015), True, 'import numpy as np, math\n'), ((13472, 13492), 'numpy.linalg.norm', 'np.linalg.norm', (['dist'], {}), '(dist)\n', (13486, 13492), True, 'import numpy as np, math\n'), ((13644, 13673), 'numpy.array', 'np.array', (['[[0], [162], [0.0]]'], {}), '([[0], [162], [0.0]])\n', (13652, 13673), True, 'import numpy as np, math\n'), ((13805, 13832), 'numpy.array', 'np.array', (['[[0], [0], [0.0]]'], {}), '([[0], [0], [0.0]])\n', (13813, 13832), True, 'import numpy as np, math\n'), ((13859, 13892), 'numpy.array', 'np.array', (['[[-0.0], [0.0], [-0.0]]'], {}), '([[-0.0], [0.0], [-0.0]])\n', (13867, 13892), True, 'import numpy as np, math\n'), ((13982, 14092), 'cv2.projectPoints', 'cv2.projectPoints', (['corners3D', 'rotation_vector', 
'translation_vector', 'self._camera_matrix', 'self._dist_coeffs'], {}), '(corners3D, rotation_vector, translation_vector, self.\n _camera_matrix, self._dist_coeffs)\n', (13999, 14092), False, 'import cv2\n'), ((14564, 14607), 'numpy.concatenate', 'np.concatenate', (['(screen, landmarks3d, nose)'], {}), '((screen, landmarks3d, nose))\n', (14578, 14607), True, 'import numpy as np, math\n'), ((14710, 14737), 'numpy.array', 'np.array', (['[[0], [0], [0.0]]'], {}), '([[0], [0], [0.0]])\n', (14718, 14737), True, 'import numpy as np, math\n'), ((14764, 14797), 'numpy.array', 'np.array', (['[[-0.0], [0.0], [-0.0]]'], {}), '([[-0.0], [0.0], [-0.0]])\n', (14772, 14797), True, 'import numpy as np, math\n'), ((14820, 14927), 'cv2.projectPoints', 'cv2.projectPoints', (['points', 'rotation_vector', 'translation_vector', 'self._camera_matrix', 'self._dist_coeffs'], {}), '(points, rotation_vector, translation_vector, self.\n _camera_matrix, self._dist_coeffs)\n', (14837, 14927), False, 'import cv2\n'), ((16167, 16183), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (16175, 16183), True, 'import numpy as np, math\n'), ((3206, 3338), 'cv2.projectPoints', 'cv2.projectPoints', (['self._rectCorners3D', 'self._rotation_vector', 'self._translation_vector', 'self._camera_matrix', 'self._dist_coeffs'], {}), '(self._rectCorners3D, self._rotation_vector, self.\n _translation_vector, self._camera_matrix, self._dist_coeffs)\n', (3223, 3338), False, 'import cv2\n'), ((4529, 4562), 'numpy.reshape', 'np.reshape', (['model_points', '(3, -1)'], {}), '(model_points, (3, -1))\n', (4539, 4562), True, 'import numpy as np, math\n'), ((10585, 10625), 'numpy.linalg.norm', 'np.linalg.norm', (['self._translation_vector'], {}), '(self._translation_vector)\n', (10599, 10625), True, 'import numpy as np, math\n'), ((10938, 11051), 'cv2.projectPoints', 'cv2.projectPoints', (['self._rectCorners3D', 'rv', 'self._translation_vector', 'self._camera_matrix', 'self._dist_coeffs'], {}), 
'(self._rectCorners3D, rv, self._translation_vector, self.\n _camera_matrix, self._dist_coeffs)\n', (10955, 11051), False, 'import cv2\n'), ((2118, 2153), 'numpy.array', 'np.array', (['point_3d'], {'dtype': '"""float32"""'}), "(point_3d, dtype='float32')\n", (2126, 2153), True, 'import numpy as np, math\n'), ((4959, 5021), 'pykalman.KalmanFilter', 'KalmanFilter', ([], {'initial_state_mean': 'm', 'initial_state_covariance': 'c'}), '(initial_state_mean=m, initial_state_covariance=c)\n', (4971, 5021), False, 'from pykalman import KalmanFilter\n'), ((12132, 12150), 'math.degrees', 'math.degrees', (['t[0]'], {}), '(t[0])\n', (12144, 12150), False, 'import numpy as np, math\n'), ((12611, 12627), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (12619, 12627), True, 'import numpy as np, math\n'), ((12707, 12736), 'numpy.ones', 'np.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (12714, 12736), True, 'import numpy as np, math\n'), ((12761, 12794), 'numpy.matmul', 'np.matmul', (['project_mat', 'points_.T'], {}), '(project_mat, points_.T)\n', (12770, 12794), True, 'import numpy as np, math\n')] |
import numpy as np
import time
import cv2
def sample_angle():
d = np.random.binomial(1,0.5)
theta = np.random.uniform(np.pi/12, np.pi - np.pi/12) * (-1)**d
return theta
class Player():
def __init__(self, x, board_size, bat_size, dtheta = np.pi/12, dy = 3):
self.x = x
self.y = np.random.randint(bat_size+1, board_size[0] - bat_size-2)
self.theta = np.random.uniform(0,2*np.pi)
self.dy = dy
self.dtheta = dtheta
self.y_max = board_size[0]-bat_size-1
self.y_min = bat_size
self.score = 0
self.theta_mesured = 0
def update(self, action):
if action == 0: # Move up
self.y += self.dy
if self.y > self.y_max:
self.y = self.y_max
elif action == 1: # Move down
self.y -= self.dy
if self.y < self.y_min:
self.y = self.y_min
elif action == 3: # Torret up
self.theta += self.dtheta
elif action == 4: # Torret up
self.theta -= self.dtheta
class Ball():
def __init__(self,x, y, V, theta):
self.V = V
self.x = x
self.y = y
self.theta = theta
self.quantum_hits = 0
self.polarization = np.random.uniform(0.0, np.pi)
self.visible = 255
class QPP():
def __init__(self, n_players = 1, board_size = (60,60,60), V = 2, n_rounds = 21, res = 0.2, mode="quantum"):
self.bat_size = 6
self.board_size = board_size
self.board = np.zeros((int(board_size[0]/res),int(board_size[1]/res)))
self.res = res
self.ball = Ball(x = board_size[0]/2, y = board_size[1]/2, V = V, theta = sample_angle())
self.mode = mode
self.left_player = Player(6, board_size, self.bat_size)
self.right_player = Player(board_size[1] - 6, board_size, self.bat_size)
self.done = False
self.n_rounds = n_rounds
self.round = 0
self.n_steps = 0
self.board_angle = np.arctan((board_size[0]-board_size[2])/board_size[1])
self.quantum_hits = 0
def direction_probability(self, pose):
if pose == 'left':
t1 = np.remainder(self.left_player.theta,2*np.pi)
t1 = np.min([t1, 2*np.pi-t1])
t2 = np.remainder(self.ball.polarization,2*np.pi)
t2 = np.min([t2, 2*np.pi-t2])
x = np.abs(t1-t2)
P = (1 + np.cos(2*x))/2
elif pose == 'right':
t1 = np.remainder(self.ball.polarization,2*np.pi)
t1 = np.min([t1, 2*np.pi-t1])
t2 = np.remainder(self.right_player.theta,2*np.pi)
t2 = np.min([t2, 2*np.pi-t2])
x = np.abs(t1-t2)
P = (1 + np.cos(2*x))/2
return P
def _height(self,x):
m = (self.board_size[2]-self.board_size[0])/self.board_size[1]
b = self.board_size[0]
h = x*m+b
return h
def _update_board(self):
self.board = np.zeros((round(self.board_size[1]/self.res),round(self.board_size[0]/self.res)))
Bx = int(round(self.ball.x/self.res))
By = int(round(self.ball.y/self.res))
cv2.circle(self.board,(By, Bx), 10, (self.ball.visible,self.ball.visible,self.ball.visible), -1)
#self.board[50:-50,50:-50] = 0
for ii in range(-round(self.bat_size/self.res),round(self.bat_size/self.res)+1):
for jj in range(4):
self.board[round(self.left_player.x/self.res)-jj, round(self.left_player.y/self.res)+ii-1] = 127
self.board[round(self.right_player.x/self.res)+jj, round(self.right_player.y/self.res)+ii-1] = 127
#
for ii in range(round(self.board_size[1]/self.res)):
self.board[ii, round(self._height(ii*self.res)/self.res)-3:round(self._height(ii*self.res)/self.res)-1] = 200
self.board[ii,0:3] = 200
x_tor = np.array([round(self.left_player.x/self.res), round(self.left_player.x/self.res) + (5/self.res)*np.sin((self.left_player.theta))]).astype(np.int)
y_tor = np.array([round(self.left_player.y/self.res), round(self.left_player.y/self.res) + (5/self.res)*np.cos((self.left_player.theta))]) .astype(np.int)
self.board = cv2.line(self.board,(y_tor[0],x_tor[0] ),(y_tor[1],x_tor[1] ),255,3)
x_tor = np.array([round(self.right_player.x/self.res), round(self.right_player.x/self.res) + (5/self.res)*np.sin((self.right_player.theta))]).astype(np.int)
y_tor = np.array([round(self.right_player.y/self.res), round(self.right_player.y/self.res) + (5/self.res)*np.cos((self.right_player.theta))]) .astype(np.int)
self.board = cv2.line(self.board,(y_tor[0],x_tor[0] ),(y_tor[1],x_tor[1] ),255,3)
#
def step(self, Action_A, Action_B ):
hit = 0
win = 0
# --- Player A --- #
self.left_player.update(Action_A)
# --- Player B --- #
self.right_player.update(Action_B)
# --- Ball step --- #
self.ball.x += self.ball.V * np.cos(self.ball.theta)
self.ball.y += self.ball.V * np.sin(self.ball.theta)
self.ball.visible -= 20
# --- Ball --- #
if self.ball.visible < 0:
self.ball.visible = 0
if self.ball.y > self._height(self.ball.x):
self.ball.y = self._height(self.ball.x)
self.ball.theta = - self.ball.theta - 2*self.board_angle
if self.ball.y < 1:
self.ball.y = 1
self.ball.theta = - self.ball.theta
if self.ball.y >= self.left_player.y - self.bat_size and self.ball.y <= self.left_player.y + self.bat_size and self.ball.x <= self.left_player.x :
self.ball.x = self.left_player.x
hit = 1
p = self.direction_probability("left")
if np.random.binomial(1,p) == 1:
self.ball.theta = np.pi - self.ball.theta
self.ball.polarization = self.left_player.theta
else:
self.ball.theta = np.pi + self.ball.theta
self.ball.polarization = self.left_player.theta + np.pi/2
elif self.ball.x <= self.left_player.x and self.ball.x > self.left_player.x - 3:
self.right_player.score += 1
self.round += 1
win = -1
self.ball.visible = 255
elif self.ball.x <= self.left_player.x - 3:
self.ball.x = self.board_size[1]/2
self.ball.y = self.board_size[0]/2
self.ball.theta = sample_angle()
self.ball.visible = 255
if self.ball.y >= self.right_player.y - self.bat_size and self.ball.y <= self.right_player.y + self.bat_size and self.ball.x >= self.right_player.x :
self.ball.x = self.right_player.x
hit = -1
p = self.direction_probability("right")
if np.random.binomial(1,p) == 1:
self.ball.theta = np.pi - self.ball.theta
self.ball.polarization = self.right_player.theta
else:
self.ball.theta = np.pi + self.ball.theta
self.ball.polarization = self.right_player.theta + np.pi/2
elif self.ball.x >= self.right_player.x and self.ball.x < self.right_player.x + 3:
self.left_player.score += 1
self.round += 1
win = 1
self.ball.visible = 255
elif self.ball.x >= self.right_player.x + 3:
self.ball.x = self.board_size[1]/2
self.ball.y = self.board_size[0]/2
self.ball.theta = sample_angle()
self.ball.visible = 255
if self.round == 21:
self.done = True
self.n_steps = 0
self.n_steps += 1
self._update_board()
return [self.left_player.score, self.right_player.score], self.board, self.done, hit, win
if __name__ == '__main__':
QP = QuantumPong()
done = False
steps = 0
while not done:
a = QP.step(3,4)
done = a[2]
cv2.imshow("video",a[1]/255)
time.sleep(0.1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| [
"numpy.abs",
"cv2.line",
"numpy.min",
"time.sleep",
"cv2.imshow",
"numpy.random.randint",
"cv2.circle",
"numpy.remainder",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"cv2.waitKey",
"numpy.random.binomial",
"numpy.arctan"
] | [((71, 97), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (89, 97), True, 'import numpy as np\n'), ((109, 158), 'numpy.random.uniform', 'np.random.uniform', (['(np.pi / 12)', '(np.pi - np.pi / 12)'], {}), '(np.pi / 12, np.pi - np.pi / 12)\n', (126, 158), True, 'import numpy as np\n'), ((317, 378), 'numpy.random.randint', 'np.random.randint', (['(bat_size + 1)', '(board_size[0] - bat_size - 2)'], {}), '(bat_size + 1, board_size[0] - bat_size - 2)\n', (334, 378), True, 'import numpy as np\n'), ((396, 427), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (413, 427), True, 'import numpy as np\n'), ((1280, 1309), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'np.pi'], {}), '(0.0, np.pi)\n', (1297, 1309), True, 'import numpy as np\n'), ((2038, 2096), 'numpy.arctan', 'np.arctan', (['((board_size[0] - board_size[2]) / board_size[1])'], {}), '((board_size[0] - board_size[2]) / board_size[1])\n', (2047, 2096), True, 'import numpy as np\n'), ((3257, 3360), 'cv2.circle', 'cv2.circle', (['self.board', '(By, Bx)', '(10)', '(self.ball.visible, self.ball.visible, self.ball.visible)', '(-1)'], {}), '(self.board, (By, Bx), 10, (self.ball.visible, self.ball.visible,\n self.ball.visible), -1)\n', (3267, 3360), False, 'import cv2\n'), ((4356, 4428), 'cv2.line', 'cv2.line', (['self.board', '(y_tor[0], x_tor[0])', '(y_tor[1], x_tor[1])', '(255)', '(3)'], {}), '(self.board, (y_tor[0], x_tor[0]), (y_tor[1], x_tor[1]), 255, 3)\n', (4364, 4428), False, 'import cv2\n'), ((4794, 4866), 'cv2.line', 'cv2.line', (['self.board', '(y_tor[0], x_tor[0])', '(y_tor[1], x_tor[1])', '(255)', '(3)'], {}), '(self.board, (y_tor[0], x_tor[0]), (y_tor[1], x_tor[1]), 255, 3)\n', (4802, 4866), False, 'import cv2\n'), ((8483, 8514), 'cv2.imshow', 'cv2.imshow', (['"""video"""', '(a[1] / 255)'], {}), "('video', a[1] / 255)\n", (8493, 8514), False, 'import cv2\n'), ((8520, 8535), 'time.sleep', 'time.sleep', 
(['(0.1)'], {}), '(0.1)\n', (8530, 8535), False, 'import time\n'), ((2225, 2272), 'numpy.remainder', 'np.remainder', (['self.left_player.theta', '(2 * np.pi)'], {}), '(self.left_player.theta, 2 * np.pi)\n', (2237, 2272), True, 'import numpy as np\n'), ((2287, 2315), 'numpy.min', 'np.min', (['[t1, 2 * np.pi - t1]'], {}), '([t1, 2 * np.pi - t1])\n', (2293, 2315), True, 'import numpy as np\n'), ((2329, 2376), 'numpy.remainder', 'np.remainder', (['self.ball.polarization', '(2 * np.pi)'], {}), '(self.ball.polarization, 2 * np.pi)\n', (2341, 2376), True, 'import numpy as np\n'), ((2391, 2419), 'numpy.min', 'np.min', (['[t2, 2 * np.pi - t2]'], {}), '([t2, 2 * np.pi - t2])\n', (2397, 2419), True, 'import numpy as np\n'), ((2432, 2447), 'numpy.abs', 'np.abs', (['(t1 - t2)'], {}), '(t1 - t2)\n', (2438, 2447), True, 'import numpy as np\n'), ((5240, 5263), 'numpy.cos', 'np.cos', (['self.ball.theta'], {}), '(self.ball.theta)\n', (5246, 5263), True, 'import numpy as np\n'), ((5302, 5325), 'numpy.sin', 'np.sin', (['self.ball.theta'], {}), '(self.ball.theta)\n', (5308, 5325), True, 'import numpy as np\n'), ((2529, 2576), 'numpy.remainder', 'np.remainder', (['self.ball.polarization', '(2 * np.pi)'], {}), '(self.ball.polarization, 2 * np.pi)\n', (2541, 2576), True, 'import numpy as np\n'), ((2591, 2619), 'numpy.min', 'np.min', (['[t1, 2 * np.pi - t1]'], {}), '([t1, 2 * np.pi - t1])\n', (2597, 2619), True, 'import numpy as np\n'), ((2633, 2681), 'numpy.remainder', 'np.remainder', (['self.right_player.theta', '(2 * np.pi)'], {}), '(self.right_player.theta, 2 * np.pi)\n', (2645, 2681), True, 'import numpy as np\n'), ((2696, 2724), 'numpy.min', 'np.min', (['[t2, 2 * np.pi - t2]'], {}), '([t2, 2 * np.pi - t2])\n', (2702, 2724), True, 'import numpy as np\n'), ((2737, 2752), 'numpy.abs', 'np.abs', (['(t1 - t2)'], {}), '(t1 - t2)\n', (2743, 2752), True, 'import numpy as np\n'), ((6123, 6147), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p'], {}), '(1, p)\n', (6141, 6147), True, 
'import numpy as np\n'), ((7246, 7270), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p'], {}), '(1, p)\n', (7264, 7270), True, 'import numpy as np\n'), ((8547, 8561), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8558, 8561), False, 'import cv2\n'), ((2467, 2480), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (2473, 2480), True, 'import numpy as np\n'), ((2772, 2785), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (2778, 2785), True, 'import numpy as np\n'), ((4122, 4152), 'numpy.sin', 'np.sin', (['self.left_player.theta'], {}), '(self.left_player.theta)\n', (4128, 4152), True, 'import numpy as np\n'), ((4284, 4314), 'numpy.cos', 'np.cos', (['self.left_player.theta'], {}), '(self.left_player.theta)\n', (4290, 4314), True, 'import numpy as np\n'), ((4556, 4587), 'numpy.sin', 'np.sin', (['self.right_player.theta'], {}), '(self.right_player.theta)\n', (4562, 4587), True, 'import numpy as np\n'), ((4721, 4752), 'numpy.cos', 'np.cos', (['self.right_player.theta'], {}), '(self.right_player.theta)\n', (4727, 4752), True, 'import numpy as np\n')] |
import os
import numpy as np
import random
import torch
from torch.utils.data import DataLoader
from torchvision import models, transforms
import myInception_v3
from myDataReader import ClsDataset
from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3
def del_file(path):
ls = os.listdir(path)
for i in ls:
c_path = os.path.join(path, i)
if os.path.isdir(c_path):
del_file(c_path)
else:
os.remove(c_path)
def main(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
preprocess = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(), #operated on original image, rewrite on previous transform.
transforms.Normalize(args.norm['normMean'], args.norm['normStd'])])
testset = ClsDataset(args.testpath, args.test_root, preprocess)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.nWorker)
print(args.testpath)
if args.net == 'inception_v3':
net = getattr(myInception_v3, args.net)(pretrained=False, num_classes=args.nCls)
else:
net = getattr(models, args.net)(pretrained=False, num_classes=args.nCls)
if len(args.gpu) > 1:
net = torch.nn.DataParallel(net).cuda()
else:
net = net.cuda()
rootpath = os.path.join(args.resultpath, args.task, args.taskID)
net.load_state_dict(
torch.load(os.path.join(rootpath, 'models', args.restore))) # load the finetune weight parameters
print('####################Loading model...', os.path.join(rootpath, 'models', args.restore))
savepath = os.path.join(rootpath, args.restore, 'TEST')
if not os.path.exists(savepath):
os.makedirs(savepath)
reals_patch_test, scores_patch_test, predictions_patch_test, namelist_patch_test = NetPrediction(testloader, net, args.nCls)
if args.nCls == 2:
result_patch = EvalMetrics(reals_patch_test, predictions_patch_test)
auc_patch_test, threshold_YI_patch_test = RocPlot(reals_patch_test, scores_patch_test[:, 1])
print("testing set patch-level AUC:", auc_patch_test,
"testing set patch-level threshold:", threshold_YI_patch_test)
elif args.nCls == 4:
result_patch = EvalMetricsV2(reals_patch_test, predictions_patch_test)
for key in result_patch:
print(key, ': ', result_patch[key])
ProbBoxPlot(scores_patch_test[:, 1], reals_patch_test)
savename_patch = os.path.join(savepath, 'patchTEST.npz')
save_temp_excel(namelist_patch_test, scores_patch_test[:, 1], predictions_patch_test, reals_patch_test,
savepath, args.nCls, 'patch', 'TEST')
np.savez(savename_patch, key_real=reals_patch_test, key_score=scores_patch_test, key_binpred=predictions_patch_test,
key_namelist=namelist_patch_test)
reals_patient_test, scores_patient_test, predictions_patient_test, namelist_patient_test = patient_res_m3(
reals_patch_test, scores_patch_test, namelist_patch_test, args.nCls)
ProbBoxPlot(scores_patient_test[:, 1], reals_patient_test)
if args.nCls == 2:
result_patient = EvalMetrics(reals_patient_test, predictions_patient_test)
auc_patient_test, threshold_YI_patient_test = RocPlot(reals_patient_test, scores_patient_test[:, 1])
print("testing set patient-level AUC:", auc_patient_test,
"testing set patient-level threshold:", threshold_YI_patient_test)
elif args.nCls == 4:
result_patient = EvalMetricsV2(reals_patient_test, predictions_patient_test)
for key in result_patient:
print(key, ': ', result_patient[key])
save_temp_excel(namelist_patient_test, scores_patient_test[:, 1], predictions_patient_test, reals_patient_test,
savepath, args.nCls, 'patient', 'TEST')
savename_patient = os.path.join(savepath, 'patientTEST.npz')
np.savez(savename_patient, key_real=reals_patient_test, key_score=scores_patient_test, key_binpred=predictions_patient_test,
key_namelist=namelist_patient_test)
if __name__ == '__main__':
arg = GetParser()
main(arg)
| [
"os.remove",
"os.path.exists",
"numpy.savez",
"os.listdir",
"myUtils.NetPrediction",
"os.path.isdir",
"numpy.random.seed",
"myUtils.EvalMetricsV2",
"torchvision.transforms.ToTensor",
"myUtils.EvalMetrics",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"myUtils.GetParse... | [((386, 402), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (396, 402), False, 'import os\n'), ((649, 671), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (660, 671), False, 'import random\n'), ((677, 702), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (691, 702), True, 'import numpy as np\n'), ((708, 736), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (725, 736), False, 'import torch\n'), ((742, 775), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (764, 775), False, 'import torch\n'), ((1043, 1096), 'myDataReader.ClsDataset', 'ClsDataset', (['args.testpath', 'args.test_root', 'preprocess'], {}), '(args.testpath, args.test_root, preprocess)\n', (1053, 1096), False, 'from myDataReader import ClsDataset\n'), ((1115, 1208), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.nWorker'}), '(testset, batch_size=args.batch_size, shuffle=False, num_workers=\n args.nWorker)\n', (1125, 1208), False, 'from torch.utils.data import DataLoader\n'), ((1584, 1637), 'os.path.join', 'os.path.join', (['args.resultpath', 'args.task', 'args.taskID'], {}), '(args.resultpath, args.task, args.taskID)\n', (1596, 1637), False, 'import os\n'), ((1889, 1933), 'os.path.join', 'os.path.join', (['rootpath', 'args.restore', '"""TEST"""'], {}), "(rootpath, args.restore, 'TEST')\n", (1901, 1933), False, 'import os\n'), ((2093, 2134), 'myUtils.NetPrediction', 'NetPrediction', (['testloader', 'net', 'args.nCls'], {}), '(testloader, net, args.nCls)\n', (2106, 2134), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((2672, 2726), 'myUtils.ProbBoxPlot', 'ProbBoxPlot', (['scores_patch_test[:, 1]', 'reals_patch_test'], {}), '(scores_patch_test[:, 1], 
reals_patch_test)\n', (2683, 2726), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((2751, 2790), 'os.path.join', 'os.path.join', (['savepath', '"""patchTEST.npz"""'], {}), "(savepath, 'patchTEST.npz')\n", (2763, 2790), False, 'import os\n'), ((2796, 2945), 'myUtils.save_temp_excel', 'save_temp_excel', (['namelist_patch_test', 'scores_patch_test[:, 1]', 'predictions_patch_test', 'reals_patch_test', 'savepath', 'args.nCls', '"""patch"""', '"""TEST"""'], {}), "(namelist_patch_test, scores_patch_test[:, 1],\n predictions_patch_test, reals_patch_test, savepath, args.nCls, 'patch',\n 'TEST')\n", (2811, 2945), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((2964, 3124), 'numpy.savez', 'np.savez', (['savename_patch'], {'key_real': 'reals_patch_test', 'key_score': 'scores_patch_test', 'key_binpred': 'predictions_patch_test', 'key_namelist': 'namelist_patch_test'}), '(savename_patch, key_real=reals_patch_test, key_score=\n scores_patch_test, key_binpred=predictions_patch_test, key_namelist=\n namelist_patch_test)\n', (2972, 3124), True, 'import numpy as np\n'), ((3227, 3314), 'myUtils.patient_res_m3', 'patient_res_m3', (['reals_patch_test', 'scores_patch_test', 'namelist_patch_test', 'args.nCls'], {}), '(reals_patch_test, scores_patch_test, namelist_patch_test,\n args.nCls)\n', (3241, 3314), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((3330, 3388), 'myUtils.ProbBoxPlot', 'ProbBoxPlot', (['scores_patient_test[:, 1]', 'reals_patient_test'], {}), '(scores_patient_test[:, 1], reals_patient_test)\n', (3341, 3388), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, 
patient_res_forval, RocPlot, patient_res_m3\n'), ((3956, 4115), 'myUtils.save_temp_excel', 'save_temp_excel', (['namelist_patient_test', 'scores_patient_test[:, 1]', 'predictions_patient_test', 'reals_patient_test', 'savepath', 'args.nCls', '"""patient"""', '"""TEST"""'], {}), "(namelist_patient_test, scores_patient_test[:, 1],\n predictions_patient_test, reals_patient_test, savepath, args.nCls,\n 'patient', 'TEST')\n", (3971, 4115), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((4170, 4211), 'os.path.join', 'os.path.join', (['savepath', '"""patientTEST.npz"""'], {}), "(savepath, 'patientTEST.npz')\n", (4182, 4211), False, 'import os\n'), ((4217, 4387), 'numpy.savez', 'np.savez', (['savename_patient'], {'key_real': 'reals_patient_test', 'key_score': 'scores_patient_test', 'key_binpred': 'predictions_patient_test', 'key_namelist': 'namelist_patient_test'}), '(savename_patient, key_real=reals_patient_test, key_score=\n scores_patient_test, key_binpred=predictions_patient_test, key_namelist\n =namelist_patient_test)\n', (4225, 4387), True, 'import numpy as np\n'), ((4433, 4444), 'myUtils.GetParser', 'GetParser', ([], {}), '()\n', (4442, 4444), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((439, 460), 'os.path.join', 'os.path.join', (['path', 'i'], {}), '(path, i)\n', (451, 460), False, 'import os\n'), ((473, 494), 'os.path.isdir', 'os.path.isdir', (['c_path'], {}), '(c_path)\n', (486, 494), False, 'import os\n'), ((1823, 1869), 'os.path.join', 'os.path.join', (['rootpath', '"""models"""', 'args.restore'], {}), "(rootpath, 'models', args.restore)\n", (1835, 1869), False, 'import os\n'), ((1946, 1970), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (1960, 1970), False, 'import os\n'), ((1981, 2002), 'os.makedirs', 
'os.makedirs', (['savepath'], {}), '(savepath)\n', (1992, 2002), False, 'import os\n'), ((2187, 2240), 'myUtils.EvalMetrics', 'EvalMetrics', (['reals_patch_test', 'predictions_patch_test'], {}), '(reals_patch_test, predictions_patch_test)\n', (2198, 2240), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((2292, 2342), 'myUtils.RocPlot', 'RocPlot', (['reals_patch_test', 'scores_patch_test[:, 1]'], {}), '(reals_patch_test, scores_patch_test[:, 1])\n', (2299, 2342), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((3441, 3498), 'myUtils.EvalMetrics', 'EvalMetrics', (['reals_patient_test', 'predictions_patient_test'], {}), '(reals_patient_test, predictions_patient_test)\n', (3452, 3498), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((3554, 3608), 'myUtils.RocPlot', 'RocPlot', (['reals_patient_test', 'scores_patient_test[:, 1]'], {}), '(reals_patient_test, scores_patient_test[:, 1])\n', (3561, 3608), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((554, 571), 'os.remove', 'os.remove', (['c_path'], {}), '(c_path)\n', (563, 571), False, 'import os\n'), ((826, 855), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (843, 855), False, 'from torchvision import models, transforms\n'), ((866, 887), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (885, 887), False, 'from torchvision import models, transforms\n'), ((958, 1023), 'torchvision.transforms.Normalize', 'transforms.Normalize', (["args.norm['normMean']", "args.norm['normStd']"], {}), 
"(args.norm['normMean'], args.norm['normStd'])\n", (978, 1023), False, 'from torchvision import models, transforms\n'), ((1684, 1730), 'os.path.join', 'os.path.join', (['rootpath', '"""models"""', 'args.restore'], {}), "(rootpath, 'models', args.restore)\n", (1696, 1730), False, 'import os\n'), ((2532, 2587), 'myUtils.EvalMetricsV2', 'EvalMetricsV2', (['reals_patch_test', 'predictions_patch_test'], {}), '(reals_patch_test, predictions_patch_test)\n', (2545, 2587), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((3808, 3867), 'myUtils.EvalMetricsV2', 'EvalMetricsV2', (['reals_patient_test', 'predictions_patient_test'], {}), '(reals_patient_test, predictions_patient_test)\n', (3821, 3867), False, 'from myUtils import save_temp_excel, GetParser, ProbBoxPlot, NetPrediction, EvalMetrics, EvalMetricsV2, patient_res_forval, RocPlot, patient_res_m3\n'), ((1495, 1521), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (1516, 1521), False, 'import torch\n')] |
import cv2
import numpy as np
def valid_odd_size(size):
"""
Validates that a kernel shape is of odd ints and of with 2 dimensions
:param size: the shape (size) to be checked
:return: False if size is invalid
"""
if type(size) not in (list, tuple):
return False
if len(size) != 2:
return False
if size[0] % 2 != 1 or size[1] % 2 != 1:
return False
return True
def cross_kernel(size):
r"""
Returns a cross (ones in a cross) kernel for morphological functions
Example of a (5,5) cross:
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 1 1 1 1 1 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
f.g. (5, 5)
:return: the `numpy.array` of the cross shape
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
return cv2.getStructuringElement(cv2.MORPH_CROSS, ksize=size)
def rectangle_kernel(size):
r"""
Returns a rectangle (all ones) kernel for morphological functions
Example of a (5,5) rectangle:
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
f.g. (5, 5)
:return: the `numpy.array` of the cross shape
"""
return cv2.getStructuringElement(cv2.MORPH_RECT, ksize=size)
def ellipse_kernel(size):
r"""
Returns an ellipse (ones in the shape of an ellipse) kernel for morphological functions
Example of a (5,5) ellipse:
| \| 0 0 1 0 0 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
f.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=size)
def horizontal_line_kernel(size):
r"""
Returns a horizontal line (a horizontal line of ones) kernel for morphological functions
Example of a (5,5) horizontal line:
| \| 0 0 0 0 0 \|
| \| 0 0 0 0 0 \|
| \| 1 1 1 1 1 \|
| \| 0 0 0 0 0 \|
| \| 0 0 0 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
f.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
kernel = np.zeros(size, dtype=np.uint8)
kernel[int((size[0] - 1) / 2),] = 1
return kernel
def vertical_line_kernel(size):
r"""
Returns a vertical line (a vertical line of ones) kernel for morphological functions
Example of a (5,5) vertical line:
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
f.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
kernel = np.zeros(size, dtype=np.uint8)
kernel[:, int((size[1] - 1) / 2)] = 1
return kernel
def sharpening_kernel(size):
"""
Creates a sharpening kernel for image shar
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
kernel = np.ones(size)
kernel *= -1
kernel[int((size[0] - 1) / 2), int((size[1] - 1) / 2)] = kernel.size
return kernel
| [
"numpy.zeros",
"cv2.getStructuringElement",
"numpy.ones"
] | [((1046, 1100), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS'], {'ksize': 'size'}), '(cv2.MORPH_CROSS, ksize=size)\n', (1071, 1100), False, 'import cv2\n'), ((1526, 1579), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT'], {'ksize': 'size'}), '(cv2.MORPH_RECT, ksize=size)\n', (1551, 1579), False, 'import cv2\n'), ((2195, 2251), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE'], {'ksize': 'size'}), '(cv2.MORPH_ELLIPSE, ksize=size)\n', (2220, 2251), False, 'import cv2\n'), ((2886, 2916), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.uint8'}), '(size, dtype=np.uint8)\n', (2894, 2916), True, 'import numpy as np\n'), ((3601, 3631), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.uint8'}), '(size, dtype=np.uint8)\n', (3609, 3631), True, 'import numpy as np\n'), ((3998, 4011), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (4005, 4011), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 12:05:08 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.control import nonlinear
###############################################################################
sys = pendulum.DoublePendulum()
ctl = nonlinear.ComputedTorqueController( sys )
ctl.w0 = 1.5
ctl.zeta = 0.5
ctl.rbar = np.array([0,0])
# New cl-dynamic
cl_sys = ctl + sys
# Simultation
cl_sys.x0 = np.array([-3.14,1,0,0])
cl_sys.compute_trajectory( tf = 10 )
cl_sys.plot_trajectory()
cl_sys.plot_phase_plane_trajectory(0, 2)
cl_sys.animate_simulation() | [
"numpy.array",
"pyro.dynamic.pendulum.DoublePendulum",
"pyro.control.nonlinear.ComputedTorqueController"
] | [((423, 448), 'pyro.dynamic.pendulum.DoublePendulum', 'pendulum.DoublePendulum', ([], {}), '()\n', (446, 448), False, 'from pyro.dynamic import pendulum\n'), ((456, 495), 'pyro.control.nonlinear.ComputedTorqueController', 'nonlinear.ComputedTorqueController', (['sys'], {}), '(sys)\n', (490, 495), False, 'from pyro.control import nonlinear\n'), ((541, 557), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (549, 557), True, 'import numpy as np\n'), ((622, 648), 'numpy.array', 'np.array', (['[-3.14, 1, 0, 0]'], {}), '([-3.14, 1, 0, 0])\n', (630, 648), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import utilities
def fig_for_location(data, country=None, state=None, num_start=100, case_type="confirmed", averaged_days=5,
yaxes_type='log', doubling_guides=None):
"""Creates a plotly Figure showing COVID-19 cases since count reached num_start
Plotted on a logarithmic scale
Also shows calculated number of days to double averaged over the last number of days
Description describes which data set is being used, confirmed, recovered, deaths
"""
series_sum = data.series_sum_for_location(case_type=case_type, country=country, state=state)
if len(series_sum) == 0:
# most likely cause is someone has selected country and current state is not in that country
state = None
series_sum = data.series_sum_for_location(case_type=case_type, country=country, state=state)
df_sum = pd.DataFrame({"current": series_sum})
idx_start = df_sum['current'].index[df_sum['current'] >= num_start][0]
df_sum['previous'] = df_sum['current'].shift(averaged_days, fill_value=0)
df_plot = df_sum.loc[idx_start:].copy()
df_plot['doubling days'] = averaged_days / np.log2(df_plot['current'] / df_plot['previous'])
df_plot['inverse of doubling days'] = 1 / df_plot['doubling days']
df_plot['doubling days'].clip(lower=-100, upper=100, axis="index", inplace=True)
location = utilities.location_name(country=country, state=state)
fig = make_subplots(
rows=3, cols=1, shared_xaxes=True,
specs=[[{"rowspan": 2}], [None], [{}]],
subplot_titles=[f"{case_type.title()} cases on a {yaxes_type} scale",
f"Inverse of days to double averaged over last {averaged_days} days. Higher is worse."]
)
fig.update_layout(
title_text=f"{case_type.title()} cases {location} starting from {num_start}",
height=600
)
trace_type = ""
fig.add_trace(
go.Scatter(x=df_plot.index, y=df_plot['current'], mode='lines', name=location + trace_type),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=df_plot.index, y=df_plot['inverse of doubling days'], mode='lines', showlegend=False),
row=3, col=1
)
fig.update_yaxes(title_text='Cases', type=yaxes_type, row=1, col=1)
fig.update_layout(legend_traceorder="normal")
if case_type == "confirmed":
doubling_guides = sorted(doubling_guides) if doubling_guides is not None else [4, 5, 6, 8, 10, 12]
for doubler in doubling_guides:
num_start_actual = df_plot.loc[idx_start, 'current']
legend = f'every {doubler} days'
df_plot[legend] = num_start_actual * np.exp2((df_plot.index - idx_start).days / doubler)
fig.add_trace(
go.Scatter(x=df_plot.index, y=df_plot[legend], mode='lines', name=legend),
row=1, col=1
)
return fig
| [
"utilities.location_name",
"numpy.exp2",
"plotly.graph_objects.Scatter",
"pandas.DataFrame",
"numpy.log2"
] | [((969, 1006), 'pandas.DataFrame', 'pd.DataFrame', (["{'current': series_sum}"], {}), "({'current': series_sum})\n", (981, 1006), True, 'import pandas as pd\n'), ((1472, 1525), 'utilities.location_name', 'utilities.location_name', ([], {'country': 'country', 'state': 'state'}), '(country=country, state=state)\n', (1495, 1525), False, 'import utilities\n'), ((1251, 1300), 'numpy.log2', 'np.log2', (["(df_plot['current'] / df_plot['previous'])"], {}), "(df_plot['current'] / df_plot['previous'])\n", (1258, 1300), True, 'import numpy as np\n'), ((2019, 2115), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_plot.index', 'y': "df_plot['current']", 'mode': '"""lines"""', 'name': '(location + trace_type)'}), "(x=df_plot.index, y=df_plot['current'], mode='lines', name=\n location + trace_type)\n", (2029, 2115), True, 'import plotly.graph_objects as go\n'), ((2166, 2269), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_plot.index', 'y': "df_plot['inverse of doubling days']", 'mode': '"""lines"""', 'showlegend': '(False)'}), "(x=df_plot.index, y=df_plot['inverse of doubling days'], mode=\n 'lines', showlegend=False)\n", (2176, 2269), True, 'import plotly.graph_objects as go\n'), ((2754, 2805), 'numpy.exp2', 'np.exp2', (['((df_plot.index - idx_start).days / doubler)'], {}), '((df_plot.index - idx_start).days / doubler)\n', (2761, 2805), True, 'import numpy as np\n'), ((2849, 2922), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_plot.index', 'y': 'df_plot[legend]', 'mode': '"""lines"""', 'name': 'legend'}), "(x=df_plot.index, y=df_plot[legend], mode='lines', name=legend)\n", (2859, 2922), True, 'import plotly.graph_objects as go\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from learner_Q import LearnerQ
import logging
_logger = logging.getLogger(__name__)
class LearnerPLPR(LearnerQ):
def __init__(self, action_count=4, name='PPR',
epsilon=1.0, epsilon_change=-0.0005, alpha=0.05, gamma=0.95,
source=None, rng=np.random.RandomState(1)):
super(LearnerPLPR, self).__init__(action_count, name,
epsilon, epsilon_change,
alpha, gamma,
source, rng)
# self.random_test = True
def get_action_id(self, state, library=None, policy_name=None,
task_name=None,
status='training', psi=0.0):
# if self.random_test:
# return self.rng.randint(0, self.action_count)
""" Get an action according to current policy during testing and when
the chosen policy is the policy for the current task. """
if status in ['testing'] or \
(policy_name == task_name and len(library) > 1):
_logger.debug("### Selecting policy %s greedily ###" %
str(policy_name))
return self.greedy(self.Q, state)
""" Get an action using the policy reuse strategy. """
reuse_probability = self.rng.uniform(0, 1)
if reuse_probability < psi and len(library) > 1:
_logger.debug("### Reusing policy %s greedily (%s < %s)###" %
(str(policy_name), str(reuse_probability), str(psi)))
return self.greedy(library[policy_name]['Q'], state)
else:
explore_probability = self.rng.uniform(0, 1)
# epsilon = 1.0 - psi
if explore_probability < self.epsilon:
_logger.debug("### Random action (%s < %s) ###" %
(str(explore_probability), str(self.epsilon)))
return self.rng.randint(0, self.action_count)
else:
_logger.debug("### Selecting policy %s e-greedily "
"(%s > %s) ###" % (str(task_name),
str(explore_probability),
str(self.epsilon)))
return self.greedy(self.Q, state)
| [
"logging.getLogger",
"numpy.random.RandomState"
] | [((121, 148), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'import logging\n'), ((343, 367), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (364, 367), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from .utils import distance
def get_dewarped_table(im, corners):
# check input
if im is None:
return None
if len(corners) != 4:
return None
target_w = int(max(distance(corners[0], corners[1]), distance(corners[2], corners[3])))
target_h = int(max(distance(corners[0], corners[3]), distance(corners[1], corners[2])))
target_corners = [[0, 0], [target_w, 0], [target_w, target_h], [0, target_h]]
pts1 = np.float32(corners)
pts2 = np.float32(target_corners)
transform_matrix = cv2.getPerspectiveTransform(pts1, pts2)
dewarped = cv2.warpPerspective(im, transform_matrix, (target_w, target_h))
return dewarped | [
"cv2.warpPerspective",
"numpy.float32",
"cv2.getPerspectiveTransform"
] | [((489, 508), 'numpy.float32', 'np.float32', (['corners'], {}), '(corners)\n', (499, 508), True, 'import numpy as np\n'), ((520, 546), 'numpy.float32', 'np.float32', (['target_corners'], {}), '(target_corners)\n', (530, 546), True, 'import numpy as np\n'), ((570, 609), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (597, 609), False, 'import cv2\n'), ((625, 688), 'cv2.warpPerspective', 'cv2.warpPerspective', (['im', 'transform_matrix', '(target_w, target_h)'], {}), '(im, transform_matrix, (target_w, target_h))\n', (644, 688), False, 'import cv2\n')] |
import numpy
import matplotlib.pyplot as plot
def relu(arr):
return numpy.maximum(0, arr)
x = numpy.arange(-10, 10, 0.1)
y = relu(x)
plot.plot(x, y, label="Sigmoid function")
plot.xlabel('x')
plot.ylabel('y')
plot.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.maximum",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((102, 128), 'numpy.arange', 'numpy.arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (114, 128), False, 'import numpy\n'), ((141, 182), 'matplotlib.pyplot.plot', 'plot.plot', (['x', 'y'], {'label': '"""Sigmoid function"""'}), "(x, y, label='Sigmoid function')\n", (150, 182), True, 'import matplotlib.pyplot as plot\n'), ((183, 199), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""x"""'], {}), "('x')\n", (194, 199), True, 'import matplotlib.pyplot as plot\n'), ((200, 216), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""y"""'], {}), "('y')\n", (211, 216), True, 'import matplotlib.pyplot as plot\n'), ((217, 228), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (226, 228), True, 'import matplotlib.pyplot as plot\n'), ((74, 95), 'numpy.maximum', 'numpy.maximum', (['(0)', 'arr'], {}), '(0, arr)\n', (87, 95), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
HISTORY:
Created on Wed May 27 14:27:16 2020
Project: Vortex GUI
Author: DIVE-LINK (www.dive-link.net), <EMAIL>
<NAME> (SemperAnte), <EMAIL>
TODO:
DESCRIPTION:
InformationWidget
slots:
loadImage
startImage
clearImage
"""
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtCore as qtc
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import numpy as np
import random
class InformationWidget(qtw.QGroupBox):
infoRequested = qtc.pyqtSignal()
WIDGET_TITLE = 'Information'
TIMER_INTERVAL = 2.0
def __init__(self, parent = None):
super().__init__(parent)
self.progressValue = 0
self.art = None
self.bytesSend = 0
self.num = 1
self._setupUi()
self.timer = qtc.QTimer()
self.timer.setInterval(int(self.TIMER_INTERVAL * 1000))
self.timer.timeout.connect(self.infoRequested)
@qtc.pyqtSlot(np.ndarray)
def loadImage(self, image):
# remove alpha channel
if image.shape[2] == 4:
image = image[:, :, 0:3]
self.imageSource = (image * np.iinfo(np.uint8).max).astype(np.uint8)
numBlock = np.ceil(self.imageSource.size * 8 / 288).astype(int)
imageCopy = self.imageSource.flatten()
imageCopy = np.unpackbits(imageCopy)
imageCopy.resize((numBlock, 288))
self.imageCopy = imageCopy.T
self.ax = self.fig.add_axes([0, 0, 1, 1])
self.ax.axis('off')
self.art = self.ax.imshow(self.imageSource, interpolation = None)
self.canvas.draw()
@qtc.pyqtSlot()
def startImage(self):
self.timer.start()
@qtc.pyqtSlot()
def clearImage(self):
self.timer.stop()
self.bytesSend = 0
self.progressValue = 0
self.progress.setValue(self.progressValue)
def _setupUi(self):
self.setTitle(self.WIDGET_TITLE)
monospaceFont = qtg.QFont('Courier New', 9)
self.stat = qtw.QTextEdit()
self.stat.setReadOnly(True)
self.stat.setCurrentFont(monospaceFont)
self.statCursor = self.stat.textCursor()
self.log = qtw.QTextEdit()
self.log.setReadOnly(True)
self.log.setCurrentFont(monospaceFont)
self.tab = qtw.QTabWidget()
self.tab.addTab(self.stat, 'Statistics')
self.tab.addTab(self.log, 'Log')
self.tab.setMinimumWidth(750)
self.tab.setSizePolicy(qtw.QSizePolicy.Preferred, qtw.QSizePolicy.Maximum)
self.fig = plt.figure()
self.canvas = FigureCanvas(self.fig)
self.progress = qtw.QProgressBar()
self.progress.setRange(0, 100)
self.progress.setValue(self.progressValue)
# data widget
self.dataTime = qtw.QLabel('Time remaining: -')
self.dataRate = qtw.QLabel('Bitrate: 0 bit/s')
self.dataBer = qtw.QLabel('BER: 0.0')
self.dataBler = qtw.QLabel('BLER: 0.0')
self.dataWidget = qtw.QWidget()
self.dataWidget.setSizePolicy(qtw.QSizePolicy.Preferred, qtw.QSizePolicy.Maximum)
self.dataWidget.setLayout(qtw.QGridLayout())
self.dataWidget.layout().setContentsMargins(0, 0, 0, 0)
self.dataWidget.layout().addWidget(self.dataTime, 0, 0)
self.dataWidget.layout().addWidget(self.dataRate, 1, 0)
self.dataWidget.layout().addWidget(self.dataBer, 0, 1)
self.dataWidget.layout().addWidget(self.dataBler, 1, 1)
self.setLayout(qtw.QVBoxLayout())
self.layout().addWidget(self.tab)
self.layout().addWidget(self.canvas)
self.layout().addWidget(self.progress)
self.layout().addWidget(self.dataWidget)
@qtc.pyqtSlot(dict)
def showInfo(self, info):
for key, value in info.items():
if key == 'progress':
self.progress.setValue(value)
elif key == 'datarate':
self.dataRate.setText('Bitrate: {0:.1f} bit/s'.format(value))
elif key == 'ber':
self.dataBer.setText('BER: {0:.2e}'.format(value))
elif key == 'bler':
self.dataBler.setText('BLER: {0:.2e}'.format(value))
elif key == 'stat':
self.stat.insertPlainText(value)
self.statCursor.movePosition(qtg.QTextCursor.End)
self.stat.setTextCursor(self.statCursor)
elif key == 'imbl':
idx = np.unpackbits(value).astype(np.bool)
idx = idx[:self.imageCopy.shape[1]]
print(f'size = {idx.size}, {idx}')
imageSink = np.full_like(self.imageCopy, np.iinfo(np.uint8).max)
imageSink[:, idx] = self.imageCopy[:, idx]
imageSink = imageSink.T.flatten()
imageSink = np.packbits(imageSink)
imageSink.resize(self.imageSource.shape)
self.art.set_data(imageSink)
self.canvas.draw()
else:
raise NameError
'''
def _stepImage(self):
BITRATE = 874
self.bytesSend += 2 * BITRATE / 8
if self.bytesSend > self.imageSource.size:
self.bytesSend = self.imageSource.size
self.progressValue = self.bytesSend / self.imageSource.size * 100
self.progress.setValue(self.progressValue)
time = (100 - self.progressValue) / 100 * (self.imageSource.size * 8 / BITRATE)
self.dataTime.setText(f'Time remaining: {time:.1f} s')
rate = BITRATE + random.uniform(-300, 300)
self.dataRate.setText(f'Bitrate: {rate:.1f} bit/s')
ber = 2.03e-4 + random.uniform(-0.11e-4, 0.11e-4)
self.dataBer.setText(f'BER: {ber:.2e}')
bler = 1.11e-2 + random.uniform(-0.06e-2, 0.06e-2)
self.dataBler.setText(f'BLER: {bler:.2e}')
imageSink = np.full_like(self.imageSource, np.iinfo(np.uint8).max)
imageSinkLine = imageSink.view()
imageSinkLine.shape = self.imageSourceLine.shape
n = self.progressValue / 100 * self.imageSourceLine.shape[0]
n = int(n)
imageSinkLine[0:n, :] = self.imageSourceLine[0:n, :]
self.art.set_data(imageSink)
self.canvas.draw()
b = 960 - int(random.uniform(0, 40))
self.stat.append(f'{self.num:14d}| 960|{b:14d}|{self.progressValue:13.1f}%|{rate:14.1f}|')
self.num += 1
'''
# simple test
if __name__ == '__main__':
import utility
utility.runManualTest(InformationWidget) | [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtCore.pyqtSignal",
"numpy.ceil",
"numpy.packbits",
"PyQt5.QtGui.QFont",
"PyQt5.QtCore.QTimer",
"numpy.unpackbits",
"numpy.iinfo",
"utility.runManualTest",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QProgressBar",
"matplotlib.pypl... | [((663, 679), 'PyQt5.QtCore.pyqtSignal', 'qtc.pyqtSignal', ([], {}), '()\n', (677, 679), True, 'from PyQt5 import QtCore as qtc\n'), ((1141, 1165), 'PyQt5.QtCore.pyqtSlot', 'qtc.pyqtSlot', (['np.ndarray'], {}), '(np.ndarray)\n', (1153, 1165), True, 'from PyQt5 import QtCore as qtc\n'), ((1829, 1843), 'PyQt5.QtCore.pyqtSlot', 'qtc.pyqtSlot', ([], {}), '()\n', (1841, 1843), True, 'from PyQt5 import QtCore as qtc\n'), ((1907, 1921), 'PyQt5.QtCore.pyqtSlot', 'qtc.pyqtSlot', ([], {}), '()\n', (1919, 1921), True, 'from PyQt5 import QtCore as qtc\n'), ((3982, 4000), 'PyQt5.QtCore.pyqtSlot', 'qtc.pyqtSlot', (['dict'], {}), '(dict)\n', (3994, 4000), True, 'from PyQt5 import QtCore as qtc\n'), ((6891, 6931), 'utility.runManualTest', 'utility.runManualTest', (['InformationWidget'], {}), '(InformationWidget)\n', (6912, 6931), False, 'import utility\n'), ((995, 1007), 'PyQt5.QtCore.QTimer', 'qtc.QTimer', ([], {}), '()\n', (1005, 1007), True, 'from PyQt5 import QtCore as qtc\n'), ((1523, 1547), 'numpy.unpackbits', 'np.unpackbits', (['imageCopy'], {}), '(imageCopy)\n', (1536, 1547), True, 'import numpy as np\n'), ((2207, 2234), 'PyQt5.QtGui.QFont', 'qtg.QFont', (['"""Courier New"""', '(9)'], {}), "('Courier New', 9)\n", (2216, 2234), True, 'from PyQt5 import QtGui as qtg\n'), ((2264, 2279), 'PyQt5.QtWidgets.QTextEdit', 'qtw.QTextEdit', ([], {}), '()\n', (2277, 2279), True, 'from PyQt5 import QtWidgets as qtw\n'), ((2432, 2447), 'PyQt5.QtWidgets.QTextEdit', 'qtw.QTextEdit', ([], {}), '()\n', (2445, 2447), True, 'from PyQt5 import QtWidgets as qtw\n'), ((2550, 2566), 'PyQt5.QtWidgets.QTabWidget', 'qtw.QTabWidget', ([], {}), '()\n', (2564, 2566), True, 'from PyQt5 import QtWidgets as qtw\n'), ((2798, 2810), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2808, 2810), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2855), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (2845, 2855), 
True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((2880, 2898), 'PyQt5.QtWidgets.QProgressBar', 'qtw.QProgressBar', ([], {}), '()\n', (2896, 2898), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3035, 3066), 'PyQt5.QtWidgets.QLabel', 'qtw.QLabel', (['"""Time remaining: -"""'], {}), "('Time remaining: -')\n", (3045, 3066), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3091, 3121), 'PyQt5.QtWidgets.QLabel', 'qtw.QLabel', (['"""Bitrate: 0 bit/s"""'], {}), "('Bitrate: 0 bit/s')\n", (3101, 3121), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3145, 3167), 'PyQt5.QtWidgets.QLabel', 'qtw.QLabel', (['"""BER: 0.0"""'], {}), "('BER: 0.0')\n", (3155, 3167), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3192, 3215), 'PyQt5.QtWidgets.QLabel', 'qtw.QLabel', (['"""BLER: 0.0"""'], {}), "('BLER: 0.0')\n", (3202, 3215), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3242, 3255), 'PyQt5.QtWidgets.QWidget', 'qtw.QWidget', ([], {}), '()\n', (3253, 3255), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3388, 3405), 'PyQt5.QtWidgets.QGridLayout', 'qtw.QGridLayout', ([], {}), '()\n', (3403, 3405), True, 'from PyQt5 import QtWidgets as qtw\n'), ((3766, 3783), 'PyQt5.QtWidgets.QVBoxLayout', 'qtw.QVBoxLayout', ([], {}), '()\n', (3781, 3783), True, 'from PyQt5 import QtWidgets as qtw\n'), ((1403, 1443), 'numpy.ceil', 'np.ceil', (['(self.imageSource.size * 8 / 288)'], {}), '(self.imageSource.size * 8 / 288)\n', (1410, 1443), True, 'import numpy as np\n'), ((1334, 1352), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (1342, 1352), True, 'import numpy as np\n'), ((5133, 5155), 'numpy.packbits', 'np.packbits', (['imageSink'], {}), '(imageSink)\n', (5144, 5155), True, 'import numpy as np\n'), ((4739, 4759), 'numpy.unpackbits', 'np.unpackbits', (['value'], {}), '(value)\n', (4752, 4759), True, 'import numpy as np\n'), ((4958, 4976), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (4966, 4976), True, 'import 
numpy as np\n')] |
from .. import ccllib as lib
from ..core import check
from ..background import omega_x
from .massdef import MassDef, MassDef200m
import numpy as np
class HaloBias(object):
    """ This class enables the calculation of halo bias functions.
    We currently assume that all halo bias functions can be written
    as functions that depend on M only through sigma_M (where
    sigma_M^2 is the overdensity variance on spheres with a
    radius given by the Lagrangian radius for mass M).
    All sub-classes implementing specific parametrizations
    can therefore be simply created by replacing this class'
    `_get_bsigma method`.
    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        mass_def (:class:`~pyccl.halos.massdef.MassDef`): a mass
            definition object that fixes
            the mass definition used by this halo bias
            parametrization.
        mass_def_strict (bool): if False, consistency of the mass
            definition will be ignored.
    """
    name = "default"
    def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
        # Make sure the sigma(M) splines exist before any bias evaluation.
        cosmo.compute_sigma()
        self.mass_def_strict = mass_def_strict
        if mass_def is not None:
            # Reject mass definitions this parametrization cannot handle.
            if self._check_mdef(mass_def):
                raise ValueError("Halo bias " + self.name +
                                 " is not compatible with mass definition" +
                                 " Delta = %s, " % (mass_def.Delta) +
                                 " rho = " + mass_def.rho_type)
            self.mdef = mass_def
        else:
            self._default_mdef()
        self._setup(cosmo)
    def _default_mdef(self):
        """ Assigns a default mass definition for this object if
        none is passed at initialization.
        """
        self.mdef = MassDef('fof', 'matter')
    def _setup(self, cosmo):
        """ Use this function to initialize any internal attributes
        of this object. This function is called at the very end of the
        constructor call.
        Args:
            cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        """
        pass
    def _check_mdef_strict(self, mdef):
        # Subclasses override this to flag unsupported mass definitions;
        # the base class accepts everything.
        return False
    def _check_mdef(self, mdef):
        """ Return False if the input mass definition agrees with
        the definitions for which this parametrization
        works. True otherwise. This function gets called at the
        start of the constructor call.
        Args:
            mdef (:class:`~pyccl.halos.massdef.MassDef`):
                a mass definition object.
        Returns:
            bool: True if the mass definition is not compatible with
            this parametrization. False otherwise.
        """
        if self.mass_def_strict:
            return self._check_mdef_strict(mdef)
        # Strict checking disabled: accept any mass definition.
        return False
    def _get_consistent_mass(self, cosmo, M, a, mdef_other):
        """ Transform a halo mass with a given mass definition into
        the corresponding mass definition that was used to initialize
        this object.
        Args:
            cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
            M (float or array_like): halo mass in units of M_sun.
            a (float): scale factor.
            mdef_other (:class:`~pyccl.halos.massdef.MassDef`):
                a mass definition object.
        Returns:
            float or array_like: base-10 logarithm of the mass expressed
            in this object's mass definition (log10 is what the internal
            sigma(M) evaluation expects).
        """
        if mdef_other is not None:
            M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef)
        else:
            M_use = M
        return np.log10(M_use)
    def _get_Delta_m(self, cosmo, a):
        """ For SO-based mass definitions, this returns the corresponding
        value of Delta for a rho_matter-based definition. This is useful
        mostly for the Tinker mass functions, which are defined for any
        SO mass in general, but explicitly only for Delta_matter.
        """
        delta = self.mdef.get_Delta(cosmo, a)
        if self.mdef.rho_type == 'matter':
            return delta
        else:
            # Rescale Delta by the density ratio to convert from this
            # rho_type to a matter-based overdensity.
            om_this = omega_x(cosmo, a, self.mdef.rho_type)
            om_matt = omega_x(cosmo, a, 'matter')
            return delta * om_this / om_matt
    def get_halo_bias(self, cosmo, M, a, mdef_other=None):
        """ Returns the halo bias for input parameters.
        Args:
            cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
            M (float or array_like): halo mass in units of M_sun.
            a (float): scale factor.
            mdef_other (:class:`~pyccl.halos.massdef.MassDef`):
                the mass definition object that defines M.
        Returns:
            float or array_like: halo bias.
        """
        M_use = np.atleast_1d(M)
        logM = self._get_consistent_mass(cosmo, M_use,
                                         a, mdef_other)
        # sigma(M)
        status = 0
        sigM, status = lib.sigM_vec(cosmo.cosmo, a, logM,
                                     len(logM), status)
        check(status)
        b = self._get_bsigma(cosmo, sigM, a)
        if np.ndim(M) == 0:
            # Preserve scalar-in/scalar-out behavior.
            b = b[0]
        return b
    def _get_bsigma(self, cosmo, sigM, a):
        """ Get the halo bias as a function of sigmaM.
        Args:
            cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
            sigM (float or array_like): standard deviation in the
                overdensity field on the scale of this halo.
            a (float): scale factor.
        Returns:
            float or array_like: f(sigma_M) function.
        """
        raise NotImplementedError("Use one of the non-default "
                                  "HaloBias classes")
class HaloBiasSheth99(HaloBias):
    """ Implements halo bias described in 1999MNRAS.308..119S
    This parametrization is only valid for 'fof' masses.
    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        mass_def (:class:`~pyccl.halos.massdef.MassDef`):
            a mass definition object.
            this parametrization accepts FoF masses only.
            If `None`, FoF masses will be used.
        mass_def_strict (bool): if False, consistency of the mass
            definition will be ignored.
        use_delta_c_fit (bool): if True, use delta_crit given by
            the fit of Nakamura & Suto 1997. Otherwise use
            delta_crit = 1.68647.
    """
    name = "Sheth99"
    def __init__(self, cosmo, mass_def=None,
                 mass_def_strict=True,
                 use_delta_c_fit=False):
        self.use_delta_c_fit = use_delta_c_fit
        super(HaloBiasSheth99, self).__init__(cosmo,
                                              mass_def,
                                              mass_def_strict)
    def _default_mdef(self):
        # FoF masses are the only ones this parametrization supports.
        self.mdef = MassDef('fof', 'matter')
    def _setup(self, cosmo):
        # Sheth & Tormen (1999) fit parameters.
        self.p = 0.3
        self.a = 0.707
    def _check_mdef_strict(self, mdef):
        # The base class only calls this when mass_def_strict is True, so
        # the previous extra `if self.mass_def_strict:` guard was redundant
        # (and inconsistent with the other subclasses); it has been removed.
        if mdef.Delta != 'fof':
            return True
        return False
    def _get_bsigma(self, cosmo, sigM, a):
        if self.use_delta_c_fit:
            status = 0
            delta_c, status = lib.dc_NakamuraSuto(cosmo.cosmo, a, status)
            check(status)
        else:
            # Standard spherical-collapse threshold.
            delta_c = 1.68647
        nu = delta_c / sigM
        anu2 = self.a * nu**2
        return 1. + (anu2 - 1. + 2. * self.p / (1. + anu2**self.p))/delta_c
class HaloBiasSheth01(HaloBias):
    """ Halo bias of Sheth, Mo & Tormen (arXiv:astro-ph/9907024).
    This parametrization is defined for friends-of-friends ('fof')
    masses only.
    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        mass_def (:class:`~pyccl.halos.massdef.MassDef`):
            a mass definition object. Only FoF masses are accepted;
            if `None`, FoF masses will be used.
        mass_def_strict (bool): if False, consistency of the mass
            definition will be ignored.
    """
    name = "Sheth01"
    def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
        super(HaloBiasSheth01, self).__init__(cosmo,
                                              mass_def,
                                              mass_def_strict)
    def _default_mdef(self):
        # FoF masses with respect to the matter density.
        self.mdef = MassDef('fof', 'matter')
    def _setup(self, cosmo):
        # Fit parameters of the Sheth, Mo & Tormen bias.
        self.a = 0.707
        self.sqrta = 0.84083292038  # sqrt(0.707)
        self.b = 0.5
        self.c = 0.6
        self.dc = 1.68647  # spherical-collapse threshold
    def _check_mdef_strict(self, mdef):
        # Anything that is not an FoF mass is incompatible.
        return mdef.Delta != 'fof'
    def _get_bsigma(self, cosmo, sigM, a):
        # Peak height and its rescaled square.
        nu = self.dc / sigM
        a_nu2 = self.a * nu**2
        a_nu2_c = a_nu2**self.c
        # Constant appearing in the denominator of the second term.
        t_const = self.b * (1.0 - self.c) * (1.0 - 0.5 * self.c)
        term1 = self.sqrta * a_nu2 * (1 + self.b / a_nu2_c)
        term2 = a_nu2_c / (a_nu2_c + t_const)
        return 1. + (term1 - term2) / (self.sqrta * self.dc)
class HaloBiasBhattacharya11(HaloBias):
    """ Halo bias of Bhattacharya et al. (arXiv:1005.2239).
    This parametrization is defined for friends-of-friends ('fof')
    masses only.
    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        mass_def (:class:`~pyccl.halos.massdef.MassDef`):
            a mass definition object. Only FoF masses are accepted;
            if `None`, FoF masses will be used.
        mass_def_strict (bool): if False, consistency of the mass
            definition will be ignored.
    """
    name = "Bhattacharya11"
    def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
        super(HaloBiasBhattacharya11, self).__init__(cosmo,
                                                     mass_def,
                                                     mass_def_strict)
    def _default_mdef(self):
        # FoF masses with respect to the matter density.
        self.mdef = MassDef('fof', 'matter')
    def _setup(self, cosmo):
        # Fit parameters (the amplitude evolves with scale factor via az).
        self.a = 0.788
        self.az = 0.01
        self.p = 0.807
        self.q = 1.795
        self.dc = 1.68647  # spherical-collapse threshold
    def _check_mdef_strict(self, mdef):
        # Anything that is not an FoF mass is incompatible.
        return mdef.Delta != 'fof'
    def _get_bsigma(self, cosmo, sigM, a):
        nu = self.dc / sigM
        # Scale-factor-dependent amplitude.
        a_of_z = self.a * a**self.az
        a_nu2 = a_of_z * nu**2
        correction = 2 * self.p / (1 + a_nu2**self.p)
        return 1. + (a_nu2 - self.q + correction) / self.dc
class HaloBiasTinker10(HaloBias):
    """ Halo bias of Tinker et al. (arXiv:1001.3162).
    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
        mass_def (:class:`~pyccl.halos.massdef.MassDef`):
            a mass definition object. SO masses with
            200 < Delta < 3200 (with respect to the matter density)
            are accepted; if `None`, Delta = 200 (matter) is used.
        mass_def_strict (bool): if False, consistency of the mass
            definition will be ignored.
    """
    name = "Tinker10"
    def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
        super(HaloBiasTinker10, self).__init__(cosmo,
                                               mass_def,
                                               mass_def_strict)
    def _default_mdef(self):
        self.mdef = MassDef200m()
    def _AC(self, ld):
        # Delta-dependent amplitude A and coefficient C of the fit,
        # where ld = log10(Delta_m).
        suppression = np.exp(-(4. / ld)**4.)
        amp = 1.0 + 0.24 * ld * suppression
        coeff = 0.019 + 0.107 * ld + 0.19 * suppression
        return amp, coeff
    def _a(self, ld):
        # Delta-dependent exponent of the peak height.
        return 0.44 * ld - 0.88
    def _setup(self, cosmo):
        # Delta-independent fit parameters.
        self.B = 0.183
        self.b = 1.5
        self.c = 2.4
        self.dc = 1.68647  # spherical-collapse threshold
    def _check_mdef_strict(self, mdef):
        # Any SO mass is fine; FoF masses are not supported.
        return mdef.Delta == 'fof'
    def _get_bsigma(self, cosmo, sigM, a):
        nu = self.dc / sigM
        log_delta = np.log10(self._get_Delta_m(cosmo, a))
        amp, coeff = self._AC(log_delta)
        exponent = self._a(log_delta)
        nu_pow = nu**exponent
        return (1. - amp * nu_pow / (nu_pow + self.dc**exponent)
                + self.B * nu**self.b + coeff * nu**self.c)
def halo_bias_from_name(name):
    """ Returns halo bias subclass from name string
    Args:
        name (string): a halo bias name
    Returns:
        HaloBias subclass corresponding to the input name.
    Raises:
        ValueError: if `name` does not match any implemented
            parametrization.
    """
    bias_functions = {c.name: c for c in HaloBias.__subclasses__()}
    if name in bias_functions:
        return bias_functions[name]
    else:
        # BUGFIX: the original message contained a bare "%s" that was
        # never interpolated with the offending name.
        raise ValueError("Halo bias parametrization %s not implemented"
                         % name)
| [
"numpy.exp",
"numpy.log10",
"numpy.ndim",
"numpy.atleast_1d"
] | [((3626, 3641), 'numpy.log10', 'np.log10', (['M_use'], {}), '(M_use)\n', (3634, 3641), True, 'import numpy as np\n'), ((4780, 4796), 'numpy.atleast_1d', 'np.atleast_1d', (['M'], {}), '(M)\n', (4793, 4796), True, 'import numpy as np\n'), ((11314, 11340), 'numpy.exp', 'np.exp', (['(-(4.0 / ld) ** 4.0)'], {}), '(-(4.0 / ld) ** 4.0)\n', (11320, 11340), True, 'import numpy as np\n'), ((5139, 5149), 'numpy.ndim', 'np.ndim', (['M'], {}), '(M)\n', (5146, 5149), True, 'import numpy as np\n')] |
from sklearn.cluster import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster
from sklearn.linear_model import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model
from sklearn.naive_bayes import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.naive_bayes
from sklearn.svm import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm
from sklearn.ensemble import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.ensemble
from sklearn.discriminant_analysis import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.discriminant_analysis
from sklearn.tree import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.tree
from sklearn.ensemble import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.ensemble
from sklearn import * # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
from sklearn import model_selection
import numpy as np
import os
def fit(model, x, y=None, **kwargs):
    """Train ``model`` on features ``x`` (with optional targets ``y``).

    Thin convenience wrapper: every extra keyword argument is forwarded
    unchanged to the estimator's own ``fit`` method.
    """
    trainer = model.fit
    return trainer(x, y, **kwargs)
def predict(model, x, **kwargs):
    """Run ``model`` inference on ``x``.

    Thin convenience wrapper: every extra keyword argument is forwarded
    unchanged to the estimator's own ``predict`` method.
    """
    infer = model.predict
    return infer(x, **kwargs)
def train_test_split(x, y, **kwargs):
    """Randomly split ``x``/``y`` into train and test subsets.

    Delegates to :func:`sklearn.model_selection.train_test_split`;
    keyword arguments (``test_size``, ``random_state``, ...) pass through.
    """
    splitter = model_selection.train_test_split
    return splitter(x, y, **kwargs)
# descriptive variables
def gini_index(array):
    """Return the Gini coefficient of a collection of values.

    The input is flattened to 1-d and cast to float; all values are
    treated equally. Negative inputs are shifted so the minimum is zero,
    and a tiny epsilon keeps every value strictly positive (the formula
    cannot handle exact zeros).
    """
    values = np.array(array).flatten().astype(float)
    minimum = np.amin(values)
    if minimum < 0:
        # Shift so that every value is non-negative.
        values -= minimum
    values += np.finfo(float).tiny
    values = np.sort(values)
    n = values.shape[0]
    ranks = np.arange(1, n + 1)  # 1-based rank of each sorted value
    weighted_sum = np.sum((2 * ranks - n - 1) * values)
    return weighted_sum / (n * np.sum(values))
| [
"numpy.amin",
"sklearn.model_selection.train_test_split",
"numpy.sort",
"numpy.sum",
"numpy.array",
"numpy.finfo",
"numpy.arange"
] | [((1274, 1322), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['x', 'y'], {}), '(x, y, **kwargs)\n', (1306, 1322), False, 'from sklearn import model_selection\n'), ((1636, 1650), 'numpy.sort', 'np.sort', (['array'], {}), '(array)\n', (1643, 1650), True, 'import numpy as np\n'), ((1688, 1720), 'numpy.arange', 'np.arange', (['(1)', '(array.shape[0] + 1)'], {}), '(1, array.shape[0] + 1)\n', (1697, 1720), True, 'import numpy as np\n'), ((1487, 1501), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (1494, 1501), True, 'import numpy as np\n'), ((1524, 1538), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (1531, 1538), True, 'import numpy as np\n'), ((1581, 1596), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1589, 1596), True, 'import numpy as np\n'), ((1811, 1846), 'numpy.sum', 'np.sum', (['((2 * index - n - 1) * array)'], {}), '((2 * index - n - 1) * array)\n', (1817, 1846), True, 'import numpy as np\n'), ((1864, 1877), 'numpy.sum', 'np.sum', (['array'], {}), '(array)\n', (1870, 1877), True, 'import numpy as np\n'), ((1440, 1455), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (1448, 1455), True, 'import numpy as np\n')] |
"""Helper functions for finding and plotting a pareto front."""
from os.path import join
from typing import Dict, MutableMapping, Optional
# import sys
# from PyQt5.QtWidgets import QApplication
import numpy as np
from plotly import offline
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from mat_discover.utils.plotting import matplotlibify
def is_pareto_efficient_simple(costs):
    """
    Find the pareto-efficient points.
    :param costs: An (n_points, n_costs) array
    :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
    Fairly fast for many datapoints, less fast for many costs, somewhat readable
    Modified from: https://stackoverflow.com/a/40239615/13697228
    """
    # BUGFIX: use nanmax so NaN entries get replaced by the largest
    # *finite* observed cost; np.max would itself return NaN and leave
    # the subsequent nan_to_num replacement a no-op.
    mx = np.nanmax(costs)
    costs = np.nan_to_num(costs, nan=mx)
    is_efficient = np.ones(costs.shape[0], dtype=bool)
    for i, c in enumerate(costs):
        if is_efficient[i]:
            is_efficient[is_efficient] = np.any(
                costs[is_efficient] < c, axis=1
            )  # Keep any point with a lower cost
            is_efficient[i] = True  # And keep self
    return is_efficient
def get_pareto_ind(proxy, target, reverse_x=True):
    """Get Pareto front indices.
    Parameters
    ----------
    proxy : 1d array
        Chemical uniqueness proxy values (x-axis).
    target : 1d array
        Target property (i.e. performance) values (y-axis).
    reverse_x : bool, optional
        Whether to flip the x direction (i.e. Pareto front seeks
        maximization of target and *minimization* of proxy),
        by default True
    Returns
    -------
    pareto_ind : 2d array
        Pareto front indices.
    """
    # The pareto helper minimizes every column, so negate whichever
    # axes should be maximized instead.
    if reverse_x:
        columns = [proxy, -target]
    else:
        columns = [-proxy, -target]
    efficient_mask = is_pareto_efficient_simple(np.array(columns).T)
    return np.nonzero(efficient_mask)
def pareto_plot(
    df,
    x="neigh_avg_targ",
    y="target",
    color="Peak height",
    x_unit=None,
    y_unit=None,
    color_unit=None,
    hover_data=["formula"],
    fpath=join("figures", "pareto-front"),
    reverse_x=True,
    parity_type="max-of-both",
    pareto_front=True,
    color_continuous_scale=None,
    color_discrete_map=None,
    xrange=None,
    use_plotly_offline: bool = True,
):
    """Generate and save pareto plot for two variables.
    Parameters
    ----------
    df : DataFrame
        Contains relevant variables for pareto plot.
    x : str, optional
        Name of df column to use for x-axis, by default "proxy"
    y : str, optional
        Name of df column to use for y-axis, by default "target"
    color : str, optional
        Name of df column to use for colors, by default "Peak height"
    x_unit, y_unit, color_unit : str, optional
        Units appended to the corresponding axis/colorbar label.
    hover_data : list of str, optional
        Name(s) of df columns to display on hover, by default ["formulas"]
        (NOTE: the mutable default list is never mutated here, so it is safe)
    fpath : str, optional
        Filepath to which to save HTML and PNG. Specify as None if no saving
        is desired, by default "pareto-plot"
    reverse_x : bool, optional
        Whether to reverse the x-axis (i.e. for maximize y and minimize x front)
    parity_type : str, optional
        What kind of parity line to plot: "max-of-both", "max-of-each", or "none"
        (NOTE(review): the code actually tests ``parity_type is not None``,
        so the string "none" still draws a parity line — verify intent)
    pareto_front : bool, optional
        Whether to overlay the Pareto-front trace, by default True.
    color_continuous_scale, color_discrete_map : optional
        Plotly colorscale overrides for the `color` column.
    xrange : tuple, optional
        Explicit x-axis range for the saved matplotlib figure.
    use_plotly_offline: bool
        Whether to use `offline.plot(fig)` instead of `fig.show()`. Set to False for
        Google Colab. By default, True.
    """
    # Build axis labels only for columns that were given units.
    labels: Optional[MutableMapping[str, str]] = {}
    assert labels is not None
    if x_unit is not None:
        labels[x] = f"{x} ({x_unit})"
    if y_unit is not None:
        labels[y] = f"{y} ({y_unit})"
    if color_unit is not None:
        labels[color] = f"{color} ({color_unit})"
    if labels == {}:
        labels = None
    mx = np.max(df[color])
    if color_continuous_scale is None and color_discrete_map is None and mx >= 1:
        if isinstance(df[color].iloc[0], (int, np.integer)):
            # if mx < 24:
            # df.loc[:, color] = df[color].astype(str)
            # color_discrete_map = px.colors.qualitative.Dark24
            # color_discrete_map = sns.color_palette("Spectral", mx + 1, as_cmap=True)
            # scatter_color_kwargs = {"color_continuous_scale": color_discrete_map}
            def mpl_to_plotly(cmap, pl_entries=11, rdigits=2):
                # cmap - colormap
                # pl_entries - int = number of Plotly colorscale entries
                # rdigits - int -=number of digits for rounding scale values
                # Convert a matplotlib colormap into a Plotly colorscale list.
                scale = np.linspace(0, 1, pl_entries)
                colors = (cmap(scale)[:, :3] * 255).astype(np.uint8)
                pl_colorscale = [
                    [round(s, rdigits), f"rgb{tuple(color)}"]
                    for s, color in zip(scale, colors)
                ]
                return pl_colorscale
            nipy_spectral = mpl_to_plotly(
                plt.cm.nipy_spectral, pl_entries=mx + 1, rdigits=3
            )
            scatter_color_kwargs = {
                "color_continuous_scale": nipy_spectral  # px.colors.sequential.Blackbody_r
            }
        elif isinstance(df[color].iloc[0], (float, np.float32, np.float64)):
            scatter_color_kwargs = {}
        elif color_continuous_scale is not None:
            # NOTE(review): this branch is unreachable inside the outer
            # `color_continuous_scale is None` guard — verify intended nesting.
            scatter_color_kwargs = {"color_continuous_scale": color_continuous_scale}
        elif color_discrete_map is not None:
            scatter_color_kwargs = {"color_discrete_sequence": color_discrete_map}
        else:
            scatter_color_kwargs = {}
        # trace order counts 0, 1, 2, ... instead of 0, 1, 10, 11
        df["color_num"] = df[color].astype(int)
        df = df.sort_values("color_num")
    # NOTE(review): if the outer guard above is False, scatter_color_kwargs
    # is never bound and the call below raises NameError — confirm callers.
    fig = px.scatter(
        df,
        x=x,
        y=y,
        color=color,
        labels=labels,
        hover_data=hover_data,
        **scatter_color_kwargs,
    )
    # unpack
    proxy = df[x]
    target = df[y]
    if pareto_front:
        pareto_ind = get_pareto_ind(proxy, target, reverse_x=reverse_x)
        # Add scatter trace with medium sized markers
        sorter = np.flip(np.argsort(target.iloc[pareto_ind]))
        fig.add_scatter(
            mode="lines",
            line={"color": "black", "width": 1, "dash": "dash"},
            x=proxy.iloc[pareto_ind].iloc[sorter],
            y=target.iloc[pareto_ind].iloc[sorter],
            marker_symbol="circle-open",
            marker_size=10,
            hoverinfo="skip",
            name="pareto front",
        )
    else:
        pareto_ind = None
    # parity line
    if parity_type == "max-of-both":
        mx = np.nanmax([proxy, target])
        mx2 = mx
    elif parity_type == "max-of-each":
        mx, mx2 = np.nanmax(proxy), np.nanmax(target)
    if parity_type is not None:
        fig.add_trace(go.Line(x=[0, mx], y=[0, mx2], name="parity"))
    # legend and reversal
    fig.update_layout(legend_orientation="h", legend_y=1.1, legend_yanchor="bottom")
    if reverse_x:
        fig.update_layout(xaxis=dict(autorange="reversed"))
    if use_plotly_offline:
        offline.plot(fig)
    else:
        fig.show()
    if fpath is not None:
        fig.write_html(fpath + ".html")
    # Convert the interactive figure into a static matplotlib version
    # for publication-quality PNG export.
    fig, scale = matplotlibify(fig)
    if xrange is not None:
        fig.update_xaxes(range=xrange)
    # saving
    if fpath is not None:
        fig.write_image(fpath + ".png", scale=scale)
    return fig, pareto_ind
# %% Code Graveyard
# pf_hover_data = df.loc[:, hover_data].iloc[pareto_ind]
# fig.add_scatter(x=proxy[pareto_ind], y=target[pareto_ind])
| [
"plotly.express.scatter",
"numpy.ones",
"plotly.offline.plot",
"os.path.join",
"mat_discover.utils.plotting.matplotlibify",
"numpy.any",
"numpy.max",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"plotly.graph_objects.Line",
"numpy.nanmax",
"numpy.nan_to_num"
] | [((784, 797), 'numpy.max', 'np.max', (['costs'], {}), '(costs)\n', (790, 797), True, 'import numpy as np\n'), ((810, 838), 'numpy.nan_to_num', 'np.nan_to_num', (['costs'], {'nan': 'mx'}), '(costs, nan=mx)\n', (823, 838), True, 'import numpy as np\n'), ((858, 893), 'numpy.ones', 'np.ones', (['costs.shape[0]'], {'dtype': 'bool'}), '(costs.shape[0], dtype=bool)\n', (865, 893), True, 'import numpy as np\n'), ((2122, 2153), 'os.path.join', 'join', (['"""figures"""', '"""pareto-front"""'], {}), "('figures', 'pareto-front')\n", (2126, 2153), False, 'from os.path import join\n'), ((3772, 3789), 'numpy.max', 'np.max', (['df[color]'], {}), '(df[color])\n', (3778, 3789), True, 'import numpy as np\n'), ((5662, 5765), 'plotly.express.scatter', 'px.scatter', (['df'], {'x': 'x', 'y': 'y', 'color': 'color', 'labels': 'labels', 'hover_data': 'hover_data'}), '(df, x=x, y=y, color=color, labels=labels, hover_data=hover_data,\n **scatter_color_kwargs)\n', (5672, 5765), True, 'import plotly.express as px\n'), ((7150, 7168), 'mat_discover.utils.plotting.matplotlibify', 'matplotlibify', (['fig'], {}), '(fig)\n', (7163, 7168), False, 'from mat_discover.utils.plotting import matplotlibify\n'), ((6552, 6578), 'numpy.nanmax', 'np.nanmax', (['[proxy, target]'], {}), '([proxy, target])\n', (6561, 6578), True, 'import numpy as np\n'), ((7018, 7035), 'plotly.offline.plot', 'offline.plot', (['fig'], {}), '(fig)\n', (7030, 7035), False, 'from plotly import offline\n'), ((997, 1036), 'numpy.any', 'np.any', (['(costs[is_efficient] < c)'], {'axis': '(1)'}), '(costs[is_efficient] < c, axis=1)\n', (1003, 1036), True, 'import numpy as np\n'), ((6049, 6084), 'numpy.argsort', 'np.argsort', (['target.iloc[pareto_ind]'], {}), '(target.iloc[pareto_ind])\n', (6059, 6084), True, 'import numpy as np\n'), ((6744, 6789), 'plotly.graph_objects.Line', 'go.Line', ([], {'x': '[0, mx]', 'y': '[0, mx2]', 'name': '"""parity"""'}), "(x=[0, mx], y=[0, mx2], name='parity')\n", (6751, 6789), True, 'import 
plotly.graph_objects as go\n'), ((1896, 1910), 'numpy.array', 'np.array', (['inpt'], {}), '(inpt)\n', (1904, 1910), True, 'import numpy as np\n'), ((4526, 4555), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'pl_entries'], {}), '(0, 1, pl_entries)\n', (4537, 4555), True, 'import numpy as np\n'), ((6653, 6669), 'numpy.nanmax', 'np.nanmax', (['proxy'], {}), '(proxy)\n', (6662, 6669), True, 'import numpy as np\n'), ((6671, 6688), 'numpy.nanmax', 'np.nanmax', (['target'], {}), '(target)\n', (6680, 6688), True, 'import numpy as np\n')] |
#import
from src.project_parameters import ProjectParameters
from DeepLearningTemplate.predict_gui import BasePredictGUI
from src.predict import Predict
from DeepLearningTemplate.data_preparation import AudioLoader, parse_transforms
from tkinter import Button, messagebox
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from playsound import playsound
import tkinter as tk
import gradio as gr
# class
class PredictGUI(BasePredictGUI):
    """GUI front-end for running the anomaly-score predictor on audio files.

    Depending on ``project_parameters.web_interface``, either a Gradio web
    interface or a Tkinter window (open / recognize / play) is launched.
    """
    def __init__(self, project_parameters) -> None:
        # NOTE(review): ('.wav') is a plain string, not a 1-tuple —
        # ('.wav',) may be intended; verify against BasePredictGUI.
        super().__init__(extensions=('.wav'))
        self.predictor = Predict(project_parameters=project_parameters)
        self.classes = project_parameters.classes
        self.loader = AudioLoader(sample_rate=project_parameters.sample_rate)
        self.transform = parse_transforms(
            transforms_config=project_parameters.transforms_config)['predict']
        self.sample_rate = project_parameters.sample_rate
        assert project_parameters.threshold is not None, 'please check the threshold. the threshold value is {}.'.format(
            project_parameters.threshold)
        self.threshold = project_parameters.threshold
        self.web_interface = project_parameters.web_interface
        self.examples = project_parameters.examples if len(
            project_parameters.examples) else None
        # button
        self.play_button = Button(master=self.window,
                                  text='Play',
                                  command=self.play)
        # matplotlib canvas
        # this is Tkinter default background-color
        facecolor = (0.9254760742, 0.9254760742, 0.9254760742)
        figsize = np.array([12, 4]) * project_parameters.in_chans
        self.image_canvas = FigureCanvasTkAgg(Figure(figsize=figsize,
                                                  facecolor=facecolor),
                                           master=self.window)
    def reset_widget(self):
        """Clear the base widgets and wipe the matplotlib figure."""
        super().reset_widget()
        self.image_canvas.figure.clear()
    def display(self):
        """Plot the waveform and spectrogram of the currently open file."""
        waveform = self.loader(path=self.filepath)
        # the transformed sample dimension is (in_chans, freq, time)
        sample = self.transform(waveform)
        sample = sample.cpu().data.numpy()
        # invert the freq axis so that the frequency axis of the spectrogram is displayed correctly
        sample = sample[:, ::-1, :]
        rows, cols = len(sample), 2
        for idx in range(1, rows * cols + 1):
            subplot = self.image_canvas.figure.add_subplot(rows, cols, idx)
            if idx % cols == 1:
                # plot waveform
                subplot.title.set_text(
                    'channel {} waveform'.format((idx - 1) // cols + 1))
                subplot.set_xlabel('time')
                subplot.set_ylabel('amplitude')
                time = np.linspace(
                    0, len(waveform[(idx - 1) // cols]),
                    len(waveform[(idx - 1) // cols])) / self.sample_rate
                subplot.plot(time, waveform[(idx - 1) // cols])
            else:
                # plot spectrogram
                # TODO: display frequency and time.
                subplot.title.set_text(
                    'channel {} spectrogram'.format((idx - 1) // cols + 1))
                subplot.imshow(sample[(idx - 1) // cols])
                subplot.axis('off')
        self.image_canvas.draw()
    def open_file(self):
        """Open a .wav file via the base dialog, then render it."""
        super().open_file()
        self.display()
    def display_output(self, fake_sample):
        """Plot real vs. reconstructed spectrograms and their difference."""
        self.image_canvas.figure.clear()
        waveform = self.loader(path=self.filepath)
        # the transformed sample dimension is (in_chans, freq, time)
        sample = self.transform(waveform)
        # convert the range of the sample to 0~1
        sample = (sample - sample.min()) / (sample.max() - sample.min())
        sample = sample.cpu().data.numpy()
        # invert the freq axis so that the frequency axis of the spectrogram is displayed correctly
        sample = sample[:, ::-1, :]
        # the fake_sample dimension is (1, in_chans, freq, time),
        # so use 0 index to get the first fake_sample
        fake_sample = fake_sample[0][:, ::-1, :]
        diff = np.abs(sample - fake_sample)
        rows, cols = len(sample), 3
        title = ['real', 'fake', 'diff']
        for idx in range(1, rows * cols + 1):
            subplot = self.image_canvas.figure.add_subplot(rows, cols, idx)
            subplot.title.set_text('{} {}'.format(title[(idx - 1) % 3],
                                                  ((idx - 1) // cols + 1)))
            if (idx - 1) % 3 == 0:
                # plot real
                subplot.imshow(sample[(idx - 1) // cols])
            elif (idx - 1) % 3 == 1:
                # plot fake
                subplot.imshow(fake_sample[(idx - 1) // cols])
            elif (idx - 1) % 3 == 2:
                # plot diff
                subplot.imshow(diff[(idx - 1) // cols])
            subplot.axis('off')
        self.image_canvas.draw()
    def recognize(self):
        """Score the open file and show the class decided by the threshold."""
        if self.filepath is not None:
            score, fake_sample = self.predictor.predict(inputs=self.filepath)
            self.display_output(fake_sample=fake_sample)
            score = score.item()  # score is a scalar
            self.predicted_label.config(text='score:\n{}'.format(score))
            self.result_label.config(text=self.classes[int(
                score >= self.threshold)])
        else:
            messagebox.showerror(title='Error!', message='please open a file!')
    def play(self):
        """Play the currently open audio file (blocking)."""
        if self.filepath is not None:
            playsound(sound=self.filepath, block=True)
        else:
            messagebox.showerror(title='Error!', message='please open a file!')
    def inference(self, inputs):
        """Gradio callback: score a file path and format the result string."""
        score, fake_sample = self.predictor.predict(inputs=inputs)
        score = score.item()  # score is a scalar
        result = f'threshold: {self.threshold}\nscore: {score}\nresult: {self.classes[int(score >= self.threshold)]}'
        return result
    def run(self):
        """Start either the Gradio web UI or the Tkinter main loop."""
        if self.web_interface:
            gr.Interface(
                fn=self.inference,
                inputs=gr.inputs.Audio(source='microphone', type='filepath'),
                outputs=gr.outputs.Textbox(),
                examples=self.examples,
                interpretation="default").launch(share=True, inbrowser=True)
        else:
            # NW
            self.open_file_button.pack(anchor=tk.NW)
            self.recognize_button.pack(anchor=tk.NW)
            self.play_button.pack(anchor=tk.NW)
            # N
            self.filepath_label.pack(anchor=tk.N)
            self.image_canvas.get_tk_widget().pack(anchor=tk.N)
            self.predicted_label.pack(anchor=tk.N)
            self.result_label.pack(anchor=tk.N)
            # run
            super().run()
# Script entry point: parse configuration and launch the prediction GUI.
if __name__ == '__main__':
    # project parameters
    project_parameters = ProjectParameters().parse()
    # launch prediction gui
    PredictGUI(project_parameters=project_parameters).run()
| [
"numpy.abs",
"tkinter.messagebox.showerror",
"src.predict.Predict",
"matplotlib.figure.Figure",
"src.project_parameters.ProjectParameters",
"DeepLearningTemplate.data_preparation.parse_transforms",
"playsound.playsound",
"tkinter.Button",
"numpy.array",
"gradio.inputs.Audio",
"gradio.outputs.Tex... | [((632, 678), 'src.predict.Predict', 'Predict', ([], {'project_parameters': 'project_parameters'}), '(project_parameters=project_parameters)\n', (639, 678), False, 'from src.predict import Predict\n'), ((751, 806), 'DeepLearningTemplate.data_preparation.AudioLoader', 'AudioLoader', ([], {'sample_rate': 'project_parameters.sample_rate'}), '(sample_rate=project_parameters.sample_rate)\n', (762, 806), False, 'from DeepLearningTemplate.data_preparation import AudioLoader, parse_transforms\n'), ((1423, 1481), 'tkinter.Button', 'Button', ([], {'master': 'self.window', 'text': '"""Play"""', 'command': 'self.play'}), "(master=self.window, text='Play', command=self.play)\n", (1429, 1481), False, 'from tkinter import Button, messagebox\n'), ((4249, 4277), 'numpy.abs', 'np.abs', (['(sample - fake_sample)'], {}), '(sample - fake_sample)\n', (4255, 4277), True, 'import numpy as np\n'), ((832, 904), 'DeepLearningTemplate.data_preparation.parse_transforms', 'parse_transforms', ([], {'transforms_config': 'project_parameters.transforms_config'}), '(transforms_config=project_parameters.transforms_config)\n', (848, 904), False, 'from DeepLearningTemplate.data_preparation import AudioLoader, parse_transforms\n'), ((1711, 1728), 'numpy.array', 'np.array', (['[12, 4]'], {}), '([12, 4])\n', (1719, 1728), True, 'import numpy as np\n'), ((1805, 1849), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'figsize', 'facecolor': 'facecolor'}), '(figsize=figsize, facecolor=facecolor)\n', (1811, 1849), False, 'from matplotlib.figure import Figure\n'), ((5515, 5582), 'tkinter.messagebox.showerror', 'messagebox.showerror', ([], {'title': '"""Error!"""', 'message': '"""please open a file!"""'}), "(title='Error!', message='please open a file!')\n", (5535, 5582), False, 'from tkinter import Button, messagebox\n'), ((5654, 5696), 'playsound.playsound', 'playsound', ([], {'sound': 'self.filepath', 'block': '(True)'}), '(sound=self.filepath, block=True)\n', (5663, 5696), 
False, 'from playsound import playsound\n'), ((5723, 5790), 'tkinter.messagebox.showerror', 'messagebox.showerror', ([], {'title': '"""Error!"""', 'message': '"""please open a file!"""'}), "(title='Error!', message='please open a file!')\n", (5743, 5790), False, 'from tkinter import Button, messagebox\n'), ((6974, 6993), 'src.project_parameters.ProjectParameters', 'ProjectParameters', ([], {}), '()\n', (6991, 6993), False, 'from src.project_parameters import ProjectParameters\n'), ((6217, 6270), 'gradio.inputs.Audio', 'gr.inputs.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""'}), "(source='microphone', type='filepath')\n", (6232, 6270), True, 'import gradio as gr\n'), ((6296, 6316), 'gradio.outputs.Textbox', 'gr.outputs.Textbox', ([], {}), '()\n', (6314, 6316), True, 'import gradio as gr\n')] |
#!/usr/bin/env python3
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import rospy
import rospkg
from visualization_msgs.msg import Marker, MarkerArray
from sensor_msgs.msg import Image
from statek_ml.msg import DynamicDetectionArray
from statek_ml.msg import DynamicDetection
import tensorflow as tf
import sys
import numpy as np
import time
import math
import threading
# Load _get_legs function from petranet's preprocessing.
r = rospkg.RosPack()
path = r.get_path("statek_ml")
path += "/models/lidar_net"
sys.path.insert(1, path)
get_legs = __import__("dataset_processing")
get_legs = get_legs._get_legs
data_path = r.get_path("statek_ml") + "/data"
class Filter():
    """Exponential smoothing filter for a fixed-length velocity vector."""
    def __init__(self, velocities, alpha):
        # alpha in [0, 1]: higher values track new measurements faster.
        self.alpha = alpha
        self.velocities = np.array(velocities)
    def update(self, velocities):
        # Blend the new measurement into the running estimate and
        # return the smoothed values as a plain tuple.
        measured = np.array(velocities)
        delta = measured - self.velocities
        self.velocities = self.velocities + self.alpha * delta
        return tuple(self.velocities.tolist())
class LegKalman():
    """2D Kalman filter tracking a single leg position (y, x).

    The measured velocity acts as the control input and the measured
    position as the observation; the observation matrix H is identity.
    """
    def __init__(self, leg_position, process_variance, measurement_variance):
        # @brief Class constructor.
        # @param leg_position Initial leg position (y, x).
        # @param process_variance Process variance.
        # @param measurement_variance Measurement variance.
        # Parameters (isotropic noise covariances).
        self.Q = np.array([[process_variance, 0], [0, process_variance]])
        self.R = np.array([[measurement_variance, 0], [0, measurement_variance]])
        # Initial conditions.
        y = leg_position[0]
        x = leg_position[1]
        self.a_posteriori_xhat = np.array([[y], [x]])
        self.a_posteriori_P = self.Q
    def update(self, velocity_measurement, position_measurement, dt):
        # @brief Update filter.
        # @param velocity_measurement Velocity measurement (vy, vx) based on
        #        current position measurement and previous estimate.
        # @param position_measurement Current position measurement (y, x).
        # @param dt Time passed since last update.
        # @return Position estimate (y, x).
        vy = velocity_measurement[0]
        vx = velocity_measurement[1]
        y = position_measurement[0]
        x = position_measurement[1]
        u = np.array([[vy], [vx]])
        z = np.array([[y], [x]])
        B = np.array([[dt, 0], [0, dt]])
        # Predict.
        a_priori_xhat = self.a_posteriori_xhat + B @ u
        a_priori_P = self.a_posteriori_P + self.Q
        # Update.
        innovation = z - a_priori_xhat
        innovation_covariance = a_priori_P + self.R
        # BUGFIX: the Kalman gain requires the *inverse* of the innovation
        # covariance (K = P- @ S^-1 with H = identity). The previous code
        # used np.transpose, which is a no-op on this symmetric matrix and
        # left the gain un-normalized.
        K = a_priori_P @ np.linalg.inv(innovation_covariance)
        self.a_posteriori_xhat = a_priori_xhat + K @ innovation
        self.a_posteriori_P = (np.identity(2) - K) @ a_priori_P
        y = self.a_posteriori_xhat[0]
        x = self.a_posteriori_xhat[1]
        return (y[0], x[0])
class Leg():
def __init__(self, leg, forget_time, max_distance, process_variance, measurement_variance, filter_alpha):
# @brief Class constructor.
# @param leg Initial coordinates of the leg in meters [y, x].
# @param forget_time Period of time in seconds in which update() should success at least once.
# If there was no succesfull update in this time period then alive() will return False
# and leg should be removed.
# @param max_distance Maximum allowed euclidean distance in meters between current leg position (from Kalman)
# and leg candidates coordinates. This distance assumes update time of 1 second.
# @param process_variance Process variance for Kalman filter.
# @param measurement_variance Measurement variance for Kalman filter.
# Params.
self.forget_time = forget_time
self.max_distance = max_distance
# First update.
self.vel_filter = Filter((0,0), filter_alpha)
self.filter = LegKalman(leg, process_variance, measurement_variance)
self.last_update = time.time()
self.estimated_velocity = (0,0)
self.estimated_position = leg
def _get_distance(self, candidate):
# @brief Get candidate's distance from current leg's estimated position.
# @param candidate Candidate to check (y, x).
# @return Distance from candidate to current's position.
return math.sqrt((candidate[0] - self.estimated_position[0])**2 + (candidate[1] - self.estimated_position[1])**2)
def _find_candidate(self, leg_candidates):
# @brief Find most promising leg candidate.
# @param leg_candidates List od leg candidates [(y,x), (y,x), ...].
# @return Tuple in which the first element is found distance and second is it's index.
distances = [self._get_distance(candidate) for candidate in leg_candidates]
min_distance = min(distances)
return (min_distance, distances.index(min_distance))
def update(self, leg_candidates):
# @brief Update leg object.
# @param List of legs detected from PeTraNet prediction [(y, x), (y, x), ...].
# If the update succeeds then this function will remove most promising
# leg candidate.
# @param dt Time passed since last call to update()
# @return True on success.
passed = time.time() - self.last_update
if passed == 0:
return leg_candidates
if len(leg_candidates) == 0:
return leg_candidates
# Check whether any candidate is in range.
candidate_used = False
candidate_distance, candidate_index = self._find_candidate(leg_candidates)
if candidate_distance <= self.max_distance:
# Use position from network.
measured_position = tuple(leg_candidates[candidate_index])
candidate_used = True
else:
# Estimate position based on previous velocity.
measured_position = tuple((np.asarray(self.estimated_position) + passed * np.asarray(self.estimated_velocity)).tolist())
measured_velocity_y = (measured_position[0] - self.estimated_position[0]) / passed
measured_velocity_x = (measured_position[1] - self.estimated_position[1]) / passed
measured_velocity = (measured_velocity_y, measured_velocity_x)
# Position estimation.
previous_position_estimate = self.estimated_position
self.estimated_position = self.filter.update(measured_velocity, measured_position, passed)
# A posteriori velocity.
estimated_velocity_y = (self.estimated_position[0] - previous_position_estimate[0]) / passed
estimated_velocity_x = (self.estimated_position[1] - previous_position_estimate[1]) / passed
self.estimated_velocity = self.vel_filter.update((estimated_velocity_y, estimated_velocity_x))
if candidate_used:
del leg_candidates[candidate_index]
self.last_update = time.time()
return leg_candidates
def alive(self):
# @brief Whether this object should be destroyed.
# @return True if leg is still alive.
return (time.time() - self.last_update) < self.forget_time
def position(self):
# @brief Get this leg's estimated position.
# @return tuple of (y, x) coordinates.
return self.estimated_position
def velocity(self):
# @brief Get this leg's estimated velocity.
# @return tuple of (y, x) velocities.
return self.estimated_velocity
def load_network():
# @brief Load network and it's preprocessing function.
# @return (network model, preprocessing function).
model_path = rospkg.RosPack().get_path("statek_ml")
model_path += "/models/lidar_net"
sys.path.insert(1, model_path)
rospy.logwarn("Loading model...")
net = tf.saved_model.load(model_path + "/trained_model_optimized", tags=[tag_constants.SERVING])
rospy.logwarn("Getting signatures...")
net = net.signatures[tf.compat.v1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
rospy.logwarn("Converting to constants...")
net = convert_variables_to_constants_v2(net)
rospy.logwarn("Done!")
preprocessor = __import__("dataset_processing")
preprocessor = preprocessor.preprocess_input_sample
return (net, preprocessor)
# Can't get cv_bridge to work on python3 and Jeston Nano D:
def msg_to_img(msg):
# @brief Convert message to image suitable for preprocessing.
# @param msg Message to convert.
# @return Converted message.
frame = np.array(list(msg.data))
frame = np.reshape(frame, (1, msg.height, msg.width))
return frame
def prediction_to_cv(prediction):
# @brief Convert prediction (0 - 1 float) to cv matrix (0 - 255 uint8).
# @param prediction Prediction to convert.
# @return Converted prediction.
prediction = prediction[0].numpy()
prediction = np.round(prediction)
prediction *= 255.0
prediction = prediction.astype(np.uint8)
prediction = np.squeeze(prediction)
return prediction
def to_meters(leg, height_pixels, width_pixels, height, width):
# @brief convert coordinates from pixel to meters.
# @param leg Leg to convert [y, x].
# @param height_pixels Image's height in pixels.
# @param width_pixels Image's width in pixels.
# @param height Image's height in meters.
# @param width Image's width in meters.
y = float(leg[0])
x = float(leg[1])
y -= height_pixels / 2.0
x -= width_pixels / 2.0
y *= (height / height_pixels)
x *= (width / width_pixels)
return [y, x]
def to_meters_arr(legs, height_pixels, width_pixels, height, width):
return [to_meters(leg, height_pixels, width_pixels, height, width) for leg in legs]
def get_rviz_marker(position, id):
marker = Marker()
marker.header.frame_id = "statek/laser/laser_link"
marker.header.stamp = rospy.Time.now()
marker.ns = "legs"
marker.id = id
marker.action = Marker.ADD
marker.type = Marker.SPHERE
marker.lifetime.secs = 3
marker.pose.position.x = position[0]
marker.pose.position.y = position[1]
marker.pose.position.z = 0.15
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1
marker.color.r = 1
marker.color.b = 0
marker.color.g = 1
return marker
def get_leg_marker(leg):
marker = DynamicDetection()
marker.type = DynamicDetection.LEG
p = leg.position()
v = leg.velocity()
marker.position[0] = p[0]
marker.position[1] = p[1]
marker.position[2] = 0.15
marker.velocity[0] = v[0]
marker.velocity[1] = v[1]
marker.velocity[2] = 0
return marker
def scan_callback(new_msg):
global msg, msg_lock, msg_arrived
with msg_lock:
msg = new_msg
msg_arrived = True
# Limit memory usage as it's shared with CPU.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
tf.config.experimental.set_virtual_device_configuration(
gpu_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=1024)])
# Init ROS.
rospy.init_node('lidar_fconv_dataset_collector')
statek_name = rospy.get_param("~statek_name", "statek")
rviz_publisher = rospy.Publisher("/" + statek_name + "/laser/hoomans", MarkerArray, queue_size=1)
marker_publisher = rospy.Publisher("/" + statek_name + "/laser/dynamic_detections", DynamicDetectionArray, queue_size=1)
msg = None
msg_lock = threading.Lock()
msg_arrived = False
legs = []
width_meters = rospy.get_param("~width_meters", 5.12)
height_meters = rospy.get_param("~height_meters", 5.12)
width_pixels = rospy.get_param("~width_pixels", 256)
height_pixels = rospy.get_param("~height_pixels", 256)
fps = rospy.get_param("~fps", 15)
filter_alpha = rospy.get_param("~filter_alpha", 0.09)
forget_time = rospy.get_param("~leg_forget_time", 0.3)
leg_hysteresis = rospy.get_param("~leg_hysteresis", 0.4)
max_distance = rospy.get_param("~candidate_max_distance", 0.75)
process_variance = rospy.get_param("~leg_process_variance", 0.01)
measurement_variance = rospy.get_param("~leg_measurement_variance", 0.01)
net, preprocessor = load_network()
# Init lidar image subscriber.
rospy.Subscriber("/" + statek_name + "/laser/scan_img", Image, scan_callback, queue_size=1, buff_size=65536*2)
rate = rospy.Rate(fps)
while not rospy.is_shutdown():
with msg_lock:
if msg_arrived == False:
rate.sleep()
continue
# Preprocess data.
with msg_lock:
frame = msg_to_img(msg)
msg_arrived = False
frame = preprocessor(frame)
# Make prediction.
prediction = net(frame)
prediction = prediction_to_cv(prediction)
# Extract legs.
leg_candidates = get_legs(prediction)
if len(leg_candidates) == 0:
rate.sleep()
leg_candidates = to_meters_arr(leg_candidates,
height_pixels,width_pixels,
height_meters, width_meters)
# Update legs.
cntr = 0
rviz_markers = MarkerArray()
leg_markers = DynamicDetectionArray()
leg_markers.header.frame_id = "statek/laser/laser_link"
for leg in legs:
leg_candidates = leg.update(leg_candidates)
if math.hypot(leg.velocity()[0], leg.velocity()[1]) > leg_hysteresis:
leg_markers.detections.append(get_leg_marker(leg))
rviz_markers.markers.append(get_rviz_marker(leg.position(), cntr))
cntr+=1
rviz_publisher.publish(rviz_markers)
marker_publisher.publish(leg_markers)
# Remove dead legs.
legs = [leg for leg in legs if leg.alive()]
# Generate new legs from candidates that were not consumed by already
# existing legs.
for candidate in leg_candidates:
legs.append(Leg(candidate, forget_time, max_distance,
process_variance, measurement_variance, filter_alpha))
rate.sleep() | [
"sys.path.insert",
"visualization_msgs.msg.Marker",
"rospy.logwarn",
"visualization_msgs.msg.MarkerArray",
"rospy.init_node",
"math.sqrt",
"numpy.array",
"rospy.Rate",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.saved_model.load",
"numpy.resh... | [((538, 554), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (552, 554), False, 'import rospkg\n'), ((614, 638), 'sys.path.insert', 'sys.path.insert', (['(1)', 'path'], {}), '(1, path)\n', (629, 638), False, 'import sys\n'), ((10941, 10992), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (10985, 10992), True, 'import tensorflow as tf\n'), ((10993, 11055), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu_devices[0]', '(True)'], {}), '(gpu_devices[0], True)\n', (11033, 11055), True, 'import tensorflow as tf\n'), ((11254, 11302), 'rospy.init_node', 'rospy.init_node', (['"""lidar_fconv_dataset_collector"""'], {}), "('lidar_fconv_dataset_collector')\n", (11269, 11302), False, 'import rospy\n'), ((11317, 11358), 'rospy.get_param', 'rospy.get_param', (['"""~statek_name"""', '"""statek"""'], {}), "('~statek_name', 'statek')\n", (11332, 11358), False, 'import rospy\n'), ((11378, 11463), 'rospy.Publisher', 'rospy.Publisher', (["('/' + statek_name + '/laser/hoomans')", 'MarkerArray'], {'queue_size': '(1)'}), "('/' + statek_name + '/laser/hoomans', MarkerArray, queue_size=1\n )\n", (11393, 11463), False, 'import rospy\n'), ((11478, 11583), 'rospy.Publisher', 'rospy.Publisher', (["('/' + statek_name + '/laser/dynamic_detections')", 'DynamicDetectionArray'], {'queue_size': '(1)'}), "('/' + statek_name + '/laser/dynamic_detections',\n DynamicDetectionArray, queue_size=1)\n", (11493, 11583), False, 'import rospy\n'), ((11603, 11619), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (11617, 11619), False, 'import threading\n'), ((11665, 11703), 'rospy.get_param', 'rospy.get_param', (['"""~width_meters"""', '(5.12)'], {}), "('~width_meters', 5.12)\n", (11680, 11703), False, 'import rospy\n'), ((11720, 11759), 'rospy.get_param', 'rospy.get_param', (['"""~height_meters"""', '(5.12)'], {}), "('~height_meters', 
5.12)\n", (11735, 11759), False, 'import rospy\n'), ((11775, 11812), 'rospy.get_param', 'rospy.get_param', (['"""~width_pixels"""', '(256)'], {}), "('~width_pixels', 256)\n", (11790, 11812), False, 'import rospy\n'), ((11829, 11867), 'rospy.get_param', 'rospy.get_param', (['"""~height_pixels"""', '(256)'], {}), "('~height_pixels', 256)\n", (11844, 11867), False, 'import rospy\n'), ((11874, 11901), 'rospy.get_param', 'rospy.get_param', (['"""~fps"""', '(15)'], {}), "('~fps', 15)\n", (11889, 11901), False, 'import rospy\n'), ((11917, 11955), 'rospy.get_param', 'rospy.get_param', (['"""~filter_alpha"""', '(0.09)'], {}), "('~filter_alpha', 0.09)\n", (11932, 11955), False, 'import rospy\n'), ((11970, 12010), 'rospy.get_param', 'rospy.get_param', (['"""~leg_forget_time"""', '(0.3)'], {}), "('~leg_forget_time', 0.3)\n", (11985, 12010), False, 'import rospy\n'), ((12028, 12067), 'rospy.get_param', 'rospy.get_param', (['"""~leg_hysteresis"""', '(0.4)'], {}), "('~leg_hysteresis', 0.4)\n", (12043, 12067), False, 'import rospy\n'), ((12083, 12131), 'rospy.get_param', 'rospy.get_param', (['"""~candidate_max_distance"""', '(0.75)'], {}), "('~candidate_max_distance', 0.75)\n", (12098, 12131), False, 'import rospy\n'), ((12151, 12197), 'rospy.get_param', 'rospy.get_param', (['"""~leg_process_variance"""', '(0.01)'], {}), "('~leg_process_variance', 0.01)\n", (12166, 12197), False, 'import rospy\n'), ((12221, 12271), 'rospy.get_param', 'rospy.get_param', (['"""~leg_measurement_variance"""', '(0.01)'], {}), "('~leg_measurement_variance', 0.01)\n", (12236, 12271), False, 'import rospy\n'), ((12339, 12455), 'rospy.Subscriber', 'rospy.Subscriber', (["('/' + statek_name + '/laser/scan_img')", 'Image', 'scan_callback'], {'queue_size': '(1)', 'buff_size': '(65536 * 2)'}), "('/' + statek_name + '/laser/scan_img', Image,\n scan_callback, queue_size=1, buff_size=65536 * 2)\n", (12355, 12455), False, 'import rospy\n'), ((12458, 12473), 'rospy.Rate', 'rospy.Rate', (['fps'], {}), '(fps)\n', 
(12468, 12473), False, 'import rospy\n'), ((7797, 7827), 'sys.path.insert', 'sys.path.insert', (['(1)', 'model_path'], {}), '(1, model_path)\n', (7812, 7827), False, 'import sys\n'), ((7833, 7866), 'rospy.logwarn', 'rospy.logwarn', (['"""Loading model..."""'], {}), "('Loading model...')\n", (7846, 7866), False, 'import rospy\n'), ((7877, 7972), 'tensorflow.saved_model.load', 'tf.saved_model.load', (["(model_path + '/trained_model_optimized')"], {'tags': '[tag_constants.SERVING]'}), "(model_path + '/trained_model_optimized', tags=[\n tag_constants.SERVING])\n", (7896, 7972), True, 'import tensorflow as tf\n'), ((7972, 8010), 'rospy.logwarn', 'rospy.logwarn', (['"""Getting signatures..."""'], {}), "('Getting signatures...')\n", (7985, 8010), False, 'import rospy\n'), ((8120, 8163), 'rospy.logwarn', 'rospy.logwarn', (['"""Converting to constants..."""'], {}), "('Converting to constants...')\n", (8133, 8163), False, 'import rospy\n'), ((8174, 8212), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_variables_to_constants_v2', (['net'], {}), '(net)\n', (8207, 8212), False, 'from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n'), ((8217, 8239), 'rospy.logwarn', 'rospy.logwarn', (['"""Done!"""'], {}), "('Done!')\n", (8230, 8239), False, 'import rospy\n'), ((8649, 8694), 'numpy.reshape', 'np.reshape', (['frame', '(1, msg.height, msg.width)'], {}), '(frame, (1, msg.height, msg.width))\n', (8659, 8694), True, 'import numpy as np\n'), ((8963, 8983), 'numpy.round', 'np.round', (['prediction'], {}), '(prediction)\n', (8971, 8983), True, 'import numpy as np\n'), ((9070, 9092), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (9080, 9092), True, 'import numpy as np\n'), ((9865, 9873), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (9871, 9873), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((9955, 9971), 'rospy.Time.now', 
'rospy.Time.now', ([], {}), '()\n', (9969, 9971), False, 'import rospy\n'), ((10446, 10464), 'statek_ml.msg.DynamicDetection', 'DynamicDetection', ([], {}), '()\n', (10462, 10464), False, 'from statek_ml.msg import DynamicDetection\n'), ((12484, 12503), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (12501, 12503), False, 'import rospy\n'), ((13187, 13200), 'visualization_msgs.msg.MarkerArray', 'MarkerArray', ([], {}), '()\n', (13198, 13200), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((13219, 13242), 'statek_ml.msg.DynamicDetectionArray', 'DynamicDetectionArray', ([], {}), '()\n', (13240, 13242), False, 'from statek_ml.msg import DynamicDetectionArray\n'), ((873, 893), 'numpy.array', 'np.array', (['velocities'], {}), '(velocities)\n', (881, 893), True, 'import numpy as np\n'), ((950, 970), 'numpy.array', 'np.array', (['velocities'], {}), '(velocities)\n', (958, 970), True, 'import numpy as np\n'), ((1456, 1512), 'numpy.array', 'np.array', (['[[process_variance, 0], [0, process_variance]]'], {}), '([[process_variance, 0], [0, process_variance]])\n', (1464, 1512), True, 'import numpy as np\n'), ((1530, 1594), 'numpy.array', 'np.array', (['[[measurement_variance, 0], [0, measurement_variance]]'], {}), '([[measurement_variance, 0], [0, measurement_variance]])\n', (1538, 1594), True, 'import numpy as np\n'), ((1714, 1734), 'numpy.array', 'np.array', (['[[y], [x]]'], {}), '([[y], [x]])\n', (1722, 1734), True, 'import numpy as np\n'), ((2342, 2364), 'numpy.array', 'np.array', (['[[vy], [vx]]'], {}), '([[vy], [vx]])\n', (2350, 2364), True, 'import numpy as np\n'), ((2377, 2397), 'numpy.array', 'np.array', (['[[y], [x]]'], {}), '([[y], [x]])\n', (2385, 2397), True, 'import numpy as np\n'), ((2410, 2438), 'numpy.array', 'np.array', (['[[dt, 0], [0, dt]]'], {}), '([[dt, 0], [0, dt]])\n', (2418, 2438), True, 'import numpy as np\n'), ((4077, 4088), 'time.time', 'time.time', ([], {}), '()\n', (4086, 4088), False, 'import time\n'), ((4424, 
4538), 'math.sqrt', 'math.sqrt', (['((candidate[0] - self.estimated_position[0]) ** 2 + (candidate[1] - self.\n estimated_position[1]) ** 2)'], {}), '((candidate[0] - self.estimated_position[0]) ** 2 + (candidate[1] -\n self.estimated_position[1]) ** 2)\n', (4433, 4538), False, 'import math\n'), ((11154, 11222), 'tensorflow.config.experimental.VirtualDeviceConfiguration', 'tf.config.experimental.VirtualDeviceConfiguration', ([], {'memory_limit': '(1024)'}), '(memory_limit=1024)\n', (11203, 11222), True, 'import tensorflow as tf\n'), ((2699, 2734), 'numpy.transpose', 'np.transpose', (['innovation_covariance'], {}), '(innovation_covariance)\n', (2711, 2734), True, 'import numpy as np\n'), ((5368, 5379), 'time.time', 'time.time', ([], {}), '()\n', (5377, 5379), False, 'import time\n'), ((7000, 7011), 'time.time', 'time.time', ([], {}), '()\n', (7009, 7011), False, 'import time\n'), ((7716, 7732), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (7730, 7732), False, 'import rospkg\n'), ((2830, 2844), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2841, 2844), True, 'import numpy as np\n'), ((7185, 7196), 'time.time', 'time.time', ([], {}), '()\n', (7194, 7196), False, 'import time\n'), ((6006, 6041), 'numpy.asarray', 'np.asarray', (['self.estimated_position'], {}), '(self.estimated_position)\n', (6016, 6041), True, 'import numpy as np\n'), ((6053, 6088), 'numpy.asarray', 'np.asarray', (['self.estimated_velocity'], {}), '(self.estimated_velocity)\n', (6063, 6088), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 01:45:23 2018
@author: JAE
"""
import torch
import torch.multiprocessing as mp
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from collections import namedtuple, deque
import gym
import copy
import sys
hidden_dim = 128
batch_size = 8
burn_in_length = 10
sequences_length = 20
#feature_state = (1,84,84)
feature_state = (4)
feature_reward = 1
feature_action = 2
n_step = 5
gamma_t = 0.997
IMG_GET_RENDER = False
#IMG_GET_RENDER = True
def obs_preproc(x):
if IMG_GET_RENDER == False :
x = torch.from_numpy(np.resize(x, feature_state)).float().unsqueeze(0)
return x
x= np.dot(x, np.array([[0.299, 0.587, 0.114]]).T)
x= np.reshape(x, (1, x.shape[1], x.shape[0]))
x = torch.from_numpy(np.resize(x, feature_state)).float().unsqueeze(0)/255
return x
class DQN(torch.nn.Module):
def __init__(self, state_shape, action_dim):
super(DQN, self).__init__()
self.input_shape = state_shape
self.action_dim = action_dim
# self.front = torch.nn.Sequential(torch.nn.Conv2d(state_shape[0], 64, 5, stride=3),
# torch.nn.ReLU(),
# torch.nn.Conv2d(64, 64, 3, stride=3),
# torch.nn.ReLU(),
# torch.nn.Conv2d(64, 64, 3, stride=1),
# torch.nn.ReLU())
self.size = 2
self.front = torch.nn.Sequential(torch.nn.Linear( 4, 64*self.size*self.size),
torch.nn.ReLU())
# self.lstm = torch.nn.LSTMCell(input_size=64*self.size*self.size , hidden_size=hidden_dim)
self.value_stream_layer = torch.nn.Sequential(torch.nn.Linear( 64*self.size*self.size, hidden_dim),
torch.nn.ReLU())
self.advantage_stream_layer = torch.nn.Sequential(torch.nn.Linear(64*self.size*self.size, hidden_dim),
torch.nn.ReLU())
self.value = torch.nn.Linear(hidden_dim, 1)
self.advantage = torch.nn.Linear(hidden_dim, action_dim)
def forward(self, x):
#assert x.shape == self.input_shape, "Input shape should be:" + str(self.input_shape) + "Got:" + str(x.shape)
x = self.front(x)
x = x.view(-1, 64 * self.size * self.size)
value = self.value(self.value_stream_layer(x))
advantage = self.advantage(self.advantage_stream_layer(x))
action_value = value + (advantage - (1/self.action_dim) * advantage.sum() )
return action_value
def actor_process(rank, shared_state, shared_queue, max_frame = 1 ):
print('{} actor process start '.format(rank))
Q_main = DQN(feature_state, feature_action)
Q_main.load_state_dict(shared_state["Q_state"])
# env = gym.make("Breakout-v0")
env = gym.make('CartPole-v0')
policy_epsilon = 0.2*rank+0.05
action = 0
frame = 0
total_reward = []
obs =env.reset()
if IMG_GET_RENDER:
obs =env.render(mode='rgb_array')
ot= obs_preproc(obs)
while frame < max_frame:
for seq in range(sequences_length):
frame+=1
with torch.no_grad():
Qt = Q_main(ot)
#e greedy
if random.random() >= policy_epsilon:
action = torch.argmax(Qt,dim=1).item()
else:
action = random.randint(0,feature_action-1)
ot_1, rt,dt,_ = env.step(action)
if IMG_GET_RENDER:
obs =env.render(mode='rgb_array')
ot_1 = obs_preproc(obs)
gamma_t = 0 if dt else 0.99
shared_queue.put([ot,action,rt,gamma_t,ot_1])
total_reward.append(rt)
ot = ot_1
if dt == True:
obs =env.reset()
if IMG_GET_RENDER:
obs =env.render(mode='rgb_array')
ot = obs_preproc(obs)
if rank == 0:
print('#{} total reward: {}'.format(rank,sum(total_reward)))
total_reward = []
break
if frame % 100 == 0:
Q_main.load_state_dict(shared_state["Q_state"])
print('{} actor process done '.format(rank))
# state, action, reward,gamma ,next_state= map(np.stack,zip(*local_buf))
def learner_process(rank , shared_state, shared_queue, max_frame =1 ):
Q_main = DQN(feature_state, feature_action)
Q_target = DQN(feature_state, feature_action)
Q_main.load_state_dict(shared_state["Q_state"])
Q_target.load_state_dict(shared_state["Q_state"])
value_optimizer = optim.Adam(Q_main.parameters(), lr=0.00001)
global_buf = deque(maxlen = 10000)
frame = 0
i=0
while len(global_buf) <= 100:
global_buf.append(shared_queue.get())
while frame<max_frame:
for i in range(shared_queue.qsize()):
global_buf.append(shared_queue.get())
frame+=1
batch = random.sample(global_buf, batch_size)
state, action, reward, gamma, next_state = map(np.stack, zip(*batch))
st = torch.from_numpy(state).view(batch_size,feature_state[0],feature_state[1],feature_state[2]).float()
at = torch.from_numpy(action).view(batch_size).long()
rt = torch.from_numpy(reward).view(batch_size).float()
gamt = torch.from_numpy(gamma).view(batch_size).float()
st_1 = torch.from_numpy(next_state).view(batch_size,feature_state[0],feature_state[1],feature_state[2]).float()
with torch.no_grad():
exQ = rt + Q_main(st_1).gather(1,torch.argmax(Q_target(st_1),dim=1).view(batch_size,-1)).view(-1) * gamt
Qv = Q_main(st).gather(1,at.view(batch_size,-1)).view(-1)
value_optimizer.zero_grad()
loss = F.mse_loss (Qv, exQ)
# print(loss.item())
loss.backward()
value_optimizer.step()
if frame%100 == 0:
Q_target.load_state_dict(Q_main.state_dict())
shared_state["Q_state"] = Q_main.state_dict()
if __name__ == '__main__':
Q_main = DQN(feature_state, feature_action)
num_processes = 4
manager = mp.Manager()
shared_state = manager.dict()
shared_queue = manager.Queue()
shared_state["Q_state"] = Q_main.state_dict()
actor_process(0, shared_state, shared_queue,100)
learner_process(0, shared_state, shared_queue,2)
# learner_procs = mp.Process(target=learner_process, args=(999, shared_state, shared_queue,10000))
# learner_procs.start()
#
# actor_procs = []
# for i in range(num_processes):
# print(i)
# actor_proc = mp.Process(target=actor_process, args=(i, shared_state, shared_queue,10000))
# actor_proc.start()
# actor_procs.append(actor_proc)
# for act in actor_procs:
# act.join()
# learner_procs.join()
| [
"random.sample",
"torch.nn.functional.mse_loss",
"collections.deque",
"numpy.reshape",
"torch.nn.ReLU",
"random.randint",
"torch.argmax",
"torch.from_numpy",
"numpy.array",
"numpy.resize",
"torch.nn.Linear",
"torch.no_grad",
"random.random",
"gym.make",
"torch.multiprocessing.Manager"
] | [((779, 821), 'numpy.reshape', 'np.reshape', (['x', '(1, x.shape[1], x.shape[0])'], {}), '(x, (1, x.shape[1], x.shape[0]))\n', (789, 821), True, 'import numpy as np\n'), ((3066, 3089), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (3074, 3089), False, 'import gym\n'), ((5055, 5074), 'collections.deque', 'deque', ([], {'maxlen': '(10000)'}), '(maxlen=10000)\n', (5060, 5074), False, 'from collections import namedtuple, deque\n'), ((6604, 6616), 'torch.multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (6614, 6616), True, 'import torch.multiprocessing as mp\n'), ((2222, 2252), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (2237, 2252), False, 'import torch\n'), ((2278, 2317), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_dim', 'action_dim'], {}), '(hidden_dim, action_dim)\n', (2293, 2317), False, 'import torch\n'), ((5351, 5388), 'random.sample', 'random.sample', (['global_buf', 'batch_size'], {}), '(global_buf, batch_size)\n', (5364, 5388), False, 'import random\n'), ((6189, 6208), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Qv', 'exQ'], {}), '(Qv, exQ)\n', (6199, 6208), True, 'import torch.nn.functional as F\n'), ((735, 768), 'numpy.array', 'np.array', (['[[0.299, 0.587, 0.114]]'], {}), '([[0.299, 0.587, 0.114]])\n', (743, 768), True, 'import numpy as np\n'), ((1603, 1649), 'torch.nn.Linear', 'torch.nn.Linear', (['(4)', '(64 * self.size * self.size)'], {}), '(4, 64 * self.size * self.size)\n', (1618, 1649), False, 'import torch\n'), ((1702, 1717), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1715, 1717), False, 'import torch\n'), ((1890, 1945), 'torch.nn.Linear', 'torch.nn.Linear', (['(64 * self.size * self.size)', 'hidden_dim'], {}), '(64 * self.size * self.size, hidden_dim)\n', (1905, 1945), False, 'import torch\n'), ((1998, 2013), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((2073, 2128), 'torch.nn.Linear', 
'torch.nn.Linear', (['(64 * self.size * self.size)', 'hidden_dim'], {}), '(64 * self.size * self.size, hidden_dim)\n', (2088, 2128), False, 'import torch\n'), ((2184, 2199), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2197, 2199), False, 'import torch\n'), ((5911, 5926), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5924, 5926), False, 'import torch\n'), ((3422, 3437), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3435, 3437), False, 'import torch\n'), ((3516, 3531), 'random.random', 'random.random', ([], {}), '()\n', (3529, 3531), False, 'import random\n'), ((3662, 3699), 'random.randint', 'random.randint', (['(0)', '(feature_action - 1)'], {}), '(0, feature_action - 1)\n', (3676, 3699), False, 'import random\n'), ((5480, 5503), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (5496, 5503), False, 'import torch\n'), ((5593, 5617), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (5609, 5617), False, 'import torch\n'), ((5655, 5679), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (5671, 5679), False, 'import torch\n'), ((5720, 5743), 'torch.from_numpy', 'torch.from_numpy', (['gamma'], {}), '(gamma)\n', (5736, 5743), False, 'import torch\n'), ((5784, 5812), 'torch.from_numpy', 'torch.from_numpy', (['next_state'], {}), '(next_state)\n', (5800, 5812), False, 'import torch\n'), ((651, 678), 'numpy.resize', 'np.resize', (['x', 'feature_state'], {}), '(x, feature_state)\n', (660, 678), True, 'import numpy as np\n'), ((847, 874), 'numpy.resize', 'np.resize', (['x', 'feature_state'], {}), '(x, feature_state)\n', (856, 874), True, 'import numpy as np\n'), ((3581, 3604), 'torch.argmax', 'torch.argmax', (['Qt'], {'dim': '(1)'}), '(Qt, dim=1)\n', (3593, 3604), False, 'import torch\n')] |
"""
Credits:
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import numpy as np
import pytest
from eolearn.core import FeatureType
from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask
SUPERPIXEL_FEATURE = FeatureType.MASK_TIMELESS, "SP_FEATURE"
@pytest.mark.parametrize(
    "task, expected_min, expected_max, expected_mean, expected_median",
    (
        [
            SuperpixelSegmentationTask(
                (FeatureType.DATA, "BANDS-S2-L1C"), SUPERPIXEL_FEATURE, scale=100, sigma=0.5, min_size=100
            ),
            0,
            25,
            10.6809,
            11,
        ],
        [
            FelzenszwalbSegmentationTask(
                (FeatureType.DATA_TIMELESS, "MAX_NDVI"), SUPERPIXEL_FEATURE, scale=21, sigma=1.0, min_size=52
            ),
            0,
            22,
            8.5302,
            7,
        ],
        [
            FelzenszwalbSegmentationTask((FeatureType.MASK, "CLM"), SUPERPIXEL_FEATURE, scale=1, sigma=0, min_size=15),
            0,
            171,
            86.46267,
            90,
        ],
        [
            SlicSegmentationTask(
                (FeatureType.DATA, "CLP"),
                SUPERPIXEL_FEATURE,
                n_segments=55,
                compactness=25.0,
                max_num_iter=20,
                sigma=0.8,
            ),
            0,
            48,
            24.6072,
            25,
        ],
        [
            SlicSegmentationTask(
                (FeatureType.MASK_TIMELESS, "RANDOM_UINT8"),
                SUPERPIXEL_FEATURE,
                n_segments=231,
                compactness=15.0,
                max_num_iter=7,
                sigma=0.2,
            ),
            0,
            195,
            100.1844,
            101,
        ],
    ),
)
def test_superpixel(test_eopatch, task, expected_min, expected_max, expected_mean, expected_median):
    """Execute *task* on the test EOPatch and verify superpixel-map statistics."""
    task.execute(test_eopatch)
    result = test_eopatch[SUPERPIXEL_FEATURE]
    assert result.dtype == np.int64, "Expected int64 dtype for result"
    # NOTE(review): passed positionally, ``delta`` sets pytest.approx's
    # *relative* tolerance (``rel``), not an absolute one — confirm intended.
    delta = 1e-3
    assert np.amin(result) == pytest.approx(expected_min, delta), "Minimum values do not match."
    # Fixed typo in failure message ("Maxmum" -> "Maximum").
    assert np.amax(result) == pytest.approx(expected_max, delta), "Maximum values do not match."
    assert np.mean(result) == pytest.approx(expected_mean, delta), "Mean values do not match."
    assert np.median(result) == pytest.approx(expected_median, delta), "Median values do not match."
| [
"pytest.approx",
"numpy.mean",
"numpy.median",
"numpy.amin",
"eolearn.geometry.SuperpixelSegmentationTask",
"eolearn.geometry.FelzenszwalbSegmentationTask",
"numpy.amax",
"eolearn.geometry.SlicSegmentationTask"
] | [((2346, 2361), 'numpy.amin', 'np.amin', (['result'], {}), '(result)\n', (2353, 2361), True, 'import numpy as np\n'), ((2365, 2399), 'pytest.approx', 'pytest.approx', (['expected_min', 'delta'], {}), '(expected_min, delta)\n', (2378, 2399), False, 'import pytest\n'), ((2443, 2458), 'numpy.amax', 'np.amax', (['result'], {}), '(result)\n', (2450, 2458), True, 'import numpy as np\n'), ((2462, 2496), 'pytest.approx', 'pytest.approx', (['expected_max', 'delta'], {}), '(expected_max, delta)\n', (2475, 2496), False, 'import pytest\n'), ((2539, 2554), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (2546, 2554), True, 'import numpy as np\n'), ((2558, 2593), 'pytest.approx', 'pytest.approx', (['expected_mean', 'delta'], {}), '(expected_mean, delta)\n', (2571, 2593), False, 'import pytest\n'), ((2634, 2651), 'numpy.median', 'np.median', (['result'], {}), '(result)\n', (2643, 2651), True, 'import numpy as np\n'), ((2655, 2692), 'pytest.approx', 'pytest.approx', (['expected_median', 'delta'], {}), '(expected_median, delta)\n', (2668, 2692), False, 'import pytest\n'), ((658, 780), 'eolearn.geometry.SuperpixelSegmentationTask', 'SuperpixelSegmentationTask', (["(FeatureType.DATA, 'BANDS-S2-L1C')", 'SUPERPIXEL_FEATURE'], {'scale': '(100)', 'sigma': '(0.5)', 'min_size': '(100)'}), "((FeatureType.DATA, 'BANDS-S2-L1C'),\n SUPERPIXEL_FEATURE, scale=100, sigma=0.5, min_size=100)\n", (684, 780), False, 'from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask\n'), ((909, 1036), 'eolearn.geometry.FelzenszwalbSegmentationTask', 'FelzenszwalbSegmentationTask', (["(FeatureType.DATA_TIMELESS, 'MAX_NDVI')", 'SUPERPIXEL_FEATURE'], {'scale': '(21)', 'sigma': '(1.0)', 'min_size': '(52)'}), "((FeatureType.DATA_TIMELESS, 'MAX_NDVI'),\n SUPERPIXEL_FEATURE, scale=21, sigma=1.0, min_size=52)\n", (937, 1036), False, 'from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask\n'), ((1163, 
1273), 'eolearn.geometry.FelzenszwalbSegmentationTask', 'FelzenszwalbSegmentationTask', (["(FeatureType.MASK, 'CLM')", 'SUPERPIXEL_FEATURE'], {'scale': '(1)', 'sigma': '(0)', 'min_size': '(15)'}), "((FeatureType.MASK, 'CLM'), SUPERPIXEL_FEATURE,\n scale=1, sigma=0, min_size=15)\n", (1191, 1273), False, 'from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask\n'), ((1374, 1506), 'eolearn.geometry.SlicSegmentationTask', 'SlicSegmentationTask', (["(FeatureType.DATA, 'CLP')", 'SUPERPIXEL_FEATURE'], {'n_segments': '(55)', 'compactness': '(25.0)', 'max_num_iter': '(20)', 'sigma': '(0.8)'}), "((FeatureType.DATA, 'CLP'), SUPERPIXEL_FEATURE,\n n_segments=55, compactness=25.0, max_num_iter=20, sigma=0.8)\n", (1394, 1506), False, 'from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask\n'), ((1716, 1870), 'eolearn.geometry.SlicSegmentationTask', 'SlicSegmentationTask', (["(FeatureType.MASK_TIMELESS, 'RANDOM_UINT8')", 'SUPERPIXEL_FEATURE'], {'n_segments': '(231)', 'compactness': '(15.0)', 'max_num_iter': '(7)', 'sigma': '(0.2)'}), "((FeatureType.MASK_TIMELESS, 'RANDOM_UINT8'),\n SUPERPIXEL_FEATURE, n_segments=231, compactness=15.0, max_num_iter=7,\n sigma=0.2)\n", (1736, 1870), False, 'from eolearn.geometry import SuperpixelSegmentationTask, FelzenszwalbSegmentationTask, SlicSegmentationTask\n')] |
import numpy as np
from mpi4py import MPI
from tqdm import tqdm
from ..prob_calculators import get_p_cos1_given_xeff_q_a1, get_p_a1_given_xeff_q
# Module-level MPI context, shared by every helper below.
comm = MPI.COMM_WORLD
pe = comm.Get_rank()  # identity of this process (process element, a.k.a. rank)
nprocs = comm.Get_size()  # number of processes in the communicator
root = nprocs - 1  # special process responsible for administrative work (gather target)
def mpi_p_cos1_given_a1_calc(cos1s, a1s, xeff, q, mcmc_n):
    """Build a flat cache of p(cos1 | a1, xeff, q) over the (a1, cos1) grid.

    Returns a dict of three parallel 1D arrays ('a1', 'cos1', 'p_cos1'),
    one entry per grid point.
    """
    data = dict(a1=np.array([]), cos1=np.array([]), p_cos1=np.array([]))
    for a1 in tqdm(a1s, desc=f"Building p_cos1 cache"):
        # NOTE(review): mpi_calc_p_cos1 returns the gathered array only on the
        # root rank and None elsewhere — presumably this cache is only consumed
        # on root; confirm with callers.
        p_cos1_for_a1 = mpi_calc_p_cos1(cos1s, a1, xeff, q, mcmc_n)
        # Repeat the current a1 once per cos1 sample so the arrays stay aligned.
        data['a1'] = np.append(data['a1'], np.array([a1 for _ in cos1s]))
        data['cos1'] = np.append(data['cos1'], cos1s)
        data['p_cos1'] = np.append(data['p_cos1'], p_cos1_for_a1)
    return data
def mpi_calc(func, x, *args):
    """Evaluate ``func`` element-wise over ``x``, distributed across MPI ranks.

    Each rank processes its own contiguous slice of ``x``; results are
    gathered on the root rank.  Returns the concatenated result array on
    the root process and ``None`` on every other rank.
    """
    values = np.array(x)
    # Partition the index range into one contiguous chunk per process and
    # keep the chunk belonging to this rank.
    my_indices = np.array_split(np.arange(0, len(values)), nprocs)[pe]
    my_results = [func(value, *args) for value in values[my_indices]]
    # Collect every rank's local result list on the root process.
    gathered = comm.gather(my_results, root=root)
    if pe != root:
        return None
    return np.concatenate(gathered)
def mpi_calc_p_cos1(cos1s, a1, xeff, q, mcmc_n):
    """MPI-parallel p(cos1 | xeff, q, a1) over the ``cos1s`` grid (root only)."""
    return mpi_calc(get_p_cos1_given_xeff_q_a1, cos1s, a1, xeff, q, mcmc_n)
def mpi_calc_p_a1(a1s, xeff, q, mcmc_n):
    """MPI-parallel p(a1 | xeff, q) over the ``a1s`` grid (root only)."""
    return mpi_calc(get_p_a1_given_xeff_q, a1s, xeff, q, mcmc_n)
| [
"tqdm.tqdm",
"numpy.append",
"numpy.array",
"numpy.concatenate",
"numpy.arange"
] | [((527, 567), 'tqdm.tqdm', 'tqdm', (['a1s'], {'desc': 'f"""Building p_cos1 cache"""'}), "(a1s, desc=f'Building p_cos1 cache')\n", (531, 567), False, 'from tqdm import tqdm\n'), ((887, 898), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (895, 898), True, 'import numpy as np\n'), ((734, 764), 'numpy.append', 'np.append', (["data['cos1']", 'cos1s'], {}), "(data['cos1'], cos1s)\n", (743, 764), True, 'import numpy as np\n'), ((790, 830), 'numpy.append', 'np.append', (["data['p_cos1']", 'p_cos1_for_a1'], {}), "(data['p_cos1'], p_cos1_for_a1)\n", (799, 830), True, 'import numpy as np\n'), ((1431, 1455), 'numpy.concatenate', 'np.concatenate', (['res_list'], {}), '(res_list)\n', (1445, 1455), True, 'import numpy as np\n'), ((459, 471), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (467, 471), True, 'import numpy as np\n'), ((478, 490), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (486, 490), True, 'import numpy as np\n'), ((499, 511), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (507, 511), True, 'import numpy as np\n'), ((680, 709), 'numpy.array', 'np.array', (['[a1 for _ in cos1s]'], {}), '([a1 for _ in cos1s])\n', (688, 709), True, 'import numpy as np\n'), ((1045, 1067), 'numpy.arange', 'np.arange', (['(0)', 'n_global'], {}), '(0, n_global)\n', (1054, 1067), True, 'import numpy as np\n')] |
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
A plugin to graph the pixel values along a straight line bisecting a cube.
**Plugin Type: Local**
``LineProfile`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
.. warning::
There are no restrictions to what axes can be chosen.
As such, the plot can be meaningless.
The ``LineProfile`` plugin is used for multidimensional (i.e., 3D or higher)
images. It plots the values of the pixels at the current cursor
position through the selected axis; or if a region is selected, it plots the
mean in each frame. This can be used to create normal spectral line profiles.
A marker is placed at the data point of the currently displayed frame.
Displayed X-axis is constructed using ``CRVAL*``, ``CDELT*``, ``CRPIX*``,
``CTYPE*``, and ``CUNIT*`` keywords from FITS header. If any of the keywords
are unavailabled, the axis falls back to ``NAXIS*`` values instead.
Displayed Y-axis is constructed using ``BTYPE`` and ``BUNIT``. If they are not
available, it simply labels pixel values as "Signal".
To use this plugin:
1. Select an axis.
2. Pick a point or draw a region using the cursor.
3. Use ``MultiDim`` to change step values of axes, if applicable.
"""
import numpy as np
from ginga import GingaPlugin
from ginga.gw import Widgets
try:
from ginga.gw import Plot
from ginga.util import plots
have_mpl = True
except ImportError:
have_mpl = False
__all__ = ['LineProfile']
class LineProfile(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Initialize plugin state, user settings and the mark-drawing canvas."""
        super(LineProfile, self).__init__(fv, fitsimage)
        self.image = None
        self.layertag = 'lineprofile-canvas'
        self.selected_axis = None
        self.hbox_axes = None
        # Load persisted user preferences for mark appearance.
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_LineProfile')
        self.settings.add_defaults(mark_type='point', mark_radius=10,
                                   mark_style='cross', mark_color='cyan')
        self.settings.load(onError='silent')
        # For "marks" feature
        self._new_mark = 'New'
        self.mark_types = ['point', 'circle', 'ellipse', 'box', 'rectangle',
                           'polygon']
        self.mark_type = self.settings.get('mark_type', 'point')
        self.mark_radius = self.settings.get('mark_radius', 10)  # point only
        self.mark_style = self.settings.get('mark_style', 'cross')  # point only
        self.mark_color = self.settings.get('mark_color', 'cyan')
        self.marks = [self._new_mark]
        self.mark_selected = self._new_mark
        self.mark_index = 0
        self.y_lbl = ''
        self.x_lbl = ''
        self._split_sizes = [400, 500]
        # Private drawing canvas: holds mark shapes, supports draw/edit/move.
        self.dc = self.fv.get_draw_classes()
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype(self.mark_type, color=self.mark_color,
                            linestyle='dash')
        canvas.set_callback('draw-event', self.draw_cb)
        canvas.set_callback('edit-event', self.edit_cb)
        canvas.add_draw_mode('move', down=self.buttondown_cb,
                             move=self.motion_cb, up=self.buttonup_cb)
        canvas.set_draw_mode('draw')
        canvas.register_for_cursor_drawing(self.fitsimage)
        canvas.set_surface(self.fitsimage)
        self.canvas = canvas
        self.gui_up = False
    def build_gui(self, container):
        """Build the plugin GUI: plot pane, axis checkboxes and mark controls."""
        if not have_mpl:
            raise ImportError('Install matplotlib to use this plugin')
        top = Widgets.VBox()
        top.set_border_width(4)
        box, sw, orientation = Widgets.get_oriented_box(container)
        box.set_border_width(4)
        box.set_spacing(2)
        paned = Widgets.Splitter(orientation=orientation)
        self.w.splitter = paned
        # Matplotlib plot pane; a twin top axis shows raw pixel indices.
        self.plot = plots.Plot(logger=self.logger,
                               width=400, height=400)
        ax = self.plot.add_axis()
        ax.grid(True)
        self._ax2 = self.plot.ax.twiny()
        w = Plot.PlotWidget(self.plot)
        w.resize(400, 400)
        paned.add_widget(Widgets.hadjust(w, orientation))
        captions = (('Plot All', 'checkbutton'), )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.plot_all.set_state(False)
        b.plot_all.add_callback('activated', lambda *args: self.redraw_mark())
        b.plot_all.set_tooltip("Plot all marks")
        box.add_widget(w, stretch=0)
        # Axis checkboxes are (re)populated later by build_axes().
        fr = Widgets.Frame("Axes controls")
        self.hbox_axes = Widgets.HBox()
        self.hbox_axes.set_border_width(4)
        self.hbox_axes.set_spacing(1)
        fr.set_widget(self.hbox_axes)
        box.add_widget(fr, stretch=0)
        captions = (('marks', 'combobox',
                     'New Mark Type:', 'label', 'Mark Type', 'combobox'),
                    ('Pan to mark', 'button'),
                    ('Delete', 'button', 'Delete All', 'button'))
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        # control for selecting a mark
        cbox2 = b.marks
        for tag in self.marks:
            cbox2.append_text(tag)
        cbox2.show_text(self.mark_selected)
        cbox2.add_callback('activated', self.mark_select_cb)
        self.w.marks = cbox2
        cbox2.set_tooltip("Select a mark")
        # control for selecting mark type
        cbox2 = b.mark_type
        for tag in self.mark_types:
            cbox2.append_text(tag)
        self.w.marks_type = cbox2
        cbox2.set_index(self.mark_types.index(self.mark_type))
        cbox2.add_callback('activated', self.set_marksdrawtype_cb)
        cbox2.set_tooltip("Choose the mark type to draw")
        b.pan_to_mark.add_callback('activated', self.pan2mark_cb)
        b.pan_to_mark.set_tooltip("Pan follows selected mark")
        b.delete.add_callback('activated', self.clear_mark_cb)
        b.delete.set_tooltip("Delete selected mark")
        b.delete_all.add_callback('activated', self.clear_all_cb)
        b.delete_all.set_tooltip("Clear all marks")
        vbox2 = Widgets.VBox()
        vbox2.add_widget(w, stretch=0)
        # Move/Draw/Edit radio buttons mirror the canvas draw mode.
        mode = self.canvas.get_draw_mode()
        captions = (('Move', 'radiobutton', 'Draw', 'radiobutton',
                     'Edit', 'radiobutton'), )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.move.set_state(mode == 'move')
        b.move.add_callback(
            'activated', lambda w, val: self.set_mode_cb('move', val))
        b.move.set_tooltip("Choose this to position marks")
        self.w.btn_move = b.move
        b.draw.set_state(mode == 'draw')
        b.draw.add_callback(
            'activated', lambda w, val: self.set_mode_cb('draw', val))
        b.draw.set_tooltip("Choose this to draw a new mark")
        self.w.btn_draw = b.draw
        b.edit.set_state(mode == 'edit')
        b.edit.add_callback(
            'activated', lambda w, val: self.set_mode_cb('edit', val))
        b.edit.set_tooltip("Choose this to edit a mark")
        self.w.btn_edit = b.edit
        vbox2.add_widget(w, stretch=0)
        fr = Widgets.Frame("Mark controls")
        fr.set_widget(vbox2)
        box.add_widget(fr, stretch=0)
        box.add_widget(Widgets.Label(''), stretch=1)
        paned.add_widget(sw)
        paned.set_sizes(self._split_sizes)
        top.add_widget(paned, stretch=5)
        # A button box that is always visible at the bottom
        btns = Widgets.HBox()
        btns.set_border_width(4)
        btns.set_spacing(3)
        # Add a close button for the convenience of the user
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btn = Widgets.Button("Help")
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)
        # Add our GUI to the container
        container.add_widget(top, stretch=1)
        self.gui_up = True
        self.select_mark(self._new_mark)
        self.build_axes()
    def build_axes(self):
        """(Re)create one NAXIS checkbox per image axis; disable them for 2D."""
        self.selected_axis = None
        if (not self.gui_up) or (self.hbox_axes is None):
            return
        self.hbox_axes.remove_all()
        self.clear_plot()
        image = self.fitsimage.get_image()
        if image is not None:
            # Add Checkbox widgets.
            # `image.naxispath` returns only the multidimensional (>2) axes,
            # so the total axis count is nx + 2 (the two image axes).
            nx = len(image.naxispath)
            maxi = nx + 2
            for i in range(1, maxi + 1):
                chkbox = Widgets.CheckBox('NAXIS{}'.format(i))
                self.hbox_axes.add_widget(chkbox)
                # Disable axes for 2D images — there is nothing to plot along.
                if nx <= 0:
                    self.selected_axis = None
                    chkbox.set_enabled(False)
                    continue
                # Wire the checkbox to axis selection.
                self.axes_callback_handler(chkbox, i)
            # Add filler so the checkboxes stay left-aligned.
            self.hbox_axes.add_widget(Widgets.Label(''), stretch=1)
        else:
            self.hbox_axes.add_widget(Widgets.Label('No NAXIS info'))
    def axes_callback_handler(self, chkbox, pos):
        """Wire *chkbox* so toggling it (de)selects axis number *pos*."""
        chkbox.add_callback('activated',
                            lambda w, tf: self.axis_toggle_cb(w, tf, pos))
def axis_toggle_cb(self, w, tf, pos):
children = self.hbox_axes.get_children()
# Deactivate previously selected axis
if self.selected_axis is not None:
children[self.selected_axis - 1].set_state(False)
# Check if the old axis has been clicked
if pos == self.selected_axis:
self.selected_axis = None
self.clear_plot()
else:
self.selected_axis = pos
children[pos - 1].set_state(tf)
if self.gui_up:
self.redraw_mark()
def redo(self):
# Get image being shown
image = self.fitsimage.get_image()
if image is None:
return
if self.image != image:
self.image = image
self.build_axes()
self.redraw_mark()
    def _plot(self, tags):
        """Plot pixel values along the selected axis for each mark in *tags*.

        Point marks (or cuts along the two image axes) plot the single pixel
        under the mark center; region marks plot the per-frame mean inside
        the region.  The frame currently displayed is highlighted.
        """
        self.clear_plot()
        if self.selected_axis is None:
            return
        mddata = self.image.get_mddata()  # ..., z2, z1, y, x
        naxes = mddata.ndim
        # Convert 1-based FITS axis number to a numpy axis index.
        i_sel = abs(self.selected_axis - naxes)
        i_x = naxes - 1
        i_y = naxes - 2
        # Also sets axis labels (x_lbl / y_lbl) as a side effect.
        plot_x_axis_data = self.get_axis(self.selected_axis)
        # Image may lack the required keywords, or some trouble
        # building the axis.
        if plot_x_axis_data is None:
            return
        is_surface_cut = i_sel in (i_x, i_y)
        plotted_first = False
        for tag in tags:
            if tag == self._new_mark:
                continue
            obj = self.canvas.get_object_by_tag(tag)
            if hasattr(obj, 'objects'):
                obj = obj.objects[0]
            axes_slice = self.image.revnaxis + [0, 0]
            # Cutting through surface ignores drawn shape but uses its center.
            # A line through higher dim uses the same algorithm.
            if is_surface_cut or obj.kind == 'point':
                xcen, ycen = obj.get_center_pt()
                # Build N-dim slice
                axes_slice[i_x] = int(round(xcen))
                axes_slice[i_y] = int(round(ycen))
                axes_slice[i_sel] = slice(None, None, None)
                try:
                    plot_y_axis_data = mddata[tuple(axes_slice)]
                except IndexError:
                    continue
            # TODO: Add more stats choices? Only calc mean for now.
            # Do some stats of data in selected region.
            else:
                # Collapse to 3D cube
                if naxes > 3:
                    for j in (i_x, i_y, i_sel):
                        axes_slice[j] = slice(None, None, None)
                    data = mddata[tuple(axes_slice)]  # z, y, x
                else:
                    data = mddata
                # Mask is 2D only (True = enclosed)
                mask = self.image.get_shape_mask(obj)
                try:
                    plot_y_axis_data = [data[i][mask].mean()
                                        for i in range(data.shape[0])]
                except IndexError:
                    continue
            # If few enough data points, add marker
            if len(plot_y_axis_data) <= 10:
                marker = 'x'
            else:
                marker = None
            if not plotted_first:
                lines = self.plot.plot(
                    plot_x_axis_data, plot_y_axis_data, marker=marker,
                    label=tag, xtitle=self.x_lbl, ytitle=self.y_lbl)
                plotted_first = True
            else:  # Overplot
                lines = self.plot.ax.plot(
                    plot_x_axis_data, plot_y_axis_data, marker=marker,
                    label=tag)
            # Highlight data point from active slice.
            if not is_surface_cut:
                i = self.image.revnaxis[i_sel]
                self.plot.ax.plot(
                    plot_x_axis_data[i], plot_y_axis_data[i], marker='o',
                    ls='', color=lines[0].get_color())
        if not plotted_first:  # Nothing was plotted
            return
        # https://github.com/matplotlib/matplotlib/issues/3633/
        ax2 = self._ax2
        ax2.patch.set_visible(False)
        # Top axis to show pixel location (index) across X, derived by
        # inverting the WCS linear transform set up in get_axis().
        ax2.cla()
        xx1, xx2 = self.plot.ax.get_xlim()
        ax2.set_xlim((xx1 - self._crval) / self._cdelt + self._crpix - 1,
                     (xx2 - self._crval) / self._cdelt + self._crpix - 1)
        ax2.set_xlabel('Index')
        self.plot.ax.legend(loc='best')
        self.plot.draw()
    def get_axis(self, i):
        """Build physical axis values for FITS axis *i*; also set plot labels.

        Uses CRVAL/CDELT/CRPIX (falling back to 0/1/1 if missing) to map
        pixel indices to world values; CTYPE/CUNIT label the X axis and
        BTYPE/BUNIT label the Y axis.  Returns the axis array, or None on
        any failure (the error is logged and shown to the user).
        """
        try:
            naxis_s = 'NAXIS{}'.format(i)
            naxis_i = self.image.get_keyword(naxis_s)
            self.x_lbl = self.image.get_keyword('CTYPE{}'.format(i), naxis_s)
            try:
                kwds = ['CRVAL{}'.format(i), 'CDELT{}'.format(i),
                        'CRPIX{}'.format(i)]
                crval_i, cdelt_i, crpix_i = self.image.get_keywords_list(*kwds)
            except KeyError as e:
                # Missing WCS keywords: fall back to raw pixel indexing.
                self.logger.error("Missing FITS keyword: {}".format(str(e)))
                crval_i = 0
                cdelt_i = 1
                crpix_i = 1
            # CRPIX is 1-based in FITS, hence the (crpix - 1) shift.
            n = np.arange(naxis_i) - (crpix_i - 1)
            axis = crval_i + n * cdelt_i
            # Cached for the secondary index axis in _plot().
            self._crval = crval_i
            self._cdelt = cdelt_i
            self._crpix = crpix_i
            units = self.image.get_keyword('CUNIT{}'.format(i), None)
            if units is not None:
                self.x_lbl += (' ({})'.format(units))
            # Get pixel value info from header
            self.y_lbl = self.image.get_keyword('BTYPE', 'Signal')
            bunit = self.image.get_keyword('BUNIT', None)
            if bunit is not None:
                self.y_lbl += (' ({})'.format(bunit))
        except Exception as e:
            errmsg = "Error loading axis {}: {}".format(i, str(e))
            self.logger.error(errmsg)
            self.fv.show_error(errmsg)
        else:
            return axis
def clear_plot(self):
self.plot.clear()
self.plot.fig.canvas.draw()
# MARK FEATURE LOGIC #
    def buttondown_cb(self, canvas, event, data_x, data_y, viewer):
        """Button-press in 'move' mode: delegate to the motion handler."""
        return self.motion_cb(canvas, event, data_x, data_y, viewer)
def motion_cb(self, canvas, event, data_x, data_y, viewer):
if self.mark_selected == self._new_mark:
return True
obj = self.canvas.get_object_by_tag(self.mark_selected)
# Assume first element of this compound object is the reference obj
obj = obj.objects[0]
obj.move_to(data_x, data_y)
canvas.redraw(whence=3)
# self.redraw_mark() # Uncomment if you want drag_update
return True
def buttonup_cb(self, canvas, event, data_x, data_y, viewer):
if self.mark_selected == self._new_mark:
return True
obj = self.canvas.get_object_by_tag(self.mark_selected)
# Assume first element of this compound object is the reference obj
obj = obj.objects[0]
obj.move_to(data_x, data_y)
self.redraw_mark()
return True
    def add_mark(self, obj):
        """Register drawn shape *obj* as a mark.

        A compound object (shape + numeric label) is placed on the canvas.
        When the "New" pseudo-mark is selected a fresh tag is allocated;
        otherwise the currently selected mark is replaced in place.
        """
        self.logger.debug("Setting mark of type {}".format(obj.kind))
        # Adding a new mark, so use a new tag.
        if self.mark_selected == self._new_mark:
            draw_new = True
            self.mark_index += 1
            idx = self.mark_index
        # Replace existing mark (to support old-style drawing).
        else:
            draw_new = False
            try:
                idx = int(self.mark_selected.replace('mark', ''))
            except ValueError as e:
                self.logger.error(str(e))
                return
        obj.color = self.mark_color
        obj.linestyle = 'solid'
        if obj.kind == 'point':
            obj.radius = self.mark_radius
            obj.style = self.mark_style
        args = [obj]
        # Small numeric label anchored to the shape.
        text_obj = self.dc.Text(4, 4, '{}'.format(idx), color=self.mark_color,
                                coord='offset', ref_obj=obj)
        args.append(text_obj)
        cobj = self.dc.CompoundObject(*args)
        cobj.set_data(count=idx)
        tag = 'mark{}'.format(idx)
        self.canvas.delete_object_by_tag(tag)
        self.canvas.add(cobj, tag=tag)
        if draw_new:
            self.marks.append(tag)
            self.w.marks.append_text(tag)
            self.select_mark(tag)
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
canvas.delete_object_by_tag(tag)
if obj.kind not in self.mark_types:
return True
# Disable plotting for 2D images
image = self.fitsimage.get_image()
if image is None or len(image.naxispath) < 1:
return
self.add_mark(obj)
    def edit_cb(self, canvas, obj):
        """Canvas edit-event: replot after a mark is edited."""
        self.redraw_mark()
        return True
def edit_select_marks(self):
if self.mark_selected != self._new_mark:
obj = self.canvas.get_object_by_tag(self.mark_selected)
# drill down to reference shape
if hasattr(obj, 'objects'):
obj = obj.objects[0]
self.canvas.edit_select(obj)
else:
self.canvas.clear_selected()
self.canvas.update_canvas()
def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_marks()
return True
def set_mode(self, mode):
self.canvas.set_draw_mode(mode)
self.w.btn_move.set_state(mode == 'move')
self.w.btn_draw.set_state(mode == 'draw')
self.w.btn_edit.set_state(mode == 'edit')
    def select_mark(self, tag):
        """Make *tag* the active mark and sync widget/mode enabled state."""
        try:
            obj = self.canvas.get_object_by_tag(self.mark_selected)
        except Exception:  # old object may have been deleted
            pass
        else:
            # drill down to reference shape
            if hasattr(obj, 'objects'):
                obj = obj.objects[0]
        self.mark_selected = tag
        self.w.marks.show_text(tag)
        none_left = len(self.marks) < 2
        # "New" selected, or no real marks remain: only drawing makes sense.
        if (tag == self._new_mark) or none_left:
            if none_left:
                self.w.delete_all.set_enabled(False)
            self.w.delete.set_enabled(False)
            self.w.pan_to_mark.set_enabled(False)
            self.w.btn_move.set_enabled(False)
            self.w.btn_draw.set_enabled(True)
            self.w.btn_edit.set_enabled(False)
            self.set_mode('draw')
        else:
            self.w.delete_all.set_enabled(True)
            self.w.delete.set_enabled(True)
            self.w.pan_to_mark.set_enabled(True)
            self.w.btn_move.set_enabled(True)
            self.w.btn_draw.set_enabled(False)
            self.w.btn_edit.set_enabled(True)
            if self.w.btn_edit.get_state():
                self.edit_select_marks()
            mode = self.canvas.get_draw_mode()
            if mode == 'draw':
                self.set_mode('move')
        self.redraw_mark()
def redraw_mark(self):
plot_all = self.w.plot_all.get_state()
if plot_all:
self._plot([tag for tag in self.marks if tag != self._new_mark])
elif self.mark_selected != self._new_mark:
self._plot([self.mark_selected])
else:
self.clear_plot()
def mark_select_cb(self, w, index):
tag = self.marks[index]
self.select_mark(tag)
    def set_marksdrawtype_cb(self, w, index):
        """Combobox callback: set the shape used when drawing new marks."""
        self.mark_type = self.mark_types[index]
        self.canvas.set_drawtype(self.mark_type, color=self.mark_color,
                                 linestyle='dash')
def pan2mark_cb(self, w):
if self.mark_selected == self._new_mark:
return
obj = self.canvas.get_object_by_tag(self.mark_selected)
# drill down to reference shape
if hasattr(obj, 'objects'):
obj = obj.objects[0]
if obj.kind not in self.mark_types:
return
x, y = obj.get_center_pt()
self.fitsimage.panset_xy(x, y)
self.canvas.redraw(whence=3)
def clear_mark_cb(self, w):
tag = self.mark_selected
if tag == self._new_mark:
return
self.canvas.delete_object_by_tag(tag)
self.w.marks.delete_alpha(tag)
self.marks.remove(tag)
idx = len(self.marks) - 1
tag = self.marks[idx]
self.select_mark(tag)
# plot cleared in redraw_mark() if no more cuts
self.redraw_mark()
    def clear_all_cb(self, w):
        """Delete every mark and reset the selector to the "New" entry."""
        self.canvas.delete_all_objects()
        self.w.marks.clear()
        self.marks = [self._new_mark]
        self.mark_selected = self._new_mark
        self.w.marks.append_text(self._new_mark)
        self.select_mark(self._new_mark)
        # plot cleared in redraw_mark() if no more cuts
        self.redraw_mark()
# GENERAL PLUGIN MANAGEMENT #
    def close(self):
        """Ask the shell to stop this local plugin instance."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True
    def start(self):
        """Install the plugin's canvas layer (if needed) and resume."""
        # insert layer if it is not already
        p_canvas = self.fitsimage.get_canvas()
        try:
            p_canvas.get_object_by_tag(self.layertag)
        except KeyError:
            # Add canvas layer
            p_canvas.add(self.canvas, tag=self.layertag)
        self.resume()
    def pause(self):
        """Disable canvas interaction while the plugin is paused."""
        self.canvas.ui_set_active(False)
def resume(self):
# turn off any mode user may be in
self.modes_off()
self.canvas.ui_set_active(True)
self.fv.show_status("Mark a point or region and choose axis")
self.redo()
def stop(self):
self.gui_up = False
self._split_sizes = self.w.splitter.get_sizes()
# remove the canvas from the image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except Exception:
pass
# Don't hang on to current image
self.image = None
self.fv.show_status("")
    def __str__(self):
        """Return the canonical plugin name used by the shell."""
        return 'lineprofile'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example  # noqa
# __doc__ is None when Python runs with -OO (docstrings stripped).
if __doc__ is not None:
    __doc__ += generate_cfg_example('plugin_LineProfile', package='ginga')
# END
| [
"ginga.gw.Widgets.build_info",
"ginga.gw.Widgets.Button",
"ginga.gw.Widgets.hadjust",
"ginga.gw.Widgets.Splitter",
"ginga.gw.Widgets.HBox",
"ginga.gw.Widgets.get_oriented_box",
"ginga.util.toolbox.generate_cfg_example",
"ginga.util.plots.Plot",
"ginga.gw.Widgets.Label",
"ginga.gw.Plot.PlotWidget",... | [((23868, 23927), 'ginga.util.toolbox.generate_cfg_example', 'generate_cfg_example', (['"""plugin_LineProfile"""'], {'package': '"""ginga"""'}), "('plugin_LineProfile', package='ginga')\n", (23888, 23927), False, 'from ginga.util.toolbox import generate_cfg_example\n'), ((3671, 3685), 'ginga.gw.Widgets.VBox', 'Widgets.VBox', ([], {}), '()\n', (3683, 3685), False, 'from ginga.gw import Widgets\n'), ((3750, 3785), 'ginga.gw.Widgets.get_oriented_box', 'Widgets.get_oriented_box', (['container'], {}), '(container)\n', (3774, 3785), False, 'from ginga.gw import Widgets\n'), ((3862, 3903), 'ginga.gw.Widgets.Splitter', 'Widgets.Splitter', ([], {'orientation': 'orientation'}), '(orientation=orientation)\n', (3878, 3903), False, 'from ginga.gw import Widgets\n'), ((3957, 4010), 'ginga.util.plots.Plot', 'plots.Plot', ([], {'logger': 'self.logger', 'width': '(400)', 'height': '(400)'}), '(logger=self.logger, width=400, height=400)\n', (3967, 4010), False, 'from ginga.util import plots\n'), ((4152, 4178), 'ginga.gw.Plot.PlotWidget', 'Plot.PlotWidget', (['self.plot'], {}), '(self.plot)\n', (4167, 4178), False, 'from ginga.gw import Plot\n'), ((4331, 4384), 'ginga.gw.Widgets.build_info', 'Widgets.build_info', (['captions'], {'orientation': 'orientation'}), '(captions, orientation=orientation)\n', (4349, 4384), False, 'from ginga.gw import Widgets\n'), ((4627, 4657), 'ginga.gw.Widgets.Frame', 'Widgets.Frame', (['"""Axes controls"""'], {}), "('Axes controls')\n", (4640, 4657), False, 'from ginga.gw import Widgets\n'), ((4683, 4697), 'ginga.gw.Widgets.HBox', 'Widgets.HBox', ([], {}), '()\n', (4695, 4697), False, 'from ginga.gw import Widgets\n'), ((5101, 5154), 'ginga.gw.Widgets.build_info', 'Widgets.build_info', (['captions'], {'orientation': 'orientation'}), '(captions, orientation=orientation)\n', (5119, 5154), False, 'from ginga.gw import Widgets\n'), ((6234, 6248), 'ginga.gw.Widgets.VBox', 'Widgets.VBox', ([], {}), '()\n', (6246, 6248), False, 
'from ginga.gw import Widgets\n'), ((6461, 6514), 'ginga.gw.Widgets.build_info', 'Widgets.build_info', (['captions'], {'orientation': 'orientation'}), '(captions, orientation=orientation)\n', (6479, 6514), False, 'from ginga.gw import Widgets\n'), ((7297, 7327), 'ginga.gw.Widgets.Frame', 'Widgets.Frame', (['"""Mark controls"""'], {}), "('Mark controls')\n", (7310, 7327), False, 'from ginga.gw import Widgets\n'), ((7639, 7653), 'ginga.gw.Widgets.HBox', 'Widgets.HBox', ([], {}), '()\n', (7651, 7653), False, 'from ginga.gw import Widgets\n'), ((7791, 7814), 'ginga.gw.Widgets.Button', 'Widgets.Button', (['"""Close"""'], {}), "('Close')\n", (7805, 7814), False, 'from ginga.gw import Widgets\n'), ((7931, 7953), 'ginga.gw.Widgets.Button', 'Widgets.Button', (['"""Help"""'], {}), "('Help')\n", (7945, 7953), False, 'from ginga.gw import Widgets\n'), ((4231, 4262), 'ginga.gw.Widgets.hadjust', 'Widgets.hadjust', (['w', 'orientation'], {}), '(w, orientation)\n', (4246, 4262), False, 'from ginga.gw import Widgets\n'), ((7419, 7436), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['""""""'], {}), "('')\n", (7432, 7436), False, 'from ginga.gw import Widgets\n'), ((8079, 8096), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['""""""'], {}), "('')\n", (8092, 8096), False, 'from ginga.gw import Widgets\n'), ((9257, 9274), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['""""""'], {}), "('')\n", (9270, 9274), False, 'from ginga.gw import Widgets\n'), ((9339, 9369), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['"""No NAXIS info"""'], {}), "('No NAXIS info')\n", (9352, 9369), False, 'from ginga.gw import Widgets\n'), ((14712, 14730), 'numpy.arange', 'np.arange', (['naxis_i'], {}), '(naxis_i)\n', (14721, 14730), True, 'import numpy as np\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._iupac_sequence import IUPACSequence, _motifs as parent_motifs
class Protein(IUPACSequence):
"""Store protein sequence data and optional associated metadata.
Only characters in the IUPAC protein character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the protein sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC protein character set. If ``False``,
validation will not be performed. Turning off validation will improve
runtime performance. If invalid characters are present, however, there
is **no guarantee that operations performed on the resulting object
will work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC Protein characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
stop_chars
nondegenerate_chars
degenerate_chars
degenerate_map
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import Protein
>>> Protein('PAW')
Protein
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
has stops: False
-----------------------------
0 PAW
Convert lowercase characters to uppercase:
>>> Protein('paW', lowercase=True)
Protein
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
has stops: False
-----------------------------
0 PAW
"""
# Lazily-built cache of the character codes for stop characters.
__stop_codes = None

@classproperty
def _stop_codes(cls):
    """Return (building once) the array of stop-character code points."""
    if cls.__stop_codes is None:
        cls.__stop_codes = np.asarray([ord(c) for c in cls.stop_chars])
    return cls.__stop_codes
@classproperty
@stable(as_of="0.4.0")
@overrides(IUPACSequence)
def alphabet(cls):
    """Return the valid characters: the inherited alphabet plus stops."""
    inherited = super(Protein, cls).alphabet
    return inherited | cls.stop_chars
@classproperty
@stable(as_of="0.4.0")
@overrides(IUPACSequence)
def nondegenerate_chars(cls):
    """Return the set of canonical (non-degenerate) amino acid characters."""
    canonical = "ACDEFGHIKLMNPQRSTVWY"
    return set(canonical)
@classproperty
@stable(as_of="0.4.0")
@overrides(IUPACSequence)
def degenerate_map(cls):
    """Map each degenerate character to the amino acids it may stand for."""
    mapping = {
        "B": set("DN"),
        "Z": set("EQ"),
        "X": set("ACDEFGHIKLMNPQRSTVWY"),
    }
    return mapping
@classproperty
@stable(as_of="0.4.0")
def stop_chars(cls):
    """Return characters representing translation stop codons.

    Returns
    -------
    set
        Characters representing translation stop codons.

    """
    return {'*'}
@property
def _motifs(self):
    # Expose the module-level motif registry for this sequence type.
    return _motifs
@stable(as_of="0.4.0")
def stops(self):
    """Find positions containing stop characters in the protein sequence.

    Returns
    -------
    1D np.ndarray (bool)
        Boolean vector where ``True`` indicates a stop character is present
        at that position in the protein sequence.

    See Also
    --------
    has_stops

    Examples
    --------
    >>> from skbio import Protein
    >>> s = Protein('PAW')
    >>> s.stops()
    array([False, False, False], dtype=bool)
    >>> s = Protein('PAW*E*')
    >>> s.stops()
    array([False, False, False, True, False, True], dtype=bool)

    """
    # np.isin supersedes the deprecated np.in1d; on the 1-D byte array the
    # result is identical.
    return np.isin(self._bytes, self._stop_codes)
@stable(as_of="0.4.0")
def has_stops(self):
    """Determine if the sequence contains one or more stop characters.

    Returns
    -------
    bool
        Indicates whether there are one or more occurrences of stop
        characters in the protein sequence.

    Examples
    --------
    >>> from skbio import Protein
    >>> s = Protein('PAW')
    >>> s.has_stops()
    False
    >>> s = Protein('PAW*E*')
    >>> s.has_stops()
    True

    """
    stop_positions = self.stops()
    return bool(stop_positions.any())
@overrides(IUPACSequence)
def _repr_stats(self):
    """Define custom statistics to display in the sequence's repr."""
    stats = super(Protein, self)._repr_stats()
    stop_entry = ('has stops', '%r' % self.has_stops())
    stats.append(stop_entry)
    return stats
# Start from the motif finders shared with the parent sequence type, then
# register protein-specific motifs below.
_motifs = parent_motifs.copy()

@_motifs("N-glycosylation")
def _motif_nitro_glycosylation(sequence, min_length, ignore):
    """Identify N-glycosylation runs: N, then not-P/X, then S or T, then not-P/X."""
    return sequence.find_with_regex("(N[^PX][ST][^PX])", ignore=ignore)

# Leave this at the bottom -- presumably wires the registered motif finders
# into Protein as its ``find_motifs`` machinery; confirm against parent_motifs.
_motifs.interpolate(Protein, "find_motifs")
| [
"skbio.util._decorator.overrides",
"skbio.util._decorator.stable",
"numpy.in1d"
] | [((3745, 3766), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (3751, 3766), False, 'from skbio.util._decorator import stable\n'), ((3772, 3796), 'skbio.util._decorator.overrides', 'overrides', (['IUPACSequence'], {}), '(IUPACSequence)\n', (3781, 3796), False, 'from skbio.util._decorator import classproperty, overrides\n'), ((3906, 3927), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (3912, 3927), False, 'from skbio.util._decorator import stable\n'), ((3933, 3957), 'skbio.util._decorator.overrides', 'overrides', (['IUPACSequence'], {}), '(IUPACSequence)\n', (3942, 3957), False, 'from skbio.util._decorator import classproperty, overrides\n'), ((4060, 4081), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (4066, 4081), False, 'from skbio.util._decorator import stable\n'), ((4087, 4111), 'skbio.util._decorator.overrides', 'overrides', (['IUPACSequence'], {}), '(IUPACSequence)\n', (4096, 4111), False, 'from skbio.util._decorator import classproperty, overrides\n'), ((4282, 4303), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (4288, 4303), False, 'from skbio.util._decorator import stable\n'), ((4606, 4627), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (4612, 4627), False, 'from skbio.util._decorator import stable\n'), ((5350, 5371), 'skbio.util._decorator.stable', 'stable', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (5356, 5371), False, 'from skbio.util._decorator import stable\n'), ((5914, 5938), 'skbio.util._decorator.overrides', 'overrides', (['IUPACSequence'], {}), '(IUPACSequence)\n', (5923, 5938), False, 'from skbio.util._decorator import classproperty, overrides\n'), ((5305, 5343), 'numpy.in1d', 'np.in1d', (['self._bytes', 'self._stop_codes'], {}), '(self._bytes, self._stop_codes)\n', (5312, 5343), True, 
'import numpy as np\n')] |
import torch as th
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class PolicyValueNetwork(nn.Module):
    """Joint policy/value network over embedded musical context.

    Parameters
    ----------
    lower_size : int
        Vocabulary size of the lower-voice symbols.
    prev_size : int
        Vocabulary size of the previous upper-voice interval symbols.
    policy_size : int
        Number of actions (intervals) the policy head scores.
    hidden_size : int, optional
        Width of the embeddings and shared trunk.
    proj_size : int, optional
        Width of the per-head projection layers.
    """

    def __init__(self, lower_size, prev_size, policy_size, hidden_size=64, proj_size=32):
        super(PolicyValueNetwork, self).__init__()
        self.value_size = 1
        self.lower_size = lower_size
        self.prev_size = prev_size
        self.policy_size = policy_size
        self.hidden_size = hidden_size
        self.proj_size = proj_size
        # One embedding table per lower-voice window position, plus one for
        # the previous upper-voice interval.
        self.embed_l1 = nn.Embedding(self.lower_size, self.hidden_size)
        self.embed_l2 = nn.Embedding(self.lower_size, self.hidden_size)
        self.embed_l3 = nn.Embedding(self.lower_size, self.hidden_size)
        self.embed_p = nn.Embedding(self.prev_size, self.hidden_size)
        # Shared trunk over the four concatenated embeddings.
        self.joint_linear1 = nn.Linear(4 * self.hidden_size, self.hidden_size)
        self.joint_linear2 = nn.Linear(self.hidden_size, self.hidden_size)
        # Separate projection heads for value and policy.
        self.value_linear = nn.Linear(self.hidden_size, self.proj_size)
        self.value_pred = nn.Linear(self.proj_size, self.value_size)
        self.policy_linear = nn.Linear(self.hidden_size, self.proj_size)
        self.policy_pred = nn.Linear(self.proj_size, self.policy_size)

    def forward(self, x_p, x_l):
        # x_p is the previous interval chosen in the upper voice: (batch, 1)
        # x_l is a window of 3 steps of the bottom voice: (batch, 3)
        x_p_i = x_p.long()
        x_l_i = x_l.long()
        x_p_e = self.embed_p(x_p_i[:, 0])
        x_l_e1 = self.embed_l1(x_l_i[:, 0])
        x_l_e2 = self.embed_l2(x_l_i[:, 1])
        x_l_e3 = self.embed_l3(x_l_i[:, 2])
        joined = th.cat([x_p_e, x_l_e1, x_l_e2, x_l_e3], dim=-1)
        l1 = self.joint_linear1(joined)
        r_l1 = F.relu(l1)
        l2 = self.joint_linear2(r_l1)
        r_l2 = F.relu(l2)
        # Policy head: log-probabilities over the action set.
        po_l1 = self.policy_linear(r_l2)
        r_po_l1 = F.relu(po_l1)
        po_l2 = self.policy_pred(r_po_l1)
        p_po_l2 = F.log_softmax(po_l2, dim=1)
        # Value head: tanh squashes the estimate into [-1, 1].
        # th.tanh replaces the deprecated F.tanh (same result).
        v_l1 = self.value_linear(r_l2)
        r_v_l1 = F.relu(v_l1)
        v_l2 = self.value_pred(r_v_l1)
        p_v_l2 = th.tanh(v_l2)
        return p_po_l2, p_v_l2
if __name__ == "__main__":
    # toy example of the full pipeline
    from datasets import fetch_two_voice_species1
    from datasets import fetch_three_voice_species1
    from analysis import notes_to_midi
    all_ex = fetch_two_voice_species1()
    # for now, just get info from 2 voice species 1
    #all_ex += fetch_three_voice_species1()
    all_tb = []
    all_lower_midi = []
    all_upper_midi = []
    all_lower_offset = []
    all_upper_offset = []
    for ex in all_ex:
        # skip any "wrong" examples
        if not all(ex["answers"]):
            continue
        nd = ex["notes_and_durations"]
        notes = [[ndii[0] for ndii in ndi] for ndi in nd]
        # durations not used in first species, leave it alone
        durations = [[ndii[1] for ndii in ndi] for ndi in nd]
        midi = notes_to_midi(notes)
        cf = ex["cantus_firmus_voice"]
        # NOTE(review): ``durations`` and ``cf`` are computed but never used
        # below -- confirm they are intentional placeholders.
        # Lower-voice pitches are offset against the final note; 13 appended
        # as an end-of-lower-voice marker (see comment further down).
        all_lower_offset.append(list(np.array(midi[1]) - midi[1][-1]) + [13])
        all_upper_offset.append(list(np.array(midi[0]) - midi[0][-1]))
        all_upper_midi.append(midi[0])
        all_lower_midi.append(midi[1])
        # Per-step interval of the top voice above the bottom voice.
        tb = list(np.array(midi[0]) - np.array(midi[1]))
        all_tb.append(tb)
    flat_tb = [ddd for dd in all_tb for ddd in dd]
    # these are the actions
    # they map to intervals wrt bottom voice
    # [-8, -4, -3, 0, 3, 4, 7, 8, 9, 12, 15, 16]
    tb_set = sorted(list(set(flat_tb)))
    tb_map = {v: k for k, v in enumerate(tb_set)}
    tb_rev_map = {v: k for k, v in tb_map.items()}
    flat_lower_offset = [ddd for dd in all_lower_offset for ddd in dd]
    # these are input symbols from bottom_voice, as offsets relative to last note ("key" centered)
    # [-12, -10, -9, -7, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 7, 8, 9, 12, 13]
    # 13 reserved for end of lower indicator
    lower_offset_set = sorted(list(set(flat_lower_offset)))
    lower_map = {v: k for k, v in enumerate(lower_offset_set)}
    lower_rev_map = {v: k for k, v in lower_map.items()}

    def make_windows(all_tb, all_lower_offset, lower_window=3, upper_lookback=1):
        # Pair each step's previous upper-voice interval(s) with a sliding
        # window of lower-voice offsets, producing training instances.
        # not general
        assert upper_lookback < lower_window
        all_instances = []
        for ii in range(len(all_tb)):
            tb = all_tb[ii]
            lower_offset = all_lower_offset[ii]
            instances = []
            for kk in range(upper_lookback, len(lower_offset) - lower_window + lower_window // 2 + 1):
                lookback = tb[kk - upper_lookback:kk]
                window = lower_offset[kk - upper_lookback:kk + lower_window - upper_lookback]
                instances.append([lookback, window])
            all_instances += instances
        return all_instances
    list_data = make_windows(all_tb, all_lower_offset)
    data_p = np.array([[tb_map[ldi] for ldi in ld[0]] for ld in list_data])
    data_l = np.array([[lower_map[ldi] for ldi in ld[1]] for ld in list_data])
    pv = PolicyValueNetwork(lower_size=len(lower_map), prev_size=len(tb_map))
    optimizer = optim.Adam(pv.parameters(), lr=0.0001, weight_decay=1E-4)
    # NOTE(review): zero_grad() is called only once, outside the loop below,
    # so gradients accumulate across iterations -- confirm this is intended.
    optimizer.zero_grad()
    mb_p = Variable(th.FloatTensor(data_p[:5]))
    mb_l = Variable(th.FloatTensor(data_l[:5]))
    # Sanity-check targets: uniform policy, constant value of 1.0.
    np_po_gt = np.ones((5, len(tb_map))) / float(len(tb_map))
    np_v_gt = 0. * np.ones((5, 1)) + 1.
    gt_po = Variable(th.FloatTensor(np_po_gt))
    gt_v = Variable(th.FloatTensor(np_v_gt))
    for i in range(10000):
        policy_log_probs, value_est = pv(mb_p, mb_l)
        # Mean-squared error for the value head; cross-entropy against the
        # uniform target for the policy head.
        v_loss = th.sum(((value_est - gt_v) ** 2) / gt_po.size()[0])
        po_loss = -th.sum((gt_po * policy_log_probs) / gt_po.size()[0])
        loss = po_loss + v_loss
        # NOTE(review): ``.data[0]`` is the pre-0.4 PyTorch idiom; newer
        # releases require ``.item()`` for 0-dim tensors.
        print("v", v_loss.data[0])
        print("p", po_loss.data[0])
        print("l", loss.data[0])
        loss.backward()
        optimizer.step()
    from IPython import embed; embed(); raise ValueError()  # debugging stop -- intentional leftover?
| [
"torch.nn.functional.tanh",
"numpy.ones",
"analysis.notes_to_midi",
"IPython.embed",
"datasets.fetch_two_voice_species1",
"numpy.array",
"torch.cat",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.functional.log_softmax",
"torch.FloatTensor",
"torch.nn.Embedding"
] | [((2482, 2508), 'datasets.fetch_two_voice_species1', 'fetch_two_voice_species1', ([], {}), '()\n', (2506, 2508), False, 'from datasets import fetch_two_voice_species1\n'), ((4994, 5056), 'numpy.array', 'np.array', (['[[tb_map[ldi] for ldi in ld[0]] for ld in list_data]'], {}), '([[tb_map[ldi] for ldi in ld[0]] for ld in list_data])\n', (5002, 5056), True, 'import numpy as np\n'), ((5070, 5135), 'numpy.array', 'np.array', (['[[lower_map[ldi] for ldi in ld[1]] for ld in list_data]'], {}), '([[lower_map[ldi] for ldi in ld[1]] for ld in list_data])\n', (5078, 5135), True, 'import numpy as np\n'), ((6049, 6056), 'IPython.embed', 'embed', ([], {}), '()\n', (6054, 6056), False, 'from IPython import embed\n'), ((645, 692), 'torch.nn.Embedding', 'nn.Embedding', (['self.lower_size', 'self.hidden_size'], {}), '(self.lower_size, self.hidden_size)\n', (657, 692), True, 'import torch.nn as nn\n'), ((717, 764), 'torch.nn.Embedding', 'nn.Embedding', (['self.lower_size', 'self.hidden_size'], {}), '(self.lower_size, self.hidden_size)\n', (729, 764), True, 'import torch.nn as nn\n'), ((789, 836), 'torch.nn.Embedding', 'nn.Embedding', (['self.lower_size', 'self.hidden_size'], {}), '(self.lower_size, self.hidden_size)\n', (801, 836), True, 'import torch.nn as nn\n'), ((860, 906), 'torch.nn.Embedding', 'nn.Embedding', (['self.prev_size', 'self.hidden_size'], {}), '(self.prev_size, self.hidden_size)\n', (872, 906), True, 'import torch.nn as nn\n'), ((937, 986), 'torch.nn.Linear', 'nn.Linear', (['(4 * self.hidden_size)', 'self.hidden_size'], {}), '(4 * self.hidden_size, self.hidden_size)\n', (946, 986), True, 'import torch.nn as nn\n'), ((1016, 1061), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'self.hidden_size'], {}), '(self.hidden_size, self.hidden_size)\n', (1025, 1061), True, 'import torch.nn as nn\n'), ((1091, 1134), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'self.proj_size'], {}), '(self.hidden_size, self.proj_size)\n', (1100, 1134), True, 'import torch.nn 
as nn\n'), ((1161, 1203), 'torch.nn.Linear', 'nn.Linear', (['self.proj_size', 'self.value_size'], {}), '(self.proj_size, self.value_size)\n', (1170, 1203), True, 'import torch.nn as nn\n'), ((1234, 1277), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'self.proj_size'], {}), '(self.hidden_size, self.proj_size)\n', (1243, 1277), True, 'import torch.nn as nn\n'), ((1305, 1348), 'torch.nn.Linear', 'nn.Linear', (['self.proj_size', 'self.policy_size'], {}), '(self.proj_size, self.policy_size)\n', (1314, 1348), True, 'import torch.nn as nn\n'), ((1749, 1796), 'torch.cat', 'th.cat', (['[x_p_e, x_l_e1, x_l_e2, x_l_e3]'], {'dim': '(-1)'}), '([x_p_e, x_l_e1, x_l_e2, x_l_e3], dim=-1)\n', (1755, 1796), True, 'import torch as th\n'), ((1852, 1862), 'torch.nn.functional.relu', 'F.relu', (['l1'], {}), '(l1)\n', (1858, 1862), True, 'import torch.nn.functional as F\n'), ((1916, 1926), 'torch.nn.functional.relu', 'F.relu', (['l2'], {}), '(l2)\n', (1922, 1926), True, 'import torch.nn.functional as F\n'), ((1987, 2000), 'torch.nn.functional.relu', 'F.relu', (['po_l1'], {}), '(po_l1)\n', (1993, 2000), True, 'import torch.nn.functional as F\n'), ((2061, 2088), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['po_l2'], {'dim': '(1)'}), '(po_l2, dim=1)\n', (2074, 2088), True, 'import torch.nn.functional as F\n'), ((2146, 2158), 'torch.nn.functional.relu', 'F.relu', (['v_l1'], {}), '(v_l1)\n', (2152, 2158), True, 'import torch.nn.functional as F\n'), ((2215, 2227), 'torch.nn.functional.tanh', 'F.tanh', (['v_l2'], {}), '(v_l2)\n', (2221, 2227), True, 'import torch.nn.functional as F\n'), ((3071, 3091), 'analysis.notes_to_midi', 'notes_to_midi', (['notes'], {}), '(notes)\n', (3084, 3091), False, 'from analysis import notes_to_midi\n'), ((5337, 5363), 'torch.FloatTensor', 'th.FloatTensor', (['data_p[:5]'], {}), '(data_p[:5])\n', (5351, 5363), True, 'import torch as th\n'), ((5385, 5411), 'torch.FloatTensor', 'th.FloatTensor', (['data_l[:5]'], {}), '(data_l[:5])\n', (5399, 5411), 
True, 'import torch as th\n'), ((5538, 5562), 'torch.FloatTensor', 'th.FloatTensor', (['np_po_gt'], {}), '(np_po_gt)\n', (5552, 5562), True, 'import torch as th\n'), ((5584, 5607), 'torch.FloatTensor', 'th.FloatTensor', (['np_v_gt'], {}), '(np_v_gt)\n', (5598, 5607), True, 'import torch as th\n'), ((5495, 5510), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (5502, 5510), True, 'import numpy as np\n'), ((3379, 3396), 'numpy.array', 'np.array', (['midi[0]'], {}), '(midi[0])\n', (3387, 3396), True, 'import numpy as np\n'), ((3399, 3416), 'numpy.array', 'np.array', (['midi[1]'], {}), '(midi[1])\n', (3407, 3416), True, 'import numpy as np\n'), ((3247, 3264), 'numpy.array', 'np.array', (['midi[0]'], {}), '(midi[0])\n', (3255, 3264), True, 'import numpy as np\n'), ((3169, 3186), 'numpy.array', 'np.array', (['midi[1]'], {}), '(midi[1])\n', (3177, 3186), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, Partition, Region, RW, WD
import numpy as np
@task(privileges=[WD])
def init_field(R):
    # Fill the region's 'color' field with 2-D partition colors.  Counting
    # the distinct colors below: (0,0) x4, (0,1) x3, (1,0) x3, (1,1) x6 --
    # exactly the subregion volumes asserted in main().
    coloring = np.array(
        [[([0, 1],), ([1, 0],), ([0, 1],), ([1, 0],)],
         [([1, 1],), ([1, 0],), ([0, 1],), ([1, 1],)],
         [([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)],
         [([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)]],
        dtype=R.color.dtype)
    # casting='no' requires the structured dtype to match the field exactly.
    np.copyto(R.color, coloring, casting='no')
@task
def main():
    """Partition a 4x4 region by a color field and verify subregion volumes."""
    R = Region([4, 4], {'color': pygion.int2d})
    init_field(R)
    # Partition R by the 'color' field into a 2x2 color space.
    P = Partition.by_field(R, 'color', [2, 2])
    assert P.color_space.volume == 4
    print('Parent region has volume %s' % R.ispace.volume)
    assert R.ispace.volume == 16
    # Volumes match the per-color counts written by init_field.
    assert P[0, 0].ispace.volume == 4
    assert P[0, 1].ispace.volume == 3
    assert P[1, 0].ispace.volume == 3
    assert P[1, 1].ispace.volume == 6

# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.copyto",
"pygion.task",
"numpy.array",
"pygion.Region",
"pygion.Partition.by_field"
] | [((735, 756), 'pygion.task', 'task', ([], {'privileges': '[WD]'}), '(privileges=[WD])\n', (739, 756), False, 'from pygion import task, Partition, Region, RW, WD\n'), ((791, 1015), 'numpy.array', 'np.array', (['[[([0, 1],), ([1, 0],), ([0, 1],), ([1, 0],)], [([1, 1],), ([1, 0],), ([0, \n 1],), ([1, 1],)], [([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)], [([0, 0\n ],), ([1, 1],), ([1, 1],), ([0, 0],)]]'], {'dtype': 'R.color.dtype'}), '([[([0, 1],), ([1, 0],), ([0, 1],), ([1, 0],)], [([1, 1],), ([1, 0]\n ,), ([0, 1],), ([1, 1],)], [([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)],\n [([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)]], dtype=R.color.dtype)\n', (799, 1015), True, 'import numpy as np\n'), ((1055, 1097), 'numpy.copyto', 'np.copyto', (['R.color', 'coloring'], {'casting': '"""no"""'}), "(R.color, coloring, casting='no')\n", (1064, 1097), True, 'import numpy as np\n'), ((1125, 1164), 'pygion.Region', 'Region', (['[4, 4]', "{'color': pygion.int2d}"], {}), "([4, 4], {'color': pygion.int2d})\n", (1131, 1164), False, 'from pygion import task, Partition, Region, RW, WD\n'), ((1192, 1230), 'pygion.Partition.by_field', 'Partition.by_field', (['R', '"""color"""', '[2, 2]'], {}), "(R, 'color', [2, 2])\n", (1210, 1230), False, 'from pygion import task, Partition, Region, RW, WD\n')] |
import os
import re
import csv
import codecs
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from string import punctuation
from gensim.models import KeyedVectors
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers.merge import concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
import sys
from keras import backend as K
from keras.engine.topology import Layer
#from keras import initializations
from keras import initializers, regularizers, constraints
# Fixed typo: the module is sklearn.metrics ("metrices" raised ImportError).
from sklearn.metrics import roc_auc_score
class Attention(Layer):
    """Soft attention over timesteps.

    Input shape: 3D tensor ``(samples, steps, features)``.
    Output shape: 2D tensor ``(samples, features)``.
    """

    def __init__(self, step_dim, W_regulizer=None, b_regulizer=None,
                 W_constraint=None, b_constraint=None, bias=True, **kwargs):
        # The (misspelled) public keyword names are kept for backward
        # compatibility, but stored under correctly spelled attributes so
        # build() can reference them consistently (the original stored
        # ``b_regulizer`` and then read ``b_regularizer`` -> AttributeError).
        self.W_regularizer = W_regulizer
        self.b_regularizer = b_regulizer
        self.W_constraint = W_constraint
        self.b_constraint = b_constraint
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        self.init = initializers.get('glorot_uniform')
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        # Fixes vs. the original: ``name`` was passed twice to add_weight
        # (a SyntaxError: keyword argument repeated), and ``regulizer`` is
        # not a valid keyword (should be ``regularizer``).
        self.W = self.add_weight(shape=(input_shape[-1],),
                                 initializer=self.init,
                                 constraint=self.W_constraint,
                                 regularizer=self.W_regularizer,
                                 name='{}_W'.format(self.name))
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight(shape=(input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        super(Attention, self).build(input_shape)

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Score each timestep: eij = tanh(x . W + b), shape (samples, steps).
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                              K.reshape(self.W, (features_dim, 1))),
                        (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        # Normalize attention weights to sum to 1 (epsilon avoids div-by-0).
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
########################################
## Paths and hyper-parameters.
########################################
# Trailing slash added: the original 'path + "train.csv"' evaluated to
# '../input/datatrain.csv' (compare path1, which does end with '/').
path = '../input/data/'
path1 = '../input/glove-840b-tokens-300d-vectors/'
EMBEDDING_FILE = path1 + 'glove.840B.300d.txt'
TRAIN_DATA_FILE = path + 'train.csv'
TEST_DATA_FILE = path + 'test.csv'
MAX_SEQUENCE_LENGTH = 150
MAX_NB_WORDS = 100000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.1
num_lstm = 300
num_dense = 256
lstm_dropout_rate = 0.25
dense_dropout_rate = 0.25
act = 'relu'
########################################
## index word vectors.
########################################
print('Indexing word vectors')
embedding_index = {}
with open(EMBEDDING_FILE, 'r') as f:
    for line in f:
        # Each GloVe line is: word followed by EMBEDDING_DIM float components.
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embedding_index[word] = coefs
print('Indexed the word vectors')
print('Found %s word vectors.' % len(embedding_index))
train_df = pd.read_csv(TRAIN_DATA_FILE)
test_df = pd.read_csv(TEST_DATA_FILE)
########################################
## Basic preprocessing of text data.
########################################
print('performing some basic preprocessing on data')
# regex for removing non-alphanumeric characters and spaces
# Fixed: the original pattern was written as 'r[^a-z\d]' -- the 'r' meant to
# mark a raw-string literal ended up inside the pattern, so the regex only
# matched an 'r' followed by a non-alphanumeric character.
remove_special_char = re.compile(r'[^a-z\d]', re.IGNORECASE)
# regex to replace all numerics
replace_numerics = re.compile(r'\d+', re.IGNORECASE)
##############################################################################################
## function for converting the text to a list of tokens after stopword removal and stemming.
##############################################################################################
def preprocess_text(text, remove_stopwords=True, perform_stemming=True):
    """Lowercase, optionally drop stopwords, strip special chars/digits, stem.

    Parameters
    ----------
    text : str
        Raw comment text.
    remove_stopwords : bool, optional
        Drop NLTK English stopwords before cleaning.
    perform_stemming : bool, optional
        Apply the Snowball (English) stemmer to the cleaned tokens.

    Returns
    -------
    str
        The processed text.
    """
    # convert text to lowercase and split.
    words = text.lower().split()
    # stopword removal (you can use your own set of stopwords; here we use the
    # default from nltk stopwords)
    if remove_stopwords:
        stop_words = set(stopwords.words('english'))
        words = [w for w in words if w not in stop_words]
    # Re-join unconditionally.  The original only joined inside the stopword
    # branch, so remove_stopwords=False left ``text`` a list and crashed in
    # the regex substitution below.
    text = ' '.join(words)
    text = remove_special_char.sub('', text)
    text = replace_numerics.sub('n', text)
    if perform_stemming:
        stemmer = SnowballStemmer('english')
        stemmed_words = [stemmer.stem(w) for w in text.split()]
        text = ' '.join(stemmed_words)
    return text
##################################################
## forming sequences to feed into the network.
##################################################
# NOTE(review): the toxic-comment CSVs commonly name this column
# 'comment_text' -- confirm 'comments' matches the actual files.
raw_train_comments = train_df['comments'].fillna('NA').values
raw_test_comments = test_df['comments'].fillna('NA').values
classes_to_predict = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train_df[classes_to_predict].values
#y_test_predicted = test_df[classes_to_predict].values
processed_train_comments = []
for comment in raw_train_comments:
    processed_train_comments.append(preprocess_text(comment))
processed_test_comments = []
for comment in raw_test_comments:
    processed_test_comments.append(preprocess_text(comment))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(processed_train_comments + processed_test_comments)
# Fixed: the Tokenizer method is ``texts_to_sequences``; the original called
# the non-existent ``text_to_sequences`` and raised AttributeError.
train_sequences = tokenizer.texts_to_sequences(processed_train_comments)
test_sequences = tokenizer.texts_to_sequences(processed_test_comments)
# Fixed: report the token count rather than printing the entire word_index.
print('found %s tokens in text.' % len(tokenizer.word_index))
train_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)
final_test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# Fixed: '%s' % some_tuple raises TypeError when the tuple has >1 element;
# wrap the shape in a 1-tuple so it formats as a single value.
print('shape of train_data(will be divided further into final_train_data + final_validation_data) ready for feeding to network is %s' % (train_data.shape,))
print('shape of final_test_data ready for fedding to network is %s' % (final_test_data.shape,))
print('shape of label(y) is %s' % (y.shape,))
##################################################
## preparing word embeddings.
##################################################
print('preparing embedding matrix')
word_index = tokenizer.word_index
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for word, i in word_index.items():
    # Fixed off-by-one: the original test ``i > MAX_NB_WORDS`` let
    # ``i == nb_words`` through and overflowed embedding_matrix.
    if i >= nb_words:
        continue
    embedding_vector = embedding_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
print('embedding matrix preparation complete')
##################################################
## train and validation split.
##################################################
print('creating train and validation data by dividing train_data in 80:20 ratio')
permutation = np.random.permutation(len(train_data))
index_train = permutation[:int(len(train_data)*0.8)]
# Fixed: the original sliced from ``*0.2``, so the "validation" set shared
# 60% of its rows with the training set; a disjoint 80/20 split takes the
# remaining 20%.
index_validation = permutation[int(len(train_data)*0.8):]
final_train_data = train_data[index_train]
labels_of_train_data = y[index_train]
final_validation_data = train_data[index_validation]
labels_of_validation_data = y[index_validation]
print('train data shape:', final_train_data.shape)
print('validation data shape:', final_validation_data.shape)
print('train and validation data are ready!!')
############################
## Keras model structure.
############################
embedding_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH, trainable=False)
lstm_layer = LSTM(num_lstm, dropout=lstm_dropout_rate,
                 recurrent_dropout=lstm_dropout_rate, return_sequences=True)
input_comment = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequence = embedding_layer(input_comment)
x = lstm_layer(embedded_sequence)
x = Dropout(dense_dropout_rate)(x)
# Attention collapses the timestep axis to a single feature vector.
merged = Attention(MAX_SEQUENCE_LENGTH)(x)
merged = Dense(num_dense, activation=act)(merged)
merged = Dropout(dense_dropout_rate)(merged)
merged = BatchNormalization()(merged)
# One sigmoid per class: independent multi-label predictions.
preds = Dense(len(classes_to_predict), activation='sigmoid')(merged)
#########################
## train the model.
#########################
model = Model(inputs=[input_comment], outputs=preds)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
stamp = 'sentiment_with_lstm_and_glove_%.2f_%.2f' % (lstm_dropout_rate, dense_dropout_rate)
print(stamp)
best_model_path = stamp + '.h5'
early_stopping = EarlyStopping(patience=2)
model_checkpoint = ModelCheckpoint(best_model_path, save_best_only=True,
                                   save_weights_only=True)
hist = model.fit(x=final_train_data, y=labels_of_train_data,
                 validation_data=(final_validation_data, labels_of_validation_data),
                 epochs=20, batch_size=256, shuffle=True,
                 callbacks=[early_stopping, model_checkpoint])
best_score = min(hist.history['val_loss'])
#######################################
## time to make prediction!!!
########################################
y_test_predicted = model.predict([final_test_data], batch_size=1024, verbose=1)
sample_submission = pd.read_csv("../input/sample_submission.csv")
sample_submission[classes_to_predict] = y_test_predicted
# Fixed NameErrors: the original referenced undefined ``bst_val_score`` and
# ``STAMP``; the variables defined above are ``best_score`` and ``stamp``.
sample_submission.to_csv('%.4f_' % best_score + stamp + '.csv', index=False)
| [
"keras.backend.sum",
"pandas.read_csv",
"re.compile",
"keras.backend.reshape",
"keras.backend.floatx",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"nltk.corpus.stopwords.words",
"numpy.asarray",
"keras.layers.LSTM",
"keras.models.Model",
"keras.callbacks.EarlyStopping",... | [((4168, 4196), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA_FILE'], {}), '(TRAIN_DATA_FILE)\n', (4179, 4196), True, 'import pandas as pd\n'), ((4207, 4234), 'pandas.read_csv', 'pd.read_csv', (['TEST_DATA_FILE'], {}), '(TEST_DATA_FILE)\n', (4218, 4234), True, 'import pandas as pd\n'), ((4492, 4531), 're.compile', 're.compile', (['"""r[^a-z\\\\d]"""', 're.IGNORECASE'], {}), "('r[^a-z\\\\d]', re.IGNORECASE)\n", (4502, 4531), False, 'import re\n'), ((4581, 4614), 're.compile', 're.compile', (['"""\\\\d+"""', 're.IGNORECASE'], {}), "('\\\\d+', re.IGNORECASE)\n", (4591, 4614), False, 'import re\n'), ((6418, 6451), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'MAX_NB_WORDS'}), '(num_words=MAX_NB_WORDS)\n', (6427, 6451), False, 'from keras.preprocessing.text import Tokenizer\n'), ((6745, 6803), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)\n', (6758, 6803), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((6824, 6881), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)\n', (6837, 6881), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((7449, 7484), 'numpy.zeros', 'np.zeros', (['(nb_words, EMBEDDING_DIM)'], {}), '((nb_words, EMBEDDING_DIM))\n', (7457, 7484), True, 'import numpy as np\n'), ((8579, 8697), 'keras.layers.Embedding', 'Embedding', (['nb_words', 'EMBEDDING_DIM'], {'weights': '[embedding_matrix]', 'input_length': 'MAX_SEQUENCE_LENGTH', 'trainable': '(False)'}), '(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length\n =MAX_SEQUENCE_LENGTH, trainable=False)\n', (8588, 8697), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((8712, 8818), 'keras.layers.LSTM', 'LSTM', 
(['num_lstm'], {'dropout': 'lstm_dropout_rate', 'recurrent_dropout': 'lstm_dropout_rate', 'return_sequences': '(True)'}), '(num_lstm, dropout=lstm_dropout_rate, recurrent_dropout=\n lstm_dropout_rate, return_sequences=True)\n', (8716, 8818), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((8845, 8895), 'keras.layers.Input', 'Input', ([], {'shape': '(MAX_SEQUENCE_LENGTH,)', 'dtype': '"""int32"""'}), "(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n", (8850, 8895), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((9351, 9395), 'keras.models.Model', 'Model', ([], {'inputs': '[input_comment]', 'outputs': 'preds'}), '(inputs=[input_comment], outputs=preds)\n', (9356, 9395), False, 'from keras.models import Model\n'), ((9667, 9692), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(2)'}), '(patience=2)\n', (9680, 9692), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9714, 9791), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['best_model_path'], {'save_best_only': '(True)', 'save_weights_only': '(True)'}), '(best_model_path, save_best_only=True, save_weights_only=True)\n', (9729, 9791), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((10343, 10388), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv"""'], {}), "('../input/sample_submission.csv')\n", (10354, 10388), True, 'import pandas as pd\n'), ((8989, 9016), 'keras.layers.Dropout', 'Dropout', (['dense_dropout_rate'], {}), '(dense_dropout_rate)\n', (8996, 9016), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((9072, 9104), 'keras.layers.Dense', 'Dense', (['num_dense'], {'activation': 'act'}), '(num_dense, activation=act)\n', (9077, 9104), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((9124, 9151), 'keras.layers.Dropout', 'Dropout', (['dense_dropout_rate'], 
{}), '(dense_dropout_rate)\n', (9131, 9151), False, 'from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n'), ((9169, 9189), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9187, 9189), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1430, 1464), 'keras.initializers.get', 'initializers.get', (['"""glorot_uniform"""'], {}), "('glorot_uniform')\n", (1446, 1464), False, 'from keras import initializers, regularizers, constraints\n'), ((2830, 2841), 'keras.backend.tanh', 'K.tanh', (['eij'], {}), '(eij)\n', (2836, 2841), True, 'from keras import backend as K\n'), ((2855, 2865), 'keras.backend.exp', 'K.exp', (['eij'], {}), '(eij)\n', (2860, 2865), True, 'from keras import backend as K\n'), ((3106, 3122), 'keras.backend.expand_dims', 'K.expand_dims', (['a'], {}), '(a)\n', (3119, 3122), True, 'from keras import backend as K\n'), ((3174, 3203), 'keras.backend.sum', 'K.sum', (['weighted_input'], {'axis': '(1)'}), '(weighted_input, axis=1)\n', (3179, 3203), True, 'from keras import backend as K\n'), ((3980, 4019), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (3990, 4019), True, 'import numpy as np\n'), ((5512, 5538), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (5527, 5538), False, 'from nltk.stem import SnowballStemmer\n'), ((3081, 3091), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3089, 3091), True, 'from keras import backend as K\n'), ((5210, 5236), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (5225, 5236), False, 'from nltk.corpus import stopwords\n'), ((2677, 2709), 'keras.backend.reshape', 'K.reshape', (['x', '(-1, features_dim)'], {}), '(x, (-1, features_dim))\n', (2686, 2709), True, 'from keras import backend as K\n'), ((2711, 2747), 'keras.backend.reshape', 'K.reshape', (['self.W', '(features_dim, 1)'], {}), 
'(self.W, (features_dim, 1))\n', (2720, 2747), True, 'from keras import backend as K\n'), ((3001, 3011), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3009, 3011), True, 'from keras import backend as K\n'), ((3034, 3065), 'keras.backend.sum', 'K.sum', (['a'], {'axis': '(1)', 'keepdims': '(True)'}), '(a, axis=1, keepdims=True)\n', (3039, 3065), True, 'from keras import backend as K\n'), ((3068, 3079), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3077, 3079), True, 'from keras import backend as K\n')] |
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
class EKF:
    """Creates a N-dimensional (Extended) Kalman filter.

    Parameters
    ----------
    x_init : numpy.ndarray, optional
        Initial state vector: What we know about the (probable) start state.
    P_init : numpy.ndarray, optional
        Initial (uncertainty) covariance matrix: How sure are we about the start state - in each dimension?
    F : numpy.ndarray, optional
        State transition matrix: For predicting the next state from the current state.
    B : numpy.ndarray, optional
        Control matrix: For predicting the next state from the current state using control signals.
    Q : numpy.ndarray, optional
        Process noise covariance matrix: Describes the (Gaussian) randomness of state transitions in each dimension.
    H : numpy.ndarray, optional
        Measurement matrix: Describes how we think that our sensors map states to measurements z.
    R : numpy.ndarray, optional
        Measurement noise covariance matrix: Describes the (Gaussian) randomness of measurements per dimension.
    cb_f : callable, optional
        Callback that predicts a point in the state space (necessary for the EKF). If set, overwrites F.
    cb_F : callable, optional
        Callback that returns the matrix F (calculated for the current time).
    cb_h : callable, optional
        Callback that transforms a point from the state space to the measurement space (necessary for the EKF). If set, overwrites H.
    cb_H : callable, optional
        Callback that returns the matrix H (calculated for the current time).
    init_with_first_meas : bool
        Indicates if the first measurement shall be used to initialize the filter's estimated position.

    Returns
    -------
    EKF
        An initialized (Extended) Kalman filter object which can be used for further filtering.
    """
    def __init__(self, x_init, P_init, F, B, Q, H, R, cb_f=None, cb_F=None, cb_h=None, cb_H=None, init_with_first_meas=False):
        # KF
        self.x = x_init
        self.P = P_init
        self.F = F
        self.B = B
        self.Q = Q
        self.H = H
        self.R = R
        # EKF
        self.f = cb_f
        self.cb_F = cb_F
        self.h = cb_h
        self.cb_H = cb_H
        # The non-linear callbacks only make sense in pairs: the point
        # prediction (cb_f / cb_h) and the corresponding Jacobian provider
        # (cb_F / cb_H) must be supplied together.
        if bool(self.f is None) != bool(self.cb_F is None):
            raise ValueError("cb_f() and cb_F() need to be set both or none of them.")
        if bool(self.h is None) != bool(self.cb_H is None):
            raise ValueError("cb_h() and cb_H() need to be set both or none of them.")
        self._inited = not init_with_first_meas # Makes the first measurement used as the initial state
    # end def
    def predict(self, u):
        """Predicts the new state vector x using the transition matrix F and the specified control vector u and updates the uncertainty covariance. Matrix F embodies our knowledge about the system dynamics.

        Does nothing until the filter is initialized (see ``init_with_first_meas``).

        Parameters
        ----------
        u : numpy.ndarray
            Control vector applied by the control-input-model B.
        """
        if not self._inited:
            return
        # Predict new state
        if self.f is None:
            self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
        else:
            self.x = self.f(self.x) + np.dot(self.B, u)
        # EKF: re-linearize F at the freshly predicted state; the updated F
        # is then used for the covariance propagation below.
        if self.cb_F is not None:
            self.F = self.cb_F(self.x)
        # Update uncertainty covariance matrix
        self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
    # end def
    def filter(self, z, R=None):
        """Updates the previously predicted state by incorporating the new measurement.

        If the filter was created with ``init_with_first_meas=True``, the first
        call also initializes the state estimate from ``z`` (zero-padded to the
        state dimension) and then proceeds with the regular correction step.

        Parameters
        ----------
        z : numpy.ndarray
            Measurement vector used to update the estimation.
        R : numpy.ndarray, optional
            Measurement covariance matrix. Can be set to overwrite the initial one (in case R is not constant).
        """
        if not self._inited:
            self._inited = True
            self.x = np.pad(z, (0, len(self.x) - len(z)), 'constant') # XXX Not correct - needs to be transformed from the measurement corrdinate system to the dynamic one's - but works, if both systems are the same
        if R is not None:
            self.R = R
        # Compute innovation y
        if self.h is None:
            y = z - np.dot(self.H, self.x)
        else:
            y = z - self.h(self.x)
        # EKF: re-linearize H at the current state estimate before forming S.
        if self.cb_H is not None:
            self.H = self.cb_H(self.x)
        # Compute residual covariance matrix S
        S = np.dot(self.H, np.dot(self.P, self.H.T)) + self.R
        # Compute Kalman gain matrix the Kalman gain matrix tells us how strongly to correct each dimension of the
        # predicted state vector by the help of the measurement
        K = np.dot(self.P, np.dot(self.H.T, np.linalg.inv(S)))
        # Correct previously predicted new state vector
        self.x = self.x + np.dot(K, y)
        # Update uncertainty covariance matrix
        self.P = self.P - np.dot(K, np.dot(self.H, self.P))
    #
    def get_current_state_estimate(self):
        """Returns the current estimated state vector x, may it be after the predict() or after the correct_by_measurement() step.

        Returns
        -------
        numpy.ndarray
            Current estimated state vector x.
        """
        return self.x
    # end def
    # Returns the current estimated uncertainty covariance matrix P may it be after the predict() or after the
    # correct_by_measurement() step the covariance matrix describes the variance of each state vector argument
    # = uncertainty about this argument of the state vector
    def get_current_uncertainty(self):
        """Returns the current estimated uncertainty covariance matrix P may it be after the predict() or after the
        correct_by_measurement() step the covariance matrix describes the variance of each state vector argument
        = uncertainty about this argument of the state vector.

        Returns
        -------
        numpy.ndarray
            Current estimated uncertainty covariance matrix P.
        """
        return self.P
    # end def
    @staticmethod
    def join_measurements(R_z_list, mode=1):
        """Joins multiple measurements.

        Parameters
        ----------
        R_z_list
            List of tuples where each tuple holds the pair of R and z.
        mode : int
            If mode equals 1, the inverse of the sum of all inverted measurements is being used. Mode 0 (stacking of measurements) is not implemented yet.

        Returns
        -------
        tuple
            Joined (R, z) pair. NOTE(review): for mode 0 this currently
            returns (None, None) since stacking is not implemented.
        """
        R_res = None
        z_res = None
        if mode == 0:
            pass
        else:  # if mode == 1
            if not isinstance(R_z_list, list):
                R_z_list = [R_z_list]
            # Calculate R and z
            # Information-filter style fusion: precisions (inverse
            # covariances) add; z is accumulated precision-weighted and
            # mapped back at the end.
            R_res = 0
            z_res = 0
            for R, z in R_z_list:
                R_res += np.linalg.inv(R)
                z_res += np.dot(np.linalg.inv(R), z)
            R_res = np.linalg.inv(R_res)
            z_res = np.dot(z_res, R_res)
        # end if
        return R_res, z_res
    # end def
class KF(EKF):
    """Plain (linear) Kalman filter.

    Special case of the Extended Kalman filter with no non-linear
    prediction/measurement callbacks; see class EKF for the meaning of
    all parameters.
    """

    def __init__(self, x_init, P_init, F, B, Q, H, R, init_with_first_meas=False):
        super().__init__(x_init, P_init, F, B, Q, H, R,
                         init_with_first_meas=init_with_first_meas)
| [
"numpy.dot",
"numpy.linalg.inv"
] | [((4911, 4923), 'numpy.dot', 'np.dot', (['K', 'y'], {}), '(K, y)\n', (4917, 4923), True, 'import numpy as np\n'), ((6988, 7008), 'numpy.linalg.inv', 'np.linalg.inv', (['R_res'], {}), '(R_res)\n', (7001, 7008), True, 'import numpy as np\n'), ((7029, 7049), 'numpy.dot', 'np.dot', (['z_res', 'R_res'], {}), '(z_res, R_res)\n', (7035, 7049), True, 'import numpy as np\n'), ((3196, 3218), 'numpy.dot', 'np.dot', (['self.F', 'self.x'], {}), '(self.F, self.x)\n', (3202, 3218), True, 'import numpy as np\n'), ((3221, 3238), 'numpy.dot', 'np.dot', (['self.B', 'u'], {}), '(self.B, u)\n', (3227, 3238), True, 'import numpy as np\n'), ((3291, 3308), 'numpy.dot', 'np.dot', (['self.B', 'u'], {}), '(self.B, u)\n', (3297, 3308), True, 'import numpy as np\n'), ((3463, 3487), 'numpy.dot', 'np.dot', (['self.P', 'self.F.T'], {}), '(self.P, self.F.T)\n', (3469, 3487), True, 'import numpy as np\n'), ((4329, 4351), 'numpy.dot', 'np.dot', (['self.H', 'self.x'], {}), '(self.H, self.x)\n', (4335, 4351), True, 'import numpy as np\n'), ((4550, 4574), 'numpy.dot', 'np.dot', (['self.P', 'self.H.T'], {}), '(self.P, self.H.T)\n', (4556, 4574), True, 'import numpy as np\n'), ((4809, 4825), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (4822, 4825), True, 'import numpy as np\n'), ((5008, 5030), 'numpy.dot', 'np.dot', (['self.H', 'self.P'], {}), '(self.H, self.P)\n', (5014, 5030), True, 'import numpy as np\n'), ((6897, 6913), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (6910, 6913), True, 'import numpy as np\n'), ((6946, 6962), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (6959, 6962), True, 'import numpy as np\n')] |
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import lale.docstrings
import lale.operators
class _BaselineClassifierImpl:
    """Baseline classifier that always predicts the majority class of the
    training labels."""

    def __init__(self):
        pass

    def fit(self, X, y):
        """Memorize the most frequent label of ``y``; returns ``self``.

        ``X`` is accepted for scikit-learn API compatibility but ignored.
        Ties are broken in favor of the label that occurs first in ``y``
        (same behavior as a linear scan keeping a strictly-greater count),
        and an empty ``y`` yields a majority label of ``None``.
        """
        from collections import Counter

        counts = Counter(y)
        # Counter preserves first-occurrence order, and max() returns the
        # first key attaining the maximum, reproducing the original
        # tie-breaking exactly.
        self._majority_label = max(counts, key=counts.get, default=None)
        return self

    def predict(self, X):
        """Return an array of length ``X.shape[0]`` filled with the majority
        label learned in :meth:`fit`."""
        result = np.full((X.shape[0],), self._majority_label)
        return result

    def score(self, X, y):
        """Accuracy of the majority-class prediction against ``y``."""
        from sklearn.metrics import accuracy_score

        y_pred = self.predict(X)
        return accuracy_score(y, y_pred)
# JSON schema for the operator's constructor hyperparameters (this
# operator takes none, hence the empty property set).
_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
            "type": "object",
            "relevantToOptimizer": [],
            "additionalProperties": False,
        }
    ]
}

# Schema for the arguments of fit(X, y): numeric feature matrix plus
# numeric or string class labels.
_input_fit_schema = {
    "required": ["X", "y"],
    "type": "object",
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {"type": "array"},
        },
        "y": {
            "description": "Target class labels.",
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {"type": "array", "items": {"type": "string"}},
            ],
        },
    },
}

# Schema for the argument of predict(X).
_input_predict_schema = {
    "type": "object",
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        }
    },
}

# Schema for the return value of predict: one label per sample.
_output_predict_schema = {
    "description": "Predicted class label per sample.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "string"}},
    ],
}

# Combined schema dictionary consumed by lale.operators.make_operator.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Baseline classifier always predicts the majority class.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.baseline_classifier.html",
    "import_from": "lale.lib.lale",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}

# Wrap the implementation class and its schemas into a lale operator and
# attach the auto-generated docstrings.
BaselineClassifier = lale.operators.make_operator(
    _BaselineClassifierImpl, _combined_schemas
)

lale.docstrings.set_docstrings(BaselineClassifier)
| [
"numpy.full",
"sklearn.metrics.accuracy_score"
] | [((1170, 1214), 'numpy.full', 'np.full', (['(X.shape[0],)', 'self._majority_label'], {}), '((X.shape[0],), self._majority_label)\n', (1177, 1214), True, 'import numpy as np\n'), ((1365, 1390), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1379, 1390), False, 'from sklearn.metrics import accuracy_score\n')] |
"""GenerativeModel class for ... generative models"""
import numpy as np
import tensorflow as tf
from netlds.network import Network
class GenerativeModel(object):
    """Abstract base class for generative models.

    Subclasses are expected to implement graph construction, log-density
    evaluation and sampling; this class only stores the attributes they
    all share.
    """

    # single data type shared by all tensors created during graph construction
    dtype = tf.float32

    def __init__(
            self, dim_obs=None, dim_latent=None, post_z_samples=None,
            **kwargs):
        """Store the basic dimensions and posterior samples.

        Args:
            dim_obs (int): dimension of observation vector
            dim_latent (int): dimension of latent state
            post_z_samples (batch_size x num_mc_samples x num_time_pts x
                dim_latent tf.Tensor): samples from the (appx) posterior of
                the latent states
        """
        self.dim_obs = dim_obs
        self.dim_latent = dim_latent
        # samples from the approximate posterior over latent states
        # (the output of the inference network)
        self.post_z_samples = post_z_samples

    def build_graph(self, *args, **kwargs):
        """Build the tensorflow computation graph; subclass responsibility."""
        raise NotImplementedError

    def log_density(self, y, z):
        """Evaluate the model's log density; subclass responsibility."""
        raise NotImplementedError

    def sample(self, sess, num_samples=1, seed=None):
        """Draw samples from the model; subclass responsibility."""
        raise NotImplementedError
class NetFLDS(GenerativeModel):
"""
Generative model is defined as
z_t ~ N(A z_{t-1}, Q)
E[y_t^i] = f(z_t^i)
for each population i, where the z_t^i are non-overlapping subsets of z_t
"""
def __init__(
self, dim_obs=None, dim_latent=None, linear_predictors=None,
num_time_pts=None, gen_params=None, noise_dist='gaussian',
nn_params=None, train_A = True, train_Q0 = True, post_z_samples=None,
num_clusters = None):
"""
Args:
dim_obs (list): observation dimension for each population
dim_latent (list): latent dimension for each population
linear_predictors (dict):
'dim_predictors' (list): dimension for each set of linear
predictors
'predictor_indx' (list of lists): each element of the list
contains the indices of the predictors in the
`dim_predictors` list used by the corresponding population
'predictor_params' (list of lists): each element contains
params for initializing the linear mapping of each pop/pred
combo; should match 'predictor_indx'
num_time_pts (int): number of time points per observation of the
dynamical sequence
gen_params (dict): dictionary of generative params for initializing
model
noise_dist (str): 'gaussian' | 'poisson'
nn_params (list): dictionaries for building each layer of the
mapping from the latent space to observations; the same
network architecture is used for each population
post_z_samples (batch_size x num_mc_samples x num_time_pts x
dim_latent tf.Tensor): samples from the (appx) posterior of the
latent states
"""
GenerativeModel.__init__(self, post_z_samples=post_z_samples)
self.dim_obs = dim_obs
self.dim_latent = dim_latent
self.train_A = train_A
self.train_Q0 = train_Q0
self.num_clusters = num_clusters
if linear_predictors is None:
self.dim_predictors = None
self.predictor_indx = None
else:
self.dim_predictors = linear_predictors['dim_predictors']
predictor_indx = linear_predictors['predictor_indx']
if 'predictor_params' in linear_predictors:
predictor_params = linear_predictors['predictor_params']
else:
predictor_params = None
self.num_time_pts = num_time_pts
if gen_params is None:
self.gen_params = {}
else:
self.gen_params = gen_params
# spiking nl
self.noise_dist = noise_dist
if noise_dist is 'gaussian':
activation = 'linear'
elif noise_dist is 'poisson':
activation = 'softplus'
else:
raise ValueError
if nn_params is None:
# use Network defaults
nn_params = [{}]
nn_params[-1]['activation'] = activation
# does the observation network need mark probabilities?
if self.num_clusters is not None:
nn_params[-1]['units'] = self.num_clusters
# networks mapping latent states to obs for each population
self.networks = []
for _, pop_dim in enumerate(dim_obs):
self.networks.append(
Network(output_dim=self.num_clusters, nn_params=nn_params))
else:
self.num_clusters = None
# networks mapping latent states to obs for each population
self.networks = []
for _, pop_dim in enumerate(dim_obs):
self.networks.append(
Network(output_dim=pop_dim, nn_params=nn_params))
# networks mapping linear predictors to obs for each population
# accessed as self.networks_linear[pop][pred]
# only initialize networks if we have linear predictors
if self.dim_predictors is not None:
self.networks_linear = [
[None for _ in range(len(self.dim_predictors))]
for _ in range(len(dim_obs))]
self.predictor_indx = [
[None for _ in range(len(self.dim_predictors))]
for _ in range(len(dim_obs))]
linear_nn_params = [{'activation': 'linear'}]
for pop, pop_dim in enumerate(self.dim_obs):
# for pred, pred_dim in enumerate(self.dim_predictors):
# if any(pred_indx == pred
# for pred_indx in predictor_indx[pop]):
# self.networks_linear[pop][pred] = Network(
# output_dim=pop_dim, nn_params=linear_nn_params)
# self.predictor_indx[pop][pred] = pred
for indx, pred_indx in enumerate(predictor_indx[pop]):
self.predictor_indx[pop][pred_indx] = pred_indx
if predictor_params is not None and predictor_params[pop][indx] is not None:
if 'mark_probabilities' in predictor_params[pop][indx]:
ident_mat = tf.constant_initializer(tf.eye(pop_dim))
pred_params = [{'activation': 'identity',
'kernel_initializer': ident_mat,
'trainable': False,
'use_bias': False}]
else:
pred_params = predictor_params[pop][indx]
else:
pred_params = linear_nn_params
self.networks_linear[pop][pred_indx] = Network(
output_dim=pop_dim, nn_params=pred_params)
else:
self.networks_linear = None
self.predictor_indx = None
# initialize lists for other relevant variables
self.linear_predictors_phs = []
self.y_pred = []
self.y_pred_ls = [] # latent space
self.y_pred_lp = [] # linear predictors
self.y_samples_prior = []
self.latent_indxs = []
if noise_dist is 'gaussian':
self.R_sqrt = []
self.R = []
self.R_inv = []
    def build_graph(self, z_samples, param_dict):
        """
        Build tensorflow computation graph for generative model

        Args:
            z_samples (batch_size x num_mc_samples x num_time_pts x dim_latent
                tf.Tensor): samples of the latent states
            param_dict (dict): output of NetLDS.initialize_prior_vars() method
        """
        # set prior variables generated elsewhere
        self.z0_mean = param_dict['z0_mean']
        self.A = param_dict['A']
        self.Q0_sqrt = param_dict['Q0_sqrt']
        self.Q_sqrt = param_dict['Q_sqrt']
        self.Q0 = param_dict['Q0']
        self.Q = param_dict['Q']
        self.Q0_inv = param_dict['Q0_inv']
        self.Q_inv = param_dict['Q_inv']
        # initialize placeholders for linear predictors
        with tf.variable_scope('linear_predictors'):
            if self.dim_predictors is not None:
                for pred, dim_pred in enumerate(self.dim_predictors):
                    self.linear_predictors_phs.append(
                        tf.placeholder(
                            dtype=self.dtype,
                            shape=[None, self.num_time_pts, dim_pred],
                            name='linear_pred_ph_%02i' % pred))
        # keep track of which latent states belong to each population
        indx_start = 0
        for pop, pop_dim_latent in enumerate(self.dim_latent):
            with tf.variable_scope(str('population_%02i' % pop)):
                # initialize mapping from latent space to observations
                with tf.variable_scope('latent_space_mapping'):
                    self.networks[pop].build_graph()
                    indx_end = indx_start + pop_dim_latent
                    self.latent_indxs.append(
                        np.arange(indx_start, indx_end+1, dtype=np.int32))
                    # apply this population's network to its slice of the
                    # latent state
                    self.y_pred_ls.append(self.networks[pop].apply_network(
                        z_samples[:, :, :, indx_start:indx_end]))
                    if self.num_clusters is not None:
                        # mark probabilities combine cluster outputs into
                        # per-observation predictions; the transposes move
                        # the time axis out of the way of the matmul
                        shp = (None,self.num_time_pts,self.num_clusters,self.dim_obs[pop])
                        self.mark_probs = tf.placeholder(dtype = self.dtype,
                                                         shape = shp,
                                                         name = 'mark_probs')
                        F = tf.transpose(self.y_pred_ls[-1], (0, 2, 1, 3))
                        self.y_pred_ls[-1] = tf.transpose(tf.matmul(F, self.mark_probs),
                                                          (0, 2, 1, 3))
                    indx_start = indx_end
                # initialize mapping from linear predictors to observations
                with tf.variable_scope('regressor_mapping'):
                    if self.dim_predictors is not None:
                        # append new list for this population
                        self.y_pred_lp.append([])
                        for pred, pred_dim in enumerate(self.dim_predictors):
                            if self.predictor_indx[pop][pred] is not None:
                                self.networks_linear[pop][pred].build_graph()
                                net_out = self.networks_linear[pop][pred].\
                                    apply_network(
                                        self.linear_predictors_phs[pred])
                                # expand network output to match dims of
                                # samples from latent space:
                                # batch x num_samps x num_time_pts x dim_latent
                                self.y_pred_lp[-1].append(tf.expand_dims(
                                    net_out, axis=1))
                            # else:
                            #     self.y_pred_lp[-1].append(0.0)
                # add contributions from latent space and linear predictors
                if self.dim_predictors is not None:
                    self.y_pred.append(tf.add(self.y_pred_ls[-1], tf.add_n(
                        self.y_pred_lp[-1])))
                else:
                    self.y_pred.append(self.y_pred_ls[-1])
                # observation-noise variables live in this population's scope
                self._initialize_noise_dist_vars(pop)
        # define branch of graph for evaluating prior model
        with tf.variable_scope('generative_samples'):
            self._sample_yz()
def initialize_prior_vars(self):
"""Initialize variables of prior"""
tr_norm_initializer = tf.initializers.truncated_normal(
mean=0.0, stddev=0.1, dtype=self.dtype)
zeros_initializer = tf.initializers.zeros(dtype=self.dtype)
# mean of initial latent state
if 'z0_mean' in self.gen_params:
z0_mean = tf.get_variable(
'z0_mean',
initializer=self.gen_params['z0_mean'],
dtype=self.dtype)
else:
z0_mean = tf.get_variable(
'z0_mean',
shape=[1, sum(self.dim_latent)],
initializer=zeros_initializer,
dtype=self.dtype)
# means of transition matrix
if 'A' in self.gen_params:
A = tf.get_variable(
'A',
initializer=self.gen_params['A'],
dtype=self.dtype,
trainable = self.train_A)
else:
A = tf.get_variable(
'A',
initializer=0.5 * np.eye(sum(self.dim_latent),
dtype=self.dtype.as_numpy_dtype()),
dtype=self.dtype,
trainable = self.train_A)
# square root of the innovation precision matrix
if 'Q_sqrt' in self.gen_params:
Q_sqrt = tf.get_variable(
'Q_sqrt',
initializer=self.gen_params['Q_sqrt'],
dtype=self.dtype)
else:
Q_sqrt = tf.get_variable(
'Q_sqrt',
initializer=np.eye(
sum(self.dim_latent),
dtype=self.dtype.as_numpy_dtype()),
dtype=self.dtype)
# square root of the initial innovation precision matrix
if 'Q0_sqrt' in self.gen_params:
Q0_sqrt = tf.get_variable(
'Q0_sqrt',
initializer=self.gen_params['Q0_sqrt'],
dtype=self.dtype,
trainable = self.train_Q0)
else:
Q0_sqrt = tf.get_variable(
'Q0_sqrt',
initializer=np.eye(
sum(self.dim_latent),
dtype=self.dtype.as_numpy_dtype()),
dtype=self.dtype,
trainable = self.train_Q0)
# diag = tf.constant(1.0 * np.eye(
# sum(self.dim_latent), dtype=self.dtype.as_numpy_dtype),
# name='small_const')
diag = tf.constant(1e-6 * np.eye(
sum(self.dim_latent), dtype=self.dtype.as_numpy_dtype),
name='small_const')
Q0 = tf.matmul(Q0_sqrt, Q0_sqrt, transpose_b=True, name='Q0') + diag
Q = tf.matmul(Q_sqrt, Q_sqrt, transpose_b=True, name='Q') + diag
Q0_inv = tf.matrix_inverse(Q0, name='Q0_inv')
Q_inv = tf.matrix_inverse(Q, name='Q_inv')
param_dict = {
'z0_mean': z0_mean, 'A': A,
'Q_sqrt': Q_sqrt, 'Q': Q, 'Q_inv': Q_inv,
'Q0_sqrt': Q0_sqrt, 'Q0': Q0, 'Q0_inv': Q0_inv}
return param_dict
def _initialize_noise_dist_vars(self, pop):
if self.noise_dist is 'gaussian':
tr_norm_initializer = tf.initializers.truncated_normal(
mean=0.0, stddev=0.1, dtype=self.dtype)
zeros_initializer = tf.initializers.zeros(dtype=self.dtype)
# square root of diagonal of observation covariance matrix
# (assume diagonal)
if 'R_sqrt' in self.gen_params:
self.R_sqrt.append(tf.get_variable(
'R_sqrt',
initializer=self.gen_params['R_sqrt'][pop],
dtype=self.dtype))
else:
self.R_sqrt.append(tf.get_variable(
'R_sqrt',
shape=[1, self.dim_obs[pop]],
initializer=tr_norm_initializer,
dtype=self.dtype))
self.R.append(tf.square(self.R_sqrt[pop], name='R'))
self.R_inv.append(tf.divide(1.0, self.R[pop] + 1e-6, name='R_inv'))
    def _sample_yz(self):
        """
        Define branch of tensorflow computation graph for sampling from the
        prior
        """
        # number of prior samples to draw is supplied at run time
        self.num_samples_ph = tf.placeholder(
            dtype=tf.int32, shape=None, name='num_samples_ph')
        self._sample_z()
        self._sample_y()
    def _sample_z(self):
        """Build graph ops that draw latent trajectories from the LDS prior.

        Result is stored in `self.z_samples_prior` with shape
        num_samples x num_time_pts x dim_latent.
        """
        self.latent_rand_samples = tf.random_normal(
            shape=[self.num_samples_ph,
                   self.num_time_pts,
                   sum(self.dim_latent)],
            mean=0.0, stddev=1.0, dtype=self.dtype, name='latent_rand_samples')
        # get random samples from latent space
        # one step of the linear dynamics: z_t = A z_{t-1} + Q_sqrt * eps_t
        def lds_update(outputs, inputs):
            z_val = outputs
            rand_z = inputs
            z_val = tf.matmul(z_val, tf.transpose(self.A)) \
                    + tf.matmul(rand_z, tf.transpose(self.Q_sqrt))
            return z_val
        # calculate samples for first time point
        z0_samples = self.z0_mean \
                     + tf.matmul(self.latent_rand_samples[:, 0, :],
                                 tf.transpose(self.Q0_sqrt))
        # scan over time points, not samples
        rand_ph_shuff = tf.transpose(
            self.latent_rand_samples[:, 1:, :], perm=[1, 0, 2])
        z_samples = tf.scan(
            fn=lds_update,
            elems=rand_ph_shuff,
            initializer=z0_samples)
        # concat across time (num_samples x num_time_pts x dim_latent)
        self.z_samples_prior = tf.concat(
            [tf.expand_dims(z0_samples, axis=1),
             tf.transpose(z_samples, perm=[1, 0, 2])], axis=1)
def _sample_y(self):
# expand dims to account for time and mc dims when applying mapping
# now (1 x num_samples x num_time_pts x dim_latent)
z_samples_ex = tf.expand_dims(self.z_samples_prior, axis=0)
y_means_ls = [] # contribution from latent space
y_means_lp = [] # contribution from linear predictors
y_means = []
for pop, pop_dim in enumerate(self.dim_obs):
y_means_ls.append(tf.squeeze(self.networks[pop].apply_network(
z_samples_ex[:, :, :,
self.latent_indxs[pop][0]:
self.latent_indxs[pop][-1]]),
axis=0))
if self.num_clusters is not None:
F = tf.expand_dims(y_means_ls[-1], axis = 2)
y_means_ls[-1] = tf.squeeze(tf.matmul(F, self.mark_probs))
if self.dim_predictors is not None:
# append new list for this population
y_means_lp.append([])
for pred, pred_dim in enumerate(self.dim_predictors):
if self.predictor_indx[pop][pred] is not None:
net_out = self.networks_linear[pop][pred]. \
apply_network(self.linear_predictors_phs[pred])
y_means_lp[-1].append(net_out)
# else:
# self.y_pred_lp[-1].append(0.0)
y_means.append(
tf.add(y_means_ls[-1], tf.add_n(y_means_lp[-1])))
else:
y_means.append(y_means_ls[-1])
# get random samples from observation space
if self.noise_dist is 'gaussian':
obs_rand_samples = []
for pop, pop_dim in enumerate(self.dim_obs):
obs_rand_samples.append(tf.random_normal(
shape=[self.num_samples_ph, self.num_time_pts, pop_dim],
mean=0.0, stddev=1.0, dtype=self.dtype,
name=str('obs_rand_samples_%02i' % pop)))
self.y_samples_prior.append(y_means[pop] + tf.multiply(
obs_rand_samples[pop], self.R_sqrt[pop]))
elif self.noise_dist is 'poisson':
for pop, pop_dim in enumerate(self.dim_obs):
self.y_samples_prior.append(tf.squeeze(tf.random_poisson(
lam=y_means[pop], shape=[1], dtype=self.dtype), axis=0))
def log_density(self, y, z):
"""
Evaluate log density for generative model, defined as
p(y, z) = p(y | z) p(z)
where
p(z) = \prod_t p(z_t), z_t ~ N(A z_{t-1}, Q)
p(y | z) = \prod_t p(y_t | z_t)
Args:
y (batch_size x num_mc_samples x num_time_pts x dim_obs tf.Tensor)
z (batch_size x num_mc_samples x num_time_pts x dim_latent
tf.Tensor)
Returns:
float: log density over y and z, averaged over minibatch samples
and monte carlo samples
"""
# likelihood
with tf.variable_scope('likelihood'):
self.log_density_y = self._log_density_likelihood(y)
# prior
with tf.variable_scope('prior'):
self.log_density_z = self._log_density_prior(z)
return self.log_density_y + self.log_density_z
def _log_density_likelihood(self, y):
log_density_y = []
for pop, pop_dim in enumerate(self.dim_obs):
with tf.variable_scope('population_%02i' % pop):
if self.noise_dist is 'gaussian':
# expand observation dims over mc samples
res_y = tf.expand_dims(y[pop], axis=1) - self.y_pred[pop]
# average over batch and mc sample dimensions
res_y_R_inv_res_y = tf.reduce_mean(
tf.multiply(tf.square(res_y), self.R_inv[pop]),
axis=[0, 1])
# sum over time and observation dimensions
test_like = tf.reduce_sum(res_y_R_inv_res_y)
tf.summary.scalar('log_joint_like', -0.5 * test_like)
# total term for likelihood
log_density_y.append(-0.5 * (test_like
+ self.num_time_pts * tf.reduce_sum(
tf.log(self.R[pop]))
+ self.num_time_pts * pop_dim * tf.log(2.0 * np.pi)))
elif self.noise_dist is 'poisson':
# expand observation dims over mc samples
obs_y = tf.expand_dims(y[pop], axis=1)
# average over batch and mc sample dimensions
log_density_ya = tf.reduce_mean(
tf.multiply(obs_y[pop], tf.log(self.y_pred[pop]))
- self.y_pred[pop]
- tf.lgamma(1 + obs_y[pop]),
axis=[0, 1])
# sum over time and observation dimensions
log_density_y.append(tf.reduce_sum(log_density_ya))
tf.summary.scalar('log_joint_like', log_density_y[-1])
else:
raise ValueError
return tf.add_n(log_density_y, name='log_joint_like_total')
    def _log_density_prior(self, z):
        """Evaluate log p(z) under the linear-dynamics Gaussian prior.

        Args:
            z (batch_size x num_mc_samples x num_time_pts x dim_latent
                tf.Tensor)

        Returns:
            tf.Tensor: scalar log density, averaged over minibatch and monte
            carlo samples, summed over time/latent dimensions.
        """
        # residuals of the first time point and of the dynamics transitions
        self.res_z0 = res_z0 = z[:, :, 0, :] - self.z0_mean
        self.res_z = res_z = z[:, :, 1:, :] - tf.tensordot(
            z[:, :, :-1, :], tf.transpose(self.A), axes=[[3], [0]])
        # average over batch and mc sample dimensions
        res_z_Q_inv_res_z = tf.reduce_mean(tf.multiply(
            tf.tensordot(res_z, self.Q_inv, axes=[[3], [0]]), res_z),
            axis=[0, 1])
        res_z0_Q0_inv_res_z0 = tf.reduce_mean(tf.multiply(
            tf.tensordot(res_z0, self.Q0_inv, axes=[[2], [0]]), res_z0),
            axis=[0, 1])
        # sum over time and latent dimensions
        test_prior = tf.reduce_sum(res_z_Q_inv_res_z)
        test_prior0 = tf.reduce_sum(res_z0_Q0_inv_res_z0)
        tf.summary.scalar('log_joint_prior', -0.5 * test_prior)
        tf.summary.scalar('log_joint_prior0', -0.5 * test_prior0)
        # total term for prior (quadratic forms plus normalization constants)
        log_density_z = -0.5 * (test_prior + test_prior0
                                + (self.num_time_pts - 1) * tf.log(tf.matrix_determinant(self.Q))
                                + tf.log(tf.matrix_determinant(self.Q0))
                                + self.num_time_pts * sum(self.dim_latent) * tf.log(2.0 * np.pi))
        return log_density_z
    def sample(self, sess, num_samples=1, seed=None, linear_predictors=None, mark_probs = None):
        """
        Generate samples from the model

        Args:
            sess (tf.Session object)
            num_samples (int, optional)
            seed (int, optional)
            linear_predictors (list): one array per predictor placeholder;
                required if the model was built with linear predictors
            mark_probs: mark probabilities to feed; required if the model was
                built with `num_clusters`

        Returns:
            num_samples x num_time_pts x dim_obs x numpy array:
                sample observations y
            num_samples x num_time_pts x dim_latent numpy array:
                sample latent states z

        Raises:
            ValueError: if required `linear_predictors` or `mark_probs` are
                missing
        """
        if seed is not None:
            tf.set_random_seed(seed)
        if self.dim_predictors is not None and linear_predictors is None:
            raise ValueError('must supply linear predictors for sampling')
        if self.num_clusters is not None and mark_probs is None:
            raise ValueError('must supply mark probabilities for sampling')
        feed_dict = {self.num_samples_ph: num_samples}
        if self.dim_predictors is not None:
            for pred, pred_ph in enumerate(self.linear_predictors_phs):
                feed_dict[pred_ph] = linear_predictors[pred]
        if self.num_clusters is not None:
            feed_dict[self.mark_probs] = mark_probs
        [y, z] = sess.run(
            [self.y_samples_prior, self.z_samples_prior],
            feed_dict=feed_dict)
        return y, z
def get_params(self, sess):
"""Get parameters of generative model"""
if self.noise_dist is 'gaussian':
A, R_sqrt, z0_mean, Q, Q0 = sess.run(
[self.A, self.R_sqrt, self.z0_mean, self.Q, self.Q0])
param_dict = {
'A': A, 'R': np.square(R_sqrt), 'z0_mean': z0_mean,
'Q': Q, 'Q0': Q0}
elif self.noise_dist is 'poisson':
A, z0_mean, Q, Q0 = sess.run(
[self.A, self.z0_mean, self.Q, self.Q0])
param_dict = {
'A': A, 'z0_mean': z0_mean, 'Q': Q, 'Q0': Q0}
else:
raise ValueError
return param_dict
def get_linear_params(self, sess):
"""Get parameters of linear regressors"""
param_dict = []
for pop, pop_dim in enumerate(self.dim_obs):
param_dict.append([])
for pred, pred_dim in enumerate(self.dim_predictors):
if self.predictor_indx[pop][pred] is not None:
layer_weights_ = sess.run(
self.networks_linear[pop][pred].layers[0].weights)
else:
layer_weights_ = []
param_dict[pop].append(layer_weights_)
return param_dict
class NetLDS(NetFLDS):
"""
Generative model is defined as
z_t ~ N(A z_{t-1}, Q)
y_t^i ~ N(C_i z_t^i + d_i, R_i)
for each population i, where the z_t^i are non-overlapping subsets of z_t
"""
def __init__(
self, dim_obs=None, dim_latent=None, linear_predictors=None,
num_time_pts=None, gen_params=None, noise_dist='gaussian',
post_z_samples=None, **kwargs):
"""
Args:
dim_obs (list): observation dimension for each population
dim_latent (list): latent dimension for each population
linear_predictors (dict):
'dim_predictors' (list): dimension for each set of linear
predictors
'predictor_indx' (list of lists): each element of the list
contains the indices of the predictors in the
`dim_predictors` list used by the corresponding population
num_time_pts (int): number of time points per observation of the
dynamical sequence
gen_params (dict): dictionary of generative params for initializing
model
noise_dist (str): 'gaussian' | 'poisson'
post_z_samples (batch_size x num_mc_samples x num_time_pts x
dim_latent tf.Tensor): samples from the (appx) posterior of the
latent states
"""
if gen_params is None:
gen_params = {}
# iterate through populations
# NOTE: must set kernel/bias initializers outside of this constructor
# for now since NetFLDS assumes nn_params is the same for each pop
for pop, _ in enumerate(dim_obs):
# emissions matrix
if 'C' in gen_params:
kernel_initializer = tf.constant_initializer(
gen_params['C'][pop], dtype=self.dtype)
else:
kernel_initializer = 'trunc_normal'
# biases
if 'd' in gen_params:
bias_initializer = tf.constant_initializer(
gen_params['d'][pop], dtype=self.dtype)
else:
bias_initializer = 'zeros'
# list of dicts specifying (linear) nn to observations
nn_params = [{
'units': dim_obs[pop],
'activation': 'linear',
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'kernel_regularizer': None,
'bias_regularizer': None}]
super().__init__(
dim_obs=dim_obs, dim_latent=dim_latent, nn_params=nn_params,
linear_predictors=linear_predictors, noise_dist=noise_dist,
post_z_samples=post_z_samples, num_time_pts=num_time_pts,
gen_params=gen_params)
def get_params(self, sess):
    """Return generative-model params, adding per-population C and d lists."""
    params = super().get_params(sess)
    emissions, biases = [], []
    for pop in range(len(self.dim_obs)):
        # first layer of each population's network holds [kernel, bias]
        weights = sess.run(self.networks[pop].layers[0].weights)
        emissions.append(weights[0])
        biases.append(weights[1])
    params['C'] = emissions
    params['d'] = biases
    return params
class FLDS(NetFLDS):
    """
    Single-population (non-linear emission) latent dynamical system.

    Generative model is defined as
        z_t ~ N(A z_{t-1}, Q)
        E[y_t] ~ f(z_t)
    """

    def __init__(
            self, dim_obs=None, dim_latent=None, dim_predictors=None,
            num_time_pts=None, gen_params=None, noise_dist='gaussian',
            nn_params=None, post_z_samples=None, train_A=True, train_Q0=True,
            num_clusters=None, **kwargs):
        """
        Args:
            dim_obs (int): observation dimension
            dim_latent (int): latent dimension
            dim_predictors (list): dimension for each set of linear predictors
            num_time_pts (int): number of time points per observation of the
                dynamical sequence
            gen_params (dict): dictionary of generative params for
                initializing model
            noise_dist (str): 'gaussian' | 'poisson'
            nn_params (list): dictionaries for building each layer of the
                mapping from the latent space to observations; the same
                network architecture is used for each population
            post_z_samples (batch_size x num_mc_samples x num_time_pts x
                dim_latent tf.Tensor): samples from the (appx) posterior of
                the latent states
        """
        linear_predictors = None
        if dim_predictors is not None:
            # all predictor sets feed the (single) population
            linear_predictors = {
                'dim_predictors': dim_predictors,
                'predictor_indx': [range(len(dim_predictors))]}
            if 'predictor_params' in kwargs:
                linear_predictors['predictor_params'] = [
                    kwargs['predictor_params']]
        # wrap scalar dims in lists: NetFLDS expects one entry per population
        super().__init__(
            dim_obs=[dim_obs], dim_latent=[dim_latent],
            linear_predictors=linear_predictors,
            post_z_samples=post_z_samples, num_time_pts=num_time_pts,
            gen_params=gen_params, nn_params=nn_params,
            noise_dist=noise_dist, train_A=train_A, train_Q0=train_Q0,
            num_clusters=num_clusters)

    def sample(self, sess, num_samples=1, seed=None, linear_predictors=None,
               mark_probs=None):
        """Draw samples and unwrap the single population's observations."""
        obs, latents = super().sample(
            sess, num_samples, seed, linear_predictors, mark_probs)
        return obs[0], latents
class LDS(NetFLDS):
    """
    Single-population linear-Gaussian latent dynamical system.

    Generative model is defined as
        z_t ~ N(A z_{t-1}, Q)
        y_t ~ N(C z_t + d, R)
    """

    def __init__(
            self, dim_obs=None, dim_latent=None, dim_predictors=None,
            num_time_pts=None, gen_params=None, noise_dist='gaussian',
            post_z_samples=None, **kwargs):
        """
        Args:
            dim_obs (int): observation dimension
            dim_latent (int): latent dimension
            dim_predictors (list): dimension for each set of linear predictors
            num_time_pts (int): number of time points per observation of the
                dynamical sequence
            gen_params (dict): dictionary of generative params for
                initializing model; may contain 'C' (emissions matrix) and
                'd' (observation biases)
            noise_dist (str): 'gaussian' | 'poisson'
            post_z_samples (batch_size x num_mc_samples x num_time_pts x
                dim_latent tf.Tensor): samples from the (appx) posterior of
                the latent states
        """
        gen_params = {} if gen_params is None else gen_params
        # emissions matrix C: constant init when supplied, otherwise a
        # truncated-normal initializer
        if 'C' in gen_params:
            kernel_initializer = tf.constant_initializer(
                gen_params['C'], dtype=self.dtype)
        else:
            kernel_initializer = 'trunc_normal'
        # observation biases d: constant init when supplied, otherwise zeros
        if 'd' in gen_params:
            bias_initializer = tf.constant_initializer(
                gen_params['d'], dtype=self.dtype)
        else:
            bias_initializer = 'zeros'
        # single linear layer mapping latent states to observations
        nn_params = [{
            'units': dim_obs,
            'activation': 'linear',
            'kernel_initializer': kernel_initializer,
            'bias_initializer': bias_initializer,
            'kernel_regularizer': None,
            'bias_regularizer': None}]
        linear_predictors = None
        if dim_predictors is not None:
            # all predictor sets feed the (single) population
            linear_predictors = {
                'dim_predictors': dim_predictors,
                'predictor_indx': [range(len(dim_predictors))]}
            if 'predictor_params' in kwargs:
                linear_predictors['predictor_params'] = [
                    kwargs['predictor_params']]
        # wrap scalar dims in lists: NetFLDS expects one entry per population
        super().__init__(
            dim_obs=[dim_obs], dim_latent=[dim_latent],
            linear_predictors=linear_predictors,
            post_z_samples=post_z_samples, num_time_pts=num_time_pts,
            gen_params=gen_params, nn_params=nn_params,
            noise_dist=noise_dist)

    def sample(self, sess, num_samples=1, seed=None, linear_predictors=None):
        """Draw samples and unwrap the single population's observations."""
        obs, latents = super().sample(
            sess, num_samples, seed, linear_predictors)
        return obs[0], latents

    def get_params(self, sess):
        """Return generative-model params, adding the emission C and bias d."""
        params = super().get_params(sess)
        # first layer of the single network holds [kernel, bias]
        weights = sess.run(self.networks[0].layers[0].weights)
        params['C'] = weights[0]
        params['d'] = weights[1]
        return params
| [
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"tensorflow.scan",
"tensorflow.set_random_seed",
"tensorflow.log",
"numpy.arange",
"tensorflow.eye",
"tensorflow.tensordot",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.square",
... | [((12317, 12389), 'tensorflow.initializers.truncated_normal', 'tf.initializers.truncated_normal', ([], {'mean': '(0.0)', 'stddev': '(0.1)', 'dtype': 'self.dtype'}), '(mean=0.0, stddev=0.1, dtype=self.dtype)\n', (12349, 12389), True, 'import tensorflow as tf\n'), ((12431, 12470), 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {'dtype': 'self.dtype'}), '(dtype=self.dtype)\n', (12452, 12470), True, 'import tensorflow as tf\n'), ((15019, 15055), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['Q0'], {'name': '"""Q0_inv"""'}), "(Q0, name='Q0_inv')\n", (15036, 15055), True, 'import tensorflow as tf\n'), ((15072, 15106), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['Q'], {'name': '"""Q_inv"""'}), "(Q, name='Q_inv')\n", (15089, 15106), True, 'import tensorflow as tf\n'), ((16492, 16557), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': 'None', 'name': '"""num_samples_ph"""'}), "(dtype=tf.int32, shape=None, name='num_samples_ph')\n", (16506, 16557), True, 'import tensorflow as tf\n'), ((17475, 17539), 'tensorflow.transpose', 'tf.transpose', (['self.latent_rand_samples[:, 1:, :]'], {'perm': '[1, 0, 2]'}), '(self.latent_rand_samples[:, 1:, :], perm=[1, 0, 2])\n', (17487, 17539), True, 'import tensorflow as tf\n'), ((17582, 17649), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'lds_update', 'elems': 'rand_ph_shuff', 'initializer': 'z0_samples'}), '(fn=lds_update, elems=rand_ph_shuff, initializer=z0_samples)\n', (17589, 17649), True, 'import tensorflow as tf\n'), ((18099, 18143), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.z_samples_prior'], {'axis': '(0)'}), '(self.z_samples_prior, axis=0)\n', (18113, 18143), True, 'import tensorflow as tf\n'), ((23130, 23182), 'tensorflow.add_n', 'tf.add_n', (['log_density_y'], {'name': '"""log_joint_like_total"""'}), "(log_density_y, name='log_joint_like_total')\n", (23138, 23182), True, 'import tensorflow as tf\n'), ((23840, 23872), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['res_z_Q_inv_res_z'], {}), '(res_z_Q_inv_res_z)\n', (23853, 23872), True, 'import tensorflow as tf\n'), ((23895, 23930), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res_z0_Q0_inv_res_z0'], {}), '(res_z0_Q0_inv_res_z0)\n', (23908, 23930), True, 'import tensorflow as tf\n'), ((23939, 23994), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""log_joint_prior"""', '(-0.5 * test_prior)'], {}), "('log_joint_prior', -0.5 * test_prior)\n", (23956, 23994), True, 'import tensorflow as tf\n'), ((24003, 24060), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""log_joint_prior0"""', '(-0.5 * test_prior0)'], {}), "('log_joint_prior0', -0.5 * test_prior0)\n", (24020, 24060), True, 'import tensorflow as tf\n'), ((8671, 8709), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""linear_predictors"""'], {}), "('linear_predictors')\n", (8688, 8709), True, 'import tensorflow as tf\n'), ((12133, 12172), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative_samples"""'], {}), "('generative_samples')\n", (12150, 12172), True, 'import tensorflow as tf\n'), ((12574, 12663), 'tensorflow.get_variable', 'tf.get_variable', (['"""z0_mean"""'], {'initializer': "self.gen_params['z0_mean']", 'dtype': 'self.dtype'}), "('z0_mean', initializer=self.gen_params['z0_mean'], dtype=\n self.dtype)\n", (12589, 12663), True, 'import tensorflow as tf\n'), ((13019, 13119), 'tensorflow.get_variable', 'tf.get_variable', (['"""A"""'], {'initializer': "self.gen_params['A']", 'dtype': 'self.dtype', 'trainable': 'self.train_A'}), "('A', initializer=self.gen_params['A'], dtype=self.dtype,\n trainable=self.train_A)\n", (13034, 13119), True, 'import tensorflow as tf\n'), ((13586, 13673), 'tensorflow.get_variable', 'tf.get_variable', (['"""Q_sqrt"""'], {'initializer': "self.gen_params['Q_sqrt']", 'dtype': 'self.dtype'}), "('Q_sqrt', initializer=self.gen_params['Q_sqrt'], dtype=self\n .dtype)\n", (13601, 13673), True, 'import tensorflow as tf\n'), ((14093, 14207), 'tensorflow.get_variable', 
'tf.get_variable', (['"""Q0_sqrt"""'], {'initializer': "self.gen_params['Q0_sqrt']", 'dtype': 'self.dtype', 'trainable': 'self.train_Q0'}), "('Q0_sqrt', initializer=self.gen_params['Q0_sqrt'], dtype=\n self.dtype, trainable=self.train_Q0)\n", (14108, 14207), True, 'import tensorflow as tf\n'), ((14865, 14921), 'tensorflow.matmul', 'tf.matmul', (['Q0_sqrt', 'Q0_sqrt'], {'transpose_b': '(True)', 'name': '"""Q0"""'}), "(Q0_sqrt, Q0_sqrt, transpose_b=True, name='Q0')\n", (14874, 14921), True, 'import tensorflow as tf\n'), ((14941, 14994), 'tensorflow.matmul', 'tf.matmul', (['Q_sqrt', 'Q_sqrt'], {'transpose_b': '(True)', 'name': '"""Q"""'}), "(Q_sqrt, Q_sqrt, transpose_b=True, name='Q')\n", (14950, 14994), True, 'import tensorflow as tf\n'), ((15438, 15510), 'tensorflow.initializers.truncated_normal', 'tf.initializers.truncated_normal', ([], {'mean': '(0.0)', 'stddev': '(0.1)', 'dtype': 'self.dtype'}), '(mean=0.0, stddev=0.1, dtype=self.dtype)\n', (15470, 15510), True, 'import tensorflow as tf\n'), ((15560, 15599), 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {'dtype': 'self.dtype'}), '(dtype=self.dtype)\n', (15581, 15599), True, 'import tensorflow as tf\n'), ((20937, 20968), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""likelihood"""'], {}), "('likelihood')\n", (20954, 20968), True, 'import tensorflow as tf\n'), ((21065, 21091), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior"""'], {}), "('prior')\n", (21082, 21091), True, 'import tensorflow as tf\n'), ((24980, 25004), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (24998, 25004), True, 'import tensorflow as tf\n'), ((33727, 33785), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["gen_params['C']"], {'dtype': 'self.dtype'}), "(gen_params['C'], dtype=self.dtype)\n", (33750, 33785), True, 'import tensorflow as tf\n'), ((33944, 34002), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["gen_params['d']"], {'dtype': 
'self.dtype'}), "(gen_params['d'], dtype=self.dtype)\n", (33967, 34002), True, 'import tensorflow as tf\n'), ((16201, 16238), 'tensorflow.square', 'tf.square', (['self.R_sqrt[pop]'], {'name': '"""R"""'}), "(self.R_sqrt[pop], name='R')\n", (16210, 16238), True, 'import tensorflow as tf\n'), ((16270, 16319), 'tensorflow.divide', 'tf.divide', (['(1.0)', '(self.R[pop] + 1e-06)'], {'name': '"""R_inv"""'}), "(1.0, self.R[pop] + 1e-06, name='R_inv')\n", (16279, 16319), True, 'import tensorflow as tf\n'), ((17365, 17391), 'tensorflow.transpose', 'tf.transpose', (['self.Q0_sqrt'], {}), '(self.Q0_sqrt)\n', (17377, 17391), True, 'import tensorflow as tf\n'), ((17814, 17848), 'tensorflow.expand_dims', 'tf.expand_dims', (['z0_samples'], {'axis': '(1)'}), '(z0_samples, axis=1)\n', (17828, 17848), True, 'import tensorflow as tf\n'), ((17863, 17902), 'tensorflow.transpose', 'tf.transpose', (['z_samples'], {'perm': '[1, 0, 2]'}), '(z_samples, perm=[1, 0, 2])\n', (17875, 17902), True, 'import tensorflow as tf\n'), ((18641, 18679), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_means_ls[-1]'], {'axis': '(2)'}), '(y_means_ls[-1], axis=2)\n', (18655, 18679), True, 'import tensorflow as tf\n'), ((21351, 21393), 'tensorflow.variable_scope', 'tf.variable_scope', (["('population_%02i' % pop)"], {}), "('population_%02i' % pop)\n", (21368, 21393), True, 'import tensorflow as tf\n'), ((23370, 23390), 'tensorflow.transpose', 'tf.transpose', (['self.A'], {}), '(self.A)\n', (23382, 23390), True, 'import tensorflow as tf\n'), ((23532, 23580), 'tensorflow.tensordot', 'tf.tensordot', (['res_z', 'self.Q_inv'], {'axes': '[[3], [0]]'}), '(res_z, self.Q_inv, axes=[[3], [0]])\n', (23544, 23580), True, 'import tensorflow as tf\n'), ((23686, 23736), 'tensorflow.tensordot', 'tf.tensordot', (['res_z0', 'self.Q0_inv'], {'axes': '[[2], [0]]'}), '(res_z0, self.Q0_inv, axes=[[2], [0]])\n', (23698, 23736), True, 'import tensorflow as tf\n'), ((26065, 26082), 'numpy.square', 'np.square', (['R_sqrt'], {}), 
'(R_sqrt)\n', (26074, 26082), True, 'import numpy as np\n'), ((28850, 28913), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["gen_params['C'][pop]"], {'dtype': 'self.dtype'}), "(gen_params['C'][pop], dtype=self.dtype)\n", (28873, 28913), True, 'import tensorflow as tf\n'), ((29096, 29159), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["gen_params['d'][pop]"], {'dtype': 'self.dtype'}), "(gen_params['d'][pop], dtype=self.dtype)\n", (29119, 29159), True, 'import tensorflow as tf\n'), ((4966, 5024), 'netlds.network.Network', 'Network', ([], {'output_dim': 'self.num_clusters', 'nn_params': 'nn_params'}), '(output_dim=self.num_clusters, nn_params=nn_params)\n', (4973, 5024), False, 'from netlds.network import Network\n'), ((5288, 5336), 'netlds.network.Network', 'Network', ([], {'output_dim': 'pop_dim', 'nn_params': 'nn_params'}), '(output_dim=pop_dim, nn_params=nn_params)\n', (5295, 5336), False, 'from netlds.network import Network\n'), ((7291, 7341), 'netlds.network.Network', 'Network', ([], {'output_dim': 'pop_dim', 'nn_params': 'pred_params'}), '(output_dim=pop_dim, nn_params=pred_params)\n', (7298, 7341), False, 'from netlds.network import Network\n'), ((9420, 9461), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""latent_space_mapping"""'], {}), "('latent_space_mapping')\n", (9437, 9461), True, 'import tensorflow as tf\n'), ((10590, 10628), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""regressor_mapping"""'], {}), "('regressor_mapping')\n", (10607, 10628), True, 'import tensorflow as tf\n'), ((15783, 15875), 'tensorflow.get_variable', 'tf.get_variable', (['"""R_sqrt"""'], {'initializer': "self.gen_params['R_sqrt'][pop]", 'dtype': 'self.dtype'}), "('R_sqrt', initializer=self.gen_params['R_sqrt'][pop], dtype\n =self.dtype)\n", (15798, 15875), True, 'import tensorflow as tf\n'), ((15986, 16097), 'tensorflow.get_variable', 'tf.get_variable', (['"""R_sqrt"""'], {'shape': '[1, self.dim_obs[pop]]', 'initializer': 
'tr_norm_initializer', 'dtype': 'self.dtype'}), "('R_sqrt', shape=[1, self.dim_obs[pop]], initializer=\n tr_norm_initializer, dtype=self.dtype)\n", (16001, 16097), True, 'import tensorflow as tf\n'), ((17084, 17104), 'tensorflow.transpose', 'tf.transpose', (['self.A'], {}), '(self.A)\n', (17096, 17104), True, 'import tensorflow as tf\n'), ((17144, 17169), 'tensorflow.transpose', 'tf.transpose', (['self.Q_sqrt'], {}), '(self.Q_sqrt)\n', (17156, 17169), True, 'import tensorflow as tf\n'), ((18726, 18755), 'tensorflow.matmul', 'tf.matmul', (['F', 'self.mark_probs'], {}), '(F, self.mark_probs)\n', (18735, 18755), True, 'import tensorflow as tf\n'), ((21914, 21946), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res_y_R_inv_res_y'], {}), '(res_y_R_inv_res_y)\n', (21927, 21946), True, 'import tensorflow as tf\n'), ((21967, 22020), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""log_joint_like"""', '(-0.5 * test_like)'], {}), "('log_joint_like', -0.5 * test_like)\n", (21984, 22020), True, 'import tensorflow as tf\n'), ((24338, 24357), 'tensorflow.log', 'tf.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (24344, 24357), True, 'import tensorflow as tf\n'), ((8908, 9022), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'self.dtype', 'shape': '[None, self.num_time_pts, dim_pred]', 'name': "('linear_pred_ph_%02i' % pred)"}), "(dtype=self.dtype, shape=[None, self.num_time_pts, dim_pred],\n name='linear_pred_ph_%02i' % pred)\n", (8922, 9022), True, 'import tensorflow as tf\n'), ((9645, 9696), 'numpy.arange', 'np.arange', (['indx_start', '(indx_end + 1)'], {'dtype': 'np.int32'}), '(indx_start, indx_end + 1, dtype=np.int32)\n', (9654, 9696), True, 'import numpy as np\n'), ((10025, 10087), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'self.dtype', 'shape': 'shp', 'name': '"""mark_probs"""'}), "(dtype=self.dtype, shape=shp, name='mark_probs')\n", (10039, 10087), True, 'import tensorflow as tf\n'), ((10237, 10283), 'tensorflow.transpose', 
'tf.transpose', (['self.y_pred_ls[-1]', '(0, 2, 1, 3)'], {}), '(self.y_pred_ls[-1], (0, 2, 1, 3))\n', (10249, 10283), True, 'import tensorflow as tf\n'), ((19394, 19418), 'tensorflow.add_n', 'tf.add_n', (['y_means_lp[-1]'], {}), '(y_means_lp[-1])\n', (19402, 19418), True, 'import tensorflow as tf\n'), ((19988, 20040), 'tensorflow.multiply', 'tf.multiply', (['obs_rand_samples[pop]', 'self.R_sqrt[pop]'], {}), '(obs_rand_samples[pop], self.R_sqrt[pop])\n', (19999, 20040), True, 'import tensorflow as tf\n'), ((21536, 21566), 'tensorflow.expand_dims', 'tf.expand_dims', (['y[pop]'], {'axis': '(1)'}), '(y[pop], axis=1)\n', (21550, 21566), True, 'import tensorflow as tf\n'), ((22465, 22495), 'tensorflow.expand_dims', 'tf.expand_dims', (['y[pop]'], {'axis': '(1)'}), '(y[pop], axis=1)\n', (22479, 22495), True, 'import tensorflow as tf\n'), ((22999, 23053), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""log_joint_like"""', 'log_density_y[-1]'], {}), "('log_joint_like', log_density_y[-1])\n", (23016, 23053), True, 'import tensorflow as tf\n'), ((24249, 24279), 'tensorflow.matrix_determinant', 'tf.matrix_determinant', (['self.Q0'], {}), '(self.Q0)\n', (24270, 24279), True, 'import tensorflow as tf\n'), ((10342, 10371), 'tensorflow.matmul', 'tf.matmul', (['F', 'self.mark_probs'], {}), '(F, self.mark_probs)\n', (10351, 10371), True, 'import tensorflow as tf\n'), ((11868, 11896), 'tensorflow.add_n', 'tf.add_n', (['self.y_pred_lp[-1]'], {}), '(self.y_pred_lp[-1])\n', (11876, 11896), True, 'import tensorflow as tf\n'), ((20219, 20283), 'tensorflow.random_poisson', 'tf.random_poisson', ([], {'lam': 'y_means[pop]', 'shape': '[1]', 'dtype': 'self.dtype'}), '(lam=y_means[pop], shape=[1], dtype=self.dtype)\n', (20236, 20283), True, 'import tensorflow as tf\n'), ((21745, 21761), 'tensorflow.square', 'tf.square', (['res_y'], {}), '(res_y)\n', (21754, 21761), True, 'import tensorflow as tf\n'), ((22948, 22977), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_density_ya'], {}), 
'(log_density_ya)\n', (22961, 22977), True, 'import tensorflow as tf\n'), ((6759, 6774), 'tensorflow.eye', 'tf.eye', (['pop_dim'], {}), '(pop_dim)\n', (6765, 6774), True, 'import tensorflow as tf\n'), ((22779, 22804), 'tensorflow.lgamma', 'tf.lgamma', (['(1 + obs_y[pop])'], {}), '(1 + obs_y[pop])\n', (22788, 22804), True, 'import tensorflow as tf\n'), ((24197, 24226), 'tensorflow.matrix_determinant', 'tf.matrix_determinant', (['self.Q'], {}), '(self.Q)\n', (24218, 24226), True, 'import tensorflow as tf\n'), ((11502, 11533), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_out'], {'axis': '(1)'}), '(net_out, axis=1)\n', (11516, 11533), True, 'import tensorflow as tf\n'), ((22301, 22320), 'tensorflow.log', 'tf.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (22307, 22320), True, 'import tensorflow as tf\n'), ((22684, 22708), 'tensorflow.log', 'tf.log', (['self.y_pred[pop]'], {}), '(self.y_pred[pop])\n', (22690, 22708), True, 'import tensorflow as tf\n'), ((22223, 22242), 'tensorflow.log', 'tf.log', (['self.R[pop]'], {}), '(self.R[pop])\n', (22229, 22242), True, 'import tensorflow as tf\n')] |
"""CONTINUOUSLY ADOPTIVE MEAN SHIFT"""
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
cap = cv.VideoCapture('pedestrian.mp4')
ret,frame = cap.read()
x,y,w,h = 950,570,70,60
track = (x,y,w,h)
roi = frame[y:y+h, x:x+w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
hist_roi = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(hist_roi, hist_roi, 0, 255, cv.NORM_MINMAX)
term = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT,10,1)
while True:
ret,frame = cap.read()
if ret == True:
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv], [0], hist_roi, [0,180], 1)
ret,track= cv.CamShift(dst, track, term)
#x,y,w,h = track
#cv.rectangle(frame, (x, y), (x+w, y+h), 255, 3)
pts = cv.boxPoints(ret)
pts = np.int0(pts)
image = cv.polylines(frame, [pts], True, (0,255,0),3)
cv.imshow('img',image)
k = cv.waitKey(10)
if k==ord('s'):
break
else:
break
| [
"cv2.calcHist",
"cv2.normalize",
"cv2.calcBackProject",
"cv2.CamShift",
"cv2.boxPoints",
"cv2.polylines",
"numpy.int0",
"cv2.imshow",
"numpy.array",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.waitKey"
] | [((119, 152), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""pedestrian.mp4"""'], {}), "('pedestrian.mp4')\n", (134, 152), True, 'import cv2 as cv\n'), ((263, 297), 'cv2.cvtColor', 'cv.cvtColor', (['roi', 'cv.COLOR_BGR2HSV'], {}), '(roi, cv.COLOR_BGR2HSV)\n', (274, 297), True, 'import cv2 as cv\n'), ((394, 444), 'cv2.calcHist', 'cv.calcHist', (['[hsv_roi]', '[0]', 'mask', '[180]', '[0, 180]'], {}), '([hsv_roi], [0], mask, [180], [0, 180])\n', (405, 444), True, 'import cv2 as cv\n'), ((446, 502), 'cv2.normalize', 'cv.normalize', (['hist_roi', 'hist_roi', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(hist_roi, hist_roi, 0, 255, cv.NORM_MINMAX)\n', (458, 502), True, 'import cv2 as cv\n'), ((326, 353), 'numpy.array', 'np.array', (['(0.0, 60.0, 32.0)'], {}), '((0.0, 60.0, 32.0))\n', (334, 353), True, 'import numpy as np\n'), ((352, 383), 'numpy.array', 'np.array', (['(180.0, 255.0, 255.0)'], {}), '((180.0, 255.0, 255.0))\n', (360, 383), True, 'import numpy as np\n'), ((647, 683), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2HSV'], {}), '(frame, cv.COLOR_BGR2HSV)\n', (658, 683), True, 'import cv2 as cv\n'), ((699, 752), 'cv2.calcBackProject', 'cv.calcBackProject', (['[hsv]', '[0]', 'hist_roi', '[0, 180]', '(1)'], {}), '([hsv], [0], hist_roi, [0, 180], 1)\n', (717, 752), True, 'import cv2 as cv\n'), ((774, 803), 'cv2.CamShift', 'cv.CamShift', (['dst', 'track', 'term'], {}), '(dst, track, term)\n', (785, 803), True, 'import cv2 as cv\n'), ((905, 922), 'cv2.boxPoints', 'cv.boxPoints', (['ret'], {}), '(ret)\n', (917, 922), True, 'import cv2 as cv\n'), ((938, 950), 'numpy.int0', 'np.int0', (['pts'], {}), '(pts)\n', (945, 950), True, 'import numpy as np\n'), ((968, 1016), 'cv2.polylines', 'cv.polylines', (['frame', '[pts]', '(True)', '(0, 255, 0)', '(3)'], {}), '(frame, [pts], True, (0, 255, 0), 3)\n', (980, 1016), True, 'import cv2 as cv\n'), ((1025, 1048), 'cv2.imshow', 'cv.imshow', (['"""img"""', 'image'], {}), "('img', image)\n", (1034, 1048), True, 'import cv2 as 
cv\n'), ((1061, 1075), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (1071, 1075), True, 'import cv2 as cv\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.