repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
myriad | myriad-main/myriad/nlp_solvers/extra_gradient.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from jax import jit, grad
from tensorboardX import SummaryWriter # for parameter tuning
writer = SummaryWriter()
def extra_gradient(fun, x0, method, constraints, bounds, jac, options):
    """Constrained minimization via the extragradient method on the Lagrangian.

    Performs projected primal descent (with a look-ahead extrapolation step)
    and dual ascent on the multipliers, clipping the primal iterates to the
    box bounds. Scalars are logged periodically to TensorBoard via the
    module-level `writer`.

    Args:
        fun: objective callable f(x) -> scalar
        x0: initial primal point (jnp.ndarray)
        method: ignored (kept for a scipy.optimize.minimize-style signature)
        constraints: dict with key 'fun' giving the constraint callable h(x);
            multipliers are never projected to be nonnegative, so constraints
            are treated as equalities
        bounds: (n, 2) array of [lower, upper] box bounds on x
        jac: ignored (kept for a scipy.optimize.minimize-style signature)
        options: dict of hyperparameters; recognized keys are 'maxiter',
            'eta_x' (primal step), 'eta_v' (dual step), and 'atol'

    Returns:
        dict with primal solution 'x', multipliers 'v', final objective 'fun',
        and a convergence flag 'success'
    """
    del method, jac  # unused; kept so the signature matches other solvers

    constraint_fun = constraints['fun']

    # Hyperparameters, with defaults when not supplied.
    # (Previously `options['maxiter']` was printed unconditionally, which
    # raised KeyError whenever the caller relied on the default.)
    max_iter = options.get('maxiter', 30_000)
    eta_x = options.get('eta_x', 1e-1)  # primals
    eta_v = options.get('eta_v', 1e-3)  # duals
    atol = options.get('atol', 1e-6)  # convergence tolerance
    print("we're trying exgd with steps:", max_iter)

    @jit
    def lagrangian(x, lmbda):
        return fun(x) + lmbda @ constraint_fun(x)

    @jit
    # We address bounds by clipping
    def step(x, lmbda):
        # Extrapolation ("look-ahead") point, projected onto the box bounds
        x_bar = jnp.clip(x - eta_x * grad(lagrangian, argnums=0)(x, lmbda),
                         a_min=bounds[:, 0], a_max=bounds[:, 1])
        # Primal update uses the gradient evaluated at the extrapolated point
        x_new = jnp.clip(x - eta_x * grad(lagrangian, argnums=0)(x_bar, lmbda),
                         a_min=bounds[:, 0], a_max=bounds[:, 1])
        # Dual ascent on the multipliers
        lmbda_new = lmbda + eta_v * grad(lagrangian, argnums=1)(x_new, lmbda)
        return x_new, lmbda_new

    def solve(x, lmbda):
        nonlocal eta_x, eta_v  # so we can modify them during solve
        success = False
        x_old = x + 20  # just so we don't terminate immediately
        for i in range(max_iter):
            if i % 2000 == 0:
                # Tensorboard recording here
                writer.add_scalar('loss/fx', fun(x), i)
                cur_lag = lagrangian(x, lmbda)
                writer.add_scalar('lagrangian/lag', cur_lag, i)
                for d, hi in enumerate(constraint_fun(x)):
                    writer.add_scalar('vars/lambda_{}'.format(d), lmbda[d], i)
                    writer.add_scalar('constraints/hx_{}'.format(d), hi, i)
            # Success
            if i % 1000 == 0 and jnp.allclose(x_old, x, rtol=0., atol=atol):  # tune tolerance according to need
                success = True
                break
            # Decrease step size
            if i % 1000 == 0:
                eta_x *= 0.999
                eta_v *= 0.999
                x_old = x
            x, lmbda = step(x, lmbda)
            # BUG FIX: was `if i % 1000 and ...`, which is truthy on every
            # iteration *except* multiples of 1000 — the opposite of the
            # periodic checks above. Check for NaNs every 1000 steps instead.
            if i % 1000 == 0 and (jnp.isnan(x).any() or jnp.isnan(lmbda).any()):
                print("WE GOT NANS")
                print("cur x", x)
                print("cur lmbda", lmbda)
                raise SystemExit
        writer.close()
        return x, lmbda, success

    # Initialize all multipliers to one (one per constraint component).
    lmbda_init = jnp.ones_like(constraint_fun(x0))
    x, lmbda, success = solve(x0, lmbda_init)
    # writer.export_scalars_to_json("./all_scalars.json")
    return {
        'x': x,
        'v': lmbda,
        'fun': fun(x),
        'success': success
    }
myriad | myriad-main/myriad/systems/base.py | # (c) 2021 Nikolaus Howe
from abc import ABC
from dataclasses import dataclass
from typing import Mapping, Optional
import jax.numpy as jnp
from myriad.custom_types import Control, Controls, Cost, DState, Params, State, States
@dataclass
class FiniteHorizonControlSystem(object):
    """
    Abstract class describing a finite-horizon control system. Model a problem of the form:

    .. math::

        \\begin{align}
        &\\min_u \\quad &&g_T(x_T,u_T,T) + \\int_0^T g(x,u,t) dt \\\\
        & \\; \\mathrm{s.t.}\\quad && x'(t) = f(x,u,t) \\\\
        & && x(0)=x_0
        \\end{align}

    Concrete systems subclass this and implement `dynamics` and `cost`;
    `parametrized_dynamics` / `parametrized_cost` default to the unparametrized
    versions and are only overridden by systems with learnable parameters.
    """
    x_0: jnp.ndarray
    """ State at time 0"""
    x_T: Optional[jnp.ndarray]
    """State at time T"""
    T: float
    """Duration of trajectory"""
    bounds: jnp.ndarray
    """State and control bounds"""
    terminal_cost: bool = False
    """Whether or not there is an additional cost added at the end of the trajectory"""
    discrete: bool = False
    """Whether or not the system is discrete"""

    # def __post_init__(self):
    #   self.x_0 = self.x_0.astype(jnp.float64)
    #   if self.x_T is not None:
    #     assert self.x_0.shape == self.x_T.shape
    #     self.x_T = self.x_T.astype(jnp.float64)
    #   assert self.bounds.shape == (self.x_0.shape[0]+1, 2)
    #   assert self.T > 0

    def dynamics(self, x_t: State, u_t: Control) -> DState:
        """ The set of equations defining the dynamics of the system. For continuous system, return the vector fields
        of the state variables \\(x\\) under the influence of the controls \\(u\\), i.e.:

        $$x'(t) = f(x,u,t)$$

        Args:
            x_t: (State) -- An array, representing the state variables at various time t
            u_t: (Control) -- An array, representing the control variables at various time t

        Returns:
            dx_t: (DState) -- The derivative value of the state variables, x_t, at corresponding time t
        """
        # Must be provided by concrete subclasses.
        raise NotImplementedError

    def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control):
        """
        Run the system with custom parameters. Override in individual system definition
        if you want to use this.

        Args:
            params: (Params)
            x_t: (State)
            u_t: (Control)

        Returns:
            dx_t: (DState)
        """
        # Default: parameters are ignored and the true dynamics are used.
        return self.dynamics(x_t, u_t)

    def cost(self, x_t: State, u_t: Control, t: Optional[float]) -> Cost:
        """ The instantaneous time function that the system seeks to minimize.

        Args:
            x_t: (State) -- State variables at time t
            u_t: (Control) -- Control variables at time t
            t: (float, optional) -- Time parameter

        Returns:
            cost: (Cost) -- The instantaneous cost \\( g(x_t,u_t,t) \\)
        """
        # Must be provided by concrete subclasses.
        raise NotImplementedError

    def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[float]):
        """
        Run the cost with custom parameters. Override in individual system definition
        if you want to use this

        Args:
            params: (Mapping)
            x_t: (State)
            u_t: (Control)
            t: (optional float)

        Returns:
            cost: (Cost)
        """
        # Default: parameters are ignored and the true cost is used.
        return self.cost(x_t, u_t, t)

    # TODO: decide if this should also have a parametrized version
    def terminal_cost_fn(self, x_T: State, u_T: Control, T: Optional[float] = None) -> Cost:
        """ The cost function associated to the final state

        Args:
            x_T: (State) -- Final state
            u_T: (Control) -- Final control
            T: (float) -- The Horizon

        Returns:
            cost_T: (Cost) -- The terminal cost \\(g_T(x_T,u_T,T\\)
        """
        # Default: no terminal cost; systems constructed with terminal_cost=True
        # are expected to override this.
        return 0

    # def plot_solution(self, x: States, u: Controls) -> None:
    #   """ The plotting tool for the current system
    #
    #   Args:
    #     x: State array
    #     u: Control array
    #   """
    #
    #   raise NotImplementedError
@dataclass
class IndirectFHCS(FiniteHorizonControlSystem, ABC):
    """
    Augment the base class for defining control problem under a finite horizon so that indirect methods can be use.
    Model a problem of the form:

    .. math::

        \\begin{align}
        & \\min_u \\quad && g_T(x_T,u_T,T) + \\int_0^T g(x,u,t) dt\\\\
        & \\; \\mathrm{s.t.}\\quad && x'(t) = f(x,u,t)\\\\
        & &&x(0)=x_0
        \\end{align}

    Taking into account the adjoint dynamics and the optimal characterization given by the Pontryagin's maximum principle
    """
    adj_T: Optional[jnp.ndarray] = None
    """Adjoint at time T"""
    guess_a: Optional[float] = None
    """Initial lower guess for secant method"""
    guess_b: Optional[float] = None
    """Initial upper guess for secant method"""

    def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
                t: Optional[jnp.ndarray]) -> jnp.ndarray:
        """
        The adjoint dynamics, given by:

        $$\\lambda '(t) = -\\frac{\\partial H}{\\partial x}$$

        \\( H \\) being the system Hamiltonian

        Args:
            adj_t: (jnp.ndarray) -- An array, representing the adjoint variables at various time t
            x_t: (jnp.ndarray) -- An array, representing the state variables at various time t
            u_t: (jnp.ndarray, optional) -- An array, representing the control variables at various time t
            t: (jnp.ndarray, optional) -- The time array, for time-dependent systems

        Returns:
            d_adj_t: (jnp.ndarray) -- The derivative value of the adjoint variables, \\(\\lambda\\), at corresponding time t
        """
        # Must be provided by concrete subclasses.
        raise NotImplementedError

    def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                               t: Optional[jnp.ndarray]) -> jnp.ndarray:
        """
        The optimality characterization of the controls w/r to the state and adjoint variables. That is, the controls cannot
        be optimal if they don't satisfy:

        $$\\frac{\\partial H}{\\partial u} = 0 \\; \\mathrm{at} \\; u^*$$

        This leads to the following condition, the optimality characterization, on \\(u^*\\) if \\(H\\) is quadratic in
        \\(u\\):

        $$u^* = h(x,t)$$

        Args:
            adj_t: (jnp.ndarray) -- An array, representing the adjoint variables at various time t
            x_t: (jnp.ndarray, optional) -- An array, representing the state variables at various time t
            t: (jnp.ndarray, optional) -- The time array, for time-dependent systems

        Returns:
            u_star: (jnp.ndarray) -- Control candidates at corresponding time t that meets the above condition
        """
        # Must be provided by concrete subclasses.
        raise NotImplementedError
| 6,308 | 33.47541 | 121 | py |
myriad | myriad-main/myriad/systems/neural_ode/node_system.py | # (c) 2021 Nikolaus Howe
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
import jax.numpy as jnp
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
class NodeSystem(FiniteHorizonControlSystem):
    def __init__(self, node: NeuralODE, true_system: FiniteHorizonControlSystem) -> None:
        """
        A generic system with NODE dynamics.

        Wraps a true system and exposes the same problem specification
        (x_0, x_T, T, bounds, terminal cost), while `parametrized_dynamics`
        evaluates the neural network of `node` instead of the true dynamics.

        Args:
            node: (NeuralODE) -- holds the network (`node.net`) approximating the dynamics
            true_system: (FiniteHorizonControlSystem) -- the ground-truth system being imitated
        """
        self.node = node
        self.true_system = true_system
        super().__init__(
            x_0=true_system.x_0,
            x_T=true_system.x_T,
            T=true_system.T,
            bounds=true_system.bounds,
            terminal_cost=true_system.terminal_cost
        )

    # NOTE: for now, only the dynamics is learnable (not the cost)

    # True dynamics
    def dynamics(self, x_t: State, u_t: Control, t: Timestep = None) -> DState:
        # `t` is accepted but dropped: the wrapped system's dynamics is time-invariant.
        return self.true_system.dynamics(x_t, u_t)  # TODO: we really should make the dynamics accept a t

    # Neural ODE dynamics
    def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Timestep = None) -> DState:
        # Flatten state and control into a single input vector for the network.
        x_and_u = jnp.append(x_t, u_t)
        return self.node.net.apply(params, x_and_u)

    # True cost
    def cost(self, x_t: State, u_t: Control, t: Timestep = None) -> Cost:
        return self.true_system.cost(x_t, u_t, t)
| 1,363 | 30.72093 | 106 | py |
myriad | myriad-main/myriad/systems/miscellaneous/tumour.py | import jax.numpy as jnp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import FiniteHorizonControlSystem
class Tumour(FiniteHorizonControlSystem):
    """
    Tumour anti-angiogenesis model, from [Practical Methods for Optimal Control Using Nonlinear Programming (Third Edition, Chapter 10.70)](https://my.siam.org/Store/Product/viewproduct/?ProductId=31657301).
    More details can be found in [Ledzewicz and Schattler](https://www.siue.edu/~uledzew/papers/angioMTNS.pdf)

    The model describes the growth of a tumor that needs its own blood vessels to continue to grow. Endothelial cells
    provide lining for newly forming blood vessels and as such, can be inhibited to reduce the tumor growth. The model can
    be described as:

    .. math::

        \\begin{align}
        & \\min_{u} \\quad && p(T) \\\\
        & \\; \\mathrm{s.t.}\\quad && p'(t) = -\\xi p \\ln(\\frac{p}{q}) \\\\
        & && q'(t) = bp - (\\mu + dp^{\\frac{2}{3}}) q - G u q \\\\
        & && y'(t) = u \\\\
        & && p(0) = p_0 ,\\; q(0) = q_0 ,\\; y(0) = 0 \\\\
        & && 0 <= p(t) ,\\; 0 <= q(t) ,\\; 0 <= y(t) <= A ,\\; 0 <= u(t) <= a \\\\
        \\end{align}

    Notes
    -----
    \\(p(t)\\): The size of the tumor \n
    \\(q(t)\\): The amount of vascular endothelial cells \n
    \\(u(t)\\): Angionesic dose rate; an external inhibitor decreasing \\(q(t)\\) \n
    \\(y(t)\\) : A measure of the total amount of external inhibitor used \n
    \\(a\\): Instantaneous limit over the inhibitor that can be administered \n
    \\(A\\): Total limit over the inhibitor that can be administered \n
    \\(\\xi\\): Tumor growth parameter \n
    \\(\\mu\\): Loss rate of endothelial cells from natural causes \n
    \\(b\\): Birth rate of endothelial cells from stimulation by the tumor \n
    \\(d\\): Death rate of endothelial cells from inhibition by the tumor
    """

    def __init__(self, xi=0.084, b=5.85, d=0.00873, G=0.15, mu=0.02):
        # Learnable parameters
        self.xi = xi  # per day (tumour growth)
        self.b = b  # per day (birth rate)
        self.d = d  # per mm^2 per day (death rate)
        self.G = G  # kg per mg of dose per day (antiangiogenic killing)
        self.mu = mu  # per day (loss of endothelial cells due to natural causes)
        t_F = 1.2  # days

        # State and Control Bounds
        a = 75  # maximum instantaneous dosage
        A = 15  # maximum cumulative dosage
        # Upper bound for both p and q: the asymptotically stable focus of the ODE.
        p_ = q_ = ((self.b - self.mu) / self.d) ** (3 / 2)  # asymptotically stable focus

        # Initial State
        p_0 = p_ / 2  # Initial tumour volume
        q_0 = q_ / 4  # Initial vascular capacity
        y_0 = 0  # Initial cumulative dosage
        assert p_0 >= q_0  # condition for well-posed problem
        super().__init__(
            x_0=jnp.array([p_0, q_0, y_0]),
            x_T=None,
            T=t_F,
            bounds=jnp.array([
                [0., p_],  # p
                [0., q_],  # q
                [0., A],  # y
                [0., a],  # control
            ]),
            terminal_cost=True,
        )

    def dynamics(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
        """Vector field (p', q', y') of the anti-angiogenesis model."""
        p, q, y = x_t
        _p = jnp.squeeze(-self.xi * p * jnp.log(p / q))
        _q = jnp.squeeze(q * (self.b - (self.mu + self.d * p ** (2 / 3) + self.G * u_t)))
        _y = jnp.squeeze(u_t)  # y accumulates the administered dose
        return jnp.asarray([_p, _q, _y])

    def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
        """Same vector field as `dynamics`, but with externally supplied parameters."""
        xi = params['xi']
        b = params['b']
        d = params['d']
        mu = params['mu']
        G = params['G']
        p, q, y = x_t
        _p = jnp.squeeze(-xi * p * jnp.log(p / q))
        _q = jnp.squeeze(q * (b - (mu + d * p ** (2 / 3) + G * u_t)))
        _y = jnp.squeeze(u_t)
        return jnp.asarray([_p, _q, _y])

    def cost(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
        # nh: I think this should be changed to u^2, otherwise there
        # is no penalty for oscillating in u
        # return u_t * u_t
        return 0.

    def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
        return 0.  # nothing to learn here

    def terminal_cost_fn(self, x_T: jnp.ndarray, u_T: jnp.ndarray, T: jnp.ndarray = None) -> float:
        # Objective is the final tumour size p(T).
        p, q, y = x_T
        return p

    # def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
    #   colnames = ['p', 'q', 'y']
    #   x = pd.DataFrame(x, columns=colnames)
    #
    #   sns.set(style='darkgrid')
    #   plt.figure(figsize=(10, 3))
    #   ts_x = jnp.linspace(0, self.T, x.shape[0])
    #   ts_u = jnp.linspace(0, self.T, u.shape[0])
    #
    #   for idx, title in enumerate(colnames):
    #     plt.subplot(1, 4, idx+1)
    #     plt.title(title)
    #     plt.plot(ts_x, x[title])
    #     plt.xlabel('time (days)')
    #
    #   plt.subplot(1, 4, 4)
    #   plt.title('u')
    #   plt.step(ts_u, u, where="post")
    #   plt.xlabel('time (days)')
    #
    #   plt.tight_layout()
    #   plt.show()
| 4,802 | 35.386364 | 205 | py |
myriad | myriad-main/myriad/systems/miscellaneous/seir.py | import jax.numpy as jnp
from myriad.systems import FiniteHorizonControlSystem
class SEIR(FiniteHorizonControlSystem):
    """
    SEIR epidemic model for COVID-19, inspired by [Perkins and Espana, 2020](https://link.springer.com/article/10.1007/s11538-020-00795-y).

    This model is an adaptation of SEIR models, specifically tailored to COVID-19 epidemic trying to limit the spread
    via non-pharmaceutical interventions (example: reducing contacts between individuals). As such, the control variable
    ( \\(u(t)\\) ) is a reduction in the transmission coefficient ( \\( \\beta\\) ) resulting from all societal measures
    that allow to control the virus spread. The goal of the model is to help decision-maker quantify the impact of
    policies limiting the spread.

    The formal model is given by:

    .. math::

        \\begin{align}
        & \\min_{u} \\quad && \\int_0^T D(t)^2 + cu(t)^2 dt \\\\
        & \\; \\mathrm{s.t.}\\quad && S'(t) = \\mu - (\\delta + \\beta(1-u(t))(\\alpha A(t) + I(t) + H(t))
        + \\iota + \\nu)S(t) \\\\
        & && E'(t) = ( \\beta(1-u(t))(\\alpha A(t) + I(t) + H(t))) (S(t) + (1-\\epsilon)V(t)) + \\iota S(t)
        - (\\delta + \\rho) E(t) \\\\
        & && A'(t) = (1-\\sigma)\\rho E(t) - (\\delta + \\gamma) A(t) \\\\
        & && I'(t) = \\sigma \\rho E(t) - (\\delta + \\gamma)I(t) \\\\
        & && H'(t) = \\gamma \\kappa I(t) - (\\delta + \\eta) H(t) \\\\
        & && V'(t) = \\nu S(t) - (\\delta + \\beta(1 -u(t))(\\alpha A(t) + I(t) + H(t)) (1-\\epsilon)) V(t) \\\\
        & && S(0) = S_0 ,\\; E(0) = E_0 ,\\; A(0) = A_0 ,\\; I(0) = I_0 ,\\; H(0) = H_0 ,\\; V(0)=V_0 \\\\
        \\end{align}

    Notes
    -----
    \\(D(t)\\): Population death from covid-19, estimated as a ratio of hospitalized population \\(H(t)\\) \n
    \\(u(t)\\): Cumulative impact of societal measures (reduction) on the transmission coefficient \n
    \\(c\\) : Parameter weighting the cost of societal measures relative to the death toll \n
    \\(S(t)\\): Population susceptible to infection \n
    \\(E(t)\\): Exposed population but not yet infectious \n
    \\(A(t)\\): Infected population but asymptomatic \n
    \\(I(t)\\): Infected population and symptomatic \n
    \\(H(t)\\): Hospitalized population \n
    \\(V(t)\\): Vaccinated population that has not been infected \n
    Other constants: See table 2 page 4 of [Perkins and Espana, 2020](https://link.springer.com/content/pdf/10.1007/s11538-020-00795-y.pdf)

    NOTE(review): the implementation below actually integrates a simpler
    four-compartment (S, E, I, N) model with states `[S, E, I, N]` and an
    additive `u*S` removal term — it has no A, H or V compartments and no
    vaccination. Confirm whether the docstring above or the code reflects the
    intended formulation.
    """

    def __init__(self):
        # Model rate constants (see dynamics below for how each is used).
        self.b = 0.525
        self.d = 0.5
        self.c = 0.0001
        self.e = 0.5
        self.g = 0.1
        self.a = 0.2

        # Initial compartment sizes; N_0 is the total population
        # (R_0 contributes to N_0 only — recovered are not tracked as a state).
        self.S_0 = 1000.0
        self.E_0 = 100.0
        self.I_0 = 50.0
        self.R_0 = 15.0
        self.N_0 = self.S_0 + self.E_0 + self.I_0 + self.R_0
        self.A = 0.1  # weight of the infectious-population term in the cost
        self.M = 1000
        super().__init__(
            x_0=jnp.array([self.S_0, self.E_0, self.I_0, self.N_0]),
            x_T=None,
            T=20,
            bounds=jnp.array([
                # [-jnp.inf, jnp.inf],
                # [-jnp.inf, jnp.inf],
                # [-jnp.inf, jnp.inf],
                # [-jnp.inf, jnp.inf],
                [0., 2000.],  # Chosen by observation
                [0., 250.],  # "
                [0., 250.],  # "
                [0., 3000.],  # "
                [0., 1.],
            ]),
            terminal_cost=False,
        )

    def dynamics(self, y_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
        """Vector field (S', E', I', N') of the compartmental model."""
        S, E, I, N = y_t
        # births - natural deaths - new infections - control-driven removal from S
        s_dot = jnp.squeeze(self.b*N - self.d*S - self.c*S*I - u_t*S)
        # new infections - progression/death out of E
        e_dot = jnp.squeeze(self.c*S*I - (self.e+self.d)*E)
        # progression from E - recovery/disease death/natural death out of I
        i_dot = jnp.squeeze(self.e*E - (self.g+self.a+self.d)*I)
        # net population growth minus disease-induced deaths
        n_dot = jnp.squeeze((self.b-self.d)*N - self.a*I)
        y_t_dot = jnp.array([s_dot, e_dot, i_dot, n_dot])
        return y_t_dot

    def cost(self, y_t: jnp.ndarray, u_t: float, t: float = None) -> float:
        # Weighted infectious population plus quadratic control effort.
        return self.A * y_t[2] + u_t ** 2

    # def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
    #   sns.set()
    #   plt.figure(figsize=(12, 2.5))
    #   ts_x = jnp.linspace(0, self.T, x.shape[0])
    #   ts_u = jnp.linspace(0, self.T, u.shape[0])
    #
    #   plt.subplot(151)
    #   plt.title('applied control')
    #   plt.plot(ts_u, u)
    #   plt.ylim(-0.1, 1.01)
    #
    #   for idx, title in enumerate(['S', 'E', 'I', 'N']):
    #     plt.subplot(1, 5, idx+2)
    #     plt.title(title)
    #     plt.plot(ts_x, x[:, idx])
    #
    #   plt.tight_layout()
    #   plt.show()
| 4,207 | 35.591304 | 137 | py |
myriad | myriad-main/myriad/systems/miscellaneous/rocket_landing.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
from myriad.systems import FiniteHorizonControlSystem
class RocketLanding(FiniteHorizonControlSystem):
    """
    Simulate a starship landing! Inspired by Thomas Godden's [medium post](https://thomas-godden.medium.com/how-spacex-lands-starship-sort-of-ee96cdde650b).

    This environment models a rocket trying to land vertically on a flat surface, in a similar fashion to how [SpaceX are
    landing their reusable rockets](https://youtu.be/Aq7rDQx9jns?t=20). Usually, the rocket is free-falling
    from an initial horizontal position and must uses its thruster ( \\(u_0(t), u_1(t)\\) ) to both rotate the craft and
    slow down the fall. The goal is to achieve the desired end state while minimizing the fuel usage (minimizing thrust)
    and the angular velocity in order to limit the strain on the vehicle.

    A simplified version of this task form can be modeled as:

    .. math::

        \\begin{align}
        & \\min_{u} \\quad && \\int_0^T u_0(t)^2 + u_1(t)^2 + \\phi'(t)^2 dt \\\\
        & \\; \\mathrm{s.t.}\\quad && x_0''(t) = \\frac{F_v * u_0(t) * \\sin(u_1(t) + \\phi)}{m} \\\\
        & && x_1''(t) = \\frac{F_v * u_0(t) * \\cos(u_1(t) + \\phi)}{m} - g \\\\
        & && \\phi''(t) = \\frac{-6}{F_v * u_0(t) * \\sin(u_1(t)) * m * l} \\\\
        & && x_0(0) = x_0'(0) = 0 ,\\; x_1(0) = h_i ,\\; x_1'(0) = v_i ,\\; \\phi(0) = -\\pi/2 ,\\; \\phi'(0)=0\\\\
        & && x_0(T) = x_0'(T) = x_1(T) = x_1'(T) = \\phi(T) = \\phi'(T) = 0 \\\\
        & && -1 <= u_0(t) <= 1 \\\\
        & && -F_g <= u_1(t) <= F_g
        \\end{align}

    Notes
    -----
    \\(x_0\\): Horizontal position of the rocket \n
    \\(x_0'\\): Horizontal velocity of the rocket \n
    \\(x_1\\): Height of the rocket \n
    \\(x_1'\\): Falling velocity of the rocket \n
    \\(\\phi\\): Angle of the rocket \n
    \\(\\phi'\\): Angular velocity of the rocket \n
    \\(u_0\\): The vertical thrust, as a ratio of the maximal thrust \\(F_v\\) \n
    \\(u_1\\): The [gimbaled thrust](https://en.wikipedia.org/wiki/Gimbaled_thrust) \n
    \\(F_v\\): Maximal thrust \n
    \\(F_g\\): Maximal gimbaled thrust \n
    \\(g\\): Gravity force \n
    \\(m\\): Total mass of the rocket \n
    \\(l\\): Length of the rocket \n
    \\(h_i, v_i\\): Initial height and falling speed \n
    \\(T\\): The horizon
    """
    # TODO: think about this http://larsblackmore.com/losslessconvexification.htm

    def __init__(self, g: float = 9.8, m: float = 100_000, length: float = 50, width: float = 10) -> None:
        self.g = g  # m/s^2
        self.m = m  # kg
        self.length = length  # m
        self.width = width  # m

        self.min_thrust = 880 * 1000  # N
        self.max_thrust = 1 * 2210 * 1000  # N (= 2210 kN)

        # Inertia for a uniform density rod
        self.I = 1 / 12 * m * length ** 2

        deg_to_rad = 0.01745329
        self.max_gimble = 20 * deg_to_rad
        self.min_gimble = -self.max_gimble

        self.min_percent_thrust = 0.4
        self.max_percent_thrust = 1.

        # x[0] = x position (m)
        # x[1] = x velocity (m/s)
        # x[2] = y position (m)
        # x[3] = y velocity (m/s)
        # x[4] = angle (rad)
        # x[5] = angular velocity (rad/s)

        # u[0] = thrust (percent)
        # u[1] = thrust angle (rad)
        super().__init__(
            x_0=jnp.array([0., 0., 1000., -80., -jnp.pi / 2., 0.]),
            x_T=jnp.array([0., 0., 0., 0., 0., 0.]),
            T=16.,  # Duration of experiment
            bounds=jnp.array([  # Bounds over the states (x_0, x_1, ...) are given first,
                [-250., 150.],  # followed by bounds over controls (u_0, u_1, ...)
                [-250., 150.],
                [0., 1000.],
                [-250., 150.],
                [-2 * jnp.pi, 2 * jnp.pi],
                [-250., 150.],
                [self.min_percent_thrust, self.max_percent_thrust],
                [self.min_gimble, self.max_gimble],
            ]),
            terminal_cost=False,
        )

    def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
        """Planar rigid-body dynamics of the rocket under gimbaled thrust."""
        theta = x_t[4]
        thrust = u_t[0]
        thrust_angle = u_t[1]

        # Horizontal force
        F_x = self.max_thrust * thrust * jnp.sin(thrust_angle + theta)
        x_dot = x_t[1]
        x_dotdot = F_x / self.m

        # Vertical force
        F_y = self.max_thrust * thrust * jnp.cos(thrust_angle + theta)
        y_dot = x_t[3]
        y_dotdot = F_y / self.m - self.g

        # Torque (thrust applied at the base, length/2 from the center of mass)
        T = -self.length / 2 * self.max_thrust * thrust * jnp.sin(thrust_angle)
        theta_dot = x_t[5]
        theta_dotdot = T / self.I

        return jnp.array([x_dot, x_dotdot, y_dot, y_dotdot, theta_dot, theta_dotdot])

    def cost(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
        # Penalize thrust magnitude, gimbal deflection, and angular velocity.
        return u_t[0] ** 2 + u_t[1] ** 2 + 2 * x_t[5] ** 2
| 4,647 | 36.184 | 154 | py |
myriad | myriad-main/myriad/systems/miscellaneous/van_der_pol.py | import jax.numpy as jnp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import FiniteHorizonControlSystem
class VanDerPol(FiniteHorizonControlSystem):
    """
    Driven Van der Pol oscillator, from [CasADi](http://casadi.sourceforge.net/v1.8.0/users_guide/html/node8.html).

    The task is to drive a [Van der Pol oscillator](https://arxiv.org/pdf/0803.1658.pdf) to the origin while
    penalizing both the state magnitude and the control effort:

    .. math::

        \\begin{align}
        & \\min_{u} \\quad && \\int_0^{10} x_0(t)^2 + x_1(t)^2 + u(t)^2 dt \\\\
        & \\; \\mathrm{s.t.}\\quad && x_0'(t) = a (1 - x_1(t)^2) x_0(t) - x_1(t) + u(t) \\\\
        & && x_1'(t) = x_0(t) \\\\
        & && x_0(0) = 0 ,\\; x_1(0) = 1 \\\\
        & && x_0(10) = x_1(10) = 0 \\\\
        & && -0.75 <= u_0(t) <= 1.0 \\\\
        \\end{align}
    """

    def __init__(self, a=1.):
        # Nonlinear damping coefficient of the oscillator (learnable).
        self.a = a

        # Box bounds: two states first, then the control.
        box_bounds = jnp.array([
            [-4., 4.],     # state 1 (from observation)
            [-4., 4.],     # state 2 (from observation)
            [-0.75, 1.0],  # control
        ])
        super().__init__(
            x_0=jnp.array([0., 1.]),
            x_T=jnp.zeros(2),
            T=10.0,
            bounds=box_bounds,
            terminal_cost=False,
        )

    def dynamics(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
        """Vector field of the driven oscillator, using the instance coefficient."""
        first, second = x_t
        d_first = jnp.squeeze(self.a * (1. - second ** 2) * first - second + u_t)
        d_second = jnp.squeeze(first)
        return jnp.asarray([d_first, d_second])

    def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
        """Same vector field as `dynamics`, but with an externally supplied coefficient."""
        coeff = params['a']
        first, second = x_t
        d_first = jnp.squeeze(coeff * (1. - second ** 2) * first - second + u_t)
        d_second = jnp.squeeze(first)
        return jnp.asarray([d_first, d_second])

    def cost(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
        """Quadratic penalty on the state (x^T x) plus control effort."""
        return jnp.dot(x_t, x_t) + u_t * u_t

    def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
        # The cost has no learnable parameters; reuse the same quadratic form.
        return jnp.dot(x_t, x_t) + u_t * u_t
| 2,496 | 29.82716 | 113 | py |
myriad | myriad-main/myriad/systems/classical_control/cartpole.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from typing import Optional
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
class CartPole(FiniteHorizonControlSystem):
    """
    Cart-pole swing-up, from [(Kelly, 2017)](https://epubs.siam.org/doi/10.1137/16M1062569).

    This environment models a cart moving in a unidimensional direction with a pendulum hanging freely from it.
    The usually associated task is to move the cart in such a way that the pendulum swings up to the non-stationary
    equilibrium point above the cart.

    The goal is to find the cart velocity ( \\(q_1'(t)\\) ) that will impede
    an angular velocity of the pendulum ( \\(q_2'(t)\\) ) that achieves the task, while minimising the force
    ( \\(u(t)\\) ) needed to generate such a movement. The system to solve is:

    .. math::

        \\begin{align}
        & \\min_{u} \\quad && \\int_0^T u(t)^2 dt \\\\
        & \\; \\mathrm{s.t.}\\quad && q_1''(t) = \\frac{l m_2 \\sin(q_2(t)) q_2^2(t)' + u(t) + m_2 g \\cos(q_2(t)) \\sin(q_2(t))}
        {m_1 + m_2 (1-\\cos^2 (q_2(t)))}\\\\
        & && q_2''(t) = - \\frac{l m_2 \\cos(q_2(t))\\sin(q_2(t)) q_2^2(t) + u(t) \\cos(q_2(t))
        + (m_1 +m_2)g \\sin(q_2(t))}{l m_1 + l m_2 (1 - \\cos^2(q_2(t))} \\\\
        & && q_1(0)=q_2(0)=q_1'(0)=q_2'(0)=0 \\\\
        & && q_1(T) = d ,\\; q_2(T) = \\pi ,\\; q_1'(T)=q_2'(T)=0 \\\\
        & && -d_M <= q_1(t) <= d_M ,\\; -u_M <= u(t) <= u_M
        \\end{align}

    Notes
    -----
    \\(q_1\\): Position of the cart \n
    \\(q_2\\): Angle of the pole \n
    \\(q_1'\\): Velocity of the cart \n
    \\(q_2'\\): Angular velocity of the pole \n
    \\(m_1\\): Mass of the cart \n
    \\(m_2\\): Mass of the pendulum \n
    \\(l\\): Length of the pole \n
    \\(g\\): Gravity force \n
    \\(d_M\\): Maximal distance that can be traveled by the cart \n
    \\(u_M\\): Maximal force that can be applied to the motor \n
    \\(T\\): The horizon
    """

    def __init__(self, g: float = 9.81, m1: float = 1., m2: float = .3, length: float = 0.5):
        # Physical parameters for the cart-pole example (Table 3)
        self.m1 = m1  # kg mass of cart
        self.m2 = m2  # kg mass of pole
        self.length = length  # m pole length
        self.g = g  # m/s^2 gravity acceleration
        self.u_max = 20  # N maximum actuator force
        self.d_max = 2.0  # m extent of the rail that cart travels on
        self.d = 1.0  # m distance traveled during swing-up

        super().__init__(
            x_0=jnp.array([0., 0., 0., 0.]),  # Starting state (Eq. 6.9)
            x_T=jnp.array([self.d, jnp.pi, 0., 0.]),  # Ending state (Eq. 6.9)
            T=2.0,  # s duration of swing-up,
            bounds=jnp.array([
                [-self.d_max, self.d_max],  # Eq. 6.7
                [-2 * jnp.pi, 2 * jnp.pi],
                [-5., 5.],  # Observed from optimal plot, taken as reasonable
                [-10., 10.],
                [-self.u_max, self.u_max],  # Control bounds (Eq. 6.8)
            ]),
            terminal_cost=False,
        )

    # Cart-Pole Example: System Dynamics (Section 6.1)
    def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
        """Swing-up dynamics from Kelly (2017), Eqs. 6.1-6.2."""
        x, theta, dx, dtheta = x_t

        # Eq. 6.1
        ddx = ((self.length * self.m2 * jnp.sin(theta) * dtheta ** 2 + u_t + self.m2 * self.g * jnp.cos(theta) * jnp.sin(theta))
               / (self.m1 + self.m2 * (1 - jnp.cos(theta) ** 2)))
        ddx = jnp.squeeze(ddx)

        # Eq. 6.2
        ddtheta = - ((self.length * self.m2 * jnp.cos(theta) * dtheta ** 2 + u_t * jnp.cos(theta)
                      + (self.m1 + self.m2) * self.g * jnp.sin(theta))
                     / (self.length * self.m1 + self.length * self.m2 * (1 - jnp.cos(theta) ** 2)))
        ddtheta = jnp.squeeze(ddtheta)

        return jnp.array([dx, dtheta, ddx, ddtheta])

    def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
        """Same dynamics as above, but reading physical parameters from `params`."""
        g = jnp.abs(params['g'])  # convert negative values to positive ones
        m1 = jnp.abs(params['m1'])
        m2 = jnp.abs(params['m2'])
        length = jnp.abs(params['length'])

        x, theta, dx, dtheta = x_t

        # Eq. 6.1
        ddx = ((length * m2 * jnp.sin(theta) * dtheta ** 2 + u_t + m2 * g * jnp.cos(theta) * jnp.sin(theta))
               / (m1 + m2 * (1 - jnp.cos(theta) ** 2)))
        ddx = jnp.squeeze(ddx)

        # Eq. 6.2
        ddtheta = - ((length * m2 * jnp.cos(theta) * dtheta ** 2 + u_t * jnp.cos(theta)
                      + (m1 + m2) * g * jnp.sin(theta))
                     / (length * m1 + length * m2 * (1 - jnp.cos(theta) ** 2)))
        ddtheta = jnp.squeeze(ddtheta)

        return jnp.array([dx, dtheta, ddx, ddtheta])

    def cost(self, x_t: State, u_t: Control, t: Timestep = None) -> Cost:
        # Eq. 6.3
        return u_t ** 2

    def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep]) -> Cost:
        # The cost has no learnable parameters.
        return self.cost(x_t, u_t, t)

    # def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
    #   x = pd.DataFrame(x, columns=['q1', 'q2', 'q̈1', 'q̈2'])
    #
    #   # Plot optimal trajectory (Figure 10)
    #   sns.set(style='darkgrid')
    #   plt.figure(figsize=(9, 6))
    #   ts_x = jnp.linspace(0, self.T, x.shape[0])
    #   ts_u = jnp.linspace(0, self.T, u.shape[0])
    #
    #   plt.subplot(3, 1, 1)
    #   plt.ylabel('position (m)')
    #   plt.xlim(0, 2.01)
    #   plt.ylim(0, 1.5)
    #   plt.plot(ts_x, x['q1'], '-bo', clip_on=False, zorder=10)
    #
    #   plt.subplot(3, 1, 2)
    #   plt.ylabel('angle (rad)')
    #   plt.plot(ts_x, x['q2'], '-bo', clip_on=False, zorder=10)
    #   plt.xlim(0, 2.01)
    #   plt.ylim(-2, 4)
    #
    #   plt.subplot(3, 1, 3)
    #   plt.ylabel('force (N)')
    #   # plt.plot(ts_u, u, '-bo', clip_on=False, zorder=10)
    #   plt.step(ts_u, u, where="post", clip_on=False)
    #   plt.xlim(0, 2.01)
    #   plt.ylim(-20, 11)
    #
    #   plt.xlabel('time (s)')
    #   plt.tight_layout()
    #   plt.show()
| 5,799 | 39.277778 | 127 | py |
myriad | myriad-main/myriad/systems/classical_control/mountain_car.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
from myriad.systems.base import FiniteHorizonControlSystem
def hill_function(x: float) -> float:
    """Quadratic stand-in for the mountain profile h(x).

    Its gradient (via jax.grad) supplies the gravity term in
    MountainCar.dynamics.
    """
    # return jnp.max(jnp.array([-3 * x - jnp.pi, -1/3 * jnp.cos(3 * x), 3 * x]))
    return x * x / 2.0
class MountainCar(FiniteHorizonControlSystem):
  """
  Continuous Mountain Car environment, inspired by the [OpenAI gym environment](https://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py).
  Model was originally described in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf).

  This environment models a unidimensional car located between two hills, while the goal is often to make it to the top
  of one of the hills. Usually, this environment is made challenging by limiting the force ( \\(u(t)\\) ) the car can
  generate, making it unable to climb directly to the desired steep hill top. In this scenario, the solution
  is to first climb the opposite hill in order to generate enough potential energy to make it on top of the desired hill.

  The system can formally be described as:

  .. math::
    \\begin{align}
    & \\min_{u} \\quad && \\int_0^T u(t)^2 dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x'(t) = p u(t) - g h'(x) \\\\
    & && x(0) = x_i ,\\; x'(0) = v_i \\\\
    & && x(T) = x_f ,\\; x'(T) = v_f \\\\
    & && -1 <= u(t) <= 1
    \\end{align}

  Notes
  -----
  \\(x\\): Position of the car \n
  \\(x'\\): Velocity of the car \n
  \\(p\\): Maximal power that the car engine can output \n
  \\(u\\): The force applied to the car, as a fraction of \\(p\\) \n
  \\(g\\): Gravity force \n
  \\(h(x)\\): Function describing the hill landscape \n
  \\(x_i, v_i\\): Initial position and speed \n
  \\(x_f, v_f\\): Goal position and speed \n
  \\(T\\): The horizon
  """

  def __init__(self, power=0.0015, gravity=0.0025) -> None:
    """
    Args:
      power: engine force scaling \\(p\\) (learnable via `parametrized_dynamics`).
      gravity: gravity scaling \\(g\\) (learnable via `parametrized_dynamics`).
    """
    # Fixed problem constants: control saturation, state limits, start and goal.
    self.min_action = -1.0
    self.max_action = 1.0
    self.min_position = -1.2
    self.max_position = 0.6
    self.max_speed = 0.07
    self.start_position = -0.1
    self.start_velocity = 0.
    self.goal_position = 0.45  # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
    self.goal_velocity = 0
    # self.power = 0.0015
    # self.gravity = 0.0025
    # Learnable physical parameters.
    self.power = power
    self.gravity = gravity
    super().__init__(
      # [self.np_random.uniform(low=-0.6, high=-0.4), 0]
      x_0=jnp.array([self.start_position, self.start_velocity]),  # Starting state: position, velocity
      x_T=jnp.array([self.goal_position, self.goal_velocity]),  # Ending state
      T=300.,  # s duration (note, this is not in the original problem)
      bounds=jnp.array([
        [self.min_position, self.max_position],  # Position bounds
        [-self.max_speed, self.max_speed],  # Velocity bounds
        [self.min_action, self.max_action],  # Control bounds
      ]),
      terminal_cost=False,
    )

  # def _height(self, xs):
  #   return jnp.sin(3 * xs) * .45 + .55

  def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
    """Time derivative of the state ``[position, velocity]`` under control ``u_t``."""
    position, velocity = x_t
    # Saturate the engine force to the admissible control range.
    force = jnp.clip(u_t, a_min=self.min_action, a_max=self.max_action)
    d_position = velocity.squeeze()
    # Acceleration: scaled engine force minus the gravity component along the
    # hill slope h'(x), obtained by differentiating `hill_function`.
    d_velocity = (force * self.power - self.gravity * jax.grad(hill_function)(position)).squeeze()
    return jnp.array([d_position, d_velocity])

  def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
    """Same as `dynamics`, but reads `power` and `gravity` from `params`
    (used when these physical parameters are being learned)."""
    position, velocity = x_t
    power = params['power']
    gravity = params['gravity']
    force = jnp.clip(u_t, a_min=self.min_action, a_max=self.max_action)
    d_position = velocity.squeeze()
    d_velocity = (force * power - gravity * jax.grad(hill_function)(position)).squeeze()
    return jnp.array([d_position, d_velocity])

  def cost(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
    """Running cost: quadratic control-effort penalty (weight 10)."""
    return 10. * u_t ** 2

  def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
    """The cost is not parametrized; defer to `cost`."""
    return self.cost(x_t, u_t, t)
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# x = pd.DataFrame(x, columns=['q1', 'q2', 'q̈1', 'q̈2'])
#
# # Plot optimal trajectory (Figure 10)
# sns.set(style='darkgrid')
# plt.figure(figsize=(9, 6))
# ts_x = jnp.linspace(0, self.T, x.shape[0])
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# plt.subplot(3, 1, 1)
# plt.ylabel('position (m)')
# plt.xlim(0, 2.01)
# plt.ylim(0, 1.5)
# plt.plot(ts_x, x['q1'], '-bo', clip_on=False, zorder=10)
#
# plt.subplot(3, 1, 2)
# plt.ylabel('angle (rad)')
# plt.plot(ts_x, x['q2'], '-bo', clip_on=False, zorder=10)
# plt.xlim(0, 2.01)
# plt.ylim(-2, 4)
#
# plt.subplot(3, 1, 3)
# plt.ylabel('force (N)')
# # plt.plot(ts_u, u, '-bo', clip_on=False, zorder=10)
# plt.step(ts_u, u, where="post", clip_on=False)
# plt.xlim(0, 2.01)
# plt.ylim(-20, 11)
#
# plt.xlabel('time (s)')
# plt.tight_layout()
# plt.show()
if __name__ == "__main__":
  # Quick visual sanity check: draw the phase portrait of the uncontrolled
  # system (u = 0) on a position/velocity grid as a quiver plot.
  import numpy as np
  import matplotlib.pyplot as plt

  system = MountainCar()
  positions = np.linspace(-1.5, 1.5, 20)
  velocities = np.linspace(-0.1, 0.1, 20)
  P, V = np.meshgrid(positions, velocities)

  dP = np.zeros(P.shape)
  dV = np.zeros(V.shape)
  n_rows, n_cols = P.shape
  for r in range(n_rows):
    for c in range(n_cols):
      # State derivative at this grid point with zero control at time 0.
      derivative = system.dynamics(jnp.array([P[r, c], V[r, c]]), 0, 0)
      dP[r, c] = derivative[0]
      dV[r, c] = derivative[1]

  plt.quiver(P, V, dP, dV, color='r')
  plt.xlabel('position')
  plt.ylabel('velocity')
  plt.xlim([-1.5, 1])
  plt.ylim([-.1, .1])
  plt.show()
| 5,788 | 31.706215 | 175 | py |
myriad | myriad-main/myriad/systems/classical_control/pendulum.py | # (c) 2021 Nikolaus Howe
# inspired by https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# and https://github.com/locuslab/mpc.pytorch/blob/07f43da67581b783f4f230ca97b0efbc421773af/mpc/env_dx/pendulum.py
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, DState, Params, State, Timestep
# https://github.com/openai/gym/blob/ee5ee3a4a5b9d09219ff4c932a45c4a661778cd7/gym/envs/classic_control/pendulum.py#L101
@jax.jit
def angle_normalize(x):
  """Wrap an angle (radians) into the half-open interval [-pi, pi)."""
  shifted = x + jnp.pi
  return jnp.mod(shifted, 2.0 * jnp.pi) - jnp.pi
class Pendulum(FiniteHorizonControlSystem):
  """
  Continuous Pendulum environment, inspired by the [OpenAI gym environment](https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py).

  This environment models the movement of a pendulum upon which a torque ( \\(u(t)\\) ) is applied upon the extremity
  moving freely. The goal is to generate a movement such that the pendulum will balance in an upright position.
  It can be modeled as:

  .. math::
    \\begin{align}
    & \\min_{u} \\quad && \\int_0^T \\theta(t)^2 + 0.1 * \\theta(t)' + C_p u(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad && \\theta''(t) = -\\frac{g \\sin(\\theta(t))}{2 l}
    + \\frac{u(t)}{m l^2}\\\\
    & && \\theta(0) = \\pi ,\\; \\theta'(0)=0 \\\\
    & && \\theta(T) = 0 ,\\; \\theta'(T)=0 \\\\
    & && -u_M <= u(t) <= u_M
    \\end{align}

  Notes
  -----
  \\(\\theta\\): Pendulum's angle \n
  \\(\\theta'\\): Angular velocity \n
  \\(u\\): The torque applied to the pendulum \n
  \\(l\\): Length of the rope holding the pendulum \n
  \\(g\\): Gravity force \n
  \\(u_M\\): Maximum torque that can be applied \n
  \\(T\\): The horizon
  """

  def __init__(self, g: float = 10., m: float = 1., length: float = 1.):
    """
    Args:
      g: gravity constant (learnable via `parametrized_dynamics`).
      m: pendulum mass (learnable).
      length: rod length (learnable).
    """
    # Learnable parameters
    self.g = g
    self.m = m
    self.length = length
    # Fixed parameters
    self.max_speed = 8.
    self.max_torque = 2.
    # NOTE(review): x_0 = [0, 0] and x_T = [pi, 0], while the class docstring
    # states theta(0) = pi and theta(T) = 0 — confirm the intended angle
    # convention (which orientation is "hanging" vs "upright").
    self.x_0 = jnp.array([0., 0.])
    self.x_T = jnp.array([jnp.pi, 0.])
    self.ctrl_penalty = 0.001  # weight C_p on the control-effort term of the cost
    super().__init__(
      x_0=self.x_0,  # Starting state: position, velocity
      x_T=self.x_T,  # Ending state
      T=15,  # s duration (note, this is not in the original problem)
      bounds=jnp.array([
        [-jnp.pi, jnp.pi],  # theta
        [-self.max_speed, self.max_speed],  # dtheta
        [-self.max_torque, self.max_torque],  # Control bounds
      ]),
      terminal_cost=False,
    )

  def parametrized_dynamics(self, params: Params, x: State, u: Control, t: Optional[Timestep] = None) -> DState:
    """Same as `dynamics`, but reads g, m and length from `params`
    (used when these physical parameters are being learned)."""
    u = jnp.clip(u, a_min=-self.max_torque, a_max=self.max_torque)
    g = params['g']
    m = params['m']
    length = params['length']
    theta, dot_theta = x
    theta = angle_normalize(theta)
    dot_theta = jnp.clip(dot_theta, a_min=-self.max_speed, a_max=self.max_speed)
    # print("theta, dot_theta", x)
    # The 0.05 factor scales the angular acceleration; presumably a
    # time-step-like scaling baked into the dynamics — TODO confirm.
    dot_dot_theta = (-3. * g / (2. * length) * jnp.sin(theta)
                     + 3. * u / (m * length ** 2)).squeeze() * 0.05
    # print("dot theta", dot_theta)
    # print("dot dot", dot_dot_theta)
    return jnp.array([dot_theta, dot_dot_theta])

  def dynamics(self, x: State, u: Control, t: Optional[Timestep] = None) -> DState:
    """Time derivative of the state ``[theta, dot_theta]``; torque and angular
    velocity are saturated to their admissible ranges before use."""
    u = jnp.clip(u, a_min=-self.max_torque, a_max=self.max_torque)
    theta, dot_theta = x
    theta = angle_normalize(theta)
    dot_theta = jnp.clip(dot_theta, a_min=-self.max_speed, a_max=self.max_speed)
    # print("theta, dot_theta", x)
    # Same 0.05 scaling as in `parametrized_dynamics` — see note there.
    dot_dot_theta = (-3. * self.g / (2. * self.length) * jnp.sin(theta)
                     + 3. * u / (self.m * self.length ** 2)).squeeze() * 0.05
    # print("dot theta", dot_theta)
    # print("dot dot", dot_dot_theta)
    return jnp.array([dot_theta, dot_dot_theta])

  def parametrized_cost(self, params: Params, x: State, u: Control, t: Timestep):
    """The cost is not parametrized; defer to `cost`."""
    # Do nothing, for now
    return self.cost(x, u, t)

  def cost(self, x: State, u: Control, t: Timestep) -> float:
    """Running cost penalizing wrapped angle, angular velocity, and control effort."""
    # print("state is", x)
    # NOTE(review): `assert` is stripped under `python -O`; input validation only.
    assert len(x) == 2
    theta, dot_theta = x
    return angle_normalize(theta) ** 2 + 0.1 * dot_theta ** 2 + self.ctrl_penalty * u ** 2
if __name__ == "__main__":
  # No runtime demo; a phase-portrait script is kept below as commented-out
  # reference code.
  pass
# import numpy as np
# import matplotlib.pyplot as plt
#
# pd = Pendulum()
#
# y1 = np.linspace(-2*jnp.pi, 2*jnp.pi, 20)
# y2 = np.linspace(-pd.max_speed, pd.max_speed, 20)
#
# Y1, Y2 = np.meshgrid(y1, y2)
#
# t = 0
#
# u, v = np.zeros(Y1.shape), np.zeros(Y2.shape)
#
# NI, NJ = Y1.shape
#
# for i in range(NI):
# for j in range(NJ):
# x = Y1[i, j]
# y = Y2[i, j]
# yprime = pd.dynamics(jnp.array([x, y]), 0, t)
# u[i, j] = yprime[0]
# v[i, j] = yprime[1]
#
# Q = plt.quiver(Y1, Y2, u, v, color='r')
#
# plt.xlabel('angle')
# plt.ylabel('angular velocity')
# plt.xlim([-2*jnp.pi, 2*jnp.pi])
# plt.ylim([-pd.max_speed, pd.max_speed])
# plt.show()
# def get_frame(self, x, ax=None):
# x = util.get_data_maybe(x.view(-1))
# assert len(x) == 3
# l = self.params[2].item()
#
# cos_th, sin_th, dth = torch.unbind(x)
# th = np.arctan2(sin_th, cos_th)
# x = sin_th * l
# y = cos_th * l
#
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 6))
# else:
# fig = ax.get_figure()
#
# ax.plot((0, x), (0, y), color='k')
# ax.set_xlim((-l * 1.2, l * 1.2))
# ax.set_ylim((-l * 1.2, l * 1.2))
# return fig, ax
# def get_true_obj(self):
# q = torch.cat((
# self.goal_weights,
# self.ctrl_penalty * torch.ones(self.n_ctrl)
# ))
# assert not hasattr(self, 'mpc_lin')
# px = -torch.sqrt(self.goal_weights) * self.goal_state # + self.mpc_lin
# p = torch.cat((px, torch.zeros(self.n_ctrl)))
# return Variable(q), Variable(p)
| 5,848 | 30.446237 | 156 | py |
myriad | myriad-main/myriad/systems/lenhart/mould_fungicide.py | from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class MouldFungicide(IndirectFHCS):
  """
  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 6, Lab 2)

  This environment models the concentration level of a mould population that we try to control by
  applying a fungicide. The state ( \\(x\\) ) is the population concentration, while the control ( \\(u\\) ) is
  the amount of fungicide added. We are trying to minimize:

  .. math::
    \\begin{align}
    & \\min_u \\quad &&\\int_0^T Ax^2(t) + u^2(t) dt \\\\
    &\\; \\mathrm{s.t.}\\quad && x'(t) = r(M - x(t)) - u(t)x(t) \\\\
    & && x(0)=x_0 \\;
    \\end{align}
  """

  def __init__(self, r=0.3, M=10., A=10., x_0=1.0, T=5):
    super().__init__(
      x_0=jnp.array([x_0]),  # Starting state
      x_T=None,  # Terminal state, if any
      T=T,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1, ...) are given first,
        [0., 5.],  # followed by bounds over controls (u_0, u_1, ...)
        [0., 5.],  # nh: I replaced [-inf, inf] with [0, 5] in both of the bounds
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # Final condition over the adjoint, if any
    self.r = r
    """Growth rate"""
    self.M = M
    """Carrying capacity"""
    self.A = A
    """Weight parameter, balancing between controlling the population and limiting the fungicide use"""

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Growth toward carrying capacity M, reduced by fungicide application u."""
    d_x = self.r * (self.M - x_t) - u_t * x_t
    return d_x

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same as `dynamics`, but reads r and M from `params` (system identification)."""
    r = params['r']
    M = params['M']
    d_x = r * (M - x_t) - u_t * x_t
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost A x^2 + u^2."""
    return self.A * x_t ** 2 + u_t ** 2

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost is deliberately not parametrized."""
    return self.A * x_t ** 2 + u_t ** 2  # don't learn the cost for now

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation lambda' = lambda (r + u) - 2 A x."""
    return adj_t * (self.r + u_t) - 2 * self.A * x_t

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition u* = lambda x / 2, projected onto the control bounds."""
    char = 0.5 * adj_t * x_t
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 3,084 | 37.5625 | 113 | py |
myriad | myriad-main/myriad/systems/lenhart/simple_case.py | from typing import Union, Optional, Dict
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.systems import IndirectFHCS
@gin.configurable
class SimpleCase(IndirectFHCS):
  """
  Introductory optimal-control example taken from: Optimal Control Applied to
  Biological Models, Lenhart & Workman (Chapter 5, Lab 1):

  .. math::
    \\begin{align}
    & \\max_u \\quad && \\int_0^1 Ax(t) - Bu^2(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x'(t) = -\\frac{1}{2}x^2(t) + Cu(t) \\\\
    & && x(0)=x_0>-2, \\; A \\geq 0, \\; B > 0
    \\end{align}
  """

  def __init__(self, A=1., B=1., C=4., x_0=1., T=1.):
    super().__init__(
      x_0=jnp.array([x_0]),    # Initial state
      x_T=None,                # No prescribed terminal state
      T=T,                     # Experiment horizon
      bounds=jnp.array([       # State bounds first, control bounds after
        [jnp.NINF, jnp.inf],   # x (unbounded)
        [jnp.NINF, jnp.inf],   # u (unbounded)
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.A = A
    """Weight parameter"""
    self.B = B
    """Weight parameter"""
    self.C = C
    """Weight parameter"""
    self.adj_T = None  # No terminal condition on the adjoint

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """State derivative x'(t) = -x^2 / 2 + C u."""
    return self.C * u_t - 0.5 * x_t ** 2

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost; the maximization objective is negated into a minimization."""
    return self.B * u_t ** 2 - self.A * x_t

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation lambda' = x lambda - A."""
    return x_t * adj_t - self.A

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition u* = C lambda / (2B), projected onto the bounds."""
    unconstrained = (self.C * adj_t) / (2 * self.B)
    return jnp.clip(unconstrained, a_min=self.bounds[0, 0], a_max=self.bounds[0, 1])
| 2,217 | 34.206349 | 112 | py |
myriad | myriad-main/myriad/systems/lenhart/cancer_treatment.py | import gin
import jax.numpy as jnp
from typing import Optional, Union
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class CancerTreatment(IndirectFHCS):
  """
  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 10, Lab 5)
  The model was originally described in K. Renee Fister and John Carl Panetta. Optimal control applied to
  competing chemotherapeutic cell-kill strategies. SIAM Journal of Applied Mathematics, 63(6):1954–71, 2003.

  The tumour is assumed to follow Gompertzian growth and the model follows a Skipper's log-kill hypothesis, that is, the
  cell-kill due to the chemotherapy treatment is proportional to the tumour population.

  This environment models the normalized density of a cancerous tumour undergoing chemotherapy.
  The state ( \\(x\\) ) is the
  normalized density of the tumour, while the control ( \\(u\\) ) is the strength of the drug used for chemotherapy.
  We are trying to minimize:

  An important note about this system: due to the log of the reciprocal of the state in the
  dynamics equation, special care must be taken to ensure that your numerical integration
  scheme doesn't take a step into the negative values for x. As such, it is recommended
  to either take small steps, or to always clip the state to [0., inf] during
  integration.

  .. math::
    \\begin{align}
    &\\min_u \\quad && \\int_0^T ax^2(t) + u^2(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad &&x'(t) = rx(t)\\ln \\big( \\frac{1}{x(t)} \\big) - u(t)\\delta x(t) \\\\
    & && x(0)=x_0, \\; u(t) \\geq 0
    \\end{align}
  """

  def __init__(self, r=0.3, a=3., delta=0.45, x_0=0.975, T=20):
    super().__init__(
      x_0=jnp.array([x_0]),  # Starting state
      x_T=None,  # Terminal state, if any
      T=T,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1 ...) are given first,
        [1e-3, 1.],  # followed by bounds over controls (u_0, u_1,...)
        # [0., jnp.inf]  # Original bounds
        [0., 2.]  # Bounds based on optimal policy
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # Final condition over the adjoint, if any
    self.r = r
    """Growth rate of the tumour"""
    self.a = a
    """Positive weight parameter"""
    self.delta = delta
    """Magnitude of the dose administered"""

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Gompertzian growth with log-kill chemotherapy term.
    Note: jnp.log(1 / x) requires x > 0; see the class docstring."""
    d_x = self.r * x_t * jnp.log(1 / x_t) - u_t * self.delta * x_t
    return d_x

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same as `dynamics`, but reads r and delta from `params` (system identification)."""
    r = params['r']
    delta = params['delta']
    d_x = r * x_t * jnp.log(1 / x_t) - u_t * delta * x_t
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost a x^2 + u^2."""
    return self.a * x_t ** 2 + u_t ** 2

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; learning of the cost weight is currently disabled."""
    # a = params['a']  # TODO: change back if we want to learn the cost also
    a = self.a
    return a * x_t ** 2 + u_t ** 2

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation for the Hamiltonian of this problem."""
    return adj_t * (self.r + self.delta * u_t - self.r * jnp.log(1 / x_t)) - 2 * self.a * x_t

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition u* = lambda delta x / 2, projected onto the control bounds."""
    char = 0.5 * adj_t * self.delta * x_t
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 4,085 | 43.413043 | 120 | py |
myriad | myriad-main/myriad/systems/lenhart/simple_case_with_bounds.py | from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.systems import IndirectFHCS
@gin.configurable
class SimpleCaseWithBounds(IndirectFHCS):
  """
  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 9, Lab 4). \n
  A simple introductory environment example of the form:

  .. math::
    \\begin{align}
    & \\max_u \\quad && \\int_0^1 Ax(t) - u^2(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x'(t) = -\\frac{1}{2}x^2(t) + Cu(t) \\\\
    & && x(0)=x_0>-2, \\; A \\geq 0, \\; M_1 \\leq u(t) \\leq M_2
    \\end{align}
  """

  def __init__(self, A=1., C=4., M_1=-1., M_2=2., x_0=1., T=1.):
    super().__init__(
      x_0=jnp.array([x_0]),  # Starting state
      x_T=None,  # Terminal state, if any
      T=T,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1 ...) are given first,
        # [jnp.NINF, jnp.inf],  # followed by bounds over controls (u_0,u_1,...)
        [0., 3.],  # changed based on observation of the true optimal trajectory using default M_1 and M_2
        [M_1, M_2],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.A = A
    """Weight parameter"""
    self.C = C
    """Weight parameter"""
    self.M_1 = M_1
    """Lower bound for the control"""
    self.M_2 = M_2
    """Upper bound for the control"""
    self.adj_T = None  # Final condition over the adjoint, if any

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """State derivative x'(t) = -x^2 / 2 + C u."""
    d_x = -0.5*x_t**2 + self.C*u_t
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost; the maximization objective is negated into a minimization."""
    return -self.A*x_t + u_t**2  # Maximization problem converted to minimization

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation lambda' = x lambda - A."""
    return -self.A + x_t*adj_t

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition u* = C lambda / 2, projected onto [M_1, M_2]."""
    char = (self.C*adj_t)/2
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 2,409 | 35.515152 | 112 | py |
myriad | myriad-main/myriad/systems/lenhart/predator_prey.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class PredatorPrey(IndirectFHCS):
  # TODO: there is an error when trying to plot with PredatorPrey
  """
  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 22, Lab 13)
  The states evolution is based on a standard Lotka-Volterra model.
  This particular environment is inspired from Bean San Goh, George Leitmann, and Thomas L. Vincent.
  Optimal control of a prey-predator system. Mathematical Biosciences, 19, 1974.

  This environment models the evolution of a pest (prey) population ( \\(x_0(t)\\) ) and a predator population ( \\(x_1(t) \\)) in
  the presence of a pesticide ( \\(u(t)\\) ) that affects both the pest and predator populations. The objective in mind is
  to minimize the final pest population, while limiting the usage of the pesticide. Thus:

  .. math::
    \\begin{align}
    & \\min_{u} \\quad && x_0(T) + \\frac{A}{2}\\int_0^T u(t)^2 dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x_0'(t) = (1 - x_1(t))x_0(t) - d_1x_0(t)u(t) \\\\
    & && x_1'(t) = (x_0(t) - 1)x_1(t) - d_2x_1(t)u(t) \\\\
    & && 0 \\leq u(t) \\leq M, \\quad \\int_0^T u(t) dt = B
    \\end{align}

  The particularity here is that the total amount of pesticide to be applied is fixed. To take into account this
  constraint, a virtual state variable ( \\(z(t)\\) ) is added where:

  .. math::
    z'(t) = u(t), \\; z(0) = 0, \\; z(T) = B

  Finally, note that `guess_a` and `guess_b` have been carefully chosen in the study cases to allow for fast iteration
  and ensure convergence.

  Notes
  -----
  x_0: Initial density of the pest and prey population \\( (x_0, x_1) \\)
  """

  def __init__(self, d_1=.1, d_2=.1, A=1., B=5.,
               guess_a=-.52, guess_b=.5, M=1.,
               x_0=(10., 1., 0.), T=10.):
    super().__init__(
      x_0=jnp.array([
        x_0[0],
        x_0[1],
        x_0[2]
      ]),  # Starting state
      x_T=[None, None, B],  # Terminal state, if any (only z(T) = B is prescribed)
      T=T,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1 ...) are given first,
        [0., 11.],  # followed by bounds over controls (u_0, u_1, ...)
        [0., 11.],
        [0., 5.],
        [0, M]
      ]),
      terminal_cost=True,
      discrete=False,
    )
    self.adj_T = jnp.array([1, 0, 0])  # Final condition over the adjoint, if any
    self.d_1 = d_1
    """Impact of the pesticide on the pest population"""
    self.d_2 = d_2
    """Impact of the pesticide on the prey population"""
    self.A = A
    """Weight parameter balancing the cost"""
    self.guess_a = guess_a
    """Node 2 at which the secant method begins its iteration (Newton's method)"""
    self.guess_b = guess_b
    """Node 1 at which the secant method begins its iteration (Newton's method)"""
    self.M = M
    """Bound on pesticide application at a given time"""

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Lotka-Volterra dynamics plus the virtual state z tracking cumulative pesticide use."""
    x_0, x_1, x_2 = x_t
    # FIX: the signature advertises Union[float, jnp.ndarray], but the previous
    # code accessed `u_t.ndim` directly and raised AttributeError for plain
    # Python floats. Normalize to a jnp array first; array behavior unchanged.
    u_t = jnp.asarray(u_t)
    if u_t.ndim > 0:
      u_t, = u_t  # unwrap a single-element control vector
    d_x = jnp.array([
      (1 - x_1) * x_0 - self.d_1 * x_0 * u_t,
      (x_0 - 1) * x_1 - self.d_2 * x_1 * u_t,
      u_t,
    ])
    return d_x

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same as `dynamics`, but reads d_1 and d_2 from `params` (system identification)."""
    d_1 = params['d_1']
    d_2 = params['d_2']
    x_0, x_1, x_2 = x_t
    # Same float/array normalization as in `dynamics`.
    u_t = jnp.asarray(u_t)
    if u_t.ndim > 0:
      u_t, = u_t
    d_x = jnp.array([
      (1 - x_1) * x_0 - d_1 * x_0 * u_t,
      (x_0 - 1) * x_1 - d_2 * x_1 * u_t,
      u_t,
    ])
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost (A/2) u^2; the pest-population term is a terminal cost."""
    return self.A * 0.5 * u_t ** 2

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost is deliberately not parametrized."""
    return self.A * 0.5 * u_t ** 2  # Not learning cost for now

  def terminal_cost_fn(self, x_T: Optional[jnp.ndarray], u_T: Optional[jnp.ndarray],
                       T: Optional[jnp.ndarray] = None) -> float:
    """Terminal cost: the final pest population x_0(T)."""
    return x_T[0]

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint system; expects `u_t` as a 1-element array (indexed as u_t[0])."""
    return jnp.array([
      adj_t[0] * (x_t[1] - 1 + self.d_1 * u_t[0]) - adj_t[1] * x_t[1],
      adj_t[0] * x_t[0] + adj_t[1] * (1 - x_t[0] + self.d_2 * u_t[0]),
      0
    ])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition over a whole trajectory (rows = time steps),
    projected onto the control bounds."""
    char = (adj_t[:, 0] * self.d_1 * x_t[:, 0] + adj_t[:, 1] * self.d_2 * x_t[:, 1] - adj_t[:, 2]) / self.A
    char = char.reshape(-1, 1)
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 5,190 | 36.615942 | 132 | py |
myriad | myriad-main/myriad/systems/lenhart/bacteria.py | from typing import Union, Optional
import gin
import jax.numpy as jnp
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Bacteria(IndirectFHCS):
  """Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 7, Lab 3)

  This environment models the concentration level of a bacteria population that we try to control by providing
  a chemical nutrient that stimulates growth. However, the use of the chemical leads to the production of
  a chemical byproduct by the bacteria that in turn hinders growth. The state ( \\(x\\) ) is the bacteria population
  concentration, while the control ( \\(u\\) ) is the amount of chemical nutrient added. We are trying to maximize:

  (note: fbsm estimates different trajectory here than what you actually
  get when you integrate the given controls. Weird!)

  Note that the state must always remain positive in this domain.
  For this reason, the dynamics are halted if the state ever reaches 0
  (or passes it due to numerical integration issues).

  .. math::
    \\begin{align}
    & \\max_u \\quad &&Cx(1) - \\int_0^1 u^2(t) dt \\\\
    & \\; \\mathrm{s.t.} \\quad &&x'(t) = rx(t) + Au(t)x(t) - Bu^2(t)e^{-x(t)} \\\\
    & && x(0)=x_0, \\\\
    & && A,B,C \\geq 0
    \\end{align}
  """

  def __init__(self, r=1., A=1., B=12., C=1., x_0=1.):
    super().__init__(
      x_0=jnp.array([x_0]),  # Starting state
      x_T=None,  # Terminal state, if any
      T=1,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1 ...) are given first,
        # [jnp.NINF, jnp.inf],  # followed by bounds over controls (u_0, u_1,...)
        [0., 10.],  # followed by bounds over controls (u_0, u_1,...)
        # [jnp.NINF, jnp.inf],
        [0., 2.],  # set based on observation of optimal
      ]),
      terminal_cost=True,
      discrete=False,
    )
    self.adj_T = jnp.array([C])  # Final condition over the adjoint, if any
    self.r = r
    """Growth rate"""
    self.A = A
    """Relative strength of the chemical nutrient"""
    self.B = B  # used to be set at 12
    """Strength of the byproduct"""
    self.C = C
    """Payoff associated to the final bacteria population concentration"""

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Bacterial growth boosted by the nutrient u and hindered by its byproduct."""
    # x_t += 0.1  # Niki added to avoid negatives
    d_x = self.r * x_t + self.A * u_t * x_t - self.B * u_t ** 2 * jnp.exp(-x_t)
    return d_x

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same as `dynamics`, but reads r, A and B from `params` (system identification)."""
    r = params['r']
    A = params['A']
    B = params['B']
    # x_t += 0.1  # Niki added to avoid negatives
    d_x = r * x_t + A * u_t * x_t - B * u_t ** 2 * jnp.exp(-x_t)
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost u^2 (the maximized payoff enters as a terminal cost)."""
    return u_t ** 2  # Maximization problem converted to minimization

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost is deliberately not parametrized."""
    return u_t ** 2  # Maximization problem converted to minimization

  def terminal_cost_fn(self, x_T: Optional[jnp.ndarray], u_T: Optional[jnp.ndarray],
                       T: Optional[jnp.ndarray] = None) -> float:
    """Terminal payoff C x(T), negated for minimization."""
    return -self.C * x_T.squeeze()  # squeeze is necessary for using SHOOTING

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation lambda' = -lambda (r + A u + B u^2 e^{-x})."""
    return -adj_t * (self.r + self.A * u_t + self.B * u_t ** 2 * jnp.exp(-x_t))

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition from dH/du = 0, projected onto the control bounds."""
    char = adj_t * self.A * x_t / (2 * (1 + self.B * adj_t * jnp.exp(-x_t)))
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 4,287 | 43.666667 | 120 | py |
myriad | myriad-main/myriad/systems/lenhart/harvest.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class Harvest(IndirectFHCS):
  """
  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 11, Lab 6)
  The model was adapted from Wayne M. Getz. Optimal control and principles in population management.
  Proceedings of Symposia in Applied Mathematics, 30:63–82, 1984.

  This environment models the population level (scaled) of a population
  (for example, of vegetables) to be harvested.
  The time scale is too small for reproduction to occur, but the mass
  of each member of the population will grow over time following
  \\(\\frac{kt}{t+1}\\). The state ( \\(x\\) ) is the population level,
  while the control ( \\(u\\) ) is the harvest rate.
  We are trying to maximize:

  .. math::
    \\begin{align}
    & \\max_u \\quad && \\int_0^T A \\frac{kt}{t+1}x(t)u(t) - u^2(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x'(t) = -(m+u(t)) x(t) \\\\
    & && x(0)=x_0, \\; 0\\leq u(t) \\leq M, \\; A > 0
    \\end{align}
  """

  def __init__(self, A=5., k=10., m=.2, M=1., x_0=.4, T=10.):
    super().__init__(
      x_0=jnp.array([x_0]),  # Starting state
      x_T=None,  # Terminal state, if any
      T=T,  # Duration of experiment
      bounds=jnp.array([  # Bounds over the states (x_0, x_1, ...) are given first,
        [jnp.NINF, jnp.inf],  # followed by bounds over controls (u_0,u_1, ...)
        [0, M],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # Final condition over the adjoint, if any
    self.A = A
    """Nonnegative weight parameter"""
    self.k = k
    """Maximum mass of the species"""
    self.m = m
    """Natural death rate of the species"""
    self.M = M
    """Upper bound on harvesting that may represent physical limitations"""

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Exponential decay of the population from natural death m plus harvesting u."""
    d_x = -(self.m+u_t)*x_t
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Time-dependent running cost; mass grows as kt/(t+1), so harvest value grows with t."""
    return -1*self.A*(self.k*t/(t+1))*x_t*u_t + u_t**2  # Maximization problem converted to minimization

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint equation (explicitly time-dependent through the mass term kt/(t+1))."""
    return adj_t*(self.m+u_t) - self.A*(self.k*t/(t+1))*u_t

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality condition from dH/du = 0, projected onto [0, M]."""
    char = 0.5*x_t * (self.A*(self.k*t/(t+1)) - adj_t)
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 2,859 | 38.722222 | 112 | py |
myriad | myriad-main/myriad/systems/lenhart/bear_populations.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class BearPopulations(IndirectFHCS):
  """
  Black bear metapopulation harvesting model.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 15, Lab 9).
  See also R. A. Salinas, S. Lenhart, and L. J. Gross. Control of a metapopulation
  harvesting model for black bears. Natural Resource Modeling, 18:307-21, 2005.

  Bear densities are tracked over three connected areas: a protected park (x_0), a forest
  (x_1) and an urban area (x_2). Natural reproduction happens only in the park and forest;
  the goal is to limit migration into the urban area. The controls are harvesting (hunting)
  rates in the park (u_p, more costly) and the forest (u_f):

  .. math::
    \\begin{align}
    &\\min_{u_p,u_f} \\quad &&\\int_0^T x_2(t) + c_p u_p(t)^2 + c_f u_f(t)^2 dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x_0'(t) = rx_0(t) - \\frac{r}{K}x_0(t)^2 + \\frac{m_f r}{K}\\big( 1 - \\frac{x_0(t)}{K} \\big)x_1(t)^2 - u_p(t)x_0(t) \\\\
    & && x_1'(t) = rx_1(t) - \\frac{r}{K}x_1(t)^2 + \\frac{m_p r}{K}\\big( 1 - \\frac{x_1(t)}{K} \\big)x_0(t)^2 - u_f(t)x_1(t) \\\\
    & && x_2'(t) = r(1-m_p)\\frac{x_0(t)^2}{K} + r(1-m_f)\\frac{x_1(t)^2}{K} + \\frac{m_f r}{K^2}x_0(t)x_1(t)^2 + \\frac{m_p r}{K^2}x_0(t)^2x_1(t) \\\\
    & && 0\\leq u_p(t) \\leq 1, \\; 0\\leq u_f(t) \\leq 1
    \\end{align}
  """

  def __init__(self, r=.1, K=.75, m_p=.5, m_f=.5, c_p=10_000,
               c_f=10, x_0=(.4, .2, 0.), T=25):
    super().__init__(
      x_0=jnp.array([x_0[0], x_0[1], x_0[2]]),  # initial densities (park, forest, urban)
      x_T=None,                                 # no terminal state constraint
      T=T,                                      # duration of the experiment
      bounds=jnp.array([  # state bounds first (x_0, x_1, x_2), then control bounds (u_p, u_f)
        [0., 2.],
        [0., 2.],
        [0., 2.],
        [0., .2],
        [0., .2],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.r = r      # population growth rate
    self.K = K      # carrying capacity of the areas (density wise)
    self.m_p = m_p  # proportion of the park boundary connected to the forest areas
    self.m_f = m_f  # proportion of the forest areas connected to the park area
    self.c_p = c_p  # cost of harvesting in the park
    self.c_f = c_f  # cost of harvesting in the forest

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Continuous-time dynamics of the (park, forest, urban) bear densities."""
    k = self.r / self.K
    k2 = self.r / self.K ** 2
    park, forest, urban = x_t
    u_park, u_forest = u_t
    d_park = self.r * park - k * park ** 2 + k * self.m_f * (1 - park / self.K) * forest ** 2 - u_park * park
    d_forest = self.r * forest - k * forest ** 2 + k * self.m_p * (1 - forest / self.K) * park ** 2 - u_forest * forest
    d_urban = (k * (1 - self.m_p) * park ** 2 + k * (1 - self.m_f) * forest ** 2
               + k2 * self.m_f * park * forest ** 2 + k2 * self.m_p * (park ** 2) * forest)
    return jnp.array([d_park, d_forest, d_urban])

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same dynamics as `dynamics`, but with (r, K, m_f, m_p) supplied externally."""
    r = params['r']
    K = params['K']
    m_f = params['m_f']
    m_p = params['m_p']
    k = r / K
    k2 = r / K ** 2
    park, forest, urban = x_t
    u_park, u_forest = u_t
    d_park = r * park - k * park ** 2 + k * m_f * (1 - park / K) * forest ** 2 - u_park * park
    d_forest = r * forest - k * forest ** 2 + k * m_p * (1 - forest / K) * park ** 2 - u_forest * forest
    d_urban = (k * (1 - m_p) * park ** 2 + k * (1 - m_f) * forest ** 2
               + k2 * m_f * park * forest ** 2 + k2 * m_p * (park ** 2) * forest)
    return jnp.array([d_park, d_forest, d_urban])

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost: urban bear density plus quadratic harvesting effort in both areas."""
    u_park, u_forest = u_t[0], u_t[1]
    return x_t[2] + self.c_p * u_park ** 2 + self.c_f * u_forest ** 2

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost function is not learned for now."""
    u_park, u_forest = u_t[0], u_t[1]
    return x_t[2] + self.c_p * u_park ** 2 + self.c_f * u_forest ** 2

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE of the Hamiltonian system (forward-backward sweep)."""
    k = self.r / self.K
    k2 = self.r / self.K ** 2
    d_lam_0 = (adj_t[0] * (2 * k * x_t[0] + k2 * self.m_f * x_t[1] ** 2 + u_t[0] - self.r)
               - adj_t[1] * (2 * k * self.m_p * (1 - x_t[1] / self.K) * x_t[0])
               + adj_t[2] * (
                 2 * k * (self.m_p - 1) * x_t[0] - k2 * self.m_f * x_t[1] ** 2 - 2 * k2 * self.m_p * x_t[0] * x_t[1]))
    d_lam_1 = (adj_t[1] * (2 * k * x_t[1] + k2 * self.m_p * x_t[0] ** 2 + u_t[1] - self.r)
               - adj_t[0] * (2 * k * self.m_f * (1 - x_t[0] / self.K) * x_t[1])
               + adj_t[2] * (
                 2 * k * (self.m_f - 1) * x_t[1] - 2 * k2 * self.m_f * x_t[0] * x_t[1] - k2 * self.m_p * x_t[0] ** 2))
    return jnp.array([d_lam_0, d_lam_1, -1])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality characterization u* = lambda_i x_i / (2 c_i), clipped to the control bounds."""
    u_park = (adj_t[:, 0] * x_t[:, 0] / (2 * self.c_p)).reshape(-1, 1)
    u_park = jnp.minimum(self.bounds[-2, 1], jnp.maximum(self.bounds[-2, 0], u_park))
    u_forest = (adj_t[:, 1] * x_t[:, 1] / (2 * self.c_f)).reshape(-1, 1)
    u_forest = jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], u_forest))
    return jnp.hstack((u_park, u_forest))
| 6,332 | 42.675862 | 178 | py |
myriad | myriad-main/myriad/systems/lenhart/invasive_plant.py | import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class InvasivePlant(IndirectFHCS):
  """
  Discrete-time control of an invasive plant species with five population foci.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 24, Lab 14).
  First studied in M. E. Moody and R. N. Mack. Controlling the spread of plant invasions:
  the importance of nascent foci. Journal of Applied Ecology, 25:1009-21, 1988. The general
  formulation used here appears in A. J. Whittle, S. Lenhart, and L. J. Gross. Optimal control
  for management of an invasive plant species. Mathematical Biosciences and Engineering,
  to appear, 2007.

  This scenario is modified from the original formulation so that the terminal state cost is
  linear instead of quadratic; optimal solutions differ from the original problem but the
  model behavior is similar.

  A main focus population (x_4) and four smaller satellite populations (x_0..x_3) occupy
  circular areas tracked by their radius. Each year, after the growth period, a ratio
  u_{j,t} of each population radius is removed, giving the discrete-time problem:

  .. math::
    \\begin{align}
    & \\min_{u} \\quad &&\\sum_{j=0}^4 \\bigg[x_{j,T} + B\\sum_{t=0}^{T-1} u_{j,t}^2 \\bigg] \\\\
    & \\; \\mathrm{s.t.}\\quad && x_{j,t+1} = \\bigg( x_{j,t} + \\frac{k x_{j,t}}{\\epsilon + x_{j,t}}\\bigg) (1-u_{j,t}) ,\\; x_{j,0} = \\rho_j \\\\
    & && 0 \\leq u_{j,t} \\leq 1
    \\end{align}
  """

  def __init__(self, B=1., k=1., eps=.01,
               x_0=(.5, 1., 1.5, 2., 10.), T=10.):
    super().__init__(
      x_0=jnp.array(x_0),  # initial radii of the five foci
      x_T=None,            # no terminal state constraint
      T=T,                 # number of annual steps
      bounds=jnp.array([   # state bounds first (x_0..x_4), then control bounds (u_0..u_4)
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1],
      ]),
      terminal_cost=False,
      discrete=True,
    )
    self.adj_T = jnp.ones(5)  # adjoint terminal condition from the linear terminal cost
    self.B = B      # positive weight parameter
    self.k = k      # spread rate of the population
    self.eps = eps  # small constant scaling the spread by r/(eps+r) so eradication is possible

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """One annual step: saturating radial growth followed by removal of a fraction u_t."""
    grown = x_t + x_t * self.k / (self.eps + x_t)
    return grown * (1 - u_t)

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Per-step cost: weighted quadratic removal effort summed over the five foci."""
    effort = (u_t ** 2).sum()
    return self.B * effort

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Backward (discrete) adjoint step of the sweep."""
    return adj_t * (1 - u_t) * (1 + self.eps * self.k / (self.eps + x_t) ** 2)

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality characterization for the discrete problem, clipped to the control bounds."""
    adj_next = adj_t[1:, :]   # discrete time: pair each control with the next-step adjoint
    x_curr = x_t[:-1, :]
    char = 0.5 * adj_next / self.B * (x_curr + x_curr * self.k / (self.eps + x_curr))
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))  # same bounds for every control

  def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray,
                    adj: Optional[jnp.ndarray] = None,
                    other_x: Optional[jnp.ndarray] = None) -> None:
    """Plot the state, control and adjoint trajectories of the sweep solution."""
    plt.figure(figsize=(9, 9))
    x, u, adj = x.T, u.T, adj.T
    t_state = jnp.linspace(0, self.T, x[0].shape[0])
    t_ctrl = jnp.linspace(0, self.T - 1, u[0].shape[0])
    t_adj = jnp.linspace(0, self.T, adj[0].shape[0])
    labels = ["Focus 1", "Focus 2", "Focus 3", "Focus 4", "Focus 5"]
    to_print = [0, 1, 2, 3, 4]  # indices of the foci to display

    plt.subplot(3, 1, 1)
    for idx, traj in enumerate(x):
      if idx in to_print:
        plt.plot(t_state, traj, 'o', label=labels[idx])
    if other_x is not None:
      for idx, traj in enumerate(other_x.T):
        plt.plot(t_ctrl, traj, 'o', label="integrated " + labels[idx])
    plt.legend()
    plt.title("Optimal state of dynamic system via forward-backward sweep")
    plt.ylabel("state (x)")

    plt.subplot(3, 1, 2)
    for idx, traj in enumerate(u):
      plt.plot(t_ctrl, traj, 'o', label='Focus ratio cropped')
    plt.legend()
    plt.title("Optimal control of dynamic system via forward-backward sweep")
    plt.ylabel("control (u)")

    plt.subplot(3, 1, 3)
    for idx, traj in enumerate(adj):
      if idx in to_print:
        plt.plot(t_adj, traj, "o")
    plt.title("Optimal adjoint of dynamic system via forward-backward sweep")
    plt.ylabel("adjoint (lambda)")
    plt.xlabel('time (s)')

    plt.tight_layout()
    plt.show()
myriad | myriad-main/myriad/systems/lenhart/epidemic_seirn.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class EpidemicSEIRN(IndirectFHCS):  # TODO : Add R calculation at the end
  """
  Optimal vaccination schedule for an SEIR epidemic with total population N.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 13, Lab 7).
  Additional information about this model and some of its variations can be found in
  H. R. Joshi, S. Lenhart, M. Y. Li, and L. Wang. Optimal control methods applied to disease
  models. AMS Volume on Mathematical Studies on Human Disease Dynamics Emerging Paradigms
  and Challenges, 410:187-207, 2006.

  The state integrated here is 4-dimensional: susceptible S(t) (x_0), exposed/latent E(t)
  (x_1), infectious I(t) (x_2), and total population N(t) (x_3). The recovered class R(t) is
  not integrated explicitly (R = N - S - E - I). The control u(t) is the vaccination rate
  among susceptible individuals; all individuals are born susceptible. We minimize the
  infectious burden plus the quadratic vaccination cost:

  .. math::
    \\begin{align}
    &\\min_u \\quad &&\\int_0^T A x_2(t) + u^2(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x_0'(t) = bx_3(t) - dx_0(t) - cx_0(t)x_2(t) - u(t)x_0(t),\\; x_0(0)\\geq 0 \\\\
    & && x_1'(t) = cx_0(t)x_2(t) - (e+d)x_1(t),\\; x_1(0)\\geq 0 \\\\
    & && x_2'(t) = ex_1(t) - (g+a+d)x_2(t),\\; x_2(0)\\geq 0 \\\\
    & && x_3'(t) = (b-d)x_3(t) - ax_2(t),\\; x_3(0)\\geq 0 \\\\
    & && 0\\leq u(t) \\leq 0.9, \\; A > 0
    \\end{align}

  Notes
  -----
  x_0: The initial state tuple is \\( (S(t_0), E(t_0), I(t_0), R(t_0)) \\); the integrated
  initial state is (S, E, I, N) with N = S + E + I + R.
  """

  def __init__(self, A=.1, b=.525, d=.5, c=.0001,
               e=.5, g=.1, a=.2, x_0=(1000., 100., 50., 15.), T=20.):
    super().__init__(
      x_0=jnp.array([
        x_0[0],
        x_0[1],
        x_0[2],
        jnp.sum(jnp.asarray(x_0)),  # N(0) = S(0) + E(0) + I(0) + R(0)
      ]),  # Starting state (S, E, I, N)
      x_T=None,  # no terminal state constraint
      T=T,       # duration of experiment
      bounds=jnp.array([  # state bounds first (x_0..x_3), then control bounds (u)
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [jnp.NINF, jnp.inf],
        [0, 0.9],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.b = b  # exponential birth rate of the population
    self.d = d  # exponential death rate of the population
    self.c = c  # incidence rate of contamination
    self.e = e  # rate at which exposed individuals become contagious (1/e is the mean latent period)
    self.g = g  # recovery rate among infectious individuals (1/g is the mean infectious period)
    self.a = a  # death rate due to the disease
    self.A = A  # weight balancing infectious-population reduction against vaccination cost

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """SEIR-with-total-population dynamics over (S, E, I, N)."""
    x_0, x_1, x_2, x_3 = x_t
    if u_t.ndim > 0:
      u_t, = u_t
    d_x = jnp.array([
      self.b*x_3 - self.d*x_0 - self.c*x_0*x_2 - u_t*x_0,  # S': births - deaths - infections - vaccinations
      self.c*x_0*x_2 - (self.e+self.d)*x_1,                # E': new exposures - (progression + deaths)
      self.e*x_1 - (self.g+self.a+self.d)*x_2,             # I': progression - (recovery + disease deaths + deaths)
      (self.b-self.d)*x_3 - self.a*x_2                     # N': net natural growth - disease deaths
    ])
    return d_x

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost: weighted infectious count plus quadratic vaccination effort."""
    return self.A*x_t[2] + u_t**2

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE lambda' = -dH/dx for the Hamiltonian of this problem."""
    return jnp.array([
      adj_t[0]*(self.d+self.c*x_t[2]+u_t[0]) - adj_t[1]*self.c*x_t[2],
      adj_t[1]*(self.e+self.d) - adj_t[2]*self.e,
      -self.A + adj_t[0]*self.c*x_t[0] - adj_t[1]*self.c*x_t[0] + adj_t[2]*(self.g+self.a+self.d)
      + adj_t[3]*self.a,
      # lambda_3' = -b*lambda_0 + lambda_3*(d - b); previous code had (self.d - self.d),
      # which is identically zero and dropped the lambda_3*(d - b) term.
      -self.b*adj_t[0] + adj_t[3]*(self.d - self.b)
    ])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality characterization u* = lambda_0 S / 2 from dH/du = 0, clipped to bounds."""
    char = adj_t[:, 0]*x_t[:, 0]/2
    char = char.reshape(-1, 1)
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 5,092 | 44.473214 | 149 | py |
myriad | myriad-main/myriad/systems/lenhart/hiv_treatment.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class HIVTreatment(IndirectFHCS):
  """
  Chemotherapy scheduling to limit HIV infectivity.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 14, Lab 8).
  Model adapted from: S. Butler, D. Kirschner, and S. Lenhart. Optimal control of chemotherapy
  affecting the infectivity of HIV. Advances in Mathematical Population Dynamics - Molecules,
  Cells and Man, 6:557-69, 1997.

  States: uninfected CD4+T cells (x_0), infected CD4+T cells (x_1), and free virus particles
  (x_2). The control is a chemotherapy drug that reduces the virus infectivity: u(t) = 0
  means maximum therapy, u(t) = 1 means no therapy. We maximize the uninfected cell count
  net of treatment cost:

  .. math::
    \\begin{align}
    & \\max_u \\quad && \\int_0^T A x_0(t) - (1-u(t))^2 dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x_0'(t) = \\frac{s}{1+x_2(t)} - m_1x_0(t) + rx_0(t)\\big[1 - \\frac{x_0(t)+x_1(t)}{T_{\\mathrm{max}}} \\big],\\; x_0(0)> 0 \\\\
    & && x_1'(t) = u(t)kx_2(t)x_0(t) - m_2x_1(t),\\; x_1(0)> 0 \\\\
    & && x_2'(t) = Nm_2x_1(t) - m_3x_2(t),\\; x_2(0)> 0 \\\\
    & && 0\\leq u(t) \\leq 1, \\; A > 0
    \\end{align}
  """

  def __init__(self, s=10., m_1=.02, m_2=.5, m_3=4.4, r=.03,
               T_max=1500., k=.000024, N=300., x_0=(800., .04, 1.5),
               A=.05, T=20.):
    super().__init__(
      x_0=jnp.array([x_0[0], x_0[1], x_0[2]]),  # (uninfected cells, infected cells, free virus)
      x_T=None,  # no terminal state constraint
      T=T,       # duration of experiment
      bounds=jnp.array([  # state bounds first (x_0..x_2), then control bounds (u)
        [0., 1600.],
        [0., 100.],
        [0., 100.],  # all were inf before, except control
        [0., 1.],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.s = s          # rate of generation of new CD4+T cells
    self.m_1 = m_1      # natural death rate of uninfected CD4+T cells
    self.m_2 = m_2      # natural death rate of infected CD4+T cells
    self.m_3 = m_3      # natural death rate of free virus particles
    self.r = r          # growth rate of CD4+T cells per day
    self.T_max = T_max  # maximum growth of CD4+T cells
    self.k = k          # infection rate of CD4+T cells by free virus particles
    self.N = N          # average number of virus particles produced before the host cell dies
    self.A = A          # weight parameter balancing the cost

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Continuous-time dynamics of (uninfected cells, infected cells, free virus)."""
    healthy, infected, virus = x_t
    if u_t.ndim > 0:
      u_t, = u_t
    d_healthy = (self.s / (1 + virus) - self.m_1 * healthy
                 + self.r * healthy * (1 - (healthy + infected) / self.T_max)
                 - u_t * self.k * healthy * virus)
    d_infected = u_t * self.k * healthy * virus - self.m_2 * infected
    d_virus = self.N * self.m_2 * infected - self.m_3 * virus
    return jnp.array([d_healthy, d_infected, d_virus])

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same dynamics as `dynamics`, with every model constant supplied externally."""
    k = params['k']
    m_1 = params['m_1']
    m_2 = params['m_2']
    m_3 = params['m_3']
    N = params['N']
    r = params['r']
    s = params['s']
    T_max = params['T_max']
    healthy, infected, virus = x_t
    if u_t.ndim > 0:
      u_t, = u_t
    d_healthy = (s / (1 + virus) - m_1 * healthy
                 + r * healthy * (1 - (healthy + infected) / T_max)
                 - u_t * k * healthy * virus)
    d_infected = u_t * k * healthy * virus - m_2 * infected
    d_virus = N * m_2 * infected - m_3 * virus
    return jnp.array([d_healthy, d_infected, d_virus])

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost (maximization recast as minimization)."""
    return -self.A * x_t[0] + (1 - u_t) ** 2

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost function is not learned for now."""
    return -self.A * x_t[0] + (1 - u_t) ** 2

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE of the Hamiltonian system."""
    healthy, infected, virus = x_t[0], x_t[1], x_t[2]
    u = u_t[0]
    lam_0, lam_1, lam_2 = adj_t[0], adj_t[1], adj_t[2]
    d_lam_0 = (-self.A + lam_0 * (self.m_1 - self.r * (1 - (healthy + infected) / self.T_max)
                                  + self.r * healthy / self.T_max
                                  + u * self.k * virus) - lam_1 * u * self.k * virus)
    d_lam_1 = lam_0 * self.r * healthy / self.T_max + lam_1 * self.m_2 - lam_2 * self.N * self.m_2
    d_lam_2 = (lam_0 * (self.s / (1 + virus) ** 2 + u * self.k * healthy)
               - lam_1 * u * self.k * healthy + lam_2 * self.m_3)
    return jnp.array([d_lam_0, d_lam_1, d_lam_2])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality characterization from dH/du = 0, clipped to the control bounds."""
    char = (1 + 0.5 * self.k * x_t[:, 0] * x_t[:, 2] * (adj_t[:, 1] - adj_t[:, 0])).reshape(-1, 1)
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 5,535 | 40.939394 | 163 | py |
myriad | myriad-main/myriad/systems/lenhart/bioreactor.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Bioreactor(IndirectFHCS):  # TODO: Add resolution for z state after optimization
  """
  Well-stirred bioreactor with a cost linear in the control (bang-bang optimum).

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 19, Lab 12).
  More on this class of model in A. Heinricher, S. Lenhart, and A. Solomon. The application of
  optimal control methodology to a well-stirred bioreactor. Natural Resource Modeling,
  9:61-80, 1995.

  A bacteria population x(t) degrades a contaminant z(t); adding a chemical nutrient u(t)
  boosts bacterial growth. Since the cost is linear in the control, the FBSM still applies:
  the optimal control is bang-bang (it jumps between the boundary values). Because only a
  terminal cost involves z(t), the problem simplifies to:

  .. math::
    \\begin{align}
    &\\max_{u} \\quad &&\\int_0^T Kx(t) - u(t) dt \\\\
    & \\; \\mathrm{s.t.}\\quad &&x'(t) = Gu(t)x(t) - Dx^2(t) ,\\; x(0) = x_0 \\\\
    & && 0 \\leq u(t) \\leq M
    \\end{align}

  Note: only x(t) is integrated here; the contaminant state is not tracked (see TODO above).
  """

  def __init__(self, K=2., G=1., D=1., M=1., x_0=(.5, .1), T=2.):
    super().__init__(
      x_0=jnp.array([x_0[0]]),  # only the bacteria concentration is integrated
      x_T=None,  # no terminal state constraint
      T=T,       # duration of experiment
      bounds=jnp.array([  # state bounds first (x), then control bounds (u)
        [0., 1.],
        [0., M],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.K = K  # weight parameter
    self.G = G  # maximum growth rate of the bacteria population
    self.D = D  # natural death rate of the bacteria population
    self.M = M  # physical limit on the nutrient application rate

  def dynamics(self, x_t: jnp.ndarray,
               u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None,
               t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Bacteria growth boosted by the nutrient input, with quadratic death."""
    if u_t.ndim > 0:
      u_t, = u_t
    bacteria = x_t[0]
    return jnp.array([self.G * u_t * bacteria - self.D * bacteria ** 2])

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same dynamics as `dynamics`, with (G, D) supplied externally."""
    G = params['G']
    D = params['D']
    if u_t.ndim > 0:
      u_t, = u_t
    bacteria = x_t[0]
    return jnp.array([G * u_t * bacteria - D * bacteria ** 2])

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Running cost (maximization recast as minimization)."""
    return -self.K * x_t[0] + u_t

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost function is not learned for now."""
    return -self.K * x_t[0] + u_t

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE of the Hamiltonian system."""
    lam = adj_t[0]
    return jnp.array([
      -self.K - self.G * u_t[0] * lam + 2 * self.D * x_t[0] * lam
    ])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Bang-bang characterization: push past the bounds by the switching sign, then clip."""
    switch = -1 + self.G * adj_t[:, 0] * x_t[:, 0]  # sign of dH/du decides the bang
    big = jnp.max(jnp.abs(self.bounds[-1]))
    char = jnp.sign(switch.reshape(-1, 1)) * 2 * big + big
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 4,278 | 40.95098 | 133 | py |
myriad | myriad-main/myriad/systems/lenhart/timber_harvest.py | from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class TimberHarvest(IndirectFHCS):
  """
  Timber farm reinvestment problem with bang-bang optimal control.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 18, Lab 11).
  Additional information can be found in Morton I. Kamien and Nancy L. Schwartz. Dynamic
  Optimization: The Calculus of Variations and Optimal Control in Economics and Management.
  North-Holland, New York, 1991.

  Raw timber x(t) is produced and sold immediately; the operators reinvest a fraction u(t)
  of the revenue into the plant (stimulating future production) at the price of losing
  potential interest at rate r over the horizon. The cost is linear in the control, so the
  optimum is of the bang-bang type:

  .. math::
    \\begin{align}
    & \\max_{u} \\quad && \\int_0^T e^{-rt}x(t)[1 - u(t)] dt \\\\
    & \\mathrm{s.t.}\\quad && x'(t) = kx(t)u(t) ,\\; x(0) > 0 \\\\
    & && 0 \\leq u(t) \\leq 1
    \\end{align}
  """

  def __init__(self, r=0., k=1., x_0=100., T=5.):
    super().__init__(
      x_0=jnp.array([x_0]),  # initial timber production
      x_T=None,  # no terminal state constraint
      T=T,       # duration of experiment
      bounds=jnp.array([  # state bounds first (x), then control bounds (u)
        [0., 20_000],
        [0., 1.],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.r = r  # discount rate, encouraging investment early on
    self.k = k  # return of reinvestment, net of labor and land costs

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Production grows in proportion to the reinvested revenue."""
    if u_t.ndim > 0:
      u_t, = u_t
    return jnp.array([self.k * x_t[0] * u_t])

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same dynamics as `dynamics`, with k supplied externally."""
    k = params['k']
    if u_t.ndim > 0:
      u_t, = u_t
    return jnp.array([k * x_t[0] * u_t])

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Discounted revenue not reinvested (maximization recast as minimization)."""
    return -jnp.exp(-self.r * t) * x_t[0] * (1 - u_t)

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Identical to `cost`; the cost function is not learned for now."""
    return -jnp.exp(-self.r * t) * x_t[0] * (1 - u_t)

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE of the Hamiltonian system."""
    discount = jnp.exp(-self.r * t[0])
    return jnp.array([
      u_t[0] * (discount - self.k * adj_t[0]) - discount
    ])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Bang-bang characterization: push past the bounds by the switching sign, then clip."""
    switch = x_t[:, 0] * (self.k * adj_t[:, 0] - jnp.exp(-self.r * t[:, 0]))  # sign of dH/du
    char = jnp.sign(switch.reshape(-1, 1)) * 2 * jnp.max(jnp.abs(self.bounds[-1])) + jnp.max(jnp.abs(self.bounds[-1]))
    return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
| 4,409 | 41.403846 | 118 | py |
myriad | myriad-main/myriad/systems/lenhart/glucose.py | import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Glucose(IndirectFHCS):
  """
  Short-horizon blood glucose regulation for a diabetic patient via insulin injection.

  Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 16, Lab 10).
  The model is presented in more detail in Martin Eisen. Mathematical Methods and Models in
  the Biological Sciences. Prentice Hall, Englewood Cliffs, New Jersey, 1988.

  States: blood glucose level x_0(t) and net hormonal (insulin) concentration x_1(t); the
  control u(t) is the injected insulin rate. The patient is assumed unable to produce
  natural insulin. The model is only meant for a short window: keep T under 0.45 (measured
  in days, i.e. ~11 hours) for it to make sense.

  The objective tracks the glucose level towards a target l while penalizing treatment:

  .. math::
    \\begin{align}
    & \\min_{u} \\quad && \\int_0^T A(x_0(t)-l)^2 + u(t)^2 dt \\\\
    & \\; \\mathrm{s.t.}\\quad && x_0'(t) = -ax_0(t) - bx_1(t) ,\\; x_0(0) > 0 \\\\
    & && x_1'(t) = -cx_1(t) + u(t) ,\\; x_1(0)=0 \\\\
    & && a,b,c > 0, \\; A \\geq 0
    \\end{align}
  """

  def __init__(self, a=1., b=1., c=1., A=2., l=.5, x_0=(.75, 0.), T=.2):
    super().__init__(
      x_0=jnp.array([x_0[0], x_0[1]]),  # (initial glucose level, initial insulin level)
      x_T=None,  # no terminal state constraint
      T=T,       # keep under 0.45 for the model to stay meaningful
      bounds=jnp.array([  # state bounds first (x_0, x_1), then control bounds (u)
        [0., 1.],
        [0., 1.],
        [0., 0.01],
      ]),
      terminal_cost=False,
      discrete=False,
    )
    self.adj_T = None  # no final condition over the adjoint
    self.a = a  # glucose decrease rate from its use by the body
    self.b = b  # glucose decrease rate from insulin-driven degradation
    self.c = c  # insulin degradation rate
    self.A = A  # weight balancing tracking error against treatment cost
    self.l = l  # desired blood glucose level

  def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
               v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Linear glucose/insulin dynamics driven by the injection rate."""
    glucose, insulin = x_t
    if u_t.ndim > 0:
      u_t, = u_t
    return jnp.array([
      -self.a * glucose - self.b * insulin,
      -self.c * insulin + u_t,
    ])

  def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                            v_t: Optional[Union[float, jnp.ndarray]] = None,
                            t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Same dynamics as `dynamics`, with (a, b, c) supplied externally."""
    a = params['a']
    b = params['b']
    c = params['c']
    glucose, insulin = x_t
    if u_t.ndim > 0:
      u_t, = u_t
    return jnp.array([
      -a * glucose - b * insulin,
      -c * insulin + u_t,
    ])

  def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
    """Tracking error plus treatment effort, scaled up by 1e5 purely for visibility."""
    return 100_000 * (self.A * (x_t[0] - self.l) ** 2 + u_t ** 2)

  def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
                        t: Optional[jnp.ndarray] = None) -> float:
    """Same as `cost`; swap the commented lines in to learn the cost parameters too."""
    # A = params['A']  # Uncomment these and recomment the others
    # l = params['l']  # if we want to also learn the cost
    A = self.A
    l = self.l
    return 100_000 * (A * (x_t[0] - l) ** 2 + u_t ** 2)  # scaled up by 1e5 purely for visibility

  def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
              t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Adjoint ODE of the Hamiltonian system (unscaled cost form)."""
    return jnp.array([
      -2 * self.A * (x_t[0] - self.l) + adj_t[0] * self.a,
      adj_t[0] * self.b + adj_t[1] * self.c,
    ])

  def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
                             t: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Optimality characterization u* = -lambda_1 / 2 from dH/du = 0."""
    # NOTE(review): unlike the sibling Lenhart systems, this characterization is NOT
    # clipped to the control bounds — presumably the optimum is interior; confirm.
    return (-adj_t[:, 1] / 2).reshape(-1, 1)
| 4,719 | 36.165354 | 121 | py |
myriad | myriad-main/myriad/trajectory_optimizers/forward_backward_sweep.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
# from jax.ops import index_update
# from ipopt import minimize_ipopt
from scipy.optimize import minimize
from dataclasses import dataclass
from typing import Callable, Dict, Tuple, Union
from myriad.config import Config, HParams, OptimizerType, SystemType, IntegrationMethod, QuadratureRule
from myriad.custom_types import Solution
from myriad.nlp_solvers import solve
from myriad.systems import FiniteHorizonControlSystem, IndirectFHCS
from myriad.utils import integrate_in_parallel, integrate_time_independent, \
integrate_time_independent_in_parallel, integrate_fbsm
from myriad.trajectory_optimizers.base import IndirectMethodOptimizer
class FBSM(IndirectMethodOptimizer):  # Forward-Backward Sweep Method
  """
  The Forward-Backward Sweep Method, as described in Optimal Control Applied to Biological Models, Lenhart & Workman.

  An iterative solver that, given an initial guess over the controls, will do a forward pass to retrieve the state
  variables trajectory followed by a backward pass to retrieve the adjoint variables trajectory. The optimality
  characterization is then used to update the control values.
  The process is repeated until convergence over the controls.
  """
  def __init__(self, hp: HParams, cfg: Config, system: IndirectFHCS):
    self.system = system
    self.N = hp.fbsm_intervals  # number of integration intervals
    self.h = system.T / self.N  # integration step size
    if system.discrete:
      # Discrete systems advance one unit per step, so there are exactly T steps of size 1
      self.N = int(system.T)
      self.h = 1
    state_shape = system.x_0.shape[0]
    # `system.bounds` stacks state bounds first, control bounds last
    control_shape = system.bounds.shape[0] - state_shape
    # State guess: known initial state followed by zeros at the N remaining nodes
    x_guess = jnp.vstack((system.x_0, jnp.zeros((self.N, state_shape))))
    if system.discrete:
      u_guess = jnp.zeros((self.N, control_shape))
    else:
      u_guess = jnp.zeros((self.N + 1, control_shape))
    # Adjoint guess: zeros, with the transversality condition adj_T imposed at the final node when provided
    if system.adj_T is not None:
      adj_guess = jnp.vstack((jnp.zeros((self.N, state_shape)), system.adj_T))
    else:
      adj_guess = jnp.zeros((self.N + 1, state_shape))
    self.t_interval = jnp.linspace(0, system.T, num=self.N + 1).reshape(-1, 1)
    guess, unravel = ravel_pytree((x_guess, u_guess, adj_guess))
    self.x_guess, self.u_guess, self.adj_guess = x_guess, u_guess, adj_guess
    x_bounds = system.bounds[:-1]
    u_bounds = system.bounds[-1:]
    bounds = jnp.vstack((x_bounds, u_bounds))
    self.x_bounds, self.u_bounds = x_bounds, u_bounds
    # Additional condition if terminal conditions are present
    self.terminal_cdtion = False
    if self.system.x_T is not None:
      num_term_state = 0
      for idx, x_Ti in enumerate(self.system.x_T):
        if x_Ti is not None:
          self.terminal_cdtion = True
          self.term_cdtion_state = idx  # index of the state variable carrying a terminal condition
          self.term_value = x_Ti        # required terminal value for that state
          num_term_state += 1
        if num_term_state > 1:
          raise NotImplementedError("Multiple states with terminal condition not supported yet")
    super().__init__(hp, cfg, bounds, guess, unravel)
  def reinitiate(self, a):
    """Reset the state/control/adjoint guesses for a new secant iterate of `sequencesolver`.

    Args:
      a: Trial value for the final adjoint entry of the terminally-constrained state.
    """
    state_shape = self.system.x_0.shape[0]
    control_shape = self.system.bounds.shape[0] - state_shape
    self.x_guess = jnp.vstack((self.system.x_0, jnp.zeros((self.N, state_shape))))
    self.u_guess = jnp.zeros((self.N + 1, control_shape))
    if self.system.adj_T is not None:
      adj_guess = jnp.vstack((jnp.zeros((self.N, state_shape)), self.system.adj_T))
    else:
      adj_guess = jnp.zeros((self.N + 1, state_shape))
    # self.adj_guess = index_update(adj_guess, (-1, self.term_cdtion_state), a)
    # Modern JAX functional update replacing the deprecated jax.ops.index_update
    self.adj_guess = adj_guess.at[(-1, self.term_cdtion_state)].set(a)
  def solve(self) -> Solution:
    """Solve the continuous optimal problem with the Forward-Backward Sweep Method.

    Returns:
      Dict with keys 'x' (states), 'u' (controls) and 'adj' (adjoints).
    """
    if self.terminal_cdtion:
      # Terminal state conditions require an outer secant loop over the final adjoint value
      return self.sequencesolver()
    n = 0
    # `n == 0` short-circuits the first pass, before old_* exist
    while n == 0 or self.stopping_criterion((self.x_guess, old_x), (self.u_guess, old_u), (self.adj_guess, old_adj)):
      old_u = self.u_guess.copy()
      old_x = self.x_guess.copy()
      old_adj = self.adj_guess.copy()
      # Forward pass: integrate the state dynamics from x_0 with the current controls
      self.x_guess = integrate_fbsm(self.system.dynamics, self.x_guess[0], self.u_guess, self.h, self.N,
                                    t=self.t_interval, discrete=self.system.discrete)[-1]
      # Backward pass: integrate the adjoint ODE from the terminal adjoint (negative step)
      self.adj_guess = integrate_fbsm(self.system.adj_ODE, self.adj_guess[-1], self.x_guess, -1 * self.h, self.N,
                                      self.u_guess, t=self.t_interval, discrete=self.system.discrete)[-1]
      u_estimate = self.system.optim_characterization(self.adj_guess, self.x_guess, self.t_interval)
      # Use basic convex approximation to update the guess on u
      self.u_guess = 0.5 * (u_estimate + old_u)
      n = n + 1
    solution = {
      'x': self.x_guess,
      'u': self.u_guess,
      'adj': self.adj_guess
    }
    return solution
  def sequencesolver(self) -> Solution:
    """Implement the secant method for the special case where there is a terminal value on some state variables in
    addition to the initial values.

    Searches over the final adjoint value `a` until the resulting trajectory hits the required
    terminal state value (residual V below tolerance).
    """
    # Disable the flag so the nested self.solve() calls run the plain FBSM loop instead of recursing here
    self.terminal_cdtion = False
    count = 0
    # Adjust lambda to the initial guess
    a = self.system.guess_a
    self.reinitiate(a)
    tmp_solution = self.solve()
    x_a = tmp_solution['x']
    # x_a, _, _ = self.solve()
    # Residual: how far the achieved terminal state is from the required value
    Va = x_a[-1, self.term_cdtion_state] - self.term_value
    b = self.system.guess_b
    self.reinitiate(b)
    tmp_solution = self.solve()
    x_b = tmp_solution['x']
    # x_b, _, _ = self.solve()
    Vb = x_b[-1, self.term_cdtion_state] - self.term_value
    while jnp.abs(Va) > 1e-10:
      # Keep the smaller residual in (a, Va); swap if needed
      if jnp.abs(Va) > jnp.abs(Vb):
        a, b = b, a
        Va, Vb = Vb, Va
      # Secant step
      d = Va * (b - a) / (Vb - Va)
      b = a
      Vb = Va
      a = a - d
      self.reinitiate(a)
      tmp_solution = self.solve()
      x_a = tmp_solution['x']
      # x_a, _, _ = self.solve()
      Va = x_a[-1, self.term_cdtion_state] - self.term_value
      count += 1
    solution = {
      'x': self.x_guess,
      'u': self.u_guess,
      'adj': self.adj_guess
    }
    return solution
| 6,006 | 36.779874 | 117 | py |
myriad | myriad-main/myriad/trajectory_optimizers/base.py | # (c) 2021 Nikolaus Howe
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from myriad.config import Config, HParams
# from myriad.config import
import jax
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
# from ipopt import minimize_ipopt
from scipy.optimize import minimize
from dataclasses import dataclass
from typing import Callable, Dict, Optional, Tuple
from myriad.config import SystemType
from myriad.nlp_solvers import solve
from myriad.systems import FiniteHorizonControlSystem, IndirectFHCS
from myriad.utils import integrate_in_parallel, integrate_time_independent, \
integrate_time_independent_in_parallel, integrate_fbsm
from myriad.custom_types import Params
@dataclass
class TrajectoryOptimizer(object):
  """
  An abstract class representing an "optimizer" which can find the solution
  (an optimal trajectory) to a given "system", using a direct approach.
  """
  hp: HParams
  """The hyperparameters"""
  cfg: Config
  """Additional hyperparameters"""
  objective: Callable[[jnp.ndarray], float]
  """Given a sequence of controls and states, calculates how "good" they are"""
  parametrized_objective: Callable[[Params, jnp.ndarray], float]
  # Like `objective`, but additionally receives model parameters (used by `solve_with_params`)
  constraints: Callable[[jnp.ndarray], jnp.ndarray]
  """Given a sequence of controls and states, calculates the magnitude of violations of dynamics"""
  parametrized_constraints: Callable[[Params, jnp.ndarray], float]
  # Like `constraints`, but additionally receives model parameters (used by `solve_with_params`)
  bounds: jnp.ndarray
  """Bounds for the states and controls"""
  guess: jnp.ndarray
  """An initial guess for the states and controls"""
  unravel: Callable[[jnp.ndarray], Tuple]
  """Use to separate decision variable array into states and controls"""
  require_adj: bool = False
  """Does this trajectory optimizer require adjoint dynamics in order to work?"""
  def __post_init__(self):
    # Optionally report configuration and decision-variable shapes for debugging
    if self.cfg.verbose:
      # print("optimizer type", self._type)
      print("hp opt type", self.hp.optimizer)
      print("hp quadrature rule", self.hp.quadrature_rule)
      # print(f"x_guess.shape = {self.x_guess.shape}")
      # print(f"u_guess.shape = {self.u_guess.shape}")
      print(f"guess.shape = {self.guess.shape}")
      # print(f"x_bounds.shape = {self.x_bounds.shape}")
      # print(f"u_bounds.shape = {self.u_bounds.shape}")
      print(f"bounds.shape = {self.bounds.shape}")
    if self.hp.system == SystemType.INVASIVEPLANT:
      raise NotImplementedError("Discrete systems are not compatible with Trajectory trajectory_optimizers")
  def solve(self) -> Dict[str, jnp.ndarray]:
    """Hand the objective, constraints, bounds and initial guess to the NLP solver and return its solution."""
    opt_inputs = {
      'objective': self.objective,
      'guess': self.guess,
      'constraints': self.constraints,
      'bounds': self.bounds,
      'unravel': self.unravel
    }
    return solve(self.hp, self.cfg, opt_inputs)
  # TODO: fix solve of FBSM
  def solve_with_params(self, params: Params, guess: Optional[jnp.ndarray] = None) -> Dict[str, jnp.ndarray]:
    """Like `solve`, but closes the parametrized objective/constraints over `params`.

    Args:
      params: Model parameters passed to the parametrized objective and constraints.
      guess: Optional replacement for the default initial guess.
    """
    opt_inputs = {
      'objective': (lambda xs_and_us: self.parametrized_objective(params, xs_and_us)),
      'guess': self.guess,
      'constraints': (lambda xs_and_us: self.parametrized_constraints(params, xs_and_us)),
      'bounds': self.bounds,
      'unravel': self.unravel
    }
    if guess is not None:
      opt_inputs['guess'] = guess
    return solve(self.hp, self.cfg, opt_inputs)
  # NOTE: I believe FBSM doesn't work here either
  # You can override these if you want to enable end-to-end planning and model learning
  # def parametrized_objective(self, xs_and_us, params):
  #   raise NotImplementedError
  #   return self.objective(xs_and_us)
  # def parametrized_constraints(self, xs_and_us, params):
  #   raise NotImplementedError
  #   return self.constraints(xs_and_us)
@dataclass
class IndirectMethodOptimizer(object):
  """
  Base class for indirect-method trajectory optimizers, i.e. optimizers that rely on
  Pontryagin's maximum principle.
  """
  hp: HParams  # collection of hyperparameters for the experiment
  cfg: Config  # configuration options that should not impact results
  bounds: jnp.ndarray  # (lower, upper) bounds over the state variables, followed by bounds over the controls
  guess: jnp.ndarray  # initial guess on x_t, u_t and adj_t
  unravel: Callable[[jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]  # splits the flat decision vector back into parts
  require_adj: bool = True  # indirect methods rely on the adjoint dynamics being available

  def solve(self) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
    """Run the optimization; concrete subclasses must implement this."""
    raise NotImplementedError

  def stopping_criterion(self, x_iter: Tuple[jnp.ndarray, jnp.ndarray], u_iter: Tuple[jnp.ndarray, jnp.ndarray],
                         adj_iter: Tuple[jnp.ndarray, jnp.ndarray], delta: float = 0.001) -> bool:
    """
    Decide whether the iterative solver should keep going.

    For each of (controls, states, adjoints), compares the total absolute change since the
    previous iterate against a `delta` fraction of the current iterate's total magnitude.
    Returns True (keep iterating) while any quantity moved by more than its tolerance.
    """
    margins = []
    for current, previous in (u_iter, x_iter, adj_iter):
      tolerance = jnp.abs(current).sum(axis=0) * delta
      change = jnp.abs(current - previous).sum(axis=0)
      margins.append(tolerance - change)
    return jnp.min(jnp.hstack(margins)) < 0
| 5,442 | 37.330986 | 149 | py |
myriad | myriad-main/myriad/trajectory_optimizers/shooting.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import numpy as np
from jax.flatten_util import ravel_pytree
from myriad.config import Config, HParams, IntegrationMethod
from myriad.custom_types import Control, Params, Timestep
from myriad.systems import FiniteHorizonControlSystem
from myriad.utils import integrate_in_parallel, integrate_time_independent, integrate_time_independent_in_parallel
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
class MultipleShootingOptimizer(TrajectoryOptimizer):
  def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem, key: jax.random.PRNGKey = None):
    # TODO: make the key live in the hparams
    """
    An optimizer that performs direct multiple shooting.
    For reference, see https://epubs.siam.org/doi/book/10.1137/1.9780898718577
    Args:
      hp: Hyperparameters
      cfg: Additional hyperparameters
      system: The system on which to perform the optimization
      key: Optional PRNG key (defaults to one seeded from hp.seed)
    """
    num_steps = hp.intervals * hp.controls_per_interval
    step_size = system.T / num_steps
    interval_size = system.T / hp.intervals
    state_shape = system.x_0.shape[0]
    # `system.bounds` stacks state bounds first, control bounds last
    control_shape = system.bounds.shape[0] - state_shape
    # RK4 needs control values at interval midpoints too, doubling the control grid
    midpoints_const = 2 if hp.integration_method == IntegrationMethod.RK4 else 1
    if key is None:
      self.key = jax.random.PRNGKey(hp.seed)
    else:
      self.key = key
    #################
    # Initial Guess #
    #################
    # Controls
    # TODO: decide if we like this way of guessing controls. If yes, then add it to the other trajectory_optimizers too.
    self.key, subkey = jax.random.split(self.key)
    u_lower = system.bounds[-1, 0]
    u_upper = system.bounds[-1, 1]
    controls_guess = jnp.zeros((midpoints_const * num_steps + 1, control_shape))
    # if jnp.isfinite(u_lower) and jnp.isfinite(u_upper):
    #   controls_guess += jax.random.normal(subkey, (midpoints_const * num_steps + 1, control_shape)) * (
    #           u_upper - u_lower) * 0.05
    print("the controls guess is", controls_guess.shape)
    # States
    if system.x_T is not None:
      row_guesses = []
      # For the state variables which have a required end state, interpolate between start and end;
      # otherwise, use rk4 with initial controls as a first guess at intermediate and end state values
      for i in range(0, len(system.x_T)):
        if system.x_T[i] is not None:
          row_guess = jnp.linspace(system.x_0[i], system.x_T[i], num=hp.intervals + 1).reshape(-1, 1)
        else:
          _, row_guess = integrate_time_independent(system.dynamics, system.x_0,
                                                    controls_guess[::midpoints_const * hp.controls_per_interval],
                                                    interval_size,
                                                    hp.intervals, hp.integration_method)
          row_guess = row_guess[:, i].reshape(-1, 1)
        row_guesses.append(row_guess)
      x_guess = jnp.hstack(row_guesses)
    else:
      _, x_guess = integrate_time_independent(system.dynamics, system.x_0,
                                              controls_guess[::midpoints_const * hp.controls_per_interval],
                                              interval_size, hp.intervals, hp.integration_method)
    guess, unravel = ravel_pytree((x_guess, controls_guess))
    assert len(x_guess) == hp.intervals + 1  # we have one state decision var for each node, including start and end
    self.x_guess, self.u_guess = x_guess, controls_guess
    # Augment the dynamics so we can integrate cost the same way we do state
    def augmented_dynamics(x_and_c: jnp.ndarray, u: float, t: float) -> jnp.ndarray:
      """
      Augments the dynamics with the cost function, so that all can be integrated together
      Args:
        x_and_c: State and current cost (current cost doesn't affect the cost calculation)
        u: Control
        t: Time
      Returns:
        State derivative with the running cost appended as the last component
      """
      x, c = x_and_c[:-1], x_and_c[-1]
      return jnp.append(system.dynamics(x, u), system.cost(x, u, t))
    # Augment the dynamics so we can integrate cost the same way we do state
    def parametrized_augmented_dynamics(params: Params, x_and_c: jnp.ndarray, u: Control, t: Timestep) -> jnp.ndarray:
      """Like `augmented_dynamics`, but using the parametrized dynamics and cost of the system."""
      x, c = x_and_c[:-1], x_and_c[-1]
      return jnp.append(system.parametrized_dynamics(params, x, u), system.parametrized_cost(params, x, u, t))
    def reorganize_controls(us):  # This still works, even for higher-order control shape
      """
      Reorganize controls into per-interval arrays
      Go from having controls like (num_controls + 1, control_shape) (left)
      to like (hp.intervals, num_controls_per_interval + 1, control_shape) (right)
      [ 1. ,  1.1]                   [ 1. ,  1.1]
      [ 2. ,  2.1]                   [ 2. ,  2.1]
      [ 3. ,  3.1]                   [ 3. ,  3.1]
      [ 4. ,  4.1]                   [ 4. ,  4.1]
      [ 5. ,  5.1]
      [ 6. ,  6.1]                   [ 4. ,  4.1]
      [ 7. ,  7.1]                   [ 5. ,  5.1]
      [ 8. ,  8.1]                   [ 6. ,  6.1]
      [ 9. ,  9.1]                   [ 7. ,  7.1]
      [10. , 10.1]
                                     [ 7. ,  7.1]
                                     [ 8. ,  8.1]
                                     [ 9. ,  9.1]
                                     [10. , 10.1]
      Args:
        us: Controls
      Returns:
        Controls organized into per-interval arrays
      """
      # Each interval keeps its interior controls and also repeats the first control of the next interval
      new_controls = jnp.hstack(
        [us[:-1].reshape(hp.intervals, midpoints_const * hp.controls_per_interval, control_shape),
         us[::midpoints_const * hp.controls_per_interval][1:][:, jnp.newaxis]])
      # Needed for single shooting
      if len(new_controls.shape) == 3 and new_controls.shape[2] == 1:
        new_controls = new_controls.squeeze(axis=2)
      return new_controls
    def reorganize_times(ts):
      """
      Reorganize times into per-interval arrays
      Args:
        ts: Times
      Returns:
        Times organized into per-interval arrays
      """
      new_times = jnp.hstack([ts[:-1].reshape(hp.intervals, hp.controls_per_interval),
                              ts[::hp.controls_per_interval][1:][:, jnp.newaxis]])
      return new_times
    def parametrized_objective(params: Params, variables: jnp.ndarray) -> float:
      """Like `objective`, but integrating the parametrized augmented dynamics closed over `params`."""
      xs, us = unravel(variables)
      reshaped_controls = reorganize_controls(us)
      t = jnp.linspace(0., system.T, num=num_steps + 1)
      t = reorganize_times(t)
      # Append a zero running-cost column to each interval's starting state
      starting_xs_and_costs = jnp.hstack([xs[:-1], jnp.zeros(len(xs[:-1])).reshape(-1, 1)])
      def dynamics(x_and_c: jnp.ndarray, u: Control, t: Timestep):
        return parametrized_augmented_dynamics(params, x_and_c, u, t)
      # Integrate cost in parallel
      states_and_costs, _ = integrate_in_parallel(
        dynamics, starting_xs_and_costs, reshaped_controls,
        step_size, hp.controls_per_interval, t, hp.integration_method)
      costs = jnp.sum(states_and_costs[:, -1])
      if system.terminal_cost:
        last_augmented_state = states_and_costs[-1]
        costs += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
      return costs
    def objective(variables: jnp.ndarray) -> float:
      """
      Calculate the objective of a trajectory
      Args:
        variables: Raveled states and controls
      Returns:
        The objective of the trajectory
      """
      # print("dynamics are", system.dynamics)
      # The commented code runs faster, but only does a linear interpolation for cost.
      # Better to have the interpolation match the integration scheme,
      # and just use Euler / Heun if we need shooting to be faster
      # xs, us = unravel(variables)
      # t = jnp.linspace(0, system.T, num=N_x+1)[:-1]  # Support cost function with dependency on t
      # t = jnp.repeat(t, hp.controls_per_interval)
      # _, x = integrate(system.dynamics, system.x_0, u, h_u, N_u)
      # x = x[:-1]
      # if system.terminal_cost:
      #   return jnp.sum(system.terminal_cost_fn(x[-1], u[-1])) + h_u * jnp.sum(vmap(system.cost)(x, u, t))
      # else:
      #   return h_u * jnp.sum(vmap(system.cost)(x, u, t))
      # ---
      xs, us = unravel(variables)
      reshaped_controls = reorganize_controls(us)
      t = jnp.linspace(0., system.T, num=num_steps + 1)
      t = reorganize_times(t)
      # Append a zero running-cost column to each interval's starting state
      starting_xs_and_costs = jnp.hstack([xs[:-1], jnp.zeros(len(xs[:-1])).reshape(-1, 1)])
      # Integrate cost in parallel
      states_and_costs, _ = integrate_in_parallel(
        augmented_dynamics, starting_xs_and_costs, reshaped_controls,
        step_size, hp.controls_per_interval, t, hp.integration_method)
      costs = jnp.sum(states_and_costs[:, -1])
      if system.terminal_cost:
        last_augmented_state = states_and_costs[-1]
        costs += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
      return costs
    def parametrized_constraints(params: Params, variables: jnp.ndarray) -> jnp.ndarray:
      """
      Calculate the constraint violations of a trajectory
      Args:
        variables: Raveled states and controls
        params: Dict of parameters for the model
      Returns:
        Constraint violations of trajectory
      """
      def dynamics(x_t: jnp.ndarray, u_t: jnp.ndarray):
        return system.parametrized_dynamics(params, x_t, u_t)
      xs, us = unravel(variables)
      # Defects: integrated end-of-interval states minus the next interval's decision-variable states
      px, _ = integrate_time_independent_in_parallel(dynamics, xs[:-1], reorganize_controls(us), step_size,
                                                     hp.controls_per_interval, hp.integration_method)
      return jnp.ravel(px - xs[1:])
    def constraints(variables: jnp.ndarray) -> jnp.ndarray:
      """
      Calculate the constraint violations of a trajectory
      Args:
        variables: Raveled states and controls
      Returns:
        Constraint violations of trajectory
      """
      xs, us = unravel(variables)
      # Defects: integrated end-of-interval states minus the next interval's decision-variable states
      px, _ = integrate_time_independent_in_parallel(system.dynamics, xs[:-1], reorganize_controls(us), step_size,
                                                     hp.controls_per_interval, hp.integration_method)
      return jnp.ravel(px - xs[1:])
    ############################
    # State and Control Bounds #
    ############################
    # State decision variables at every node
    x_bounds = np.zeros((hp.intervals + 1, system.bounds.shape[0] - control_shape, 2))
    x_bounds[:, :, :] = system.bounds[:-control_shape]
    # Starting state
    x_bounds[0, :, :] = jnp.expand_dims(system.x_0, 1)
    # Ending state
    if system.x_T is not None:
      for i in range(len(system.x_T)):
        if system.x_T[i] is not None:
          x_bounds[-1, i, :] = system.x_T[i]
    # Reshape for call to 'minimize'
    x_bounds = x_bounds.reshape((-1, 2))
    # Control decision variables at every node, and if RK4, also at midpoints
    u_bounds = np.empty(((midpoints_const * num_steps + 1) * control_shape, 2))  # Include midpoints too
    for i in range(control_shape, 0, -1):
      u_bounds[(control_shape - i) * (midpoints_const * num_steps + 1):(control_shape - i + 1) * (
              midpoints_const * num_steps + 1)] = system.bounds[-i]
    # Reshape for call to 'minimize'
    u_bounds = u_bounds.reshape((-1, 2))
    # print("u bounds", u_bounds)
    # Stack all bounds together for the NLP solver
    bounds = jnp.vstack((x_bounds, u_bounds))
    self.x_bounds, self.u_bounds = x_bounds, u_bounds
    super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
                     bounds, guess, unravel)
| 11,796 | 41.283154 | 120 | py |
myriad | myriad-main/myriad/trajectory_optimizers/collocation/hermite_simpson.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
from typing import Tuple
from myriad.config import Config, HParams
from myriad.custom_types import Control, Controls, Cost, DState, DStates, Params, State, States, Timestep
from myriad.systems import FiniteHorizonControlSystem
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
class HermiteSimpsonCollocationOptimizer(TrajectoryOptimizer):
  def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem) -> None:
    """
    An optimizer that uses direct Hermite-Simpson collocation.
    For reference, see https://epubs.siam.org/doi/10.1137/16M1062569.
    Note that we are keeping the knot points and the midpoints together
    in one big array, instead of separating them. This improves compatibility
    with the other trajectory_optimizers.
    Args:
      hp: Hyperparameters
      cfg: Additional hyperparameters
      system: The system on which to perform the optimization
    """
    interval_duration = system.T / hp.intervals
    state_shape = system.x_0.shape[0]
    # `system.bounds` stacks state bounds first, control bounds last
    control_shape = system.bounds.shape[0] - state_shape
    ###########################
    # State and Control Guess #
    ###########################
    # Initial guess for controls (knot points and midpoints interleaved: 2 * intervals + 1 values)
    u_guess = jnp.zeros((2 * hp.intervals + 1, control_shape))
    # Initial guess for state
    # NOTE(review): unlike the other optimizers here, this interpolates with x_T directly —
    # it appears to assume every entry of x_T is a number; an x_T with some None entries
    # (as handled in the shooting optimizer) would break jnp.linspace. Confirm callers.
    if system.x_T is not None:
      x_guess = jnp.linspace(system.x_0, system.x_T, num=2 * hp.intervals + 1)
    else:
      x_guess = jnp.ones(shape=(2 * hp.intervals + 1, state_shape)) * 0.1
    initial_variables = (x_guess, u_guess)
    guess, unravel_decision_variables = ravel_pytree(initial_variables)
    self.x_guess, self.u_guess = x_guess, u_guess
    ############################
    # State and Control Bounds #
    ############################
    # Bounds for states
    x_bounds = np.zeros((2 * hp.intervals + 1, system.bounds.shape[0] - control_shape, 2))
    x_bounds[:, :, :] = system.bounds[:-control_shape]
    # Starting state
    x_bounds[0, :, :] = jnp.expand_dims(system.x_0, 1)
    # Ending state
    if system.x_T is not None:
      for i in range(len(system.x_T)):
        if system.x_T[i] is not None:
          x_bounds[-1, i, :] = system.x_T[i]
    # Reshape for call to 'minimize'
    x_bounds = x_bounds.reshape((-1, 2))
    # Bounds for controls
    u_bounds = np.empty(((2 * hp.intervals + 1) * control_shape, 2))  # Include midpoints too
    for i in range(control_shape, 0, -1):
      u_bounds[(control_shape - i) * (2 * hp.intervals + 1):(control_shape - i + 1) * (
              2 * hp.intervals + 1)] = system.bounds[-i]
    # Reshape for call to 'minimize'
    u_bounds = u_bounds.reshape((-1, 2))
    # Stack all bounds together for the NLP solver
    bounds = jnp.vstack((x_bounds, u_bounds))
    self.x_bounds, self.u_bounds = x_bounds, u_bounds
    # Helper function
    def get_start_and_next_states_and_controls(variables: jnp.ndarray) -> Tuple[States, States, States,
                                                                                Controls, Controls, Controls]:
      """
      Extracts start, mid, and ending arrays of decision variables
      Args:
        variables: Raveled state and control variables
      Returns:
        (start xs, mid xs, end xs, start us, mid us, end us)
      """
      xs, us = unravel_decision_variables(variables)
      # States: even indices are knot points, odd indices are midpoints
      knot_point_xs = xs[::2]
      start_xs = knot_point_xs[:-1]
      end_xs = knot_point_xs[1:]
      mid_point_xs = xs[1::2]
      # Controls
      knot_point_us = us[::2]
      start_us = knot_point_us[:-1]
      end_us = knot_point_us[1:]
      mid_point_us = us[1::2]
      return start_xs, mid_point_xs, end_xs, start_us, mid_point_us, end_us
    # Calculates midpoint constraint on-the-fly
    def hs_defect(state: State, mid_state: State, next_state: State,
                  control: Control, mid_control: Control, next_control: Control) -> DState:
      """
      Hermite-Simpson collocation constraints
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval
        next_control: Control at end of interval
      Returns:
        Hermite-Simpson defect of the interval
      """
      # Simpson quadrature of the dynamics over the interval must match the state increment
      rhs = next_state - state
      lhs = (interval_duration / 6) * (system.dynamics(state, control)
                                       + 4 * system.dynamics(mid_state, mid_control)
                                       + system.dynamics(next_state, next_control))
      return rhs - lhs
    # Calculates midpoint constraint on-the-fly
    def parametrized_hs_defect(params: Params,
                               state: State, mid_state: State, next_state: State,
                               control: Control, mid_control: Control, next_control: Control) -> DState:
      """
      Hermite-Simpson collocation constraints
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval
        next_control: Control at end of interval
        params: Custom model parameters
      Returns:
        Hermite-Simpson defect of the interval
      """
      rhs = next_state - state
      lhs = (interval_duration / 6) * (system.parametrized_dynamics(params, state, control)
                                       + 4 * system.parametrized_dynamics(params, mid_state, mid_control)
                                       + system.parametrized_dynamics(params, next_state, next_control))
      return rhs - lhs
    def hs_interpolation(state: State, mid_state: State, next_state: State,
                         control: Control, mid_control: Control, next_control: Control) -> DState:
      """
      Calculate Hermite-Simpson interpolation constraints
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval (unused)
        next_control: Control at end of interval
      Returns:
        Interpolation constraint
      """
      # Midpoint state must lie on the Hermite interpolant of the two knot points
      return (mid_state
              - (1 / 2) * (state + next_state)
              - (interval_duration / 8) * (system.dynamics(state, control)
                                           - system.dynamics(next_state, next_control)))
    def parametrized_hs_interpolation(params: Params,
                                      state: State, mid_state: State, next_state: State,
                                      control: Control, mid_control: Control, next_control: Control) -> DState:
      """
      Calculate Hermite-Simpson interpolation constraints
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval (unused)
        next_control: Control at end of interval
        params: Custom model parameters
      Returns:
        Interpolation constraint
      """
      return (mid_state
              - (1 / 2) * (state + next_state)
              - (interval_duration / 8) * (system.parametrized_dynamics(params, state, control)
                                           - system.parametrized_dynamics(params, next_state, next_control)))
    # This is the "J" from the tutorial (6.5)
    def hs_cost(state: State, mid_state: State, next_state: State,
                control: Control, mid_control: Control, next_control: Control,
                start_time: Timestep, mid_time: Timestep, next_time: Timestep) -> Cost:
      """
      Calculate the Hermite-Simpson cost.
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval
        next_control: Control at end of interval
        start_time: Time at start of interval
        mid_time: Time at midpoint of interval
        next_time: Time at end of interval
      Returns:
        Hermite-Simpson cost of interval
      """
      # Simpson quadrature of the running cost over the interval
      return (interval_duration / 6) * (system.cost(state, control, start_time)
                                        + 4 * system.cost(mid_state, mid_control, mid_time)
                                        + system.cost(next_state, next_control, next_time))
    def parametrized_hs_cost(params: Params,
                             state: State, mid_state: State, next_state: State,
                             control: Control, mid_control: Control, next_control: Control,
                             start_time: Timestep, mid_time: Timestep, next_time: Timestep) -> Cost:
      """
      Calculate the Hermite-Simpson cost.
      Args:
        state: State at start of interval
        mid_state: State at midpoint of interval
        next_state: State at end of interval
        control: Control at start of interval
        mid_control: Control at midpoint of interval
        next_control: Control at end of interval
        start_time: Time at start of interval
        mid_time: Time at midpoint of interval
        next_time: Time at end of interval
        params: Custom model parameters
      Returns:
        Hermite-Simpson cost of interval
      """
      return (interval_duration / 6) * (system.parametrized_cost(params, state, control, start_time)
                                        + 4 * system.parametrized_cost(params, mid_state, mid_control, mid_time)
                                        + system.parametrized_cost(params, next_state, next_control, next_time))
    #######################
    # Cost and Constraint #
    #######################
    def objective(variables: jnp.ndarray) -> Cost:
      """
      Calculate the Hermite-Simpson objective for this trajectory
      Args:
        variables: Raveled states and controls
      Returns:
        Objective of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      all_times = jnp.linspace(0, system.T, num=2 * hp.intervals + 1)  # Support cost function with dependency on t
      start_and_end_times = all_times[::2]
      start_times = start_and_end_times[:-1]
      end_times = start_and_end_times[1:]
      mid_times = all_times[1::2]
      return jnp.sum(vmap(hs_cost)(*unraveled_vars, start_times, mid_times, end_times))
    def parametrized_objective(params: Params, variables: jnp.ndarray) -> Cost:
      """
      Calculate the Hermite-Simpson objective for this trajectory
      Args:
        variables: Raveled states and controls
        params: Custom model parameters
      Returns:
        Objective of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      all_times = jnp.linspace(0, system.T, num=2 * hp.intervals + 1)  # Support cost function with dependency on t
      start_and_end_times = all_times[::2]
      start_times = start_and_end_times[:-1]
      end_times = start_and_end_times[1:]
      mid_times = all_times[1::2]
      # in_axes broadcasts `params` across intervals while mapping over the decision variables
      return jnp.sum(vmap(parametrized_hs_cost, in_axes=(None, 0, 0, 0, 0, 0, 0))(params,
                                                                                  *unraveled_vars, start_times,
                                                                                  mid_times, end_times))
    # TODO: test to make sure this actually works ^
    def hs_equality_constraints(variables: jnp.ndarray) -> DStates:
      """
      Calculate the equality constraint violations for this trajectory (does not include midpoint constraints)
      Args:
        variables: Raveled states and controls
      Returns:
        Equality constraint violations of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      return jnp.ravel(vmap(hs_defect)(*unraveled_vars))
    def parametrized_hs_equality_constraints(params: Params, variables: jnp.ndarray) -> DStates:
      """
      Calculate the equality constraint violations for this trajectory (does not include midpoint constraints)
      Args:
        variables: Raveled states and controls
        params: Custom model parameters
      Returns:
        Equality constraint violations of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      return jnp.ravel(vmap(parametrized_hs_defect, in_axes=(None, 0, 0, 0, 0, 0, 0))(params, *unraveled_vars))
    def hs_interpolation_constraints(variables: jnp.ndarray) -> DStates:
      """
      Calculate the midpoint constraint violations for this trajectory
      Args:
        variables: Raveled states and controls
      Returns:
        Midpoint constraint violations of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      return jnp.ravel(vmap(hs_interpolation)(*unraveled_vars))
    def parametrized_hs_interpolation_constraints(params: Params, variables: jnp.ndarray) -> DStates:
      """
      Calculate the midpoint constraint violations for this trajectory
      Args:
        variables: Raveled states and controls
        params: Custom model parameters
      Returns:
        Midpoint constraint violations of trajectory
      """
      unraveled_vars = get_start_and_next_states_and_controls(variables)
      return jnp.ravel(vmap(parametrized_hs_interpolation, in_axes=(None, 0, 0, 0, 0, 0, 0))(params, *unraveled_vars))
    def constraints(variables: jnp.ndarray) -> DStates:
      """
      Calculate all constraint violations for this trajectory
      Args:
        variables: Raveled states and controls
      Returns:
        All constraint violations of trajectory
      """
      equality_defects = hs_equality_constraints(variables)
      interpolation_defects = hs_interpolation_constraints(variables)
      return jnp.hstack((equality_defects, interpolation_defects))
    def parametrized_constraints(params: Params, variables: jnp.ndarray) -> DStates:
      """
      Calculate all constraint violations for this trajectory
      Args:
        variables: Raveled states and controls
        params: Custom model parameters
      Returns:
        All constraint violations of trajectory
      """
      equality_defects = parametrized_hs_equality_constraints(params, variables)
      interpolation_defects = parametrized_hs_interpolation_constraints(params, variables)
      return jnp.hstack((equality_defects, interpolation_defects))
    super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
                     bounds, guess, unravel_decision_variables)
| 15,045 | 41.744318 | 118 | py |
myriad | myriad-main/myriad/trajectory_optimizers/collocation/trapezoidal.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
from myriad.config import Config, HParams
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep, DStates
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
from myriad.systems import FiniteHorizonControlSystem
from myriad.utils import integrate_time_independent
class TrapezoidalCollocationOptimizer(TrajectoryOptimizer):
  def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem) -> None:
    """
    An optimizer that uses direct trapezoidal collocation.
    For reference, see https://epubs.siam.org/doi/10.1137/16M1062569
    Args:
      hp: Hyperparameters
      cfg: Additional hyperparameters
      system: The system on which to perform the optimization
    """
    num_intervals = hp.intervals        # Segments
    h = system.T / num_intervals        # Segment length
    state_shape = system.x_0.shape[0]
    # Rows of system.bounds hold state bounds first, then control bounds.
    control_shape = system.bounds.shape[0] - state_shape

    ###########################
    # State and Control Guess #
    ###########################
    u_guess = jnp.zeros((num_intervals + 1, control_shape))
    if system.x_T is not None:
      # We need to handle the cases where a terminal bound is specified only for some state variables, not all
      row_guesses = []
      for i in range(0, len(system.x_T)):
        if system.x_T[i] is not None:
          # Terminal value known: interpolate linearly from start to goal
          row_guess = jnp.linspace(system.x_0[i], system.x_T[i], num=num_intervals + 1).reshape(-1, 1)
        else:
          # No terminal value: roll the dynamics forward under the zero-control guess
          _, row_guess = integrate_time_independent(system.dynamics, system.x_0,
                                                    u_guess, h, num_intervals, hp.integration_method)
          row_guess = row_guess[:, i].reshape(-1, 1)
        row_guesses.append(row_guess)
      x_guess = jnp.hstack(row_guesses)
    else:  # no final state requirement
      _, x_guess = integrate_time_independent(system.dynamics, system.x_0,
                                              u_guess, h, num_intervals, hp.integration_method)
    guess, unravel_decision_variables = ravel_pytree((x_guess, u_guess))
    self.x_guess, self.u_guess = x_guess, u_guess

    ############################
    # State and Control Bounds #
    ############################
    # Control bounds: one contiguous run of (num_intervals + 1) rows per control dim
    u_bounds = np.empty(((num_intervals + 1) * control_shape, 2))
    for i in range(control_shape, 0, -1):
      u_bounds[(control_shape - i) * (num_intervals + 1)
               :(control_shape - i + 1) * (num_intervals + 1)] = system.bounds[-i]
    # Reshape to work with NLP solver
    u_bounds = u_bounds.reshape((-1, 2))

    # State bounds
    x_bounds = np.empty((num_intervals + 1, system.bounds.shape[0] - control_shape, 2))
    x_bounds[:, :, :] = system.bounds[:-control_shape]
    # Pin the initial state exactly
    x_bounds[0, :, :] = np.expand_dims(system.x_0, 1)
    if system.x_T is not None:
      # BUG FIX: the terminal-state bound belongs to the LAST knot point,
      # i.e. row -1.  The previous index, -control_shape, only coincides
      # with the last row when the system has exactly one control variable.
      # NOTE(review): if some entries of x_T are None (partially specified
      # terminal state, as handled in the guess above), this assignment
      # produces an object-dtype column — confirm against callers.
      x_bounds[-1, :, :] = np.expand_dims(system.x_T, 1)
    # Reshape to work with NLP solver
    x_bounds = x_bounds.reshape((-1, 2))

    # Put control and state bounds together
    bounds = jnp.vstack((x_bounds, u_bounds))
    self.x_bounds, self.u_bounds = x_bounds, u_bounds
def trapezoid_cost(x_t1: State, x_t2: State,
                   u_t1: Control, u_t2: Control,
                   t1: Timestep, t2: Timestep) -> Cost:
  """
  Trapezoid-rule approximation of the running cost over one interval.

  Args:
    x_t1: State at start of interval
    x_t2: State at end of interval
    u_t1: Control at start of interval
    u_t2: Control at end of interval
    t1: Time at start of interval
    t2: Time at end of interval
  Returns:
    Trapezoid cost of the interval
  """
  endpoint_sum = system.cost(x_t1, u_t1, t1) + system.cost(x_t2, u_t2, t2)
  return (h / 2) * endpoint_sum
def parametrized_trapezoid_cost(params: Params,
                                x_t1: State, x_t2: State,
                                u_t1: Control, u_t2: Control,
                                t1: Timestep, t2: Timestep) -> Cost:
  """
  Trapezoid-rule approximation of the running cost over one interval,
  evaluated with externally supplied model parameters.

  Args:
    params: Custom model parameters
    x_t1: State at start of interval
    x_t2: State at end of interval
    u_t1: Control at start of interval
    u_t2: Control at end of interval
    t1: Time at start of interval
    t2: Time at end of interval
  Returns:
    Trapezoid cost of the interval
  """
  endpoint_sum = (system.parametrized_cost(params, x_t1, u_t1, t1)
                  + system.parametrized_cost(params, x_t2, u_t2, t2))
  return (h / 2) * endpoint_sum
def objective(variables: jnp.ndarray) -> Cost:
  """
  Total trajectory cost: sum of per-interval trapezoid costs, plus the
  terminal cost when the system defines one.

  Args:
    variables: Raveled state and decision variables
  Returns:
    The sum of the trapezoid costs across the whole trajectory
  """
  states, controls = unravel_decision_variables(variables)
  # Support cost functions with an explicit dependency on t
  times = jnp.linspace(0, system.T, num=num_intervals + 1)
  interval_costs = vmap(trapezoid_cost)(states[:-1], states[1:],
                                        controls[:-1], controls[1:],
                                        times[:-1], times[1:])
  total = jnp.sum(interval_costs)
  if system.terminal_cost:
    total = total + jnp.sum(system.terminal_cost_fn(states[-1], controls[-1]))
  return total
def parametrized_objective(params: Params, variables: jnp.ndarray) -> Cost:
  """
  Total trajectory cost under externally supplied model parameters.

  Args:
    params: Custom model parameters
    variables: Raveled state and decision variables
  Returns:
    The sum of the trapezoid costs across the whole trajectory
  """
  states, controls = unravel_decision_variables(variables)
  # Support cost functions with an explicit dependency on t
  times = jnp.linspace(0, system.T, num=num_intervals + 1)
  batched_cost = vmap(parametrized_trapezoid_cost, in_axes=(None, 0, 0, 0, 0, 0, 0))
  total = jnp.sum(batched_cost(params,
                               states[:-1], states[1:],
                               controls[:-1], controls[1:],
                               times[:-1], times[1:]))
  if system.terminal_cost:
    total = total + jnp.sum(system.terminal_cost_fn(states[-1], controls[-1]))
  return total
# TODO: should the terminal cost function also take parameters?
# probably yes... (will need to fix this in shooting and hs too then)
def trapezoid_defect(x_t1: State, x_t2: State, u_t1: Control, u_t2: Control) -> DState:
  """
  Trapezoid-rule collocation defect for one interval: the difference
  between the integrated dynamics and the actual state change.

  Args:
    x_t1: State at start of interval
    x_t2: State at end of interval
    u_t1: Control at start of interval
    u_t2: Control at end of interval
  Returns:
    Trapezoid defect of the interval
  """
  integrated_dynamics = (h / 2) * (system.dynamics(x_t1, u_t1) + system.dynamics(x_t2, u_t2))
  actual_change = x_t2 - x_t1
  return integrated_dynamics - actual_change
def parametrized_trapezoid_defect(params: Params,
                                  x_t1: State, x_t2: State,
                                  u_t1: Control, u_t2: Control) -> DState:
  """
  Trapezoid-rule collocation defect for one interval, evaluated with
  externally supplied model parameters.

  Args:
    params: Custom model parameters
    x_t1: State at start of interval
    x_t2: State at end of interval
    u_t1: Control at start of interval
    u_t2: Control at end of interval
  Returns:
    Trapezoid defect of the interval
  """
  integrated_dynamics = (h / 2) * (system.parametrized_dynamics(params, x_t1, u_t1)
                                   + system.parametrized_dynamics(params, x_t2, u_t2))
  actual_change = x_t2 - x_t1
  return integrated_dynamics - actual_change
def constraints(variables: jnp.ndarray) -> DStates:
  """
  Collocation constraints: trapezoid defect of every interval.

  Args:
    variables: Raveled state and decision variables
  Returns:
    An array of the defects of the whole trajectory
  """
  states, controls = unravel_decision_variables(variables)
  defects = vmap(trapezoid_defect)(states[:-1], states[1:], controls[:-1], controls[1:])
  return jnp.ravel(defects)
def parametrized_constraints(params: Params, variables: jnp.ndarray) -> DStates:
  """
  Collocation constraints under externally supplied model parameters:
  trapezoid defect of every interval.

  Args:
    params: Custom model parameters
    variables: Raveled state and decision variables
  Returns:
    An array of the defects of the whole trajectory
  """
  x, u = unravel_decision_variables(variables)
  # BUG FIX: parametrized_trapezoid_defect takes 5 arguments
  # (params, x_t1, x_t2, u_t1, u_t2), so in_axes must have 5 entries.
  # The previous 7-entry tuple (copied from the 7-argument Hermite-Simpson
  # interpolation function) makes vmap raise at call time.
  batched_defect = vmap(parametrized_trapezoid_defect, in_axes=(None, 0, 0, 0, 0))
  return jnp.ravel(batched_defect(params, x[:-1], x[1:], u[:-1], u[1:]))
# Register the assembled objective/constraint functions, bounds and initial
# guess with the generic TrajectoryOptimizer base class.
super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
                 bounds, guess, unravel_decision_variables)
| 8,760 | 40.719048 | 110 | py |
myriad | myriad-main/myriad/experiments/e2e_sysid.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config, SystemType, NLPSolverType
from myriad.custom_types import Params, DParams
from myriad.defaults import learning_rates, param_guesses
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot
from myriad.systems import get_name
from myriad.utils import integrate_time_independent, get_state_trajectory_and_cost, get_defect
# Number of extra-gradient primal/dual steps taken between successive
# parameter updates (and unrolled through when differentiating).
NUM_UNROLLED = 10
# NOTE: we have a choice to make about whether we consider only
# a single trajectory at a time, or if we have a whole
# batch of trajectories (each with its own start state, and
# each with its own optimal controls) from which we sample
# at each iteration of the algorithm.
def run_endtoend(hp, cfg, num_epochs=10_000):
  """
  End-to-end system identification: learn model parameters by
  differentiating an imitation loss through unrolled extra-gradient
  trajectory-optimization steps, then plot the results.

  Args:
    hp: Hyperparameters (must name a system present in `param_guesses`)
    cfg: Additional hyperparameters
    num_epochs: Number of outer training iterations
  """
  if hp.system not in param_guesses:
    print("We do not currently support that kind of system for sysid. Exiting...")
    return
  # Per-system output directories (created if missing)
  data_path = f'datasets/{hp.system.name}/e2e_sysid/'
  Path(data_path).mkdir(parents=True, exist_ok=True)
  true_us_name = 'true_opt_us'
  true_xs_name = 'true_opt_xs'
  params_path = f'params/{hp.system.name}/e2e_sysid/'
  Path(params_path).mkdir(parents=True, exist_ok=True)
  params_name = f'e2e_parametric.p'
  plots_path = f'plots/{hp.system.name}/e2e_sysid/'
  Path(plots_path).mkdir(parents=True, exist_ok=True)
  guesses_path = f'intermediate_guesses/{hp.system.name}/e2e_sysid/'
  Path(guesses_path).mkdir(parents=True, exist_ok=True)
  losses_path = f'losses/{hp.system.name}/e2e_sysid/'
  Path(losses_path).mkdir(parents=True, exist_ok=True)
  true_system = hp.system()
  optimizer = get_optimizer(hp, cfg, true_system)
  # Get the true optimal controls (and state),
  # which we will try to imitate
  try:
    with open(data_path + true_us_name, 'rb') as myfile:
      true_opt_us = jnp.array(pkl.load(myfile))
    with open(data_path + true_xs_name, 'rb') as myfile:
      true_opt_xs = jnp.array(pkl.load(myfile))
    print("successfully loaded the saved optimal trajectory")
    # plt.plot(true_opt_us)
    # plt.plot(true_opt_xs)
    # plt.show()
  except Exception as e:
    # No cached optimal trajectory: solve with the TRUE model and cache it
    print("We haven't saved the optimal trajectory for this system yet, so we'll do that now")
    true_solution = optimizer.solve()
    true_opt_us = true_solution['u']
    print("true opt us", true_opt_us.shape)
    _, true_opt_xs = integrate_time_independent(
      true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
    print("true opt xs", true_opt_xs.shape)
    with open(data_path + true_us_name, 'wb') as myfile:
      pkl.dump(true_opt_us, myfile)
    with open(data_path + true_xs_name, 'wb') as myfile:
      pkl.dump(true_opt_xs, myfile)
  try:
    params = pkl.load(open(params_path + params_name, 'rb'))
    print("It seems we've already trained for this system, so we'll go straight to evaluation.")
  except FileNotFoundError as e:
    print("unable to find the params, so we'll guess "
          "and then optimize and save")
    # Make a guess for our parameters
    params = param_guesses[hp.system]
    # solution_guess = optimizer.solve_with_params(params)
    # xs_and_us = solution_guess['xs_and_us']
    # lmbdas = solution_guess['lambda']
    xs_and_us = optimizer.guess
    lmbdas = jnp.zeros_like(optimizer.constraints(optimizer.guess))
    # Save the initial parameter guess so we can reset to it later
    original_xs_and_us = jnp.array(xs_and_us)
    original_lmbdas = jnp.array(lmbdas)
    # Parameter optimizer
    opt = optax.adam(hp.adam_lr)  # 1e-4
    opt_state = opt.init(params)
    # Control/state/duals optimizer (extra-gradient step sizes)
    eta_x = hp.eta_x
    eta_v = hp.eta_lmbda
    if hp.system in learning_rates:
      # Per-system overrides of the default step sizes
      eta_x = learning_rates[hp.system]['eta_x']
      eta_v = learning_rates[hp.system]['eta_v']
    bounds = optimizer.bounds
@jax.jit
def lagrangian(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> float:
  """Scalar Lagrangian: parametrized objective plus dual-weighted constraint violations."""
  objective_value = optimizer.parametrized_objective(params, xs_and_us)
  constraint_penalty = lmbdas @ optimizer.parametrized_constraints(params, xs_and_us)
  return objective_value + constraint_penalty
@jax.jit
def step(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> Tuple[jnp.ndarray, jnp.ndarray]:
  """One extra-gradient update: clipped primal step, then dual ascent step."""
  grad_wrt_primals = jax.grad(lagrangian, argnums=0)
  # Extrapolation point, then the actual update, both projected onto the box bounds
  x_bar = jnp.clip(x - eta_x * grad_wrt_primals(x, lmbda, params),
                   a_min=bounds[:, 0], a_max=bounds[:, 1])
  x_new = jnp.clip(x - eta_x * grad_wrt_primals(x_bar, lmbda, params),
                   a_min=bounds[:, 0], a_max=bounds[:, 1])
  lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x_new, lmbda, params)
  return x_new, lmbda_new
@jax.jit
def step_x(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
  """Primal half of the extra-gradient update (duals held fixed)."""
  grad_wrt_primals = jax.grad(lagrangian, argnums=0)
  x_bar = jnp.clip(x - eta_x * grad_wrt_primals(x, lmbda, params),
                   a_min=bounds[:, 0], a_max=bounds[:, 1])
  return jnp.clip(x - eta_x * grad_wrt_primals(x_bar, lmbda, params),
                  a_min=bounds[:, 0], a_max=bounds[:, 1])
@jax.jit
def step_lmbda(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
  """Dual half of the extra-gradient update (primals held fixed)."""
  return lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x, lmbda, params)
# Jacobians of a single primal / dual update with respect to
# (primals, duals, params), and with respect to params alone.
jac_x = jax.jit(jax.jacobian(step_x, argnums=(0, 1, 2)))
jac_lmbda = jax.jit(jax.jacobian(step_lmbda, argnums=(0, 1, 2)))
jac_x_p = jax.jit(jax.jacobian(step_x, argnums=2))
jac_lmbda_p = jax.jit(jax.jacobian(step_lmbda, argnums=2))
# Update the primals and duals using the current model,
# and also return the Jacobians of them with respect to the parameters.
@jax.jit
def many_steps_grad(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> DParams:
  """
  Forward-accumulate d(primals)/d(params) through NUM_UNROLLED
  extra-gradient steps, via the chain rule:
  z_{k+1} = dstep/dparams + dstep/dx @ zx_k + dstep/dlmbda @ zlmbda_k.
  Returns the accumulated primal sensitivity (same pytree structure as params).
  """
  # Initialize accumulators with the right pytree structure, zeroed out
  zx = jac_x_p(xs_and_us, lmbdas, params)
  zx = jax.tree_util.tree_map(lambda x: x * 0., zx)
  zlmbda = jac_lmbda_p(xs_and_us, lmbdas, params)
  zlmbda = jax.tree_util.tree_map(lambda x: x * 0., zlmbda)
  @jax.jit
  def body_fun(i, vars):
    xs_and_us, lmbdas, zx, zlmbda = vars
    # Propagate sensitivities through the primal update, then apply it
    dx, dlmbda, dp = jac_x(xs_and_us, lmbdas, params)
    x_part = jax.tree_util.tree_map(lambda el: dx @ el, zx)
    lmbda_part = jax.tree_util.tree_map(lambda el: dlmbda @ el, zlmbda)
    zx = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
    # zx = jax.tree_util.tree_multimap(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
    xs_and_us = step_x(xs_and_us, lmbdas, params)
    # Propagate sensitivities through the dual update, then apply it
    dx, dlmbda, dp = jac_lmbda(xs_and_us, lmbdas, params)
    x_part = jax.tree_util.tree_map(lambda el: dx @ el, zx)
    lmbda_part = jax.tree_util.tree_map(lambda el: dlmbda @ el, zlmbda)
    zlmbda = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
    # zlmbda = jax.tree_util.tree_multimap(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
    lmbdas = step_lmbda(xs_and_us, lmbdas, params)
    return xs_and_us, lmbdas, zx, zlmbda
  xs_and_us, lmbdas, zx, zlmbda = jax.lax.fori_loop(0, NUM_UNROLLED, body_fun, (xs_and_us, lmbdas, zx, zlmbda))
  return zx
# Imitation loss for the optimal controls
# def control_imitation_loss(params: Params, xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, epoch: int):
# for _ in range(NUM_UNROLLED):
# xs_and_us, lmbdas = step(xs_and_us, lmbdas, params)
# xs, us = optimizer.unravel(xs_and_us)
#
# diff = us - true_opt_us
# sq_diff = diff * diff
# long = jnp.mean(sq_diff, axis=1) # average all axes except time
# discount = (1 - 1 / (1 + jnp.exp(2 + 0.00001 * epoch))) ** jnp.arange(len(long))
# if hp.system in [SystemType.MOUNTAINCAR, SystemType.PENDULUM]:
# print("min discount", discount[-1])
# else:
# discount = 1.
# return jnp.mean(long * discount)
@jax.jit
def simple_imitation_loss(xs_and_us: jnp.ndarray, epoch):
  """
  Mean squared error between the current controls and the true optimal
  controls, with a per-timestep discount (for selected systems) that
  up-weights early timesteps and relaxes as `epoch` grows.
  """
  xs, us = optimizer.unravel(xs_and_us)
  diff = us - true_opt_us
  sq_diff = diff * diff
  long = jnp.mean(sq_diff, axis=1)  # average all axes except time
  discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
  if hp.system in [SystemType.BACTERIA, SystemType.MOUNTAINCAR, SystemType.CARTPOLE, SystemType.PENDULUM]:
    # NOTE(review): inside @jax.jit this print fires only at trace time and
    # shows a tracer, not a value — confirm this is intentional.
    print("min discount", discount[-1])
  else:
    discount = 1.
  return jnp.mean(long * discount)
@jax.jit
def lookahead_update(params: Params, opt_state: optax.OptState,
                     xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, epoch: int) -> Tuple[Params, optax.OptState]:
  """
  One Adam step on the model parameters: chain the imitation-loss gradient
  w.r.t. the primals through the unrolled-solver sensitivity d(primals)/d(params).
  """
  dx_dp = many_steps_grad(xs_and_us, lmbdas, params)
  dJ_dx = jax.grad(simple_imitation_loss)(xs_and_us, epoch)
  # dJ/dp = dJ/dx @ dx/dp, applied leaf-wise over the parameter pytree
  dJdp = jax.tree_util.tree_map(lambda x: dJ_dx @ x, dx_dp)
  updates, opt_state = opt.update(dJdp, opt_state)
  new_params = optax.apply_updates(params, updates)
  return new_params, opt_state
# Use these to record the guesses
ts = []
primal_guesses = []
dual_guesses = []
# Use this to record the losses
imitation_losses = []
print("starting guess of params", params)
save_and_reset_time = 1_000
record_things_time = 10
# BUG FIX: the original `for epoch in range(num_epochs)` loop tried to
# fast-forward with `epoch += save_and_reset_time; continue`, but
# reassigning a `for` loop variable has no effect on the next iteration,
# so the skip-ahead silently did nothing.  A `while` loop makes the
# fast-forward actually take effect.
epoch = 0
while epoch < num_epochs:
  if epoch % save_and_reset_time == 0:
    # Check if the next params already exist (in which case we go straight to them)
    try:
      cur_params_name = f'{epoch + save_and_reset_time}e2e_parametric.p'
      params = pkl.load(open(params_path + cur_params_name, 'rb'))
      print("It seems we've already trained up to the next epoch, so we'll go straight there")
      epoch += save_and_reset_time
      continue
    except FileNotFoundError as e:
      pass
    # Record more around the very start
    record_things_time = 1
    print("saving current params")
    pkl.dump(params, open(params_path + str(epoch) + params_name, 'wb'))
    print("saving guesses so far")
    pkl.dump(ts, open(guesses_path + str(epoch) + 'ts', 'wb'))
    pkl.dump(primal_guesses, open(guesses_path + str(epoch) + 'primals', 'wb'))
    pkl.dump(dual_guesses, open(guesses_path + str(epoch) + 'duals', 'wb'))
    print("saving imitation losses")
    pkl.dump(imitation_losses, open(losses_path + str(epoch) + '_losses', 'wb'))
    print("resetting guess")
    # Reset the guess to a different random small amount
    hp.key, subkey = jax.random.split(hp.key)
    optimizer = get_optimizer(hp, cfg, true_system)
    xs_and_us = optimizer.guess
    lmbdas = original_lmbdas
  if epoch % record_things_time == 0:
    # Only have high-density recording around the start of each guess
    if epoch >= 10:
      record_things_time = 50
    # Save the current primals and duals
    ts.append(epoch)
    primal_guesses.append(np.array(xs_and_us))
    dual_guesses.append(np.array(lmbdas))
    # Save the current imitation loss
    cur_loss = simple_imitation_loss(xs_and_us, epoch)
    imitation_losses.append(cur_loss)
    print("loss", cur_loss)
    print("params", params)
  # Take step(s) with the model
  for _ in range(NUM_UNROLLED):
    xs_and_us, lmbdas = step(xs_and_us, lmbdas, params)
  # Now update to prepare for next steps
  params, opt_state = lookahead_update(params, opt_state, xs_and_us, lmbdas, epoch)
  epoch += 1
print("Saving the final params", params)
pkl.dump(params, open(params_path + params_name, 'wb'))
print("Saving the final guesses")
pkl.dump(ts, open(guesses_path + str(num_epochs - 1) + 'ts', 'wb'))
pkl.dump(primal_guesses, open(guesses_path + str(num_epochs - 1) + 'primals', 'wb'))
pkl.dump(dual_guesses, open(guesses_path + str(num_epochs - 1) + 'duals', 'wb'))
print("Saving the final losses")
pkl.dump(imitation_losses, open(losses_path + str(num_epochs - 1) + 'losses', 'wb'))
#######################
# Imitation loss plot #
#######################
# Remember the interactive backend so it can be restored for the final plot
b = matplotlib.get_backend()
matplotlib.use("pgf")
matplotlib.rcParams.update({
  "pgf.texsystem": "pdflatex",
  'font.family': 'serif',
  'text.usetex': True,
  'pgf.rcfonts': False,
})
plt.rcParams["figure.figsize"] = (4, 3.3)
# Plot the imitation loss over time (params are already open, but putting this here for clarity)
params = pkl.load(open(params_path + params_name, 'rb'))
print("the params are", params)
losses = pkl.load(open(losses_path + str(num_epochs - 1) + 'losses', 'rb'))
ts = pkl.load(open(guesses_path + str(num_epochs - 1) + 'ts', 'rb'))
primal_guesses = pkl.load(open(guesses_path + str(num_epochs - 1) + 'primals', 'rb'))
# Plot the imitation loss over time
plt.plot(ts, losses)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('imitation loss')
plt.title("Imitation Loss")
plt.tight_layout()
plt.savefig(plots_path + f'imitation_loss.{cfg.file_extension}', bbox_inches='tight')
plt.close()
#####################
# Control loss plot #
#####################
# Plot the control performance over time
print("Plotting control performance over time")
true_state_trajectory, optimal_cost = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
# Roll out every recorded intermediate control sequence in parallel
parallel_get_state_trajectory_and_cost = jax.vmap(get_state_trajectory_and_cost, in_axes=(None, None, None, 0))
ar_primal_guesses = jnp.array(primal_guesses)
# parallel_unravel = jax.vmap(optimizer.unravel, in_axes=0)
# long_uus = jnp.array(long_uus)
# _, uus = parallel_unravel(ar_primal_guesses)
# NOTE: for some reason, the above approach stopped working with a jax update.
# Manually going through the loop works fine.
long_uus = []
for uu in ar_primal_guesses:
  long_uus.append(optimizer.unravel(uu)[1])
uus = jnp.array(long_uus)
xxs, cs = parallel_get_state_trajectory_and_cost(hp, true_system, true_system.x_0, uus)
# Dashed reference line at the cost achieved by the true model
plt.axhline(optimal_cost, color='grey', linestyle='dashed')
plt.plot(ts, cs)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('cost')
plt.title("Trajectory Cost")
plt.tight_layout()
plt.savefig(plots_path + f'control_performance.{cfg.file_extension}', bbox_inches='tight')
plt.close()
#######################
# Final planning plot #
#######################
# Plot the performance of planning with the final model
print("Plotting final planning performance")
# Fresh hyperparameters forcing the extra-gradient NLP solver for evaluation
hp = HParams(nlpsolver=NLPSolverType.EXTRAGRADIENT)
cfg = Config()
true_system = hp.system()
optimizer = get_optimizer(hp, cfg, true_system)
# Plan with the LEARNED parameters, then evaluate on the TRUE system
learned_solution = optimizer.solve_with_params(params)
learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
learned_defect = get_defect(true_system, learned_x)
true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
true_defect = get_defect(true_system, true_x)
plot(hp, true_system,
     data={'x': true_opt_xs,
           'other_x': learned_x,
           'u': true_opt_us,
           'other_u': learned_solution['u'],
           'cost': true_c,
           'other_cost': learned_c,
           'defect': true_defect,
           'other_defect': learned_defect},
     labels={'x': ' (true state from controls planned with true model)',
             'other_x': ' (true state from controls planned with learned model)',
             'u': ' (planned with true model)',
             'other_u': ' (planned with learned model)'},
     styles={'x': '-',
             'other_x': 'x-',
             'u': '-',
             'other_u': 'x-'},
     widths={'x': 3,
             'other_x': 1,
             'u': 3,
             'other_u': 1},
     save_as=plots_path + f'planning_with_model.{cfg.file_extension}',
     figsize=cfg.figsize)
#####################
# Decision var plot #
#####################
# Plot showing how the guess converges to the optimal trajectory
# Restore the interactive backend saved before the pgf plots
matplotlib.use(b)
plt.rcParams["figure.figsize"] = (7, 5.6)
print("Plotting convergence")
title = get_name(hp)
if title is not None:
  plt.suptitle(title)
plt.suptitle(r"Intermediate Trajectories" + r" $-$ " + title)
plt.subplot(2, 1, 1)
plt.grid()
# Plot intermediate states with transparency so density shows convergence
for xs in xxs:
  plt.plot(xs, color='orange', alpha=0.01)
plt.ylabel('state (x)')
plt.plot(true_opt_xs, label="true state from controls planned with true model", lw=3)
# Plot the final state curve
plt.plot(xxs[-1], 'x-', label="true state from final controls")
plt.legend(loc='upper right')
# Plot controls
plt.subplot(2, 1, 2)
plt.plot(true_opt_us, label="planned with true model", lw=3)
# Plot intermediate controls with transparency
for us in uus:
  plt.plot(us, color='orange', alpha=0.01)
# Plot the final control curve
plt.ylabel('control (u)')
plt.xlabel('time (s)')
plt.plot(uus[-1], 'x-', label="controls at the end of training")
plt.legend(loc='upper right')
plt.grid()
plt.tight_layout()
plt.savefig(plots_path + 'e2e_cool_plot.png', dpi=300, bbox_inches='tight')
# Script entry point: run end-to-end sysid with default hyperparameters.
if __name__ == "__main__":
  hp, cfg = HParams(), Config()
  run_endtoend(hp, cfg)
| 17,345 | 37.892377 | 116 | py |
myriad | myriad-main/myriad/experiments/node_mle_sysid.py | # (c) Nikolaus Howe 2021
from __future__ import annotations
import csv
import jax
import jax.numpy as jnp
import numpy as np
import pickle as pkl
from pathlib import Path
from myriad.config import Config, HParams, IntegrationMethod
from myriad.neural_ode.create_node import NeuralODE
from myriad.neural_ode.node_training import train
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot, plot_losses
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.utils import get_state_trajectory_and_cost, integrate_time_independent, sample_x_init
def run_node_mle_sysid(hp: HParams, cfg: Config) -> None:
  """
  Iterated MLE system identification with a neural ODE: over several
  experiments, (re)train the neural ODE on an augmented dataset (or load
  cached parameters), then compare planning/prediction against the true
  system and save plots and losses.
  """
  # Instantiate the neural ode. We'll keep updating its parameters
  # (either by training or by loading from save)
  node = NeuralODE(hp, cfg)
  true_opt = get_optimizer(hp, cfg, node.system)
  true_solution = true_opt.solve()
  learned_system = NodeSystem(node, node.system)
  learned_opt = get_optimizer(hp, cfg, learned_system)
  official_trained_for = 0
  for experiment_number in range(0, hp.num_experiments):
    print(f"### EXPERIMENT {experiment_number} ###")
    official_trained_for += hp.num_epochs
    actual_trained_for = official_trained_for  # overwrite if not exact (if we have the info)
    # Per-experiment file locations (created if missing)
    losses_path = f'losses/{hp.system.name}/node_mle_sysid/'
    Path(losses_path).mkdir(parents=True, exist_ok=True)
    losses_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                  f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.l'
    params_path = f'params/{hp.system.name}/node_mle_sysid/'
    Path(params_path).mkdir(parents=True, exist_ok=True)  # create the directory if it doesn't already exist
    params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                  f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.p'
    plots_path = f'plots/{hp.system.name}/node_mle_sysid/'
    progress_plots_path = f'plots/{hp.system.name}/node_mle_sysid/progress_plots/'
    Path(progress_plots_path).mkdir(parents=True, exist_ok=True)
    plots_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                 f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}'
    data_path = f'datasets/{hp.system.name}/node_mle_sysid/'
    Path(data_path).mkdir(parents=True, exist_ok=True)  # create the directory if it doesn't already exist
    data_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.d'
    try:
      node.load_params(params_path + params_name)
    except FileNotFoundError as e:
      # NOTE(review): adjacent string literals concatenate without a space,
      # so this prints "...train ourmodel..." — likely a missing trailing space.
      print("unable to find the params file, so we'll train our"
            "model to learn some, and then save them")
      # If the datasets already exist, then load it.
      # If it doesn't then we augment the dataset
      try:
        node.load_dataset(data_path + data_name)
      except FileNotFoundError as e:
        print("unable to find dataset for this experiment, so we'll make our own")
        # (unless it's the first one, in which case we use the
        # dataset which is already there)
        if experiment_number > 0:
          print("We will now augment the dataset. Currently, the train data are", node.train_data.shape)
          node.augment_datasets()
          print("After augmenting, the train data are", node.train_data.shape)
        # Save the dataset for the next time
        pkl.dump(node.full_data, open(data_path + data_name, 'wb'))
      # Now, we train on this dataset, until early stopping
      node.key, subkey = jax.random.split(node.key)
      # Perform the training
      end_epoch = train(node, save_as=progress_plots_path + plots_name, extension=cfg.file_extension)
      actual_trained_for = official_trained_for - node.hp.num_epochs + end_epoch
      # TODO: do we care how many epochs it trained for?
      # start_epoch += increment * node.train_size
      # Save the learned parameters
      node.save_params(params_path + params_name)
      # Save the losses for this experiment
      # print("saving train and val losses for experiment", experiment_number)
      with open(losses_path + losses_name, 'w') as f:
        write = csv.writer(f)
        for i, t in enumerate(node.losses['ts']):
          write.writerow([t, node.losses['train_loss'][i], node.losses['validation_loss'][i]])
if cfg.plot:
  #################
  # Planning plot #
  #################
  # Plan with the learned neural-ODE model, then evaluate on the true system
  learned_solution = learned_opt.solve_with_params(node.params)
  true_x, true_c = get_state_trajectory_and_cost(hp, node.system, node.system.x_0, true_solution['u'])
  if node.system.x_T is not None:
    # Terminal defect: only for state dimensions with a specified target
    true_defect = []
    for i, s in enumerate(true_x[-1]):
      if node.system.x_T[i] is not None:
        true_defect.append(s - node.system.x_T[i])
    true_defect = np.array(true_defect)
  else:
    true_defect = None
  learned_x, learned_c = get_state_trajectory_and_cost(hp, node.system, node.system.x_0, learned_solution['u'])
  if node.system.x_T is not None:
    learned_defect = []
    for i, s in enumerate(learned_x[-1]):
      if node.system.x_T[i] is not None:
        learned_defect.append(s - node.system.x_T[i])
    learned_defect = np.array(learned_defect)
  else:
    learned_defect = None
  planning_plot_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                       f'{hp.train_size}_{hp.val_size}_{hp.test_size}_' \
                       f'exp_{experiment_number}_planning.{cfg.file_extension}'
  plot(hp, node.system,
       data={'x': true_x,
             'other_x': learned_x,
             'u': true_solution['u'],
             'other_u': learned_solution['u'],
             'cost': true_c,
             'other_cost': learned_c,
             'defect': true_defect,
             'other_defect': learned_defect},
       labels={'x': ' (true state from controls planned with true model)',
               'other_x': ' (true state from controls planned with learned model)',
               'u': ' (planned with true model)',
               'other_u': ' (planned with learned model)'},
       styles={'x': '-',
               'other_x': 'x-',
               'u': '-',
               'other_u': 'x-'},
       widths={'x': 3,
               'other_x': 1,
               'u': 3,
               'other_u': 1},
       save_as=plots_path + planning_plot_name,
       figsize=cfg.figsize)
###############
# Losses plot #
###############
print("plotting losses for experiment", experiment_number)
losses_plot_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                   f'{hp.train_size}_{hp.val_size}_{hp.test_size}_' \
                   f'exp_{experiment_number}_training.{cfg.file_extension}'
if cfg.plot:
  plot_losses(node.hp, losses_path + losses_name, save_as=plots_path + losses_plot_name)
###################
# Prediction plot #
###################
# Compare rollouts of the true vs learned dynamics under random controls
x_0 = sample_x_init(hp, n_batch=1)[0]  # remove the leading (batch) axis
print("x0", x_0.shape, x_0)
# NOTE(review): uses np.random rather than the jax PRNG key used elsewhere
# in this module — confirm reproducibility is not expected here.
us = np.random.uniform(low=node.system.bounds[-1, 0],
                       high=node.system.bounds[-1, 1],
                       size=(hp.num_steps + 1, hp.control_size))
us = jnp.array(us)
_, predicted_states1 = integrate_time_independent(
  node.system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
_, predicted_states2 = integrate_time_independent(
  learned_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
Path(plots_path).mkdir(parents=True, exist_ok=True)
save_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
            f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}' \
            f'_prediction.{cfg.file_extension}'
plot(hp, node.system,
     data={'x': predicted_states1,
           'other_x': predicted_states2,
           'u': us},
     labels={'x': ' (true state trajectory)',
             'other_x': ' (state trajectory predicted by learned model)',
             'u': ' (chosen uniformly at random)'},
     styles={'x': '-',
             'other_x': '-x',
             'u': '-'},
     widths={'x': 3,
             'other_x': 1,
             'u': 1},
     save_as=plots_path + save_name,
     figsize=cfg.figsize)
| 8,692 | 42.034653 | 115 | py |
myriad | myriad-main/myriad/experiments/mle_sysid.py | # (c) 2021 Nikolaus Howe
from __future__ import annotations
import csv
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Dict, Tuple, Union
from myriad.config import Config, HParams, IntegrationMethod, SystemType
from myriad.defaults import param_guesses
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot, plot_losses
from myriad.utils import integrate_time_independent, integrate_time_independent_in_parallel, \
get_state_trajectory_and_cost, get_defect, sample_x_init, generate_dataset
def run_mle_sysid(hp: HParams, cfg: Config) -> None:
if hp.system not in param_guesses:
print("We do not currently support that kind of system for sysid. Exiting...")
return
test_system = hp.system()
# Create, or load, a train and validation (and test, unused) set.
dataset_size = hp.train_size + hp.val_size + hp.test_size
file_path = f'datasets/{hp.system.name}/mle_sysid/'
Path(file_path).mkdir(parents=True, exist_ok=True)
file_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}.d'
try:
dataset = pkl.load(open(file_path + file_name, 'rb'))
dataset = jnp.array(dataset)
print("loaded the dataset from file")
except FileNotFoundError as e:
print("unable to find the file, so we're making our own")
dataset = generate_dataset(hp, cfg)
pkl.dump(dataset, open(file_path + file_name, 'wb'))
assert dataset.shape == (dataset_size, hp.num_steps + 1, hp.state_size + hp.control_size)
if cfg.verbose:
print("full dataset", dataset.shape)
assert np.isfinite(dataset).all()
# Perform the learning
train_set, val_set, test_set = dataset[:hp.train_size], dataset[hp.train_size:-hp.test_size], dataset[-hp.test_size:]
if cfg.verbose:
print("train set", train_set.shape)
print("val set", val_set.shape)
print("test set", test_set.shape)
losses_path = f'losses/{hp.system.name}/mle_sysid/'
Path(losses_path).mkdir(parents=True, exist_ok=True)
losses_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}.l'
params_path = f'params/{hp.system.name}/mle_sysid/'
Path(params_path).mkdir(parents=True, exist_ok=True)
params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_{hp.train_size}_{hp.val_size}_{hp.test_size}.p'
plots_path = f'plots/{hp.system.name}/mle_sysid/'
Path(plots_path).mkdir(parents=True, exist_ok=True)
plots_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}'
try:
if cfg.load_params_if_saved:
params = pkl.load(open(params_path + params_name, 'rb'))
print("loaded params from file")
else:
raise FileNotFoundError
except FileNotFoundError as e:
print("unable to find the params file, so we'll train "
"our model to learn some, and then save them")
# Make an initial guess for the system parameters
params = param_guesses[hp.system]
# Initialize optimizer
opt = optax.adam(1e-3)
opt_state = opt.init(params)
# Calculate the MSE between the simulated trajectory and the real one
@jax.jit
def loss(given_params, dataset, epoch):
# print("the given params are", given_params)
def dynamics(x, u):
return test_system.parametrized_dynamics(given_params, x, u)
train_xs = dataset[:, :, :hp.state_size]
train_us = dataset[:, :, hp.state_size:]
start_xs = train_xs[:, 0, :]
# if cfg.verbose:
# print("train xs", train_xs.shape)
# print("train us", train_us.shape)
# print("start train xs", start_xs.shape)
_, predicted_states = integrate_time_independent_in_parallel(
dynamics, start_xs, train_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
# assert jnp.isfinite(predicted_states).all()
# if cfg.verbose:
# print("the predicted states are", predicted_states.shape)
# print(predicted_states)
# Calculate the loss, using a discount factor
# to incentivize learning the earlier part of the
# trajectory first. This seems to avoid local minima.
# print('predicted', predicted_states.shape)
# print('true', train_xs.shape)
diff = predicted_states - train_xs
sq_diff = diff * diff
long = jnp.mean(sq_diff, axis=(0, 2)) # average all axes except time
discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
if hp.system in [SystemType.BACTERIA, SystemType.MOUNTAINCAR, SystemType.CARTPOLE]:
print("min discount", discount[-1])
else:
discount = 1.
return jnp.mean(long * discount)
# Gradient descent on the loss function already in scope
@jax.jit
def update(params: Dict[str, Union[float, jnp.ndarray]],
opt_state: optax.OptState, minibatch: jnp.ndarray,
epoch: int) \
-> Tuple[Dict[str, Union[float, jnp.ndarray]], optax.OptState]:
grads = jax.grad(loss)(params, minibatch, epoch)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# MLE train
epochs = []
train_losses = []
val_losses = []
best_val_loss = None
best_params = None
check_frequency = 500
count = 0
for epoch in range(hp.num_epochs * 10):
if epoch % check_frequency == 0:
cur_loss = loss(params, train_set, epoch)
val_loss = loss(params, val_set, epoch)
epochs.append(epoch)
train_losses.append(cur_loss)
val_losses.append(val_loss)
if cfg.verbose:
print("loss", cur_loss)
print("val loss", val_loss)
if np.isnan(cur_loss):
print("current params", params)
print("train set", train_set)
with open('t_set', 'wb') as afile:
pkl.dump(train_set, afile)
raise SystemExit
# print("params", params)
# writer.add_scalar('loss/train', cur_loss, epoch)
# writer.add_scalar('loss/val', val_loss, epoch)
# Break if we have converged
if best_val_loss is None or val_loss < best_val_loss:
best_val_loss = val_loss
best_params = params
count = 0
else:
if count > hp.early_stop_threshold:
print("stopping early at epoch", epoch)
break
# If we're still going, increase the count
count += check_frequency
if epoch % 2500 == 0:
# Plot the situation
first_xs = train_set[0, :, :hp.state_size]
first_us = train_set[0, :, hp.state_size:]
@jax.jit
def dynamics(x, u):
return test_system.parametrized_dynamics(params, x, u)
_, predicted_states = integrate_time_independent(
dynamics, first_xs[0], first_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
# if cfg.verbose:
# print("plotting xs", first_xs.shape)
# print("plotting us", first_us.shape)
# Plot states
plt.subplot(2, 1, 1)
plt.plot(first_xs, label="true xs")
plt.plot(predicted_states, label="predicted xs")
plt.legend()
# Plot controls
plt.subplot(2, 1, 2)
plt.plot(first_us, label="true us")
plt.legend()
# Save the plot
plt.savefig(f"{plots_path + plots_name}_epoch_{epoch}.png")
plt.close()
# Update the params
params, opt_state = update(params, opt_state, train_set, epoch)
print("saving the final params", best_params)
pkl.dump(best_params, open(params_path + params_name, 'wb'))
print("saving the train and val losses")
with open(losses_path + losses_name, 'w') as f:
write = csv.writer(f)
for i, ep in enumerate(epochs):
write.writerow([ep, train_losses[i], val_losses[i]])
# Use the best params for the plotting, etc.
params = best_params
print("the final params are", params)
# Now we compare the performance when using the learned model for planning,
# compared with the performance of using the original model for planning.
true_system = hp.system()
learned_system = hp.system(**params)
# Uncomment the following 12 lines if you want to verify the performance on the
# dataset that was used for training
# (note: this should _not_ be used as a form of evaluation!)
# dataset = pkl.load(open(file_path, 'rb'))
# dataset = jnp.array(dataset)
# first_xs = dataset[0, :, :state_size]
# first_us = dataset[0, :, state_size:]
# _, predicted_states1 = integrate_time_independent(
# p1.dynamics, first_xs[0], first_us, stepsize, num_steps, IntegrationMethod.HEUN
# )
# _, predicted_states2 = integrate_time_independent(
# p2.dynamics, first_xs[0], first_us, stepsize, num_steps, IntegrationMethod.HEUN
# )
# print("first xs", first_xs.shape)
# print("first us", first_us.shape)
# Test imitation on random controls and a random start point
x_0 = sample_x_init(hp, n_batch=1)[0] # remove the leading (batch) axis
print("x0", x_0.shape, x_0)
us = np.random.uniform(low=true_system.bounds[-1, 0],
high=true_system.bounds[-1, 1],
size=(hp.num_steps + 1, hp.control_size))
us = jnp.array(us)
_, predicted_states1 = integrate_time_independent(
true_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
_, predicted_states2 = integrate_time_independent(
learned_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
save_path = f'plots/{hp.system.name}/mle_sysid/'
Path(save_path).mkdir(parents=True, exist_ok=True)
save_name = f'{hp.train_size}_{hp.val_size}_' \
f'noise_{hp.noise_level}_{hp.test_size}_prediction.{cfg.file_extension}'
plot(hp, true_system,
data={'x': predicted_states1,
'other_x': predicted_states2,
'u': us},
labels={'x': ' (true state trajectory)',
'other_x': ' (state trajectory predicted by learned model)',
'u': ' (chosen uniformly at random)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-'},
widths={'x': 3,
'other_x': 1,
'u': 1},
save_as=save_path + save_name,
figsize=cfg.figsize)
# plt.figure(figsize=(9, 7))
# plt.plot(predicted_states1, '.', label="true")
# plt.plot(predicted_states2, label="predicted")
# plt.title("Imitation")
# plt.legend()
# plt.show()
#
# Perform optimal control using the learned dynamics and the real dynamics
true_opt = get_optimizer(hp, cfg, true_system)
true_solution = true_opt.solve()
learned_opt = get_optimizer(hp, cfg, learned_system)
learned_solution = learned_opt.solve()
true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_solution['u'])
true_defect = get_defect(true_system, true_x)
learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
learned_defect = get_defect(true_system, learned_x)
save_path = f'plots/{hp.system.name}/mle_sysid/'
Path(save_path).mkdir(parents=True, exist_ok=True)
save_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}_planning.{cfg.file_extension}'
plot(hp, true_system,
data={'x': true_x,
'other_x': learned_x,
'u': true_solution['u'],
'other_u': learned_solution['u'],
'cost': true_c,
'other_cost': learned_c,
'defect': true_defect,
'other_defect': learned_defect},
labels={'x': ' (true state from controls planned with true model)',
'other_x': ' (true state from controls planned with learned model)',
'u': ' (planned with true model)',
'other_u': ' (planned with learned model)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-',
'other_u': 'x-'},
widths={'x': 3,
'other_x': 1,
'u': 3,
'other_u': 1},
save_as=save_path + save_name,
figsize=cfg.figsize)
losses_plot_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}' \
f'_training.{cfg.file_extension}'
plot_losses(hp, losses_path + losses_name, save_as=save_path + losses_plot_name)
| 12,626 | 36.247788 | 119 | py |
myriad | myriad-main/myriad/experiments/node_e2e_sysid.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config, NLPSolverType
from myriad.defaults import learning_rates, param_guesses
from myriad.neural_ode.create_node import NeuralODE
from myriad.custom_types import Params, DParams
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.systems import get_name
from myriad.utils import integrate_time_independent, get_state_trajectory_and_cost, get_defect
def run_node_endtoend(hp, cfg, num_epochs=10_000, load_specific_epoch_params=None):
  """End-to-end system identification with a Neural ODE.

  Trains a NeuralODE model so that *planning through the learned model*
  imitates the optimal controls computed with the true system: an
  extragradient primal/dual solver is unrolled for `hp.num_unrolled` steps,
  and gradients of the imitation loss are back-propagated through the unrolled
  solver iterations into the model parameters.

  Args:
    hp: experiment hyper-parameters (system choice, solver rates, sizes).
    cfg: run configuration (plotting flags, file extensions, etc.).
    num_epochs: number of outer training iterations.
    load_specific_epoch_params: if given, evaluation loads the checkpoint /
      logs saved at that epoch instead of the final ones.

  Side effects: creates dataset/params/plots/guesses/losses directories,
  caches the true optimal trajectory, saves intermediate and final params,
  and (when cfg.plot) writes several evaluation figures.
  """
  if hp.system not in param_guesses:
    print("We do not currently support that kind of system for sysid. Exiting...")
    return
  # Directory layout: everything is keyed by the system's name.
  data_path = f'datasets/{hp.system.name}/node_e2e_sysid/'
  Path(data_path).mkdir(parents=True, exist_ok=True)
  true_us_name = 'true_opt_us'
  true_xs_name = 'true_opt_xs'
  params_path = f'params/{hp.system.name}/node_e2e_sysid/'
  Path(params_path).mkdir(parents=True, exist_ok=True)
  params_name = f'node_e2e.p'
  plots_path = f'plots/{hp.system.name}/node_e2e_sysid/'
  Path(plots_path).mkdir(parents=True, exist_ok=True)
  guesses_path = f'intermediate_guesses/{hp.system.name}/node_e2e_sysid/'
  Path(guesses_path).mkdir(parents=True, exist_ok=True)
  losses_path = f'losses/{hp.system.name}/node_e2e_sysid/'
  Path(losses_path).mkdir(parents=True, exist_ok=True)
  node = NeuralODE(hp, cfg, mle=False)
  true_system = hp.system()  # use the default params here
  true_optimizer = get_optimizer(hp, cfg, true_system)
  node_system = NodeSystem(node=node, true_system=true_system)
  node_optimizer = get_optimizer(hp, cfg, node_system)
  # Get the true optimal controls (and state),
  # which we will try to imitate
  try:
    with open(data_path + true_us_name, 'rb') as myfile:
      true_opt_us = jnp.array(pkl.load(myfile))
    with open(data_path + true_xs_name, 'rb') as myfile:
      true_opt_xs = jnp.array(pkl.load(myfile))
    print("successfully loaded the saved optimal trajectory")
  except Exception as e:
    # Cache miss: solve the true OC problem once and save the result.
    print("We haven't saved the optimal trajectory for this system yet, so we'll do that now")
    true_solution = true_optimizer.solve()
    true_opt_us = true_solution['u']
    print("true opt us", true_opt_us.shape)
    _, true_opt_xs = integrate_time_independent(
      true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
    print("true opt xs", true_opt_xs.shape)
    with open(data_path + true_us_name, 'wb') as myfile:
      pkl.dump(true_opt_us, myfile)
    with open(data_path + true_xs_name, 'wb') as myfile:
      pkl.dump(true_opt_xs, myfile)
  try:
    node.load_params(params_path + params_name)
    print("It seems we've already trained for this system, so we'll go straight to evaluation.")
  except FileNotFoundError as e:
    print("unable to find the params, so we'll guess "
          "and then optimize and save")
    xs_and_us = true_optimizer.guess
    lmbdas = jnp.zeros_like(true_optimizer.constraints(true_optimizer.guess))
    # As a sanity check, use the true optimal controls and see if we diverge from them
    # opt_xs_and_us = pkl.load(open('bleble', 'rb'))
    # print('xs_and_us', xs_and_us.shape)
    # Save these so we can reset them later
    original_xs_and_us = jnp.array(xs_and_us)
    original_lmbdas = jnp.array(lmbdas)
    # Parameter optimization
    opt = optax.adam(1e-4)
    opt_state = opt.init(node.params)
    # Control/state/duals optimizer
    eta_x = hp.eta_x
    eta_v = hp.eta_lmbda
    if hp.system in learning_rates:
      # Per-system tuned rates override the defaults.
      eta_x = learning_rates[hp.system]['eta_x']
      eta_v = learning_rates[hp.system]['eta_v']
    bounds = true_optimizer.bounds

    @jax.jit
    def lagrangian(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> float:
      # Lagrangian of the parametrized (learned-model) OC problem.
      return (node_optimizer.parametrized_objective(params, xs_and_us)
              + lmbdas @ node_optimizer.parametrized_constraints(params, xs_and_us))

    @jax.jit
    def step(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> Tuple[jnp.ndarray, jnp.ndarray]:
      # One extragradient step on primals (clipped to the box bounds), then duals.
      x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
                       a_min=bounds[:, 0], a_max=bounds[:, 1])
      x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
                       a_min=bounds[:, 0], a_max=bounds[:, 1])
      lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x_new, lmbda, params)
      return x_new, lmbda_new

    @jax.jit
    def step_x(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
      # Primal half of `step`, kept separate so it can be differentiated alone.
      x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
                       a_min=bounds[:, 0], a_max=bounds[:, 1])
      x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
                       a_min=bounds[:, 0], a_max=bounds[:, 1])
      return x_new

    @jax.jit
    def step_lmbda(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
      # Dual half of `step`.
      lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x, lmbda, params)
      return lmbda_new

    # Jacobians of the half-steps w.r.t. (primals, duals, params) and params only.
    jac_x = jax.jit(jax.jacobian(step_x, argnums=(0, 1, 2)))
    jac_lmbda = jax.jit(jax.jacobian(step_lmbda, argnums=(0, 1, 2)))
    jac_x_p = jax.jit(jax.jacobian(step_x, argnums=2))
    jac_lmbda_p = jax.jit(jax.jacobian(step_lmbda, argnums=2))

    @jax.jit
    def many_steps_grad(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> DParams:
      # Accumulates d(primals)/d(params) through hp.num_unrolled solver steps
      # by forward-mode chain rule over the unrolled iteration.
      zx = jac_x_p(xs_and_us, lmbdas, params)
      zx = jax.tree_util.tree_map(lambda x: x * 0., zx)
      zlmbda = jac_lmbda_p(xs_and_us, lmbdas, params)
      zlmbda = jax.tree_util.tree_map(lambda x: x * 0., zlmbda)

      @jax.jit
      def body_fun(i, vars):
        xs_and_us, lmbdas, zx, zlmbda = vars
        dx, dlmbda, dp = jac_x(xs_and_us, lmbdas, params)
        x_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dx, el, axes=(1, 0)), zx)
        lmbda_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dlmbda, el, axes=(1, 0)), zlmbda)
        zx = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)  # multimap
        xs_and_us = step_x(xs_and_us, lmbdas, params)
        dx, dlmbda, dp = jac_lmbda(xs_and_us, lmbdas, params)
        x_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dx, el, axes=(1, 0)), zx)
        lmbda_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dlmbda, el, axes=(1, 0)), zlmbda)
        zlmbda = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)  # multimap
        lmbdas = step_lmbda(xs_and_us, lmbdas, params)
        return xs_and_us, lmbdas, zx, zlmbda

      xs_and_us, lmbdas, zx, zlmbda = jax.lax.fori_loop(0, hp.num_unrolled, body_fun, (xs_and_us, lmbdas, zx, zlmbda))
      return zx

    # @jax.jit
    # def control_imitation_loss(params: Params, init_xs_and_us: jnp.ndarray, init_lmbdas: jnp.ndarray):
    #   xs_and_us_new, lmbda_new = step(init_xs_and_us, init_lmbdas, params)
    #   xs, us = true_optimizer.unravel(xs_and_us_new)
    #   return jnp.mean((us - true_opt_us) ** 2)  # same loss as "Diff. MPC"

    @jax.jit
    def simple_imitation_loss(xs_and_us: jnp.ndarray, epoch: int):
      # MSE between current controls and the true optimal controls,
      # optionally discounted over time (disabled for all current systems).
      xs, us = true_optimizer.unravel(xs_and_us)
      diff = us - true_opt_us
      sq_diff = diff * diff
      long = jnp.mean(sq_diff, axis=1)
      discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
      if hp.system in []:
        print("min discount", discount[-1])
      else:
        discount = 1.
      return jnp.mean(long * discount)

    @jax.jit
    def lookahead_update(params: Params, opt_state: optax.OptState, xs_and_us: jnp.ndarray,
                         lmbdas: jnp.ndarray, epoch: int) -> Tuple[Params, optax.OptState]:
      # Chain rule: dJ/dp = dJ/d(primals) . d(primals)/dp through the unrolled solver.
      dloop_dp = many_steps_grad(xs_and_us, lmbdas, params)
      dx_dloop = jax.grad(simple_imitation_loss)(xs_and_us, epoch)
      dJdp = jax.tree_util.tree_map(lambda x: jnp.tensordot(dx_dloop, x, axes=(0, 0)), dloop_dp)
      updates, opt_state = opt.update(dJdp, opt_state)
      new_params = optax.apply_updates(params, updates)
      return new_params, opt_state

    # Use to record the guesses
    ts = []
    primal_guesses = []
    dual_guesses = []
    # Use to record the losses
    imitation_losses = []
    # print("true params", true_params)
    # print("starting guess of params", node.params)
    # u_lower = true_system.bounds[hp.state_size:, 0]
    # u_upper = true_system.bounds[hp.state_size:, 1]
    record_things_time = 10
    save_and_reset_time = 1000
    for epoch in range(num_epochs):
      if epoch % save_and_reset_time == 0:
        # Record more around the very start
        record_things_time = 1
        print("saving current params")
        pkl.dump(node.params, open(params_path + str(epoch) + params_name, 'wb'))
        print("saving guesses so far")
        pkl.dump(ts, open(guesses_path + str(epoch) + 'ts', 'wb'))
        pkl.dump(primal_guesses, open(guesses_path + str(epoch) + 'primals', 'wb'))
        pkl.dump(dual_guesses, open(guesses_path + str(epoch) + 'duals', 'wb'))
        print("saving imitation losses")
        pkl.dump(imitation_losses, open(losses_path + str(epoch) + '_losses', 'wb'))
        print("resetting guess")
        # Reset the guess to a different random small amount
        hp.key, subkey = jax.random.split(hp.key)
        optimizer = get_optimizer(hp, cfg, true_system)
        xs_and_us = optimizer.guess
        lmbdas = original_lmbdas
      if epoch % record_things_time == 0:
        # Only have high-density recording around the start of each guess
        if epoch >= 10:
          record_things_time = 10
        xs, cur_us = true_optimizer.unravel(xs_and_us)
        plt.ion()
        fig = plt.figure()
        # if a_plt is None:
        ax1 = fig.add_subplot(211)
        a_plt = ax1.plot(true_opt_xs, label="true opt xs")
        b_plt = ax1.plot(xs, label="xs from given controls")
        plt.legend()
        ax2 = fig.add_subplot(212)
        c_plt = ax2.plot(true_opt_us, label="true opt us")
        d_plt = ax2.plot(cur_us, label="current us")
        plt.legend()
        # plt.show()
        # else:
        #   b_plt[0].set_ydata(predicted_states[:, 0])
        #   b_plt[1].set_ydata(predicted_states[:, 1])
        # b_plt = ax1.plot(np.sin(np.arange(epoch, epoch+10)), label="predicted xs")
        plt.savefig(f"{plots_path}progress_epoch_{epoch}.png")
        plt.close()
        fig.canvas.draw()
        fig.canvas.flush_events()
        # Save the current params
        ts.append(epoch)
        primal_guesses.append(np.array(xs_and_us))
        dual_guesses.append(np.array(lmbdas))
        # Save the current imitation loss
        cur_loss = simple_imitation_loss(xs_and_us, epoch)
        imitation_losses.append(cur_loss)
        print(epoch, "loss", cur_loss)
      # Take step(s) with the model
      for _ in range(hp.num_unrolled):
        xs_and_us, lmbdas = step(xs_and_us, lmbdas, node.params)
      # Use the new technique for updating
      node.params, opt_state = lookahead_update(node.params, opt_state, xs_and_us, lmbdas, epoch)
    print("Saving the final params", node.params)
    pkl.dump(node.params, open(params_path + params_name, 'wb'))
    print("Saving the final guesses")
    pkl.dump(ts, open(guesses_path + str(num_epochs - 1) + 'ts', 'wb'))
    pkl.dump(primal_guesses, open(guesses_path + str(num_epochs - 1) + 'primals', 'wb'))
    pkl.dump(dual_guesses, open(guesses_path + str(num_epochs - 1) + 'duals', 'wb'))
    print("Saving the final losses")
    pkl.dump(imitation_losses, open(losses_path + str(num_epochs - 1) + 'losses', 'wb'))
  if cfg.plot:
    # Plot the imitation loss over time  # params are already open, but putting this here for clarity
    if load_specific_epoch_params is not None:
      node.load_params(params_path + str(load_specific_epoch_params) + params_name)
      losses = pkl.load(open(losses_path + str(load_specific_epoch_params) + '_losses', 'rb'))
      ts = pkl.load(open(guesses_path + str(load_specific_epoch_params) + 'ts', 'rb'))
      primal_guesses = pkl.load(open(guesses_path + str(load_specific_epoch_params) + 'primals', 'rb'))
    else:
      node.load_params(params_path + params_name)
      losses = pkl.load(open(losses_path + str(num_epochs - 1) + 'losses', 'rb'))
      ts = pkl.load(open(guesses_path + str(num_epochs - 1) + 'ts', 'rb'))
      primal_guesses = pkl.load(open(guesses_path + str(num_epochs - 1) + 'primals', 'rb'))
    # Check the lengths
    if len(losses) % 100 == 0:  # 10000:
      print("clipping losses")
      losses = losses[:-1]
    if len(ts) % 100 == 0:  # == 10000:
      print("clipping ts")
      ts = ts[:-1]
    if len(primal_guesses) % 100 == 0:  # == 10000:
      print("clipping primal guesses")
      primal_guesses = primal_guesses[:-1]
    # assert len(losses) == 999
    # assert len(ts) == 999
    # assert len(primal_guesses) == 999
    ##################
    # Imitation loss #
    ##################
    b = matplotlib.get_backend()
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
      "pgf.texsystem": "pdflatex",
      'font.family': 'serif',
      'text.usetex': True,
      'pgf.rcfonts': False,
    })
    plt.rcParams["figure.figsize"] = (4, 3.3)
    # Plot the imitation loss over time
    plt.plot(ts, losses)
    plt.grid()
    plt.xlabel('iteration')
    plt.ylabel('imitation loss')
    plt.title("Imitation Loss")
    plt.tight_layout()
    plt.savefig(plots_path + f'imitation_loss.{cfg.file_extension}', bbox_inches='tight')
    plt.close()
    #######################
    # Control performance #
    #######################
    true_state_trajectory, optimal_cost = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
    # Plot the control performance over time
    print("Plotting control performance over time")
    parallel_get_state_trajectory_and_cost = jax.vmap(get_state_trajectory_and_cost, in_axes=(None, None, None, 0))
    parallel_unravel = jax.vmap(true_optimizer.unravel, in_axes=0)
    ar_primal_guesses = np.array(primal_guesses)
    _, uus = parallel_unravel(ar_primal_guesses)
    xxs, cs = parallel_get_state_trajectory_and_cost(hp, true_system, true_system.x_0, uus)
    plt.axhline(optimal_cost, color='grey', linestyle='dashed')
    plt.plot(ts, cs)
    plt.grid()
    plt.xlabel('iteration')
    plt.ylabel('cost')
    plt.title("Trajectory Cost")
    plt.tight_layout()
    plt.savefig(plots_path + f'control_performance.{cfg.file_extension}', bbox_inches='tight')
    plt.close()
    # Save the plot
    # plt.savefig(f"{plots_path + plots_name}_epoch_{epoch}.png")
    # plt.close()
    # Plot the performance of planning with the final model
    print("Plotting final planning performance")
    hp = HParams(nlpsolver=NLPSolverType.EXTRAGRADIENT)
    cfg = Config()
    node_optimizer = get_optimizer(hp, cfg, node_system)
    learned_solution = node_optimizer.solve_with_params(node.params)
    learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
    learned_defect = get_defect(true_system, learned_x)
    true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
    true_defect = get_defect(true_system, true_x)
    plot(hp, true_system,
         data={'x': true_opt_xs,
               'other_x': learned_x,
               'u': true_opt_us,
               'other_u': learned_solution['u'],
               'cost': true_c,
               'other_cost': learned_c,
               'defect': true_defect,
               'other_defect': learned_defect},
         labels={'x': ' (true state from controls planned with true model)',
                 'other_x': ' (true state from controls planned with learned model)',
                 'u': ' (planned with true model)',
                 'other_u': ' (planned with learned model)'},
         styles={'x': '-',
                 'other_x': 'x-',
                 'u': '-',
                 'other_u': 'x-'},
         widths={'x': 3,
                 'other_x': 1,
                 'u': 3,
                 'other_u': 1},
         save_as=plots_path + f'planning_with_model.{cfg.file_extension}',
         figsize=cfg.figsize)
    #####################
    # Decision var plot #
    #####################
    # Plot showing how the guess converges to the optimal trajectory
    matplotlib.use(b)
    plt.rcParams["figure.figsize"] = (7, 5.6)
    print("Plotting convergence")
    # plt.suptitle("Intermediate Trajectories")
    title = get_name(hp)
    if title is not None:
      plt.suptitle(title)
      plt.suptitle(r"Intermediate Trajectories" + r" $-$ " + title)
    plt.subplot(2, 1, 1)
    plt.grid()
    # Plot intermediate controls with transparency
    for xs in xxs:
      plt.plot(xs, color='orange', alpha=0.01)
    plt.ylabel('state (x)')
    plt.plot(true_opt_xs, label="true state from controls planned with true model", lw=3)
    # Plot the final state curve
    plt.plot(xxs[-1], 'x-', label="true state from final controls")
    plt.legend(loc='upper right')
    # Plot controls
    plt.subplot(2, 1, 2)
    plt.plot(true_opt_us, label="planned with true model", lw=3)
    # Plot intermediate controls with transparency
    for us in uus:
      plt.plot(us, color='orange', alpha=0.01)
    # Plot the final control curve
    plt.ylabel('control (u)')
    plt.xlabel('time (s)')
    plt.plot(uus[-1], 'x-', label="controls at the end of training")
    plt.legend(loc='upper right')
    plt.grid()
    plt.tight_layout()
    plt.savefig(plots_path + 'node_e2e_cool_plot.png', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
  # Launch the end-to-end NODE sysid experiment with default hyper-parameters.
  run_node_endtoend(HParams(), Config())
| 18,018 | 38.342795 | 118 | py |
SkeletonGCL | SkeletonGCL-main/main.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import inspect
import os
import pickle
import random
import shutil
import sys
import time
from collections import OrderedDict
import traceback
from sklearn.metrics import confusion_matrix
import csv
import numpy as np
import glob
# torch
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from model.loss import InfoNCEGraph
from torchlight import DictAction
import resource
# Raise the soft limit on open file descriptors (keeping the hard limit as-is):
# many DataLoader workers can otherwise exhaust the default per-process limit.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
def init_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and all GPUs) and
    force deterministic cuDNN behaviour (disables benchmark autotuning)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic kernels at the cost of autotuned speed.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def import_class(import_str):
    """Import and return the attribute named by a dotted path like 'pkg.mod.Class'.

    Raises ImportError (wrapping the original traceback text) when the module
    loads but does not define the requested attribute.
    """
    module_name, _, class_name = import_str.rpartition('.')
    __import__(module_name)
    module = sys.modules[module_name]
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Class %s cannot be found (%s)' % (class_name, traceback.format_exception(*sys.exc_info())))
def str2bool(v):
    """Parse a yes/no style string into a bool (case-insensitive).

    Raises argparse.ArgumentTypeError for unrecognised values, so it can be
    used directly as an argparse `type=` callable.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
def get_parser():
    """Build the command-line argument parser for training / evaluation.

    Parameter priority at run time: command line > config file > defaults.

    Returns:
        The configured argparse.ArgumentParser.
    """
    # parameter priority: command line > config > default
    parser = argparse.ArgumentParser(
        description='Spatial Temporal Graph Convolution Network')
    parser.add_argument(
        '--work-dir',
        default='./work_dir/temp',
        help='the work folder for storing results')
    parser.add_argument('-model_saved_name', default='')
    parser.add_argument(
        '--config',
        default='./config/nturgbd-cross-view/test_bone.yaml',
        help='path to the configuration file')

    # processor
    parser.add_argument(
        '--phase', default='train', help='must be train or test')
    parser.add_argument(
        '--save-score',
        type=str2bool,
        default=False,
        help='if true, the classification score will be stored')  # typo fix: was "if ture"

    # visualize and debug
    parser.add_argument(
        '--seed', type=int, default=1, help='random seed for pytorch')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        help='the interval for printing messages (#iteration)')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=1,
        help='the interval for storing models (#iteration)')
    parser.add_argument(
        '--save-epoch',
        type=int,
        default=30,
        help='the start epoch to save model (#iteration)')
    parser.add_argument(
        '--eval-interval',
        type=int,
        default=5,
        help='the interval for evaluating models (#iteration)')
    parser.add_argument(
        '--print-log',
        type=str2bool,
        default=True,
        help='print logging or not')
    parser.add_argument(
        '--show-topk',
        type=int,
        default=[1, 5],
        nargs='+',
        help='which Top K accuracy will be shown')

    # feeder
    parser.add_argument(
        '--feeder', default='feeder.feeder', help='data loader will be used')
    parser.add_argument(
        '--num-worker',
        type=int,
        default=32,
        help='the number of worker for data loader')
    parser.add_argument(
        '--train-feeder-args',
        action=DictAction,
        default=dict(),
        help='the arguments of data loader for training')
    parser.add_argument(
        '--test-feeder-args',
        action=DictAction,
        default=dict(),
        help='the arguments of data loader for test')

    # model
    parser.add_argument('--model', default=None, help='the model will be used')
    parser.add_argument(
        '--model-args',
        action=DictAction,
        default=dict(),
        help='the arguments of model')
    parser.add_argument(
        '--weights',
        default=None,
        help='the weights for network initialization')
    parser.add_argument(
        '--ignore-weights',
        type=str,
        default=[],
        nargs='+',
        help='the name of weights which will be ignored in the initialization')

    # optim
    parser.add_argument(
        '--base-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument(
        '--step',
        type=int,
        default=[20, 40, 60],
        nargs='+',
        help='the epoch where optimizer reduce the learning rate')
    parser.add_argument(
        '--device',
        type=int,
        default=0,
        nargs='+',
        help='the indexes of GPUs for training or testing')
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument(
        '--nesterov', type=str2bool, default=False, help='use nesterov or not')
    parser.add_argument(
        '--batch-size', type=int, default=256, help='training batch size')
    parser.add_argument(
        '--test-batch-size', type=int, default=256, help='test batch size')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=0,
        help='start training from which epoch')
    parser.add_argument(
        '--num-epoch',
        type=int,
        default=80,
        help='stop training in which epoch')
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.0005,
        help='weight decay for optimizer')
    parser.add_argument(
        '--temperature',
        type=float,
        default=0.8,
        help='temperature for cross entropy loss in GCL')
    parser.add_argument(
        '--lr-decay-rate',
        type=float,
        default=0.1,
        help='decay rate for learning rate')
    parser.add_argument('--warm_up_epoch', type=int, default=0)
    return parser
class Processor():
    """
    Processor for Skeleton-based Action Recognition
    """
    def __init__(self, arg):
        """Set up the full training/evaluation pipeline from parsed arguments.

        Creates the TensorBoard writers (interactively clearing an existing run
        directory when training in non-debug mode), then loads data, model and
        optimizer, and moves the model to the output device (wrapping it in
        DataParallel when several GPUs are listed).
        """
        self.arg = arg
        self.save_arg()
        if arg.phase == 'train':
            # Normal (non-debug) training logs under <work_dir>/runs.
            if not arg.train_feeder_args['debug']:
                arg.model_saved_name = os.path.join(arg.work_dir, 'runs')
                if os.path.isdir(arg.model_saved_name):
                    # A previous run exists: ask the user before wiping it.
                    print('log_dir: ', arg.model_saved_name, 'already exist')
                    answer = input('delete it? y/n:')
                    if answer == 'y':
                        shutil.rmtree(arg.model_saved_name)
                        print('Dir removed: ', arg.model_saved_name)
                        input('Refresh the website of tensorboard by pressing any keys')
                    else:
                        print('Dir not removed: ', arg.model_saved_name)
                self.train_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'train'), 'train')
                self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'val'), 'val')
            else:
                # Debug mode: one shared throwaway writer for train and val.
                self.train_writer = self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'test'), 'test')
        self.global_step = 0
        # pdb.set_trace()
        self.load_data()
        self.load_model()
        if self.arg.phase == 'model_size':
            # Only report the model size; no optimizer is needed.
            pass
        else:
            self.load_optimizer()
        self.lr = self.arg.base_lr
        self.best_acc = 0
        self.best_acc_epoch = 0
        self.model = self.model.cuda(self.output_device)
        if type(self.arg.device) is list:
            if len(self.arg.device) > 1:
                # Replicate across all listed GPUs, gathering on output_device.
                self.model = nn.DataParallel(
                    self.model,
                    device_ids=self.arg.device,
                    output_device=self.output_device)
def load_data(self):
Feeder = import_class(self.arg.feeder)
self.data_loader = dict()
if self.arg.phase == 'train':
self.data_loader['train'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.train_feeder_args),
batch_size=self.arg.batch_size,
shuffle=True,
num_workers=self.arg.num_worker,
drop_last=True,
worker_init_fn=init_seed)
self.data_loader['test'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.test_feeder_args),
batch_size=self.arg.test_batch_size,
shuffle=False,
num_workers=self.arg.num_worker,
drop_last=False,
worker_init_fn=init_seed)
def load_model(self):
output_device = self.arg.device[0] if type(self.arg.device) is list else self.arg.device
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
print(Model)
self.model = Model(**self.arg.model_args)
print(self.model)
self.loss = nn.CrossEntropyLoss().cuda(output_device)
mem_size = self.data_loader['train'].dataset.__len__() if self.arg.phase == 'train' else 0
label_all = self.data_loader['train'].dataset.label if self.arg.phase == 'train' else []
self.graphContrast = InfoNCEGraph(in_channels=3*25*25, out_channels=256, class_num=self.arg.model_args["num_class"], \
mem_size=mem_size, label_all=label_all, T=self.arg.temperature).cuda(output_device)
if self.arg.weights:
self.global_step = int(arg.weights[:-3].split('-')[-1])
self.print_log('Load weights from {}.'.format(self.arg.weights))
if '.pkl' in self.arg.weights:
with open(self.arg.weights, 'r') as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict([[k.split('module.')[-1], v.cuda(output_device)] for k, v in weights.items()])
keys = list(weights.keys())
for w in self.arg.ignore_weights:
for key in keys:
if w in key:
if weights.pop(key, None) is not None:
self.print_log('Sucessfully Remove Weights: {}.'.format(key))
else:
self.print_log('Can Not Remove Weights: {}.'.format(key))
try:
self.model.load_state_dict(weights)
except:
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print('Can not find these weights:')
for d in diff:
print(' ' + d)
state.update(weights)
self.model.load_state_dict(state)
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
self.print_log('using warm up, epoch: {}'.format(self.arg.warm_up_epoch))
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
f.write(f"# command line: {' '.join(sys.argv)}\n\n")
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
self.arg.lr_decay_rate ** np.sum(epoch >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + ' ] ' + str
print(str)
if self.arg.print_log:
with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
    def train(self, epoch, save_model=False):
        """Run one training epoch: CE loss plus the graph-contrastive loss.

        Logs per-step scalars to ``self.train_writer`` and optionally saves a
        checkpoint named ``<model_saved_name>-<epoch>-<step>.pt``.
        """
        self.model.train()
        self.print_log('Training epoch: {}'.format(epoch + 1))
        loader = self.data_loader['train']
        self.adjust_learning_rate(epoch)
        loss_value = []
        contrast_loss_value = []
        acc_value = []
        self.train_writer.add_scalar('epoch', epoch, self.global_step)
        self.record_time()
        # Tiny non-zero seeds keep the percentage division below safe.
        timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
        process = tqdm(loader, ncols=40)
        for batch_idx, (data, label, index) in enumerate(process):
            self.global_step += 1
            with torch.no_grad():
                data = data.float().cuda(self.output_device)
                label = label.long().cuda(self.output_device)
            timer['dataloader'] += self.split_time()
            # forward
            output, graph = self.model(data)
            loss = self.loss(output, label)
            if graph is not None:
                # Contrastive loss over the per-sample inferred graphs.
                contrast_loss = self.graphContrast(graph, label, index)
            else:
                contrast_loss = torch.zeros(1, device=output.device)
            # The contrastive head returns 0 while the memory bank is too empty;
            # only add it once it produces a real loss.
            if contrast_loss > 0:
                loss = loss + contrast_loss
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())
            contrast_loss_value.append(contrast_loss.data.item())
            timer['model'] += self.split_time()
            value, predict_label = torch.max(output.data, 1)
            acc = torch.mean((predict_label == label.data).float())
            acc_value.append(acc.data.item())
            self.train_writer.add_scalar('acc', acc, self.global_step)
            self.train_writer.add_scalar('loss', loss.data.item(), self.global_step)
            self.train_writer.add_scalar('contrast loss', contrast_loss.data.item(), self.global_step)
            # statistics
            self.lr = self.optimizer.param_groups[0]['lr']
            self.train_writer.add_scalar('lr', self.lr, self.global_step)
            timer['statistics'] += self.split_time()
        # statistics of time consumption and loss
        proportion = {
            k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
            for k, v in timer.items()
        }
        self.print_log(
            '\tMean training loss: {:.4f}. Mean graph loss: {:.4f}. Mean training acc: {:.2f}%.'.format(np.mean(loss_value), np.mean(contrast_loss_value), np.mean(acc_value)*100))
        self.print_log('\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(**proportion))
        if save_model:
            # Strip any DataParallel 'module.' prefixes before saving.
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()])
            torch.save(weights, self.arg.model_saved_name + '-' + str(epoch+1) + '-' + str(int(self.global_step)) + '.pt')
def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None):
if wrong_file is not None:
f_w = open(wrong_file, 'w')
if result_file is not None:
f_r = open(result_file, 'w')
self.model.eval()
self.print_log('Eval epoch: {}'.format(epoch + 1))
for ln in loader_name:
loss_value = []
score_frag = []
label_list = []
pred_list = []
step = 0
process = tqdm(self.data_loader[ln], ncols=40)
for batch_idx, (data, label, index) in enumerate(process):
label_list.append(label.numpy())
with torch.no_grad():
data = data.float().cuda(self.output_device)
label = label.long().cuda(self.output_device)
output, _ = self.model(data)
loss = self.loss(output, label)
score_frag.append(output.data.cpu().numpy())
loss_value.append(loss.data.item())
_, predict_label = torch.max(output.data, 1)
pred_list.append(predict_label.data.cpu().numpy())
step += 1
# if step == 10:
# break
if wrong_file is not None or result_file is not None:
predict = list(predict_label.cpu().numpy())
true = list(label.data.cpu().numpy())
for i, x in enumerate(predict):
if result_file is not None:
f_r.write(str(x) + ',' + str(true[i]) + '\n')
if x != true[i] and wrong_file is not None:
f_w.write(str(index[i]) + ',' + str(x) + ',' + str(true[i]) + '\n')
score = np.concatenate(score_frag)
loss = np.mean(loss_value)
if 'ucla' in self.arg.feeder:
self.data_loader[ln].dataset.sample_name = np.arange(len(score))
accuracy = self.data_loader[ln].dataset.top_k(score, 1)
if accuracy > self.best_acc:
self.best_acc = accuracy
self.best_acc_epoch = epoch + 1
print('Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name)
if self.arg.phase == 'train':
self.val_writer.add_scalar('loss', loss, self.global_step)
self.val_writer.add_scalar('acc', accuracy, self.global_step)
score_dict = dict(
zip(self.data_loader[ln].dataset.sample_name, score))
self.print_log('\tMean {} loss of {} batches: {}.'.format(
ln, len(self.data_loader[ln]), np.mean(loss_value)))
for k in self.arg.show_topk:
self.print_log('\tTop{}: {:.2f}%'.format(
k, 100 * self.data_loader[ln].dataset.top_k(score, k)))
if save_score:
with open('{}/epoch{}_{}_score.pkl'.format(
self.arg.work_dir, epoch + 1, ln), 'wb') as f:
pickle.dump(score_dict, f)
# acc for each class:
label_list = np.concatenate(label_list)
pred_list = np.concatenate(pred_list)
confusion = confusion_matrix(label_list, pred_list)
list_diag = np.diag(confusion)
list_raw_sum = np.sum(confusion, axis=1)
each_acc = list_diag / list_raw_sum
with open('{}/epoch{}_{}_each_class_acc.csv'.format(self.arg.work_dir, epoch + 1, ln), 'w') as f:
writer = csv.writer(f)
writer.writerow(each_acc)
writer.writerows(confusion)
def start(self):
if self.arg.phase == 'train':
self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
self.global_step = self.arg.start_epoch * len(self.data_loader['train']) / self.arg.batch_size
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
self.print_log(f'# Parameters: {count_parameters(self.model)}')
for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
save_model = (((epoch + 1) % self.arg.save_interval == 0) or (
epoch + 1 == self.arg.num_epoch)) and (epoch+1) > self.arg.save_epoch
self.train(epoch, save_model=save_model)
self.eval(epoch, save_score=self.arg.save_score, loader_name=['test'])
# test the best model
weights_path = glob.glob(os.path.join(self.arg.work_dir, 'runs-'+str(self.best_acc_epoch)+'*'))[0]
weights = torch.load(weights_path)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
weights = OrderedDict([['module.'+k, v.cuda(self.output_device)] for k, v in weights.items()])
self.model.load_state_dict(weights)
wf = weights_path.replace('.pt', '_wrong.txt')
rf = weights_path.replace('.pt', '_right.txt')
self.arg.print_log = False
self.eval(epoch=0, save_score=True, loader_name=['test'], wrong_file=wf, result_file=rf)
self.arg.print_log = True
num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
self.print_log(f'Best accuracy: {self.best_acc}')
self.print_log(f'Epoch number: {self.best_acc_epoch}')
self.print_log(f'Model name: {self.arg.work_dir}')
self.print_log(f'Model total number of params: {num_params}')
self.print_log(f'Weight decay: {self.arg.weight_decay}')
self.print_log(f'Base LR: {self.arg.base_lr}')
self.print_log(f'Batch Size: {self.arg.batch_size}')
self.print_log(f'Test Batch Size: {self.arg.test_batch_size}')
self.print_log(f'seed: {self.arg.seed}')
elif self.arg.phase == 'test':
wf = self.arg.weights.replace('.pt', '_wrong.txt')
rf = self.arg.weights.replace('.pt', '_right.txt')
if self.arg.weights is None:
raise ValueError('Please appoint --weights.')
self.arg.print_log = False
self.print_log('Model: {}.'.format(self.arg.model))
self.print_log('Weights: {}.'.format(self.arg.weights))
self.eval(epoch=0, save_score=self.arg.save_score, loader_name=['test'], wrong_file=wf, result_file=rf)
self.print_log('Done.\n')
if __name__ == '__main__':
    parser = get_parser()
    # load arg from config file
    p = parser.parse_args()
    if p.config is not None:
        with open(p.config, 'r') as f:
            default_arg = yaml.load(f, Loader=yaml.FullLoader)
        # Reject config keys that the parser does not know about.
        key = vars(p).keys()
        for k in default_arg.keys():
            if k not in key:
                print('WRONG ARG: {}'.format(k))
                assert (k in key)
        # Config values become defaults; explicit CLI flags still win below.
        parser.set_defaults(**default_arg)
    arg = parser.parse_args()
    init_seed(arg.seed)
    processor = Processor(arg)
    processor.start()
| 23,323 | 38.2 | 180 | py |
SkeletonGCL | SkeletonGCL-main/torchlight/setup.py | from setuptools import find_packages, setup
# Minimal packaging metadata for the torchlight helper library (no runtime deps).
setup(
    name='torchlight',
    version='1.0',
    description='A mini framework for pytorch',
    packages=find_packages(),
    install_requires=[])
| 197 | 21 | 47 | py |
SkeletonGCL | SkeletonGCL-main/torchlight/torchlight/util.py | #!/usr/bin/env python
import argparse
import os
import sys
import traceback
import time
import pickle
from collections import OrderedDict
import yaml
import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
# from torchpack.runner.hooks import PaviLogger
class IO():
    """Experiment I/O helper: logging, timing, and model/arg (de)serialization.

    All files are written under ``work_dir``. ``save_log``/``print_log`` toggle
    file and screen logging respectively.
    """
    def __init__(self, work_dir, save_log=True, print_log=True):
        self.work_dir = work_dir
        self.save_log = save_log
        self.print_to_screen = print_log
        self.cur_time = time.time()
        self.split_timer = {}
        self.pavi_logger = None
        self.session_file = None
        self.model_text = ''
    def log(self, *args, **kwargs):
        """Best-effort forwarding to a PAVI logging server; failures are ignored.

        NOTE(review): PaviLogger's import is commented out at file top — this
        path only works when that dependency is restored.
        """
        try:
            if self.pavi_logger is None:
                url = 'http://pavi.parrotsdnn.org/log'
                with open(self.session_file, 'r') as f:
                    info = dict(session_file=self.session_file, session_text=f.read(), model_text=self.model_text)
                self.pavi_logger = PaviLogger(url)
                self.pavi_logger.connect(self.work_dir, info=info)
            self.pavi_logger.log(*args, **kwargs)
        except Exception:  # deliberate best-effort: remote logging must never crash training
            pass
    def load_model(self, model, **model_args):
        """Import the dotted class path *model* and instantiate it with *model_args*."""
        Model = import_class(model)
        model = Model(**model_args)
        self.model_text += '\n\n' + str(model)
        return model
    def load_weights(self, model, weights_path, ignore_weights=None, fix_weights=False):
        """Load a checkpoint into *model*, dropping keys matching *ignore_weights*.

        Tolerates partially matching state dicts; optionally freezes the loaded
        parameters when *fix_weights* is True. Returns the model.
        """
        if ignore_weights is None:
            ignore_weights = []
        if isinstance(ignore_weights, str):
            ignore_weights = [ignore_weights]
        self.print_log(f'Load weights from {weights_path}.')
        weights = torch.load(weights_path)
        # Strip any DataParallel 'module.' prefixes.
        weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in weights.items()])
        # filter weights
        for i in ignore_weights:
            ignore_name = list()
            for w in weights:
                if w.find(i) == 0:
                    ignore_name.append(w)
            for n in ignore_name:
                weights.pop(n)
                self.print_log(f'Filter [{i}] remove weights [{n}].')
        for w in weights:
            self.print_log(f'Load weights [{w}].')
        try:
            model.load_state_dict(weights)
        except (KeyError, RuntimeError):
            # Partial restore: keep current values for keys absent from the checkpoint.
            state = model.state_dict()
            diff = list(set(state.keys()).difference(set(weights.keys())))
            for d in diff:
                self.print_log(f'Can not find weights [{d}].')
            state.update(weights)
            model.load_state_dict(state)
        if fix_weights:
            for name, param in model.named_parameters():
                if name in weights.keys():
                    param.requires_grad = False
                    self.print_log(f'Fix weights [{name}].')
        return model
    def save_pkl(self, result, filename):
        """Pickle *result* to ``<work_dir>/<filename>``."""
        # BUGFIX: the path previously contained a corrupted literal instead of
        # the *filename* parameter, so every call wrote to the same file.
        with open(f'{self.work_dir}/{filename}', 'wb') as f:
            pickle.dump(result, f)
    def save_h5(self, result, filename, append=False):
        """Write the dict *result* to an HDF5 file under ``work_dir`` (one dataset per key)."""
        # BUGFIX: restored the *filename* parameter in the path (see save_pkl).
        with h5py.File(f'{self.work_dir}/{filename}', 'a' if append else 'w') as f:
            for k in result.keys():
                f[k] = result[k]
    def save_model(self, model, name):
        """Save the model's state dict (CPU tensors, 'module.' prefixes removed)."""
        model_path = f'{self.work_dir}/{name}'
        # symlink = f'{self.work_dir}/latest_model.pt'
        state_dict = model.state_dict()
        weights = OrderedDict([[''.join(k.split('module.')), v.cpu()] for k, v in state_dict.items()])
        torch.save(weights, model_path)
        # os.symlink(model_path, symlink)
        self.print_log(f'The model has been saved as {model_path}.')
    def save_arg(self, arg):
        """Dump parsed arguments to ``<work_dir>/config.yaml`` (with the command line)."""
        self.session_file = f'{self.work_dir}/config.yaml'
        # save arg
        arg_dict = vars(arg)
        if not os.path.exists(self.work_dir):
            os.makedirs(self.work_dir)
        with open(self.session_file, 'w') as f:
            f.write(f"# command line: {' '.join(sys.argv)}\n\n")
            yaml.dump(arg_dict, f, default_flow_style=False, indent=4)
    def print_log(self, str, print_time=True):
        """Print *str* (optionally timestamped) to screen and/or ``log.txt``."""
        if print_time:
            # localtime = time.asctime(time.localtime(time.time()))
            str = time.strftime("[%m.%d.%y|%X] ", time.localtime()) + str
        if self.print_to_screen:
            print(str)
        if self.save_log:
            with open(f'{self.work_dir}/log.txt', 'a') as f:
                print(str, file=f)
    def init_timer(self, *name):
        """Reset the stopwatch and create one accumulator per given name."""
        self.record_time()
        self.split_timer = {k: 0.0000001 for k in name}
    def check_time(self, name):
        """Add the time since the last check to the accumulator *name*."""
        self.split_timer[name] += self.split_time()
    def record_time(self):
        """Remember and return the current timestamp."""
        self.cur_time = time.time()
        return self.cur_time
    def split_time(self):
        """Return seconds elapsed since the last record; restart the stopwatch."""
        split_time = time.time() - self.cur_time
        self.record_time()
        return split_time
    def print_timer(self):
        """Log each timer's share (percent) and absolute seconds."""
        proportion = {
            k: f'{int(round(v * 100 / sum(self.split_timer.values()))):02d}%'
            for k, v in self.split_timer.items()
        }
        self.print_log(f'Time consumption:')
        for k in proportion:
            self.print_log(f'\t[{k}][{proportion[k]}]: {self.split_timer[k]:.4f}')
def str2bool(v):
    """argparse type converter: map common yes/no strings to a bool."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def str2dict(v):
    # Parse "k1=v1, k2=v2" strings into a dict by evaluating dict(...) call syntax.
    # NOTE(review): eval on user-supplied text — acceptable only for trusted CLIs.
    return eval(f'dict({v})') #pylint: disable=W0123
def _import_class_0(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def import_class(import_str):
    """Import the module part of *import_str* and return the named attribute.

    Raises ImportError (with the formatted traceback embedded) when the
    attribute does not exist on the imported module.
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    module = sys.modules[mod_str]
    try:
        return getattr(module, class_str)
    except AttributeError:
        detail = traceback.format_exception(*sys.exc_info())
        raise ImportError('Class %s cannot be found (%s)' % (class_str, detail))
class DictAction(argparse.Action):
    """argparse action that merges a "k=v, ..." string into the dict default."""
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # Exactly one value per occurrence; multi-arg forms are not supported.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(DictAction, self).__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        parsed = eval(f'dict({values})') #pylint: disable=W0123
        merged = getattr(namespace, self.dest)
        # Later command-line occurrences override earlier/default entries.
        merged.update(parsed)
        setattr(namespace, self.dest, merged)
| 6,649 | 32.756345 | 117 | py |
SkeletonGCL | SkeletonGCL-main/torchlight/torchlight/gpu.py | import os
import torch
def visible_gpu(gpus):
    """Restrict CUDA to *gpus* (an int or an iterable of ints).

    Sets CUDA_VISIBLE_DEVICES and returns the re-indexed ids [0, 1, ...] as
    seen by this process.
    """
    ids = [gpus] if isinstance(gpus, int) else list(gpus)
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(g) for g in ids)
    return list(range(len(ids)))
def ngpu(gpus):
    """Return how many GPUs *gpus* denotes (single id counts as one)."""
    if isinstance(gpus, int):
        return 1
    return len(list(gpus))
def occupy_gpu(gpus=None):
    """
    make program appear on nvidia-smi.
    """
    # Allocating one tiny tensor per device forces CUDA context creation.
    if gpus is None:
        torch.zeros(1).cuda()
        return
    for g in ([gpus] if isinstance(gpus, int) else list(gpus)):
        torch.zeros(1).cuda(g)
| 750 | 19.861111 | 71 | py |
SkeletonGCL | SkeletonGCL-main/model/agcn.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
    """Resolve a dotted path (e.g. 'graph.ntu_rgb_d.Graph') to the target object."""
    parts = name.split('.')
    target = __import__(parts[0])
    for part in parts[1:]:
        target = getattr(target, part)
    return target
def conv_branch_init(conv, branches):
    """Initialise a branch conv: scaled normal weights, zero bias.

    The std accounts for the number of parallel branches so that the summed
    branch outputs keep approximately unit variance.
    """
    weight = conv.weight
    n = weight.size(0)
    k1 = weight.size(1)
    k2 = weight.size(2)
    nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
    # BUGFIX: bias-free convs (e.g. ones followed by BatchNorm) previously
    # crashed here; baseline.py's version already guards — made consistent.
    if conv.bias is not None:
        nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming-init a conv's weight and zero its bias, tolerating missing ones.

    BUGFIX: previously assumed a bias was always present (crashed on
    ``bias=False`` convs); baseline.py's version guards — made consistent.
    """
    if conv.weight is not None:
        nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    if conv.bias is not None:
        nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
    """Constant-initialise a BatchNorm layer: bias = 0, weight = *scale*."""
    nn.init.constant_(bn.bias, 0)
    nn.init.constant_(bn.weight, scale)
class unit_tcn(nn.Module):
    """Temporal convolution over the T axis (kernel k x 1) followed by BatchNorm."""
    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        pad = (kernel_size - 1) // 2  # SAME padding for odd kernels
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),
                              padding=(pad, 0), stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()  # kept for parity with original; forward does not apply it
        conv_init(self.conv)
        bn_init(self.bn, 1)
    def forward(self, x):
        return self.bn(self.conv(x))
class unit_gcn(nn.Module):
    """Adaptive graph convolution (2s-AGCN style) that also returns the
    data-dependent attention graphs for contrastive learning.

    ``A`` is a (num_subset, V, V) adjacency stack; a learned additive offset
    ``PA`` and an embedding-based attention graph refine it per sample.
    """
    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gcn, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        # Learned additive adjacency offset, started near zero.
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant_(self.PA, 1e-6)
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset
        self.conv_a = nn.ModuleList()
        self.conv_b = nn.ModuleList()
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
        if in_channels != out_channels:
            # Project the residual when channel counts differ.
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.relu = nn.ReLU()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero BN scale so the block starts close to the residual path.
        bn_init(self.bn, 1e-6)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)
    def forward(self, x):
        N, C, T, V = x.size()
        A = self.A.cuda(x.get_device())
        A = A + self.PA
        y = None
        graph_list = []
        for i in range(self.num_subset):
            # Embed x twice and take their inner product -> raw N x V x V graph.
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
            A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
            graph = torch.matmul(A1, A2)
            # Raw (pre-softmax) graphs are collected for the contrastive loss.
            graph_list.append(graph)
            A1 = self.soft(graph / A1.size(-1))  # N V V
            A1 = A1 + A[i]
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z
        y = self.bn(y)
        y += self.down(x)
        return self.relu(y), torch.stack(graph_list, 1)
class TCN_GCN_unit(nn.Module):
    """One ST-GCN block: adaptive graph conv, then temporal conv, plus residual."""
    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            # Shapes already match: identity shortcut.
            self.residual = lambda x: x
        else:
            # Project the shortcut to the right channel/temporal size.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
    def forward(self, x):
        y, graph = self.gcn1(x)
        out = self.tcn1(y) + self.residual(x)
        # Propagate the inferred graphs so Model can expose them.
        return self.relu(out), graph
class Model(nn.Module):
    """2s-AGCN backbone that also returns the last block's inferred graphs.

    Input: (N, C, T, V, M) skeleton tensor. Output: (class logits,
    flattened per-sample graphs from the final block).
    """
    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3, drop_out=0, adaptive=True):
        super(Model, self).__init__()
        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)
        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
        self.l1 = TCN_GCN_unit(3, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)
        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)
    def forward(self, x):
        N, C, T, V, M = x.size()
        # Fold persons/joints into the channel dim for the 1D BatchNorm.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        # Back to (N*M, C, T, V) for the graph-conv stack.
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
        x, _ = self.l1(x)
        x, _ = self.l2(x)
        x, _ = self.l3(x)
        x, _ = self.l4(x)
        x, _ = self.l5(x)
        x, _ = self.l6(x)
        x, _ = self.l7(x)
        x, _ = self.l8(x)
        x, _ = self.l9(x)
        # Only the last block's graphs are kept for contrastive learning.
        x, graph = self.l10(x)
        # N*M,C,T,V
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)
        # Average graphs over persons, flatten per sample.
        graph = graph.view(N, M, -1, V, V).mean(1).view(N, -1)
        # BUGFIX: this return line was fused with dump-metadata junk in the
        # source; restored to valid Python.
        return self.fc(x), graph
SkeletonGCL | SkeletonGCL-main/model/loss.py | from importlib_metadata import requires
import torch
import torch.nn as nn
from torch import einsum, positive
import math
import random
class InfoNCEGraph(nn.Module):
    """InfoNCE-style contrastive loss over flattened graphs with a memory bank.

    A linear head projects graphs to ``out_channels``; L2-normalised embeddings
    are stored in a non-trainable bank indexed by dataset position. For each
    sample, the hardest positives (lowest similarity, same class) are
    contrasted against both the hardest and randomly sampled negatives.

    NOTE(review): ``label_all=[]`` is kept for interface compatibility, but the
    default is unusable (``torch.from_numpy`` needs an ndarray) — always pass
    the dataset's label array.
    """
    def __init__(self, in_channels=128, out_channels=256, mem_size=512, positive_num=128, negative_num=512, T=0.8, class_num=60, label_all=[]):
        super(InfoNCEGraph, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.mem_size = mem_size
        self.positive_num = positive_num
        self.negative_num = negative_num
        self.T = T  # softmax temperature
        self.trans = nn.Linear(in_channels, out_channels)
        # Memory bank of normalised embeddings; updated in forward, not trained.
        self.Bank = nn.Parameter(
            torch.zeros((mem_size, out_channels)), requires_grad=False
        )
        self.label_all = torch.from_numpy(label_all)
        nn.init.normal_(self.trans.weight, 0, math.sqrt(2. / class_num))
        nn.init.zeros_(self.trans.bias)
        # 1 once a bank slot has been written at least once this run.
        self.bank_flag = nn.Parameter(
            torch.zeros(len(self.label_all)), requires_grad=False
        )
        self.cross_entropy = nn.CrossEntropyLoss()
    def forward(self, f, label, input_index):
        # f: n c   label: n   input_index: dataset positions of this batch
        n, _ = f.size()
        f = self.trans(f)
        f_norm = f.norm(dim=-1, p=2, keepdim=True)
        f_normed = f / f_norm
        # Refresh this batch's bank slots with detached embeddings.
        self.Bank[input_index] = f_normed.detach()
        self.bank_flag[input_index] = 1
        # Cosine similarities between the batch and the whole bank.
        all_pairs = einsum('n c, m c -> n m', f_normed, self.Bank)
        bank_label = self.label_all.to(label.device)  # mem_size
        positive_mask = (label.view(n, 1) == bank_label.view(1, -1)).view(n, self.mem_size)  # n mem_size
        negative_mask = (1-positive_mask.float())
        # Only slots that were actually written count.
        positive_mask = positive_mask * self.bank_flag
        negative_mask = negative_mask * self.bank_flag
        combined_pairs_list = []
        for i in range(n):
            # Skip samples without enough populated positives/negatives yet.
            if (positive_mask[i].sum(dim=-1) < self.positive_num) or (negative_mask[i].sum(dim=-1) < self.negative_num):
                continue
            positive_pairs = torch.masked_select(all_pairs[i], mask=positive_mask[i].bool()).view(-1)
            # Hard positives: lowest similarity among same-class entries.
            positive_pairs_hard = positive_pairs.sort(dim=-1, descending=False)[0][:self.positive_num].view(1, self.positive_num, 1)
            negative_pairs = torch.masked_select(all_pairs[i], mask=negative_mask[i].bool()).view(-1)
            # Hard negatives: highest similarity among other-class entries.
            negative_pairs_hard = negative_pairs.sort(dim=-1, descending=True)[0][:self.negative_num].view(1, 1, self.negative_num)\
                .expand(-1, self.positive_num, -1)
            idx = random.sample(list(range(len(negative_pairs))), k=self.negative_num)
            negative_pairs_random = negative_pairs[idx].view(1, 1, self.negative_num).expand(-1, self.positive_num, -1)
            # Column 0 is the positive logit -> target class 0 below.
            combined_pairs_hard2hard = torch.cat([positive_pairs_hard, negative_pairs_hard], -1).view(self.positive_num, -1)
            combined_pairs_hard2random = torch.cat([positive_pairs_hard, negative_pairs_random], -1).view(self.positive_num, -1)
            combined_pairs = torch.cat([combined_pairs_hard2hard, combined_pairs_hard2random], 0)
            combined_pairs_list.append((combined_pairs))
        if len(combined_pairs_list) == 0:
            # Bank not warm enough: contribute no gradient this step.
            return torch.zeros(1, device=f.device)
        combined_pairs = torch.cat(combined_pairs_list, 0)
        combined_label = torch.zeros(combined_pairs.size(0), device=f.device).long()
        loss = self.cross_entropy(combined_pairs/self.T, combined_label)
        # BUGFIX: this return line was fused with dump-metadata junk in the
        # source; restored to valid Python.
        return loss
SkeletonGCL | SkeletonGCL-main/model/baseline.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
    """Import the root module of a dotted path and walk down to the target."""
    pieces = name.split('.')
    result = __import__(pieces[0])
    for piece in pieces[1:]:
        result = getattr(result, piece)
    return result
def conv_branch_init(conv, branches):
    """Branch-aware normal init: std scaled by fan and branch count; zero bias."""
    w = conv.weight
    out_c = w.size(0)
    in_c = w.size(1)
    kt = w.size(2)
    nn.init.normal_(w, 0, math.sqrt(2. / (out_c * in_c * kt * branches)))
    if conv.bias is not None:
        # Bias-free convs (e.g. followed by BatchNorm) are left untouched.
        nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming fan-out init for the conv weight, zero bias; both are optional."""
    weight, bias = conv.weight, conv.bias
    if weight is not None:
        nn.init.kaiming_normal_(weight, mode='fan_out')
    if bias is not None:
        nn.init.constant_(bias, 0)
def bn_init(bn, scale):
    """Set a BatchNorm layer's weight to *scale* and its bias to zero."""
    nn.init.constant_(bn.bias, 0)
    nn.init.constant_(bn.weight, scale)
class unit_tcn(nn.Module):
    """Temporal-only convolution (k x 1 over T) followed by BatchNorm."""
    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1):
        super(unit_tcn, self).__init__()
        padding = (kernel_size - 1) // 2  # SAME padding for odd kernels
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(padding, 0),
                              stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)  # unused in forward; kept for parity
        conv_init(self.conv)
        bn_init(self.bn, 1)
    def forward(self, x):
        return self.bn(self.conv(x))
class unit_gcn(nn.Module):
    """Graph convolution over a (num_subset, V, V) adjacency stack.

    With ``adaptive=True`` the adjacency itself is a learned parameter,
    L2-normalised per column in the forward pass; otherwise the fixed graph
    ``A`` is used.
    """
    def __init__(self, in_channels, out_channels, A, adaptive=True):
        super(unit_gcn, self).__init__()
        self.out_c = out_channels
        self.in_c = in_channels
        self.num_subset = A.shape[0]
        self.adaptive = adaptive
        if adaptive:
            # Fully learnable adjacency, initialised from A.
            self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)), requires_grad=True)
        else:
            self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
        if in_channels != out_channels:
            # Project the residual when channel counts differ.
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero BN scale: the block starts close to its residual path.
        bn_init(self.bn, 1e-6)
    def L2_norm(self, A):
        # A:N,V,V
        # Column-wise L2 normalisation; epsilon avoids division by zero.
        A_norm = torch.norm(A, 2, dim=1, keepdim=True) + 1e-4  # N,1,V
        A = A / A_norm
        return A
    def forward(self, x):
        N, C, T, V = x.size()
        y = None
        if self.adaptive:
            A = self.PA
            A = self.L2_norm(A)
        else:
            A = self.A.cuda(x.get_device())
        for i in range(self.num_subset):
            A1 = A[i]
            A2 = x.view(N, C * T, V)
            # Aggregate joints via A1, then mix channels with a 1x1 conv.
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z
        y = self.bn(y)
        y += self.down(x)
        y = self.relu(y)
        return y
class TCN_GCN_unit(nn.Module):
    """One spatial-temporal block: graph conv, temporal conv, residual, ReLU."""
    def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU(inplace=True)
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            # Identity shortcut when shapes already match.
            self.residual = lambda x: x
        else:
            # 1x1 temporal conv projects the shortcut to the target shape.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
    def forward(self, x):
        shortcut = self.residual(x)
        return self.relu(self.tcn1(self.gcn1(x)) + shortcut)
class Model(nn.Module):
    """Baseline ST-GCN classifier with identity-initialised adaptive graphs.

    Input: (N, C, T, V, M) skeleton tensor; output: class logits (N, num_class).
    Unlike agcn.Model, the graph stack starts from stacked identity matrices.
    """
    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
                 drop_out=0, adaptive=True, num_set=3):
        super(Model, self).__init__()
        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)
        # NOTE(review): the imported Graph is kept only as an attribute here;
        # the conv stack below uses identity adjacencies instead.
        A = np.stack([np.eye(num_point)] * num_set, axis=0)
        self.num_class = num_class
        self.num_point = num_point
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
        self.l1 = TCN_GCN_unit(3, 64, A, residual=False, adaptive=adaptive)
        self.l2 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
        self.l3 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
        self.l4 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2, adaptive=adaptive)
        self.l6 = TCN_GCN_unit(128, 128, A, adaptive=adaptive)
        self.l7 = TCN_GCN_unit(128, 128, A, adaptive=adaptive)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2, adaptive=adaptive)
        self.l9 = TCN_GCN_unit(256, 256, A, adaptive=adaptive)
        self.l10 = TCN_GCN_unit(256, 256, A, adaptive=adaptive)
        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)
        if drop_out:
            self.drop_out = nn.Dropout(drop_out)
        else:
            self.drop_out = lambda x: x
    def forward(self, x):
        N, C, T, V, M = x.size()
        # Fold persons/joints into the channel dim for the 1D BatchNorm.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        # Back to (N*M, C, T, V) for the graph-conv stack.
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)
        # N*M,C,T,V
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        # Global average over space-time, then over persons.
        x = x.mean(3).mean(1)
        x = self.drop_out(x)
        return self.fc(x)
| 6,316 | 31.06599 | 110 | py |
SkeletonGCL | SkeletonGCL-main/model/ctrgcn.py | import math
import pdb
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
    """Turn a dotted path string into the object it names."""
    segments = name.split('.')
    node = __import__(segments[0])
    for segment in segments[1:]:
        node = getattr(node, segment)
    return node
def conv_branch_init(conv, branches):
    """Initialise a branch conv: branch-scaled normal weights, zero bias.

    BUGFIX: guard the bias like this file's ``conv_init`` already does —
    previously a ``bias=False`` conv crashed here.
    """
    weight = conv.weight
    n = weight.size(0)
    k1 = weight.size(1)
    k2 = weight.size(2)
    nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
    if conv.bias is not None:
        nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming-initialise a conv's weight and zero its bias when they exist."""
    w, b = conv.weight, conv.bias
    if w is not None:
        nn.init.kaiming_normal_(w, mode='fan_out')
    if b is not None:
        nn.init.constant_(b, 0)
def bn_init(bn, scale):
    """Constant init for BatchNorm: weight <- *scale*, bias <- 0."""
    nn.init.constant_(bn.bias, 0)
    nn.init.constant_(bn.weight, scale)
def weights_init(m):
    """Init hook for ``nn.Module.apply``: Kaiming for Conv*, N(1, 0.02) for BatchNorm*.

    Modules whose class name matches neither pattern are left untouched.
    """
    kind = type(m).__name__
    if 'Conv' in kind:
        if hasattr(m, 'weight'):
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
        if getattr(m, 'bias', None) is not None and isinstance(m.bias, torch.Tensor):
            nn.init.constant_(m.bias, 0)
    elif 'BatchNorm' in kind:
        if getattr(m, 'weight', None) is not None:
            m.weight.data.normal_(1.0, 0.02)
        if getattr(m, 'bias', None) is not None:
            m.bias.data.fill_(0)
class TemporalConv(nn.Module):
    """Dilated temporal convolution (k x 1 over the T axis) followed by BatchNorm."""
    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1, dilation=1):
        super(TemporalConv, self).__init__()
        # Padding chosen so T is preserved at stride 1 for the dilated kernel.
        pad = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(pad, 0),
                              stride=(stride, 1),
                              dilation=(dilation, 1))
        self.bn = nn.BatchNorm2d(out_channels)
    def forward(self, x):
        return self.bn(self.conv(x))
class MultiScale_TemporalConv(nn.Module):
    """Multi-branch temporal convolution (CTR-GCN style).

    ``len(dilations)`` dilated-conv branches plus a max-pool branch and a 1x1
    branch split ``out_channels`` evenly; outputs are concatenated on the
    channel dim and a residual is added.

    NOTE(review): ``dilations=[1,2,3,4]`` is a mutable default, but it is
    never mutated here.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 dilations=[1,2,3,4],
                 residual=True,
                 residual_kernel_size=1):
        super().__init__()
        assert out_channels % (len(dilations) + 2) == 0, '# out channels should be multiples of # branches'
        # Multiple branches of temporal convolution
        self.num_branches = len(dilations) + 2
        branch_channels = out_channels // self.num_branches
        if type(kernel_size) == list:
            assert len(kernel_size) == len(dilations)
        else:
            # One shared kernel size per dilation branch.
            kernel_size = [kernel_size]*len(dilations)
        # Temporal Convolution branches
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    branch_channels,
                    kernel_size=1,
                    padding=0),
                nn.BatchNorm2d(branch_channels),
                nn.ReLU(inplace=True),
                TemporalConv(
                    branch_channels,
                    branch_channels,
                    kernel_size=ks,
                    stride=stride,
                    dilation=dilation),
            )
            for ks, dilation in zip(kernel_size, dilations)
        ])
        # Additional Max & 1x1 branch
        self.branches.append(nn.Sequential(
            nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
            nn.BatchNorm2d(branch_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,1), stride=(stride,1), padding=(1,0)),
            nn.BatchNorm2d(branch_channels)  # why add another BN here? (question kept from original author)
        ))
        self.branches.append(nn.Sequential(
            nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0, stride=(stride,1)),
            nn.BatchNorm2d(branch_channels)
        ))
        # Residual connection
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = TemporalConv(in_channels, out_channels, kernel_size=residual_kernel_size, stride=stride)
        # initialize
        self.apply(weights_init)
    def forward(self, x):
        # Input dim: (N,C,T,V)
        res = self.residual(x)
        branch_outs = []
        for tempconv in self.branches:
            out = tempconv(x)
            branch_outs.append(out)
        # Concatenate branch outputs on the channel dim, then add the residual.
        out = torch.cat(branch_outs, dim=1)
        out += res
        return out
class CTRGC(nn.Module):
    """Channel-wise Topology Refinement Graph Convolution.

    Infers a per-sample joint-affinity map from the input features and
    combines it (scaled by ``alpha``) with the shared adjacency ``A``
    before aggregating joint features.
    """

    def __init__(self, in_channels, out_channels, rel_reduction=8, mid_reduction=1):
        super(CTRGC, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Very small inputs (raw 3D joints / 9-channel bone features) get
        # fixed internal widths instead of reduction ratios.
        if in_channels == 3 or in_channels == 9:
            self.rel_channels = 8
            self.mid_channels = 16
        else:
            self.rel_channels = in_channels // rel_reduction
            self.mid_channels = in_channels // mid_reduction
        self.conv1 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
        self.conv2 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
        self.conv3 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
        self.conv4 = nn.Conv2d(self.rel_channels, self.out_channels, kernel_size=1)
        self.tanh = nn.Tanh()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

    def forward(self, x, A=None, alpha=1):
        # x: (N, C, T, V)
        query, key, value = self.conv1(x), self.conv2(x), self.conv3(x)
        # Pairwise differences of time-averaged features -> (N, rel, V, V).
        affinity = self.tanh(query.mean(-2).unsqueeze(-1) - key.mean(-2).unsqueeze(-2))
        affinity = self.conv4(affinity)
        # Refine the shared topology with the learned affinity.
        refined = affinity * alpha + (A.unsqueeze(0).unsqueeze(0) if A is not None else 0)  # N,C,V,V
        out = torch.einsum('ncuv,nctv->nctu', refined, value)
        return out, affinity
class unit_tcn(nn.Module):
    """Plain temporal unit: (kernel_size x 1) convolution followed by BN.

    NOTE: ``self.relu`` is created but not applied in ``forward``; the
    activation is left to the caller (matches the reference code).
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        # "Same" padding along the temporal axis.
        pad = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(pad, 0),
                              stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        return self.bn(self.conv(x))
class unit_gcn(nn.Module):
    """Spatial graph-convolution unit built from per-subset CTRGC branches.

    Each adjacency subset ``A[i]`` drives one CTRGC branch; branch outputs
    are summed, batch-normalized, combined with a (possibly projected)
    residual, and passed through ReLU. Also returns the learned per-subset
    graphs stacked along dim 1.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, adaptive=True, residual=True):
        super(unit_gcn, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        self.out_c = out_channels
        self.in_c = in_channels
        self.adaptive = adaptive
        self.num_subset = A.shape[0]

        # One CTRGC branch per adjacency subset.
        self.convs = nn.ModuleList(
            CTRGC(in_channels, out_channels) for _ in range(self.num_subset))

        # Residual path: projection, identity, or zero.
        if residual:
            if in_channels != out_channels:
                self.down = nn.Sequential(
                    nn.Conv2d(in_channels, out_channels, 1),
                    nn.BatchNorm2d(out_channels)
                )
            else:
                self.down = lambda x: x
        else:
            self.down = lambda x: 0

        # Adjacency: learnable parameter when adaptive, frozen tensor otherwise.
        if self.adaptive:
            self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        else:
            self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.alpha = nn.Parameter(torch.zeros(1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.relu = nn.ReLU(inplace=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero init of the output BN so the unit starts close to identity.
        bn_init(self.bn, 1e-6)

    def forward(self, x):
        A = self.PA if self.adaptive else self.A.cuda(x.get_device())
        y = None
        graphs = []
        for conv, A_i in zip(self.convs, A):
            z, g = conv(x, A_i, self.alpha)
            graphs.append(g)
            y = z if y is None else y + z
        y = self.bn(y)
        y += self.down(x)
        y = self.relu(y)
        return y, torch.stack(graphs, 1)
class TCN_GCN_unit(nn.Module):
    """One GCN + multi-scale TCN stage with a residual connection.

    Returns the stage output and the learned graph produced by the GCN.
    """

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True, kernel_size=5, dilations=[1,2]):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
        self.tcn1 = MultiScale_TemporalConv(out_channels, out_channels,
                                            kernel_size=kernel_size,
                                            stride=stride,
                                            dilations=dilations,
                                            residual=False)
        self.relu = nn.ReLU(inplace=True)

        # Residual path: zero, identity, or a strided 1x1 temporal conv.
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        gcn_out, graph = self.gcn1(x)
        out = self.relu(self.tcn1(gcn_out) + self.residual(x))
        return out, graph
class Model(nn.Module):
def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
drop_out=0, adaptive=True):
super(Model, self).__init__()
if graph is None:
raise ValueError()
else:
Graph = import_class(graph)
self.graph = Graph(**graph_args)
A = self.graph.A # 3,25,25
self.num_class = num_class
self.num_point = num_point
self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
base_channel = 64
self.l1 = TCN_GCN_unit(in_channels, base_channel, A, residual=False, adaptive=adaptive)
self.l2 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l3 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l4 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l5 = TCN_GCN_unit(base_channel, base_channel*2, A, stride=2, adaptive=adaptive)
self.l6 = TCN_GCN_unit(base_channel*2, base_channel*2, A, adaptive=adaptive)
self.l7 = TCN_GCN_unit(base_channel*2, base_channel*2, A, adaptive=adaptive)
self.l8 = TCN_GCN_unit(base_channel*2, base_channel*4, A, stride=2, adaptive=adaptive)
self.l9 = TCN_GCN_unit(base_channel*4, base_channel*4, A, adaptive=adaptive)
self.l10 = TCN_GCN_unit(base_channel*4, base_channel*4, A, adaptive=adaptive)
self.fc = nn.Linear(base_channel*4, num_class)
nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
bn_init(self.data_bn, 1)
if drop_out:
self.drop_out = nn.Dropout(drop_out)
else:
self.drop_out = lambda x: x
def partDivison(self, graph):
_, k, u, v = graph.size() # n k u v
head = [2, 3]
left_arm = [4, 5, 6, 7, 21, 22]
right_arm = [8, 9, 10, 11, 23, 24]
torso = [0, 1, 20]
left_leg = [12, 13, 14, 15]
right_leg = [16, 17, 18, 19]
graph_list = []
part_list = [head, torso, right_arm, left_arm, right_leg, left_leg]
for part in part_list:
part_grah = graph[:,:,part,:].mean(dim=2, keepdim=True)
graph_list.append(part_grah)
graph = torch.cat(graph_list, 2)
graph_list = []
for part in part_list:
part_grah = graph[:,:,:,part].mean(dim=-1, keepdim=True)
graph_list.append(part_grah)
return torch.cat(graph_list, -1)
def forward(self, x):
if len(x.shape) == 3:
N, T, VC = x.shape
x = x.view(N, T, self.num_point, -1).permute(0, 3, 1, 2).contiguous().unsqueeze(-1)
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
x, _ = self.l1(x)
x, _ = self.l2(x)
x, _ = self.l3(x)
x, _ = self.l4(x)
x, _ = self.l5(x)
x, _ = self.l6(x)
x, _ = self.l7(x)
x, _ = self.l8(x)
x, _ = self.l9(x)
x, graph = self.l10(x)
# N*M,C,T,V
c_new = x.size(1)
x = x.view(N, M, c_new, -1)
x = x.mean(3).mean(1)
x = self.drop_out(x)
graph2 = graph.view(N, M, -1, c_new, V, V)
# graph4 = torch.einsum('n m k c u v, n m k c v l -> n m k c u l', graph2, graph2)
graph2 = graph2.view(N, M, -1, c_new, V, V).mean(1).mean(2).view(N, -1)
# graph4 = graph4.view(N, M, -1, c_new, V, V).mean(1).mean(2).view(N, -1)
# graph = torch.cat([graph2, graph4], -1)
return self.fc(x), graph2 | 13,345 | 35.664835 | 132 | py |
import numpy as np
import pickle
import json
import random
import math
from torch.utils.data import Dataset
class Feeder(Dataset):
def __init__(self, data_path, label_path, repeat=1, random_choose=False, random_shift=False, random_move=False,
window_size=-1, normalization=False, debug=False, use_mmap=True):
if 'val' in label_path:
self.train_val = 'val'
self.data_dict = [{"file_name": "a05_s04_e02_v03", "length": 21, "label": 5}, {"file_name": "a12_s09_e04_v03", "length": 26, "label": 10}, {"file_name": "a03_s03_e04_v03", "length": 35, "label": 3}, {"file_name": "a08_s02_e01_v03", "length": 101, "label": 7}, {"file_name": "a03_s05_e03_v03", "length": 26, "label": 3}, {"file_name": "a12_s10_e01_v03", "length": 21, "label": 10}, {"file_name": "a01_s07_e03_v03", "length": 31, "label": 1}, {"file_name": "a03_s08_e02_v03", "length": 21, "label": 3}, {"file_name": "a11_s10_e03_v03", "length": 51, "label": 9}, {"file_name": "a11_s03_e00_v03", "length": 46, "label": 9}, {"file_name": "a03_s02_e00_v03", "length": 32, "label": 3}, {"file_name": "a11_s01_e04_v03", "length": 16, "label": 9}, {"file_name": "a09_s08_e04_v03", "length": 63, "label": 8}, {"file_name": "a09_s06_e01_v03", "length": 41, "label": 8}, {"file_name": "a09_s07_e01_v03", "length": 51, "label": 8}, {"file_name": "a02_s08_e01_v03", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v03", "length": 23, "label": 1}, {"file_name": "a02_s02_e02_v03", "length": 31, "label": 2}, {"file_name": "a02_s07_e05_v03", "length": 31, "label": 2}, {"file_name": "a06_s02_e00_v03", "length": 16, "label": 6}, {"file_name": "a03_s02_e02_v03", "length": 22, "label": 3}, {"file_name": "a11_s09_e04_v03", "length": 22, "label": 9}, {"file_name": "a09_s03_e04_v03", "length": 61, "label": 8}, {"file_name": "a04_s01_e02_v03", "length": 23, "label": 4}, {"file_name": "a12_s01_e01_v03", "length": 17, "label": 10}, {"file_name": "a02_s07_e03_v03", "length": 9, "label": 2}, {"file_name": "a05_s08_e04_v03", "length": 19, "label": 5}, {"file_name": "a02_s07_e02_v03", "length": 31, "label": 2}, {"file_name": "a04_s07_e02_v03", "length": 16, "label": 4}, {"file_name": "a01_s08_e03_v03", "length": 27, "label": 1}, {"file_name": "a08_s03_e01_v03", "length": 68, "label": 7}, {"file_name": "a04_s08_e03_v03", "length": 21, "label": 4}, {"file_name": "a03_s10_e00_v03", "length": 17, "label": 
3}, {"file_name": "a04_s03_e03_v03", "length": 21, "label": 4}, {"file_name": "a06_s06_e02_v03", "length": 21, "label": 6}, {"file_name": "a09_s03_e00_v03", "length": 81, "label": 8}, {"file_name": "a09_s03_e03_v03", "length": 46, "label": 8}, {"file_name": "a04_s02_e02_v03", "length": 21, "label": 4}, {"file_name": "a08_s01_e02_v03", "length": 78, "label": 7}, {"file_name": "a04_s04_e00_v03", "length": 11, "label": 4}, {"file_name": "a03_s02_e03_v03", "length": 39, "label": 3}, {"file_name": "a05_s04_e00_v03", "length": 21, "label": 5}, {"file_name": "a05_s07_e03_v03", "length": 36, "label": 5}, {"file_name": "a06_s10_e00_v03", "length": 31, "label": 6}, {"file_name": "a11_s07_e00_v03", "length": 31, "label": 9}, {"file_name": "a03_s01_e01_v03", "length": 24, "label": 3}, {"file_name": "a04_s06_e01_v03", "length": 16, "label": 4}, {"file_name": "a08_s02_e04_v03", "length": 96, "label": 7}, {"file_name": "a09_s08_e03_v03", "length": 46, "label": 8}, {"file_name": "a05_s07_e00_v03", "length": 36, "label": 5}, {"file_name": "a05_s02_e02_v03", "length": 21, "label": 5}, {"file_name": "a04_s06_e04_v03", "length": 21, "label": 4}, {"file_name": "a05_s09_e03_v03", "length": 21, "label": 5}, {"file_name": "a03_s06_e02_v03", "length": 15, "label": 3}, {"file_name": "a01_s01_e00_v03", "length": 27, "label": 1}, {"file_name": "a06_s06_e03_v03", "length": 11, "label": 6}, {"file_name": "a06_s10_e02_v03", "length": 25, "label": 6}, {"file_name": "a02_s07_e04_v03", "length": 36, "label": 2}, {"file_name": "a09_s06_e00_v03", "length": 80, "label": 8}, {"file_name": "a04_s07_e04_v03", "length": 16, "label": 4}, {"file_name": "a05_s02_e01_v03", "length": 19, "label": 5}, {"file_name": "a01_s06_e04_v03", "length": 17, "label": 1}, {"file_name": "a04_s08_e01_v03", "length": 17, "label": 4}, {"file_name": "a01_s09_e00_v03", "length": 31, "label": 1}, {"file_name": "a08_s03_e03_v03", "length": 67, "label": 7}, {"file_name": "a12_s03_e00_v03", "length": 21, "label": 10}, {"file_name": 
"a11_s02_e03_v03", "length": 29, "label": 9}, {"file_name": "a12_s07_e02_v03", "length": 13, "label": 10}, {"file_name": "a05_s06_e01_v03", "length": 16, "label": 5}, {"file_name": "a06_s02_e04_v03", "length": 16, "label": 6}, {"file_name": "a06_s04_e00_v03", "length": 16, "label": 6}, {"file_name": "a05_s09_e01_v03", "length": 26, "label": 5}, {"file_name": "a11_s10_e04_v03", "length": 24, "label": 9}, {"file_name": "a03_s01_e00_v03", "length": 33, "label": 3}, {"file_name": "a11_s02_e01_v03", "length": 14, "label": 9}, {"file_name": "a04_s02_e00_v03", "length": 31, "label": 4}, {"file_name": "a11_s01_e01_v03", "length": 14, "label": 9}, {"file_name": "a02_s06_e03_v03", "length": 21, "label": 2}, {"file_name": "a12_s10_e03_v03", "length": 16, "label": 10}, {"file_name": "a01_s06_e00_v03", "length": 21, "label": 1}, {"file_name": "a05_s07_e01_v03", "length": 41, "label": 5}, {"file_name": "a01_s09_e01_v03", "length": 26, "label": 1}, {"file_name": "a02_s06_e00_v03", "length": 18, "label": 2}, {"file_name": "a11_s09_e00_v03", "length": 26, "label": 9}, {"file_name": "a03_s03_e01_v03", "length": 47, "label": 3}, {"file_name": "a03_s08_e00_v03", "length": 22, "label": 3}, {"file_name": "a06_s04_e01_v03", "length": 21, "label": 6}, {"file_name": "a02_s05_e01_v03", "length": 34, "label": 2}, {"file_name": "a03_s04_e04_v03", "length": 29, "label": 3}, {"file_name": "a01_s09_e02_v03", "length": 26, "label": 1}, {"file_name": "a08_s03_e04_v03", "length": 46, "label": 7}, {"file_name": "a01_s10_e00_v03", "length": 6, "label": 1}, {"file_name": "a01_s02_e02_v03", "length": 26, "label": 1}, {"file_name": "a09_s03_e01_v03", "length": 36, "label": 8}, {"file_name": "a05_s06_e00_v03", "length": 26, "label": 5}, {"file_name": "a05_s01_e02_v03", "length": 22, "label": 5}, {"file_name": "a02_s02_e04_v03", "length": 28, "label": 2}, {"file_name": "a06_s07_e03_v03", "length": 26, "label": 6}, {"file_name": "a04_s02_e04_v03", "length": 16, "label": 4}, {"file_name": "a02_s07_e01_v03", 
"length": 31, "label": 2}, {"file_name": "a03_s07_e03_v03", "length": 11, "label": 3}, {"file_name": "a12_s08_e01_v03", "length": 16, "label": 10}, {"file_name": "a05_s01_e03_v03", "length": 19, "label": 5}, {"file_name": "a02_s09_e02_v03", "length": 43, "label": 2}, {"file_name": "a05_s08_e03_v03", "length": 26, "label": 5}, {"file_name": "a04_s06_e00_v03", "length": 16, "label": 4}, {"file_name": "a09_s01_e02_v03", "length": 41, "label": 8}, {"file_name": "a12_s09_e00_v03", "length": 24, "label": 10}, {"file_name": "a04_s09_e02_v03", "length": 26, "label": 4}, {"file_name": "a03_s03_e03_v03", "length": 43, "label": 3}, {"file_name": "a08_s07_e03_v03", "length": 63, "label": 7}, {"file_name": "a08_s09_e02_v03", "length": 134, "label": 7}, {"file_name": "a08_s09_e00_v03", "length": 91, "label": 7}, {"file_name": "a06_s06_e04_v03", "length": 11, "label": 6}, {"file_name": "a01_s07_e04_v03", "length": 26, "label": 1}, {"file_name": "a05_s04_e01_v03", "length": 24, "label": 5}, {"file_name": "a04_s07_e00_v03", "length": 21, "label": 4}, {"file_name": "a05_s08_e01_v03", "length": 21, "label": 5}, {"file_name": "a11_s06_e03_v03", "length": 16, "label": 9}, {"file_name": "a01_s04_e03_v03", "length": 21, "label": 1}, {"file_name": "a11_s06_e04_v03", "length": 12, "label": 9}, {"file_name": "a12_s07_e03_v03", "length": 21, "label": 10}, {"file_name": "a06_s07_e05_v03", "length": 21, "label": 6}, {"file_name": "a01_s02_e04_v03", "length": 23, "label": 1}, {"file_name": "a03_s01_e03_v03", "length": 36, "label": 3}, {"file_name": "a12_s02_e02_v03", "length": 21, "label": 10}, {"file_name": "a03_s06_e01_v03", "length": 17, "label": 3}, {"file_name": "a05_s02_e03_v03", "length": 21, "label": 5}, {"file_name": "a03_s02_e04_v03", "length": 23, "label": 3}, {"file_name": "a08_s02_e03_v03", "length": 103, "label": 7}, {"file_name": "a08_s03_e02_v03", "length": 66, "label": 7}, {"file_name": "a09_s01_e01_v03", "length": 40, "label": 8}, {"file_name": "a02_s01_e01_v03", "length": 30, 
"label": 2}, {"file_name": "a08_s06_e00_v03", "length": 96, "label": 7}, {"file_name": "a12_s08_e02_v03", "length": 16, "label": 10}, {"file_name": "a02_s08_e00_v03", "length": 26, "label": 2}, {"file_name": "a01_s08_e02_v03", "length": 36, "label": 1}, {"file_name": "a09_s04_e01_v03", "length": 36, "label": 8}, {"file_name": "a04_s01_e04_v03", "length": 16, "label": 4}, {"file_name": "a08_s10_e03_v03", "length": 68, "label": 7}, {"file_name": "a02_s05_e00_v03", "length": 28, "label": 2}, {"file_name": "a06_s04_e03_v03", "length": 16, "label": 6}, {"file_name": "a06_s09_e03_v03", "length": 21, "label": 6}, {"file_name": "a05_s03_e02_v03", "length": 21, "label": 5}, {"file_name": "a06_s03_e04_v03", "length": 16, "label": 6}, {"file_name": "a06_s01_e03_v03", "length": 21, "label": 6}, {"file_name": "a11_s03_e01_v03", "length": 21, "label": 9}, {"file_name": "a09_s02_e01_v03", "length": 31, "label": 8}, {"file_name": "a02_s02_e00_v03", "length": 42, "label": 2}, {"file_name": "a01_s01_e03_v03", "length": 25, "label": 1}, {"file_name": "a08_s06_e02_v03", "length": 93, "label": 7}, {"file_name": "a12_s01_e03_v03", "length": 18, "label": 10}, {"file_name": "a09_s09_e01_v03", "length": 56, "label": 8}, {"file_name": "a04_s10_e03_v03", "length": 16, "label": 4}, {"file_name": "a06_s09_e04_v03", "length": 16, "label": 6}, {"file_name": "a02_s04_e01_v03", "length": 31, "label": 2}, {"file_name": "a12_s10_e04_v03", "length": 21, "label": 10}, {"file_name": "a06_s03_e01_v03", "length": 26, "label": 6}, {"file_name": "a02_s03_e04_v03", "length": 62, "label": 2}, {"file_name": "a11_s09_e02_v03", "length": 26, "label": 9}, {"file_name": "a08_s08_e02_v03", "length": 51, "label": 7}, {"file_name": "a03_s02_e01_v03", "length": 36, "label": 3}, {"file_name": "a12_s02_e00_v03", "length": 19, "label": 10}, {"file_name": "a12_s08_e03_v03", "length": 14, "label": 10}, {"file_name": "a02_s09_e03_v03", "length": 31, "label": 2}, {"file_name": "a09_s02_e02_v03", "length": 33, "label": 8}, 
{"file_name": "a05_s09_e04_v03", "length": 21, "label": 5}, {"file_name": "a01_s04_e00_v03", "length": 21, "label": 1}, {"file_name": "a08_s04_e03_v03", "length": 68, "label": 7}, {"file_name": "a12_s09_e03_v03", "length": 17, "label": 10}, {"file_name": "a02_s04_e03_v03", "length": 31, "label": 2}, {"file_name": "a04_s03_e04_v03", "length": 21, "label": 4}, {"file_name": "a12_s06_e01_v03", "length": 11, "label": 10}, {"file_name": "a11_s04_e03_v03", "length": 36, "label": 9}, {"file_name": "a05_s03_e00_v03", "length": 20, "label": 5}, {"file_name": "a12_s07_e00_v03", "length": 11, "label": 10}, {"file_name": "a06_s03_e02_v03", "length": 21, "label": 6}, {"file_name": "a03_s03_e05_v03", "length": 33, "label": 3}, {"file_name": "a11_s08_e01_v03", "length": 26, "label": 9}, {"file_name": "a06_s10_e01_v03", "length": 21, "label": 6}, {"file_name": "a04_s03_e02_v03", "length": 11, "label": 4}, {"file_name": "a02_s03_e03_v03", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v03", "length": 51, "label": 8}, {"file_name": "a04_s08_e04_v03", "length": 21, "label": 4}, {"file_name": "a11_s08_e00_v03", "length": 35, "label": 9}, {"file_name": "a02_s01_e00_v03", "length": 39, "label": 2}, {"file_name": "a04_s02_e03_v03", "length": 19, "label": 4}, {"file_name": "a04_s02_e01_v03", "length": 36, "label": 4}, {"file_name": "a06_s08_e00_v03", "length": 21, "label": 6}, {"file_name": "a08_s08_e01_v03", "length": 52, "label": 7}, {"file_name": "a02_s03_e01_v03", "length": 45, "label": 2}, {"file_name": "a11_s02_e02_v03", "length": 29, "label": 9}, {"file_name": "a09_s07_e02_v03", "length": 38, "label": 8}, {"file_name": "a02_s05_e03_v03", "length": 21, "label": 2}, {"file_name": "a01_s07_e02_v03", "length": 31, "label": 1}, {"file_name": "a03_s05_e00_v03", "length": 20, "label": 3}, {"file_name": "a09_s03_e02_v03", "length": 38, "label": 8}, {"file_name": "a01_s03_e07_v03", "length": 28, "label": 1}, {"file_name": "a09_s04_e04_v03", "length": 56, "label": 8}, {"file_name": 
"a11_s10_e00_v03", "length": 16, "label": 9}, {"file_name": "a04_s04_e01_v03", "length": 13, "label": 4}, {"file_name": "a02_s08_e02_v03", "length": 21, "label": 2}, {"file_name": "a04_s01_e07_v03", "length": 16, "label": 4}, {"file_name": "a11_s06_e00_v03", "length": 26, "label": 9}, {"file_name": "a05_s02_e00_v03", "length": 27, "label": 5}, {"file_name": "a02_s02_e03_v03", "length": 29, "label": 2}, {"file_name": "a05_s06_e02_v03", "length": 16, "label": 5}, {"file_name": "a08_s01_e03_v03", "length": 76, "label": 7}, {"file_name": "a08_s09_e01_v03", "length": 91, "label": 7}, {"file_name": "a02_s08_e04_v03", "length": 36, "label": 2}, {"file_name": "a01_s02_e03_v03", "length": 29, "label": 1}, {"file_name": "a11_s08_e05_v03", "length": 28, "label": 9}, {"file_name": "a03_s09_e02_v03", "length": 26, "label": 3}, {"file_name": "a04_s08_e00_v03", "length": 17, "label": 4}, {"file_name": "a12_s03_e04_v03", "length": 16, "label": 10}, {"file_name": "a08_s04_e01_v03", "length": 56, "label": 7}, {"file_name": "a12_s04_e03_v03", "length": 11, "label": 10}, {"file_name": "a04_s09_e03_v03", "length": 31, "label": 4}, {"file_name": "a05_s06_e03_v03", "length": 26, "label": 5}, {"file_name": "a09_s06_e02_v03", "length": 56, "label": 8}, {"file_name": "a06_s08_e05_v03", "length": 21, "label": 6}, {"file_name": "a12_s02_e03_v03", "length": 21, "label": 10}, {"file_name": "a11_s03_e03_v03", "length": 36, "label": 9}, {"file_name": "a11_s07_e04_v03", "length": 23, "label": 9}, {"file_name": "a04_s01_e00_v03", "length": 31, "label": 4}, {"file_name": "a03_s08_e03_v03", "length": 14, "label": 3}, {"file_name": "a04_s10_e00_v03", "length": 12, "label": 4}, {"file_name": "a08_s03_e00_v03", "length": 86, "label": 7}, {"file_name": "a02_s08_e03_v03", "length": 21, "label": 2}, {"file_name": "a01_s09_e03_v03", "length": 26, "label": 1}, {"file_name": "a01_s01_e04_v03", "length": 28, "label": 1}, {"file_name": "a01_s07_e00_v03", "length": 28, "label": 1}, {"file_name": 
"a02_s03_e00_v03", "length": 46, "label": 2}, {"file_name": "a01_s02_e00_v03", "length": 21, "label": 1}, {"file_name": "a03_s09_e04_v03", "length": 21, "label": 3}, {"file_name": "a01_s06_e02_v03", "length": 26, "label": 1}, {"file_name": "a03_s07_e02_v03", "length": 17, "label": 3}, {"file_name": "a03_s05_e04_v03", "length": 39, "label": 3}, {"file_name": "a08_s07_e01_v03", "length": 126, "label": 7}, {"file_name": "a04_s07_e03_v03", "length": 26, "label": 4}, {"file_name": "a08_s04_e04_v03", "length": 56, "label": 7}, {"file_name": "a08_s08_e00_v03", "length": 68, "label": 7}, {"file_name": "a02_s09_e00_v03", "length": 37, "label": 2}, {"file_name": "a06_s03_e00_v03", "length": 16, "label": 6}, {"file_name": "a09_s09_e04_v03", "length": 68, "label": 8}, {"file_name": "a05_s04_e04_v03", "length": 21, "label": 5}, {"file_name": "a09_s04_e03_v03", "length": 31, "label": 8}, {"file_name": "a01_s09_e04_v03", "length": 28, "label": 1}, {"file_name": "a05_s10_e00_v03", "length": 33, "label": 5}, {"file_name": "a09_s08_e02_v03", "length": 49, "label": 8}, {"file_name": "a11_s07_e01_v03", "length": 20, "label": 9}, {"file_name": "a06_s01_e00_v03", "length": 21, "label": 6}, {"file_name": "a12_s08_e04_v03", "length": 14, "label": 10}, {"file_name": "a08_s09_e04_v03", "length": 75, "label": 7}, {"file_name": "a12_s10_e02_v03", "length": 21, "label": 10}, {"file_name": "a04_s01_e01_v03", "length": 33, "label": 4}, {"file_name": "a01_s08_e01_v03", "length": 21, "label": 1}, {"file_name": "a09_s07_e00_v03", "length": 41, "label": 8}, {"file_name": "a04_s09_e00_v03", "length": 21, "label": 4}, {"file_name": "a08_s02_e02_v03", "length": 111, "label": 7}, {"file_name": "a09_s09_e02_v03", "length": 81, "label": 8}, {"file_name": "a09_s02_e03_v03", "length": 31, "label": 8}, {"file_name": "a11_s09_e01_v03", "length": 16, "label": 9}, {"file_name": "a03_s10_e01_v03", "length": 11, "label": 3}, {"file_name": "a11_s03_e02_v03", "length": 21, "label": 9}, {"file_name": 
"a11_s08_e04_v03", "length": 19, "label": 9}, {"file_name": "a06_s08_e02_v03", "length": 11, "label": 6}, {"file_name": "a11_s04_e04_v03", "length": 21, "label": 9}, {"file_name": "a12_s01_e00_v03", "length": 18, "label": 10}, {"file_name": "a02_s06_e04_v03", "length": 21, "label": 2}, {"file_name": "a06_s07_e01_v03", "length": 16, "label": 6}, {"file_name": "a05_s10_e03_v03", "length": 26, "label": 5}, {"file_name": "a03_s06_e00_v03", "length": 23, "label": 3}, {"file_name": "a12_s02_e01_v03", "length": 21, "label": 10}, {"file_name": "a08_s10_e02_v03", "length": 76, "label": 7}, {"file_name": "a08_s02_e00_v03", "length": 86, "label": 7}, {"file_name": "a06_s10_e03_v03", "length": 21, "label": 6}, {"file_name": "a11_s04_e02_v03", "length": 21, "label": 9}, {"file_name": "a08_s09_e03_v03", "length": 121, "label": 7}, {"file_name": "a12_s06_e04_v03", "length": 16, "label": 10}, {"file_name": "a01_s07_e01_v03", "length": 26, "label": 1}, {"file_name": "a05_s02_e04_v03", "length": 26, "label": 5}, {"file_name": "a09_s08_e00_v03", "length": 52, "label": 8}, {"file_name": "a02_s04_e04_v03", "length": 33, "label": 2}, {"file_name": "a06_s07_e00_v03", "length": 8, "label": 6}, {"file_name": "a04_s09_e01_v03", "length": 34, "label": 4}, {"file_name": "a09_s01_e00_v03", "length": 41, "label": 8}, {"file_name": "a08_s10_e01_v03", "length": 111, "label": 7}, {"file_name": "a11_s10_e02_v03", "length": 61, "label": 9}, {"file_name": "a09_s10_e02_v03", "length": 49, "label": 8}, {"file_name": "a03_s07_e04_v03", "length": 11, "label": 3}, {"file_name": "a05_s08_e00_v03", "length": 26, "label": 5}, {"file_name": "a11_s09_e03_v03", "length": 15, "label": 9}, {"file_name": "a12_s04_e04_v03", "length": 14, "label": 10}, {"file_name": "a04_s01_e03_v03", "length": 16, "label": 4}, {"file_name": "a04_s10_e02_v03", "length": 16, "label": 4}, {"file_name": "a06_s10_e04_v03", "length": 16, "label": 6}, {"file_name": "a01_s08_e00_v03", "length": 21, "label": 1}, {"file_name": 
"a03_s10_e02_v03", "length": 28, "label": 3}, {"file_name": "a03_s07_e01_v03", "length": 11, "label": 3}, {"file_name": "a05_s04_e03_v03", "length": 21, "label": 5}, {"file_name": "a01_s01_e02_v03", "length": 25, "label": 1}, {"file_name": "a05_s10_e04_v03", "length": 19, "label": 5}, {"file_name": "a06_s08_e03_v03", "length": 21, "label": 6}, {"file_name": "a02_s04_e02_v03", "length": 33, "label": 2}, {"file_name": "a12_s01_e04_v03", "length": 15, "label": 10}, {"file_name": "a05_s07_e05_v03", "length": 18, "label": 5}, {"file_name": "a02_s01_e02_v03", "length": 28, "label": 2}, {"file_name": "a12_s10_e00_v03", "length": 21, "label": 10}, {"file_name": "a11_s02_e00_v03", "length": 31, "label": 9}, {"file_name": "a02_s09_e01_v03", "length": 40, "label": 2}, {"file_name": "a02_s04_e00_v03", "length": 46, "label": 2}, {"file_name": "a12_s01_e02_v03", "length": 14, "label": 10}, {"file_name": "a01_s03_e06_v03", "length": 31, "label": 1}, {"file_name": "a03_s01_e04_v03", "length": 36, "label": 3}, {"file_name": "a01_s03_e04_v03", "length": 34, "label": 1}, {"file_name": "a01_s06_e03_v03", "length": 21, "label": 1}, {"file_name": "a02_s06_e01_v03", "length": 16, "label": 2}, {"file_name": "a12_s07_e04_v03", "length": 21, "label": 10}, {"file_name": "a08_s10_e04_v03", "length": 86, "label": 7}, {"file_name": "a02_s03_e02_v03", "length": 58, "label": 2}, {"file_name": "a05_s06_e04_v03", "length": 18, "label": 5}, {"file_name": "a05_s10_e01_v03", "length": 26, "label": 5}, {"file_name": "a09_s10_e01_v03", "length": 55, "label": 8}, {"file_name": "a08_s08_e04_v03", "length": 61, "label": 7}, {"file_name": "a06_s01_e02_v03", "length": 21, "label": 6}, {"file_name": "a01_s01_e01_v03", "length": 21, "label": 1}, {"file_name": "a06_s08_e04_v03", "length": 17, "label": 6}, {"file_name": "a09_s06_e03_v03", "length": 56, "label": 8}, {"file_name": "a06_s09_e01_v03", "length": 21, "label": 6}, {"file_name": "a08_s06_e01_v03", "length": 134, "label": 7}, {"file_name": 
"a02_s01_e04_v03", "length": 38, "label": 2}, {"file_name": "a11_s01_e00_v03", "length": 14, "label": 9}, {"file_name": "a03_s03_e00_v03", "length": 41, "label": 3}, {"file_name": "a01_s04_e04_v03", "length": 21, "label": 1}, {"file_name": "a06_s01_e04_v03", "length": 16, "label": 6}, {"file_name": "a01_s10_e01_v03", "length": 24, "label": 1}, {"file_name": "a03_s09_e00_v03", "length": 26, "label": 3}, {"file_name": "a08_s10_e00_v03", "length": 71, "label": 7}, {"file_name": "a05_s10_e02_v03", "length": 34, "label": 5}, {"file_name": "a04_s10_e01_v03", "length": 16, "label": 4}, {"file_name": "a05_s03_e04_v03", "length": 14, "label": 5}, {"file_name": "a05_s07_e02_v03", "length": 26, "label": 5}, {"file_name": "a12_s02_e04_v03", "length": 16, "label": 10}, {"file_name": "a06_s02_e03_v03", "length": 17, "label": 6}, {"file_name": "a09_s01_e03_v03", "length": 41, "label": 8}, {"file_name": "a08_s04_e00_v03", "length": 49, "label": 7}, {"file_name": "a02_s10_e01_v03", "length": 32, "label": 2}, {"file_name": "a11_s04_e01_v03", "length": 21, "label": 9}, {"file_name": "a03_s05_e01_v03", "length": 39, "label": 3}, {"file_name": "a06_s07_e04_v03", "length": 21, "label": 6}, {"file_name": "a09_s09_e03_v03", "length": 56, "label": 8}, {"file_name": "a02_s06_e02_v03", "length": 21, "label": 2}, {"file_name": "a05_s01_e04_v03", "length": 21, "label": 5}, {"file_name": "a11_s03_e04_v03", "length": 26, "label": 9}, {"file_name": "a04_s08_e02_v03", "length": 21, "label": 4}, {"file_name": "a04_s09_e04_v03", "length": 21, "label": 4}, {"file_name": "a08_s07_e00_v03", "length": 51, "label": 7}, {"file_name": "a04_s01_e05_v03", "length": 16, "label": 4}, {"file_name": "a12_s07_e01_v03", "length": 16, "label": 10}, {"file_name": "a02_s01_e03_v03", "length": 40, "label": 2}, {"file_name": "a09_s04_e00_v03", "length": 35, "label": 8}, {"file_name": "a09_s01_e04_v03", "length": 37, "label": 8}, {"file_name": "a12_s08_e00_v03", "length": 16, "label": 10}, {"file_name": 
"a04_s06_e03_v03", "length": 16, "label": 4}, {"file_name": "a11_s06_e01_v03", "length": 21, "label": 9}, {"file_name": "a01_s10_e02_v03", "length": 26, "label": 1}, {"file_name": "a02_s10_e04_v03", "length": 29, "label": 2}, {"file_name": "a04_s07_e01_v03", "length": 21, "label": 4}, {"file_name": "a03_s04_e01_v03", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v03", "length": 31, "label": 3}, {"file_name": "a06_s09_e02_v03", "length": 26, "label": 6}, {"file_name": "a03_s07_e00_v03", "length": 21, "label": 3}, {"file_name": "a06_s04_e02_v03", "length": 21, "label": 6}, {"file_name": "a12_s04_e01_v03", "length": 16, "label": 10}, {"file_name": "a04_s06_e02_v03", "length": 21, "label": 4}, {"file_name": "a04_s04_e04_v03", "length": 21, "label": 4}, {"file_name": "a09_s04_e02_v03", "length": 37, "label": 8}, {"file_name": "a02_s02_e01_v03", "length": 26, "label": 2}, {"file_name": "a06_s09_e00_v03", "length": 21, "label": 6}, {"file_name": "a05_s09_e00_v03", "length": 28, "label": 5}, {"file_name": "a05_s03_e01_v03", "length": 17, "label": 5}, {"file_name": "a02_s05_e04_v03", "length": 29, "label": 2}, {"file_name": "a01_s06_e01_v03", "length": 21, "label": 1}, {"file_name": "a12_s04_e02_v03", "length": 13, "label": 10}, {"file_name": "a03_s05_e02_v03", "length": 36, "label": 3}, {"file_name": "a01_s03_e02_v03", "length": 37, "label": 1}, {"file_name": "a05_s08_e05_v03", "length": 21, "label": 5}, {"file_name": "a01_s03_e00_v03", "length": 29, "label": 1}, {"file_name": "a08_s06_e03_v03", "length": 120, "label": 7}, {"file_name": "a05_s09_e02_v03", "length": 26, "label": 5}, {"file_name": "a01_s02_e01_v03", "length": 27, "label": 1}, {"file_name": "a01_s03_e01_v03", "length": 33, "label": 1}, {"file_name": "a04_s03_e01_v03", "length": 16, "label": 4}, {"file_name": "a06_s06_e00_v03", "length": 21, "label": 6}, {"file_name": "a12_s06_e02_v03", "length": 18, "label": 10}, {"file_name": "a11_s03_e05_v03", "length": 26, "label": 9}, {"file_name": 
"a04_s10_e04_v03", "length": 16, "label": 4}, {"file_name": "a12_s03_e01_v03", "length": 11, "label": 10}, {"file_name": "a08_s04_e02_v03", "length": 67, "label": 7}, {"file_name": "a06_s04_e04_v03", "length": 13, "label": 6}, {"file_name": "a12_s06_e03_v03", "length": 17, "label": 10}, {"file_name": "a08_s01_e04_v03", "length": 71, "label": 7}, {"file_name": "a04_s03_e00_v03", "length": 14, "label": 4}, {"file_name": "a08_s01_e00_v03", "length": 51, "label": 7}, {"file_name": "a01_s03_e03_v03", "length": 41, "label": 1}, {"file_name": "a04_s01_e08_v03", "length": 16, "label": 4}, {"file_name": "a01_s04_e02_v03", "length": 26, "label": 1}, {"file_name": "a01_s10_e04_v03", "length": 26, "label": 1}, {"file_name": "a09_s02_e00_v03", "length": 41, "label": 8}, {"file_name": "a06_s07_e02_v03", "length": 16, "label": 6}, {"file_name": "a08_s07_e02_v03", "length": 46, "label": 7}, {"file_name": "a11_s10_e01_v03", "length": 36, "label": 9}, {"file_name": "a02_s07_e00_v03", "length": 31, "label": 2}, {"file_name": "a06_s08_e01_v03", "length": 16, "label": 6}, {"file_name": "a01_s10_e03_v03", "length": 31, "label": 1}, {"file_name": "a11_s02_e04_v03", "length": 35, "label": 9}, {"file_name": "a02_s09_e04_v03", "length": 1, "label": 2}, {"file_name": "a12_s03_e03_v03", "length": 21, "label": 10}, {"file_name": "a05_s01_e01_v03", "length": 21, "label": 5}, {"file_name": "a05_s08_e02_v03", "length": 16, "label": 5}, {"file_name": "a12_s09_e02_v03", "length": 23, "label": 10}, {"file_name": "a09_s08_e01_v03", "length": 48, "label": 8}, {"file_name": "a01_s08_e04_v03", "length": 23, "label": 1}, {"file_name": "a09_s09_e00_v03", "length": 56, "label": 8}, {"file_name": "a03_s10_e03_v03", "length": 13, "label": 3}, {"file_name": "a09_s02_e04_v03", "length": 36, "label": 8}, {"file_name": "a08_s01_e01_v03", "length": 61, "label": 7}, {"file_name": "a09_s10_e00_v03", "length": 54, "label": 8}, {"file_name": "a12_s09_e01_v03", "length": 18, "label": 10}, {"file_name": 
"a05_s01_e00_v03", "length": 20, "label": 5}, {"file_name": "a06_s02_e01_v03", "length": 16, "label": 6}, {"file_name": "a08_s08_e03_v03", "length": 62, "label": 7}, {"file_name": "a04_s04_e03_v03", "length": 21, "label": 4}, {"file_name": "a02_s10_e02_v03", "length": 31, "label": 2}, {"file_name": "a01_s03_e05_v03", "length": 31, "label": 1}, {"file_name": "a06_s03_e03_v03", "length": 19, "label": 6}, {"file_name": "a05_s07_e04_v03", "length": 21, "label": 5}, {"file_name": "a02_s10_e00_v03", "length": 38, "label": 2}, {"file_name": "a12_s04_e00_v03", "length": 16, "label": 10}, {"file_name": "a03_s04_e02_v03", "length": 27, "label": 3}, {"file_name": "a06_s02_e02_v03", "length": 21, "label": 6}, {"file_name": "a03_s04_e03_v03", "length": 31, "label": 3}, {"file_name": "a11_s08_e03_v03", "length": 12, "label": 9}, {"file_name": "a09_s07_e03_v03", "length": 44, "label": 8}, {"file_name": "a05_s03_e03_v03", "length": 14, "label": 5}, {"file_name": "a09_s10_e03_v03", "length": 54, "label": 8}, {"file_name": "a11_s06_e02_v03", "length": 18, "label": 9}, {"file_name": "a04_s04_e02_v03", "length": 11, "label": 4}, {"file_name": "a11_s08_e02_v03", "length": 21, "label": 9}, {"file_name": "a11_s07_e03_v03", "length": 21, "label": 9}, {"file_name": "a04_s01_e06_v03", "length": 19, "label": 4}, {"file_name": "a06_s01_e01_v03", "length": 21, "label": 6}, {"file_name": "a12_s06_e00_v03", "length": 11, "label": 10}, {"file_name": "a12_s03_e02_v03", "length": 18, "label": 10}, {"file_name": "a03_s04_e00_v03", "length": 26, "label": 3}, {"file_name": "a11_s01_e03_v03", "length": 18, "label": 9}, {"file_name": "a03_s08_e01_v03", "length": 21, "label": 3}, {"file_name": "a11_s04_e00_v03", "length": 31, "label": 9}, {"file_name": "a02_s05_e02_v03", "length": 26, "label": 2}, {"file_name": "a06_s06_e01_v03", "length": 19, "label": 6}, {"file_name": "a03_s03_e02_v03", "length": 32, "label": 3}, {"file_name": "a11_s07_e02_v03", "length": 16, "label": 9}, {"file_name": 
"a11_s01_e02_v03", "length": 15, "label": 9}]
else:
self.train_val = 'train'
self.data_dict = [{"file_name": "a05_s04_e02_v01", "length": 26, "label": 5}, {"file_name": "a01_s05_e04_v01", "length": 46, "label": 1}, {"file_name": "a03_s03_e04_v01", "length": 42, "label": 3}, {"file_name": "a08_s02_e01_v01", "length": 106, "label": 7}, {"file_name": "a03_s05_e03_v01", "length": 31, "label": 3}, {"file_name": "a06_s05_e01_v01", "length": 20, "label": 6}, {"file_name": "a12_s10_e01_v01", "length": 37, "label": 10}, {"file_name": "a01_s07_e03_v01", "length": 39, "label": 1}, {"file_name": "a03_s08_e02_v01", "length": 61, "label": 3}, {"file_name": "a11_s10_e03_v01", "length": 49, "label": 9}, {"file_name": "a11_s03_e00_v01", "length": 41, "label": 9}, {"file_name": "a03_s02_e00_v01", "length": 31, "label": 3}, {"file_name": "a11_s01_e04_v01", "length": 21, "label": 9}, {"file_name": "a04_s05_e04_v01", "length": 49, "label": 4}, {"file_name": "a09_s08_e04_v01", "length": 76, "label": 8}, {"file_name": "a09_s06_e01_v01", "length": 41, "label": 8}, {"file_name": "a09_s07_e01_v01", "length": 77, "label": 8}, {"file_name": "a02_s08_e01_v01", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v01", "length": 41, "label": 1}, {"file_name": "a02_s02_e02_v01", "length": 53, "label": 2}, {"file_name": "a02_s07_e05_v01", "length": 39, "label": 2}, {"file_name": "a06_s02_e00_v01", "length": 34, "label": 6}, {"file_name": "a03_s02_e02_v01", "length": 26, "label": 3}, {"file_name": "a09_s03_e04_v01", "length": 75, "label": 8}, {"file_name": "a04_s01_e02_v01", "length": 44, "label": 4}, {"file_name": "a12_s01_e01_v01", "length": 45, "label": 10}, {"file_name": "a02_s07_e03_v01", "length": 53, "label": 2}, {"file_name": "a05_s08_e04_v01", "length": 19, "label": 5}, {"file_name": "a02_s07_e02_v01", "length": 35, "label": 2}, {"file_name": "a04_s07_e02_v01", "length": 78, "label": 4}, {"file_name": "a01_s08_e03_v01", "length": 64, "label": 1}, {"file_name": "a08_s03_e01_v01", "length": 86, "label": 7}, {"file_name": "a04_s08_e03_v01", "length": 79, "label": 
4}, {"file_name": "a03_s10_e00_v01", "length": 52, "label": 3}, {"file_name": "a04_s03_e03_v01", "length": 76, "label": 4}, {"file_name": "a11_s05_e02_v01", "length": 20, "label": 9}, {"file_name": "a06_s06_e02_v01", "length": 21, "label": 6}, {"file_name": "a01_s08_e06_v01", "length": 27, "label": 1}, {"file_name": "a03_s09_e03_v01", "length": 29, "label": 3}, {"file_name": "a09_s03_e00_v01", "length": 105, "label": 8}, {"file_name": "a09_s03_e03_v01", "length": 49, "label": 8}, {"file_name": "a04_s02_e02_v01", "length": 120, "label": 4}, {"file_name": "a08_s01_e02_v01", "length": 84, "label": 7}, {"file_name": "a04_s04_e00_v01", "length": 30, "label": 4}, {"file_name": "a03_s02_e03_v01", "length": 50, "label": 3}, {"file_name": "a05_s04_e00_v01", "length": 49, "label": 5}, {"file_name": "a05_s07_e03_v01", "length": 34, "label": 5}, {"file_name": "a02_s10_e05_v01", "length": 51, "label": 2}, {"file_name": "a06_s10_e00_v01", "length": 35, "label": 6}, {"file_name": "a11_s07_e00_v01", "length": 26, "label": 9}, {"file_name": "a03_s01_e01_v01", "length": 131, "label": 3}, {"file_name": "a04_s06_e01_v01", "length": 35, "label": 4}, {"file_name": "a08_s02_e04_v01", "length": 106, "label": 7}, {"file_name": "a09_s08_e03_v01", "length": 85, "label": 8}, {"file_name": "a05_s02_e02_v01", "length": 19, "label": 5}, {"file_name": "a04_s06_e04_v01", "length": 23, "label": 4}, {"file_name": "a05_s09_e03_v01", "length": 38, "label": 5}, {"file_name": "a03_s06_e02_v01", "length": 23, "label": 3}, {"file_name": "a01_s01_e00_v01", "length": 44, "label": 1}, {"file_name": "a06_s06_e03_v01", "length": 28, "label": 6}, {"file_name": "a06_s10_e02_v01", "length": 35, "label": 6}, {"file_name": "a02_s07_e04_v01", "length": 45, "label": 2}, {"file_name": "a09_s06_e00_v01", "length": 80, "label": 8}, {"file_name": "a04_s07_e04_v01", "length": 89, "label": 4}, {"file_name": "a04_s05_e09_v01", "length": 38, "label": 4}, {"file_name": "a05_s02_e01_v01", "length": 17, "label": 5}, 
{"file_name": "a01_s06_e04_v01", "length": 24, "label": 1}, {"file_name": "a04_s08_e01_v01", "length": 77, "label": 4}, {"file_name": "a01_s09_e00_v01", "length": 37, "label": 1}, {"file_name": "a08_s03_e03_v01", "length": 157, "label": 7}, {"file_name": "a12_s03_e00_v01", "length": 31, "label": 10}, {"file_name": "a11_s02_e03_v01", "length": 29, "label": 9}, {"file_name": "a12_s07_e02_v01", "length": 25, "label": 10}, {"file_name": "a11_s05_e01_v01", "length": 53, "label": 9}, {"file_name": "a05_s06_e01_v01", "length": 18, "label": 5}, {"file_name": "a03_s08_e06_v01", "length": 25, "label": 3}, {"file_name": "a06_s02_e04_v01", "length": 32, "label": 6}, {"file_name": "a06_s04_e00_v01", "length": 49, "label": 6}, {"file_name": "a05_s09_e01_v01", "length": 33, "label": 5}, {"file_name": "a11_s05_e03_v01", "length": 41, "label": 9}, {"file_name": "a11_s10_e04_v01", "length": 55, "label": 9}, {"file_name": "a03_s01_e00_v01", "length": 106, "label": 3}, {"file_name": "a03_s08_e04_v01", "length": 31, "label": 3}, {"file_name": "a11_s02_e01_v01", "length": 8, "label": 9}, {"file_name": "a04_s02_e00_v01", "length": 25, "label": 4}, {"file_name": "a11_s01_e01_v01", "length": 31, "label": 9}, {"file_name": "a02_s06_e03_v01", "length": 35, "label": 2}, {"file_name": "a12_s10_e03_v01", "length": 40, "label": 10}, {"file_name": "a01_s05_e02_v01", "length": 51, "label": 1}, {"file_name": "a01_s06_e00_v01", "length": 36, "label": 1}, {"file_name": "a05_s07_e01_v01", "length": 35, "label": 5}, {"file_name": "a01_s09_e01_v01", "length": 38, "label": 1}, {"file_name": "a02_s06_e00_v01", "length": 32, "label": 2}, {"file_name": "a11_s09_e00_v01", "length": 12, "label": 9}, {"file_name": "a03_s03_e01_v01", "length": 33, "label": 3}, {"file_name": "a03_s08_e00_v01", "length": 42, "label": 3}, {"file_name": "a06_s04_e01_v01", "length": 31, "label": 6}, {"file_name": "a02_s05_e01_v01", "length": 58, "label": 2}, {"file_name": "a03_s04_e04_v01", "length": 36, "label": 3}, {"file_name": 
"a01_s09_e02_v01", "length": 32, "label": 1}, {"file_name": "a08_s03_e04_v01", "length": 51, "label": 7}, {"file_name": "a01_s10_e00_v01", "length": 36, "label": 1}, {"file_name": "a01_s02_e02_v01", "length": 33, "label": 1}, {"file_name": "a09_s03_e01_v01", "length": 61, "label": 8}, {"file_name": "a05_s06_e00_v01", "length": 39, "label": 5}, {"file_name": "a05_s01_e02_v01", "length": 26, "label": 5}, {"file_name": "a03_s06_e04_v01", "length": 24, "label": 3}, {"file_name": "a02_s02_e04_v01", "length": 36, "label": 2}, {"file_name": "a06_s07_e03_v01", "length": 32, "label": 6}, {"file_name": "a04_s02_e04_v01", "length": 28, "label": 4}, {"file_name": "a04_s05_e02_v01", "length": 75, "label": 4}, {"file_name": "a02_s07_e01_v01", "length": 38, "label": 2}, {"file_name": "a03_s07_e03_v01", "length": 62, "label": 3}, {"file_name": "a12_s08_e01_v01", "length": 32, "label": 10}, {"file_name": "a05_s01_e03_v01", "length": 31, "label": 5}, {"file_name": "a02_s09_e02_v01", "length": 60, "label": 2}, {"file_name": "a05_s08_e03_v01", "length": 29, "label": 5}, {"file_name": "a04_s06_e00_v01", "length": 67, "label": 4}, {"file_name": "a09_s01_e02_v01", "length": 130, "label": 8}, {"file_name": "a04_s09_e02_v01", "length": 68, "label": 4}, {"file_name": "a03_s03_e03_v01", "length": 36, "label": 3}, {"file_name": "a08_s07_e03_v01", "length": 86, "label": 7}, {"file_name": "a08_s09_e02_v01", "length": 148, "label": 7}, {"file_name": "a08_s09_e00_v01", "length": 120, "label": 7}, {"file_name": "a06_s06_e04_v01", "length": 30, "label": 6}, {"file_name": "a01_s07_e04_v01", "length": 34, "label": 1}, {"file_name": "a04_s05_e08_v01", "length": 28, "label": 4}, {"file_name": "a08_s05_e04_v01", "length": 80, "label": 7}, {"file_name": "a05_s04_e01_v01", "length": 30, "label": 5}, {"file_name": "a04_s07_e00_v01", "length": 117, "label": 4}, {"file_name": "a05_s08_e01_v01", "length": 35, "label": 5}, {"file_name": "a11_s06_e03_v01", "length": 22, "label": 9}, {"file_name": 
"a01_s04_e03_v01", "length": 58, "label": 1}, {"file_name": "a12_s07_e03_v01", "length": 21, "label": 10}, {"file_name": "a01_s02_e04_v01", "length": 19, "label": 1}, {"file_name": "a04_s04_e05_v01", "length": 34, "label": 4}, {"file_name": "a03_s01_e03_v01", "length": 113, "label": 3}, {"file_name": "a12_s02_e02_v01", "length": 63, "label": 10}, {"file_name": "a05_s02_e03_v01", "length": 26, "label": 5}, {"file_name": "a03_s02_e04_v01", "length": 33, "label": 3}, {"file_name": "a08_s02_e03_v01", "length": 100, "label": 7}, {"file_name": "a08_s03_e02_v01", "length": 83, "label": 7}, {"file_name": "a09_s01_e01_v01", "length": 106, "label": 8}, {"file_name": "a02_s01_e01_v01", "length": 59, "label": 2}, {"file_name": "a08_s06_e00_v01", "length": 103, "label": 7}, {"file_name": "a04_s04_e09_v01", "length": 36, "label": 4}, {"file_name": "a12_s08_e02_v01", "length": 29, "label": 10}, {"file_name": "a02_s08_e00_v01", "length": 27, "label": 2}, {"file_name": "a01_s08_e02_v01", "length": 201, "label": 1}, {"file_name": "a09_s04_e01_v01", "length": 74, "label": 8}, {"file_name": "a04_s01_e04_v01", "length": 120, "label": 4}, {"file_name": "a04_s05_e03_v01", "length": 31, "label": 4}, {"file_name": "a08_s10_e03_v01", "length": 70, "label": 7}, {"file_name": "a02_s05_e00_v01", "length": 26, "label": 2}, {"file_name": "a06_s04_e03_v01", "length": 19, "label": 6}, {"file_name": "a06_s09_e03_v01", "length": 44, "label": 6}, {"file_name": "a05_s03_e02_v01", "length": 40, "label": 5}, {"file_name": "a06_s03_e04_v01", "length": 30, "label": 6}, {"file_name": "a06_s01_e03_v01", "length": 26, "label": 6}, {"file_name": "a11_s03_e01_v01", "length": 31, "label": 9}, {"file_name": "a09_s02_e01_v01", "length": 67, "label": 8}, {"file_name": "a02_s02_e00_v01", "length": 57, "label": 2}, {"file_name": "a01_s01_e03_v01", "length": 51, "label": 1}, {"file_name": "a08_s06_e02_v01", "length": 90, "label": 7}, {"file_name": "a12_s01_e03_v01", "length": 57, "label": 10}, {"file_name": 
"a06_s05_e04_v01", "length": 15, "label": 6}, {"file_name": "a09_s09_e01_v01", "length": 179, "label": 8}, {"file_name": "a04_s10_e03_v01", "length": 20, "label": 4}, {"file_name": "a06_s09_e04_v01", "length": 35, "label": 6}, {"file_name": "a02_s04_e01_v01", "length": 55, "label": 2}, {"file_name": "a12_s10_e04_v01", "length": 57, "label": 10}, {"file_name": "a04_s03_e05_v01", "length": 44, "label": 4}, {"file_name": "a06_s03_e01_v01", "length": 31, "label": 6}, {"file_name": "a02_s03_e04_v01", "length": 51, "label": 2}, {"file_name": "a11_s09_e02_v01", "length": 42, "label": 9}, {"file_name": "a08_s08_e02_v01", "length": 61, "label": 7}, {"file_name": "a03_s02_e01_v01", "length": 28, "label": 3}, {"file_name": "a12_s02_e00_v01", "length": 38, "label": 10}, {"file_name": "a12_s08_e03_v01", "length": 26, "label": 10}, {"file_name": "a02_s09_e03_v01", "length": 45, "label": 2}, {"file_name": "a09_s02_e02_v01", "length": 54, "label": 8}, {"file_name": "a05_s09_e04_v01", "length": 39, "label": 5}, {"file_name": "a04_s04_e06_v01", "length": 28, "label": 4}, {"file_name": "a01_s04_e00_v01", "length": 21, "label": 1}, {"file_name": "a08_s04_e03_v01", "length": 125, "label": 7}, {"file_name": "a08_s05_e01_v01", "length": 135, "label": 7}, {"file_name": "a02_s04_e03_v01", "length": 28, "label": 2}, {"file_name": "a04_s03_e04_v01", "length": 51, "label": 4}, {"file_name": "a12_s06_e01_v01", "length": 21, "label": 10}, {"file_name": "a11_s04_e03_v01", "length": 51, "label": 9}, {"file_name": "a05_s03_e00_v01", "length": 46, "label": 5}, {"file_name": "a12_s07_e00_v01", "length": 34, "label": 10}, {"file_name": "a06_s03_e02_v01", "length": 70, "label": 6}, {"file_name": "a03_s03_e05_v01", "length": 30, "label": 3}, {"file_name": "a11_s08_e01_v01", "length": 19, "label": 9}, {"file_name": "a05_s05_e04_v01", "length": 26, "label": 5}, {"file_name": "a06_s10_e01_v01", "length": 30, "label": 6}, {"file_name": "a04_s03_e02_v01", "length": 97, "label": 4}, {"file_name": 
"a02_s03_e03_v01", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v01", "length": 66, "label": 8}, {"file_name": "a04_s08_e04_v01", "length": 71, "label": 4}, {"file_name": "a11_s08_e00_v01", "length": 14, "label": 9}, {"file_name": "a02_s01_e00_v01", "length": 55, "label": 2}, {"file_name": "a04_s02_e03_v01", "length": 51, "label": 4}, {"file_name": "a04_s02_e01_v01", "length": 48, "label": 4}, {"file_name": "a06_s08_e00_v01", "length": 15, "label": 6}, {"file_name": "a08_s08_e01_v01", "length": 90, "label": 7}, {"file_name": "a02_s03_e01_v01", "length": 50, "label": 2}, {"file_name": "a11_s02_e02_v01", "length": 35, "label": 9}, {"file_name": "a09_s07_e02_v01", "length": 44, "label": 8}, {"file_name": "a02_s05_e03_v01", "length": 41, "label": 2}, {"file_name": "a01_s07_e02_v01", "length": 35, "label": 1}, {"file_name": "a06_s05_e03_v01", "length": 18, "label": 6}, {"file_name": "a12_s05_e03_v01", "length": 40, "label": 10}, {"file_name": "a03_s05_e00_v01", "length": 63, "label": 3}, {"file_name": "a09_s03_e02_v01", "length": 48, "label": 8}, {"file_name": "a09_s04_e04_v01", "length": 139, "label": 8}, {"file_name": "a11_s10_e00_v01", "length": 50, "label": 9}, {"file_name": "a04_s04_e01_v01", "length": 26, "label": 4}, {"file_name": "a01_s08_e05_v01", "length": 63, "label": 1}, {"file_name": "a02_s08_e02_v01", "length": 35, "label": 2}, {"file_name": "a01_s05_e00_v01", "length": 83, "label": 1}, {"file_name": "a11_s06_e00_v01", "length": 33, "label": 9}, {"file_name": "a05_s02_e00_v01", "length": 40, "label": 5}, {"file_name": "a02_s02_e03_v01", "length": 41, "label": 2}, {"file_name": "a09_s05_e02_v01", "length": 61, "label": 8}, {"file_name": "a05_s06_e02_v01", "length": 19, "label": 5}, {"file_name": "a08_s01_e03_v01", "length": 130, "label": 7}, {"file_name": "a08_s09_e01_v01", "length": 153, "label": 7}, {"file_name": "a02_s08_e04_v01", "length": 51, "label": 2}, {"file_name": "a06_s05_e02_v01", "length": 21, "label": 6}, {"file_name": 
"a01_s02_e03_v01", "length": 31, "label": 1}, {"file_name": "a11_s08_e05_v01", "length": 34, "label": 9}, {"file_name": "a03_s09_e02_v01", "length": 19, "label": 3}, {"file_name": "a04_s08_e00_v01", "length": 86, "label": 4}, {"file_name": "a03_s09_e01_v01", "length": 6, "label": 3}, {"file_name": "a08_s04_e01_v01", "length": 109, "label": 7}, {"file_name": "a12_s04_e03_v01", "length": 41, "label": 10}, {"file_name": "a04_s09_e03_v01", "length": 43, "label": 4}, {"file_name": "a12_s05_e00_v01", "length": 32, "label": 10}, {"file_name": "a11_s05_e04_v01", "length": 41, "label": 9}, {"file_name": "a05_s06_e03_v01", "length": 19, "label": 5}, {"file_name": "a09_s06_e02_v01", "length": 31, "label": 8}, {"file_name": "a06_s08_e05_v01", "length": 19, "label": 6}, {"file_name": "a03_s06_e03_v01", "length": 25, "label": 3}, {"file_name": "a12_s02_e03_v01", "length": 77, "label": 10}, {"file_name": "a11_s03_e03_v01", "length": 36, "label": 9}, {"file_name": "a04_s01_e00_v01", "length": 141, "label": 4}, {"file_name": "a04_s04_e08_v01", "length": 36, "label": 4}, {"file_name": "a03_s08_e03_v01", "length": 31, "label": 3}, {"file_name": "a02_s10_e03_v01", "length": 71, "label": 2}, {"file_name": "a04_s10_e00_v01", "length": 12, "label": 4}, {"file_name": "a08_s03_e00_v01", "length": 84, "label": 7}, {"file_name": "a02_s08_e03_v01", "length": 56, "label": 2}, {"file_name": "a01_s09_e03_v01", "length": 35, "label": 1}, {"file_name": "a01_s01_e04_v01", "length": 46, "label": 1}, {"file_name": "a01_s07_e00_v01", "length": 35, "label": 1}, {"file_name": "a02_s03_e00_v01", "length": 86, "label": 2}, {"file_name": "a01_s02_e00_v01", "length": 25, "label": 1}, {"file_name": "a03_s09_e04_v01", "length": 38, "label": 3}, {"file_name": "a01_s06_e02_v01", "length": 28, "label": 1}, {"file_name": "a03_s07_e02_v01", "length": 8, "label": 3}, {"file_name": "a04_s05_e05_v01", "length": 56, "label": 4}, {"file_name": "a08_s07_e01_v01", "length": 155, "label": 7}, {"file_name": 
"a04_s07_e03_v01", "length": 109, "label": 4}, {"file_name": "a08_s04_e04_v01", "length": 146, "label": 7}, {"file_name": "a08_s08_e00_v01", "length": 56, "label": 7}, {"file_name": "a02_s09_e00_v01", "length": 55, "label": 2}, {"file_name": "a06_s03_e00_v01", "length": 35, "label": 6}, {"file_name": "a04_s05_e07_v01", "length": 39, "label": 4}, {"file_name": "a09_s09_e04_v01", "length": 81, "label": 8}, {"file_name": "a05_s04_e04_v01", "length": 27, "label": 5}, {"file_name": "a09_s04_e03_v01", "length": 63, "label": 8}, {"file_name": "a01_s09_e04_v01", "length": 25, "label": 1}, {"file_name": "a05_s10_e00_v01", "length": 59, "label": 5}, {"file_name": "a09_s08_e02_v01", "length": 100, "label": 8}, {"file_name": "a11_s07_e01_v01", "length": 10, "label": 9}, {"file_name": "a06_s01_e00_v01", "length": 32, "label": 6}, {"file_name": "a12_s08_e04_v01", "length": 26, "label": 10}, {"file_name": "a08_s09_e04_v01", "length": 88, "label": 7}, {"file_name": "a12_s10_e02_v01", "length": 66, "label": 10}, {"file_name": "a04_s01_e01_v01", "length": 84, "label": 4}, {"file_name": "a01_s08_e01_v01", "length": 19, "label": 1}, {"file_name": "a09_s07_e00_v01", "length": 63, "label": 8}, {"file_name": "a04_s09_e00_v01", "length": 112, "label": 4}, {"file_name": "a08_s02_e02_v01", "length": 163, "label": 7}, {"file_name": "a09_s09_e02_v01", "length": 192, "label": 8}, {"file_name": "a09_s02_e03_v01", "length": 66, "label": 8}, {"file_name": "a11_s09_e01_v01", "length": 26, "label": 9}, {"file_name": "a03_s10_e01_v01", "length": 31, "label": 3}, {"file_name": "a11_s03_e02_v01", "length": 21, "label": 9}, {"file_name": "a11_s08_e04_v01", "length": 65, "label": 9}, {"file_name": "a06_s08_e02_v01", "length": 20, "label": 6}, {"file_name": "a11_s04_e04_v01", "length": 51, "label": 9}, {"file_name": "a12_s01_e00_v01", "length": 62, "label": 10}, {"file_name": "a02_s06_e04_v01", "length": 25, "label": 2}, {"file_name": "a06_s07_e01_v01", "length": 29, "label": 6}, {"file_name": 
"a05_s10_e03_v01", "length": 46, "label": 5}, {"file_name": "a09_s05_e04_v01", "length": 60, "label": 8}, {"file_name": "a03_s06_e00_v01", "length": 28, "label": 3}, {"file_name": "a12_s02_e01_v01", "length": 45, "label": 10}, {"file_name": "a08_s10_e02_v01", "length": 102, "label": 7}, {"file_name": "a08_s02_e00_v01", "length": 116, "label": 7}, {"file_name": "a06_s10_e03_v01", "length": 37, "label": 6}, {"file_name": "a11_s04_e02_v01", "length": 37, "label": 9}, {"file_name": "a08_s09_e03_v01", "length": 125, "label": 7}, {"file_name": "a12_s06_e04_v01", "length": 18, "label": 10}, {"file_name": "a01_s07_e01_v01", "length": 31, "label": 1}, {"file_name": "a05_s02_e04_v01", "length": 21, "label": 5}, {"file_name": "a09_s08_e00_v01", "length": 71, "label": 8}, {"file_name": "a02_s04_e04_v01", "length": 44, "label": 2}, {"file_name": "a06_s07_e00_v01", "length": 20, "label": 6}, {"file_name": "a04_s09_e01_v01", "length": 79, "label": 4}, {"file_name": "a09_s01_e00_v01", "length": 97, "label": 8}, {"file_name": "a08_s10_e01_v01", "length": 100, "label": 7}, {"file_name": "a11_s10_e02_v01", "length": 22, "label": 9}, {"file_name": "a09_s10_e02_v01", "length": 40, "label": 8}, {"file_name": "a03_s07_e04_v01", "length": 28, "label": 3}, {"file_name": "a05_s08_e00_v01", "length": 31, "label": 5}, {"file_name": "a05_s05_e03_v01", "length": 21, "label": 5}, {"file_name": "a11_s09_e03_v01", "length": 19, "label": 9}, {"file_name": "a12_s04_e04_v01", "length": 37, "label": 10}, {"file_name": "a04_s01_e03_v01", "length": 84, "label": 4}, {"file_name": "a04_s10_e02_v01", "length": 35, "label": 4}, {"file_name": "a06_s10_e04_v01", "length": 42, "label": 6}, {"file_name": "a01_s08_e00_v01", "length": 42, "label": 1}, {"file_name": "a03_s10_e02_v01", "length": 60, "label": 3}, {"file_name": "a03_s07_e01_v01", "length": 18, "label": 3}, {"file_name": "a05_s04_e03_v01", "length": 27, "label": 5}, {"file_name": "a01_s01_e02_v01", "length": 64, "label": 1}, {"file_name": 
"a05_s10_e04_v01", "length": 29, "label": 5}, {"file_name": "a06_s08_e03_v01", "length": 24, "label": 6}, {"file_name": "a02_s04_e02_v01", "length": 29, "label": 2}, {"file_name": "a12_s01_e04_v01", "length": 61, "label": 10}, {"file_name": "a02_s01_e02_v01", "length": 69, "label": 2}, {"file_name": "a12_s10_e00_v01", "length": 31, "label": 10}, {"file_name": "a11_s02_e00_v01", "length": 25, "label": 9}, {"file_name": "a02_s09_e01_v01", "length": 38, "label": 2}, {"file_name": "a12_s06_e05_v01", "length": 43, "label": 10}, {"file_name": "a02_s04_e00_v01", "length": 51, "label": 2}, {"file_name": "a12_s01_e02_v01", "length": 58, "label": 10}, {"file_name": "a04_s02_e05_v01", "length": 57, "label": 4}, {"file_name": "a03_s01_e04_v01", "length": 69, "label": 3}, {"file_name": "a01_s03_e04_v01", "length": 54, "label": 1}, {"file_name": "a01_s06_e03_v01", "length": 21, "label": 1}, {"file_name": "a02_s06_e01_v01", "length": 25, "label": 2}, {"file_name": "a12_s07_e04_v01", "length": 19, "label": 10}, {"file_name": "a08_s10_e04_v01", "length": 123, "label": 7}, {"file_name": "a02_s03_e02_v01", "length": 50, "label": 2}, {"file_name": "a09_s05_e06_v01", "length": 57, "label": 8}, {"file_name": "a05_s10_e01_v01", "length": 36, "label": 5}, {"file_name": "a09_s10_e01_v01", "length": 65, "label": 8}, {"file_name": "a08_s08_e04_v01", "length": 92, "label": 7}, {"file_name": "a06_s01_e02_v01", "length": 30, "label": 6}, {"file_name": "a01_s01_e01_v01", "length": 47, "label": 1}, {"file_name": "a06_s08_e04_v01", "length": 17, "label": 6}, {"file_name": "a09_s06_e03_v01", "length": 44, "label": 8}, {"file_name": "a06_s09_e01_v01", "length": 69, "label": 6}, {"file_name": "a08_s06_e01_v01", "length": 152, "label": 7}, {"file_name": "a02_s01_e04_v01", "length": 31, "label": 2}, {"file_name": "a11_s01_e00_v01", "length": 51, "label": 9}, {"file_name": "a05_s05_e02_v01", "length": 21, "label": 5}, {"file_name": "a03_s03_e00_v01", "length": 37, "label": 3}, {"file_name": 
"a01_s04_e04_v01", "length": 31, "label": 1}, {"file_name": "a06_s01_e04_v01", "length": 30, "label": 6}, {"file_name": "a09_s05_e05_v01", "length": 88, "label": 8}, {"file_name": "a01_s10_e01_v01", "length": 33, "label": 1}, {"file_name": "a03_s09_e00_v01", "length": 22, "label": 3}, {"file_name": "a08_s10_e00_v01", "length": 91, "label": 7}, {"file_name": "a05_s10_e02_v01", "length": 28, "label": 5}, {"file_name": "a03_s08_e05_v01", "length": 51, "label": 3}, {"file_name": "a04_s10_e01_v01", "length": 30, "label": 4}, {"file_name": "a05_s03_e04_v01", "length": 20, "label": 5}, {"file_name": "a05_s07_e02_v01", "length": 21, "label": 5}, {"file_name": "a12_s02_e04_v01", "length": 53, "label": 10}, {"file_name": "a06_s02_e03_v01", "length": 21, "label": 6}, {"file_name": "a09_s01_e03_v01", "length": 100, "label": 8}, {"file_name": "a08_s04_e00_v01", "length": 99, "label": 7}, {"file_name": "a02_s10_e01_v01", "length": 81, "label": 2}, {"file_name": "a11_s04_e01_v01", "length": 26, "label": 9}, {"file_name": "a03_s05_e01_v01", "length": 56, "label": 3}, {"file_name": "a06_s07_e04_v01", "length": 38, "label": 6}, {"file_name": "a09_s09_e03_v01", "length": 150, "label": 8}, {"file_name": "a02_s06_e02_v01", "length": 25, "label": 2}, {"file_name": "a05_s01_e04_v01", "length": 26, "label": 5}, {"file_name": "a11_s03_e04_v01", "length": 26, "label": 9}, {"file_name": "a04_s08_e02_v01", "length": 97, "label": 4}, {"file_name": "a04_s09_e04_v01", "length": 54, "label": 4}, {"file_name": "a08_s07_e00_v01", "length": 72, "label": 7}, {"file_name": "a04_s01_e05_v01", "length": 50, "label": 4}, {"file_name": "a12_s07_e01_v01", "length": 32, "label": 10}, {"file_name": "a02_s01_e03_v01", "length": 76, "label": 2}, {"file_name": "a11_s10_e05_v01", "length": 21, "label": 9}, {"file_name": "a09_s04_e00_v01", "length": 99, "label": 8}, {"file_name": "a09_s05_e01_v01", "length": 60, "label": 8}, {"file_name": "a09_s01_e04_v01", "length": 50, "label": 8}, {"file_name": 
"a12_s08_e00_v01", "length": 44, "label": 10}, {"file_name": "a04_s06_e03_v01", "length": 161, "label": 4}, {"file_name": "a05_s05_e00_v01", "length": 65, "label": 5}, {"file_name": "a11_s06_e01_v01", "length": 18, "label": 9}, {"file_name": "a01_s10_e02_v01", "length": 50, "label": 1}, {"file_name": "a04_s05_e01_v01", "length": 40, "label": 4}, {"file_name": "a02_s10_e04_v01", "length": 36, "label": 2}, {"file_name": "a02_s06_e05_v01", "length": 27, "label": 2}, {"file_name": "a11_s05_e00_v01", "length": 32, "label": 9}, {"file_name": "a04_s05_e06_v01", "length": 31, "label": 4}, {"file_name": "a04_s07_e01_v01", "length": 97, "label": 4}, {"file_name": "a03_s04_e01_v01", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v01", "length": 99, "label": 3}, {"file_name": "a06_s09_e02_v01", "length": 50, "label": 6}, {"file_name": "a03_s07_e00_v01", "length": 22, "label": 3}, {"file_name": "a08_s05_e05_v01", "length": 54, "label": 7}, {"file_name": "a06_s04_e02_v01", "length": 25, "label": 6}, {"file_name": "a12_s04_e01_v01", "length": 31, "label": 10}, {"file_name": "a09_s05_e00_v01", "length": 86, "label": 8}, {"file_name": "a04_s06_e02_v01", "length": 120, "label": 4}, {"file_name": "a04_s04_e04_v01", "length": 38, "label": 4}, {"file_name": "a09_s04_e02_v01", "length": 73, "label": 8}, {"file_name": "a02_s02_e01_v01", "length": 35, "label": 2}, {"file_name": "a06_s09_e00_v01", "length": 82, "label": 6}, {"file_name": "a05_s09_e00_v01", "length": 20, "label": 5}, {"file_name": "a05_s03_e01_v01", "length": 54, "label": 5}, {"file_name": "a02_s05_e04_v01", "length": 31, "label": 2}, {"file_name": "a01_s06_e01_v01", "length": 35, "label": 1}, {"file_name": "a01_s04_e05_v01", "length": 20, "label": 1}, {"file_name": "a12_s04_e02_v01", "length": 41, "label": 10}, {"file_name": "a03_s05_e02_v01", "length": 85, "label": 3}, {"file_name": "a03_s10_e04_v01", "length": 165, "label": 3}, {"file_name": "a01_s03_e02_v01", "length": 51, "label": 1}, {"file_name": 
"a05_s08_e05_v01", "length": 31, "label": 5}, {"file_name": "a01_s03_e00_v01", "length": 25, "label": 1}, {"file_name": "a08_s06_e03_v01", "length": 175, "label": 7}, {"file_name": "a04_s04_e07_v01", "length": 37, "label": 4}, {"file_name": "a05_s09_e02_v01", "length": 22, "label": 5}, {"file_name": "a01_s02_e01_v01", "length": 32, "label": 1}, {"file_name": "a01_s03_e01_v01", "length": 53, "label": 1}, {"file_name": "a04_s03_e01_v01", "length": 33, "label": 4}, {"file_name": "a06_s06_e00_v01", "length": 27, "label": 6}, {"file_name": "a12_s06_e02_v01", "length": 22, "label": 10}, {"file_name": "a04_s10_e04_v01", "length": 21, "label": 4}, {"file_name": "a12_s03_e01_v01", "length": 54, "label": 10}, {"file_name": "a08_s04_e02_v01", "length": 124, "label": 7}, {"file_name": "a06_s04_e04_v01", "length": 29, "label": 6}, {"file_name": "a12_s06_e03_v01", "length": 26, "label": 10}, {"file_name": "a08_s01_e04_v01", "length": 141, "label": 7}, {"file_name": "a04_s03_e00_v01", "length": 33, "label": 4}, {"file_name": "a12_s05_e02_v01", "length": 45, "label": 10}, {"file_name": "a08_s01_e00_v01", "length": 111, "label": 7}, {"file_name": "a01_s03_e03_v01", "length": 41, "label": 1}, {"file_name": "a01_s04_e02_v01", "length": 44, "label": 1}, {"file_name": "a06_s05_e00_v01", "length": 30, "label": 6}, {"file_name": "a01_s10_e04_v01", "length": 70, "label": 1}, {"file_name": "a08_s05_e00_v01", "length": 110, "label": 7}, {"file_name": "a09_s02_e00_v01", "length": 40, "label": 8}, {"file_name": "a12_s04_e05_v01", "length": 42, "label": 10}, {"file_name": "a06_s07_e02_v01", "length": 41, "label": 6}, {"file_name": "a08_s07_e02_v01", "length": 95, "label": 7}, {"file_name": "a11_s10_e01_v01", "length": 38, "label": 9}, {"file_name": "a02_s07_e00_v01", "length": 33, "label": 2}, {"file_name": "a06_s08_e01_v01", "length": 17, "label": 6}, {"file_name": "a01_s10_e03_v01", "length": 32, "label": 1}, {"file_name": "a11_s02_e04_v01", "length": 38, "label": 9}, {"file_name": 
"a12_s03_e03_v01", "length": 31, "label": 10}, {"file_name": "a05_s01_e01_v01", "length": 21, "label": 5}, {"file_name": "a05_s08_e02_v01", "length": 13, "label": 5}, {"file_name": "a09_s08_e01_v01", "length": 84, "label": 8}, {"file_name": "a01_s08_e04_v01", "length": 34, "label": 1}, {"file_name": "a09_s09_e00_v01", "length": 128, "label": 8}, {"file_name": "a03_s10_e03_v01", "length": 43, "label": 3}, {"file_name": "a09_s05_e03_v01", "length": 96, "label": 8}, {"file_name": "a09_s02_e04_v01", "length": 84, "label": 8}, {"file_name": "a08_s01_e01_v01", "length": 81, "label": 7}, {"file_name": "a09_s10_e00_v01", "length": 76, "label": 8}, {"file_name": "a04_s04_e10_v01", "length": 22, "label": 4}, {"file_name": "a05_s01_e00_v01", "length": 24, "label": 5}, {"file_name": "a06_s02_e01_v01", "length": 38, "label": 6}, {"file_name": "a08_s08_e03_v01", "length": 82, "label": 7}, {"file_name": "a04_s04_e03_v01", "length": 31, "label": 4}, {"file_name": "a12_s05_e04_v01", "length": 41, "label": 10}, {"file_name": "a05_s10_e05_v01", "length": 48, "label": 5}, {"file_name": "a02_s10_e02_v01", "length": 49, "label": 2}, {"file_name": "a06_s03_e03_v01", "length": 40, "label": 6}, {"file_name": "a05_s07_e04_v01", "length": 20, "label": 5}, {"file_name": "a02_s10_e00_v01", "length": 50, "label": 2}, {"file_name": "a08_s05_e03_v01", "length": 90, "label": 7}, {"file_name": "a12_s04_e00_v01", "length": 65, "label": 10}, {"file_name": "a03_s04_e02_v01", "length": 46, "label": 3}, {"file_name": "a06_s02_e02_v01", "length": 30, "label": 6}, {"file_name": "a03_s04_e03_v01", "length": 47, "label": 3}, {"file_name": "a11_s08_e03_v01", "length": 46, "label": 9}, {"file_name": "a09_s07_e03_v01", "length": 47, "label": 8}, {"file_name": "a05_s03_e03_v01", "length": 26, "label": 5}, {"file_name": "a09_s10_e03_v01", "length": 58, "label": 8}, {"file_name": "a01_s05_e03_v01", "length": 51, "label": 1}, {"file_name": "a11_s06_e02_v01", "length": 21, "label": 9}, {"file_name": 
"a05_s05_e01_v01", "length": 31, "label": 5}, {"file_name": "a01_s05_e01_v01", "length": 54, "label": 1}, {"file_name": "a04_s04_e02_v01", "length": 46, "label": 4}, {"file_name": "a11_s08_e02_v01", "length": 32, "label": 9}, {"file_name": "a11_s07_e03_v01", "length": 13, "label": 9}, {"file_name": "a06_s01_e01_v01", "length": 26, "label": 6}, {"file_name": "a06_s10_e05_v01", "length": 20, "label": 6}, {"file_name": "a12_s06_e00_v01", "length": 23, "label": 10}, {"file_name": "a12_s03_e02_v01", "length": 26, "label": 10}, {"file_name": "a08_s05_e02_v01", "length": 73, "label": 7}, {"file_name": "a03_s04_e00_v01", "length": 36, "label": 3}, {"file_name": "a11_s01_e03_v01", "length": 45, "label": 9}, {"file_name": "a03_s08_e01_v01", "length": 55, "label": 3}, {"file_name": "a11_s04_e00_v01", "length": 27, "label": 9}, {"file_name": "a04_s05_e00_v01", "length": 83, "label": 4}, {"file_name": "a12_s05_e01_v01", "length": 30, "label": 10}, {"file_name": "a02_s05_e02_v01", "length": 30, "label": 2}, {"file_name": "a06_s06_e01_v01", "length": 20, "label": 6}, {"file_name": "a03_s03_e02_v01", "length": 62, "label": 3}, {"file_name": "a11_s07_e02_v01", "length": 38, "label": 9}, {"file_name": "a11_s01_e02_v01", "length": 26, "label": 9}, {"file_name": "a05_s04_e02_v02", "length": 46, "label": 5}, {"file_name": "a12_s09_e04_v02", "length": 16, "label": 10}, {"file_name": "a03_s03_e04_v02", "length": 35, "label": 3}, {"file_name": "a08_s02_e01_v02", "length": 145, "label": 7}, {"file_name": "a03_s05_e03_v02", "length": 26, "label": 3}, {"file_name": "a06_s05_e01_v02", "length": 21, "label": 6}, {"file_name": "a12_s10_e01_v02", "length": 21, "label": 10}, {"file_name": "a01_s07_e03_v02", "length": 26, "label": 1}, {"file_name": "a03_s08_e02_v02", "length": 21, "label": 3}, {"file_name": "a11_s10_e03_v02", "length": 21, "label": 9}, {"file_name": "a04_s06_e05_v02", "length": 24, "label": 4}, {"file_name": "a11_s03_e00_v02", "length": 40, "label": 9}, {"file_name": 
"a03_s02_e00_v02", "length": 32, "label": 3}, {"file_name": "a11_s01_e04_v02", "length": 21, "label": 9}, {"file_name": "a04_s05_e04_v02", "length": 30, "label": 4}, {"file_name": "a09_s08_e04_v02", "length": 48, "label": 8}, {"file_name": "a09_s06_e01_v02", "length": 33, "label": 8}, {"file_name": "a09_s07_e01_v02", "length": 36, "label": 8}, {"file_name": "a02_s08_e01_v02", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v02", "length": 41, "label": 1}, {"file_name": "a02_s02_e02_v02", "length": 31, "label": 2}, {"file_name": "a02_s07_e05_v02", "length": 31, "label": 2}, {"file_name": "a06_s02_e00_v02", "length": 25, "label": 6}, {"file_name": "a03_s02_e02_v02", "length": 22, "label": 3}, {"file_name": "a11_s09_e04_v02", "length": 21, "label": 9}, {"file_name": "a09_s03_e04_v02", "length": 61, "label": 8}, {"file_name": "a04_s01_e02_v02", "length": 37, "label": 4}, {"file_name": "a12_s01_e01_v02", "length": 47, "label": 10}, {"file_name": "a02_s07_e03_v02", "length": 9, "label": 2}, {"file_name": "a05_s08_e04_v02", "length": 21, "label": 5}, {"file_name": "a02_s07_e02_v02", "length": 31, "label": 2}, {"file_name": "a04_s07_e02_v02", "length": 18, "label": 4}, {"file_name": "a01_s08_e03_v02", "length": 31, "label": 1}, {"file_name": "a08_s03_e01_v02", "length": 81, "label": 7}, {"file_name": "a04_s08_e03_v02", "length": 16, "label": 4}, {"file_name": "a03_s10_e00_v02", "length": 17, "label": 3}, {"file_name": "a04_s03_e03_v02", "length": 44, "label": 4}, {"file_name": "a11_s05_e02_v02", "length": 29, "label": 9}, {"file_name": "a06_s06_e02_v02", "length": 18, "label": 6}, {"file_name": "a09_s03_e00_v02", "length": 88, "label": 8}, {"file_name": "a09_s03_e03_v02", "length": 58, "label": 8}, {"file_name": "a04_s02_e02_v02", "length": 104, "label": 4}, {"file_name": "a08_s01_e02_v02", "length": 83, "label": 7}, {"file_name": "a04_s04_e00_v02", "length": 46, "label": 4}, {"file_name": "a03_s02_e03_v02", "length": 39, "label": 3}, {"file_name": "a05_s04_e00_v02", 
"length": 19, "label": 5}, {"file_name": "a05_s07_e03_v02", "length": 16, "label": 5}, {"file_name": "a06_s10_e00_v02", "length": 26, "label": 6}, {"file_name": "a11_s07_e00_v02", "length": 26, "label": 9}, {"file_name": "a03_s01_e01_v02", "length": 24, "label": 3}, {"file_name": "a04_s06_e01_v02", "length": 16, "label": 4}, {"file_name": "a08_s02_e04_v02", "length": 102, "label": 7}, {"file_name": "a09_s08_e03_v02", "length": 41, "label": 8}, {"file_name": "a05_s07_e00_v02", "length": 16, "label": 5}, {"file_name": "a05_s02_e02_v02", "length": 27, "label": 5}, {"file_name": "a04_s06_e04_v02", "length": 21, "label": 4}, {"file_name": "a05_s09_e03_v02", "length": 21, "label": 5}, {"file_name": "a03_s06_e02_v02", "length": 15, "label": 3}, {"file_name": "a01_s01_e00_v02", "length": 30, "label": 1}, {"file_name": "a06_s06_e03_v02", "length": 13, "label": 6}, {"file_name": "a06_s10_e02_v02", "length": 21, "label": 6}, {"file_name": "a02_s07_e04_v02", "length": 36, "label": 2}, {"file_name": "a09_s06_e00_v02", "length": 68, "label": 8}, {"file_name": "a04_s07_e04_v02", "length": 21, "label": 4}, {"file_name": "a05_s02_e01_v02", "length": 36, "label": 5}, {"file_name": "a01_s06_e04_v02", "length": 17, "label": 1}, {"file_name": "a04_s08_e01_v02", "length": 21, "label": 4}, {"file_name": "a01_s09_e00_v02", "length": 31, "label": 1}, {"file_name": "a08_s03_e03_v02", "length": 71, "label": 7}, {"file_name": "a12_s03_e00_v02", "length": 41, "label": 10}, {"file_name": "a11_s02_e03_v02", "length": 26, "label": 9}, {"file_name": "a12_s07_e02_v02", "length": 11, "label": 10}, {"file_name": "a11_s05_e01_v02", "length": 35, "label": 9}, {"file_name": "a05_s06_e01_v02", "length": 14, "label": 5}, {"file_name": "a06_s02_e04_v02", "length": 14, "label": 6}, {"file_name": "a06_s04_e00_v02", "length": 18, "label": 6}, {"file_name": "a05_s09_e01_v02", "length": 31, "label": 5}, {"file_name": "a11_s05_e03_v02", "length": 34, "label": 9}, {"file_name": "a03_s01_e00_v02", "length": 33, 
"label": 3}, {"file_name": "a11_s02_e01_v02", "length": 32, "label": 9}, {"file_name": "a04_s02_e00_v02", "length": 57, "label": 4}, {"file_name": "a11_s01_e01_v02", "length": 26, "label": 9}, {"file_name": "a02_s06_e03_v02", "length": 21, "label": 2}, {"file_name": "a12_s10_e03_v02", "length": 21, "label": 10}, {"file_name": "a01_s05_e02_v02", "length": 19, "label": 1}, {"file_name": "a01_s06_e00_v02", "length": 21, "label": 1}, {"file_name": "a05_s07_e01_v02", "length": 21, "label": 5}, {"file_name": "a01_s09_e01_v02", "length": 26, "label": 1}, {"file_name": "a02_s06_e00_v02", "length": 18, "label": 2}, {"file_name": "a11_s09_e00_v02", "length": 11, "label": 9}, {"file_name": "a03_s03_e01_v02", "length": 47, "label": 3}, {"file_name": "a03_s08_e00_v02", "length": 22, "label": 3}, {"file_name": "a06_s04_e01_v02", "length": 21, "label": 6}, {"file_name": "a02_s05_e01_v02", "length": 34, "label": 2}, {"file_name": "a03_s04_e04_v02", "length": 29, "label": 3}, {"file_name": "a01_s09_e02_v02", "length": 22, "label": 1}, {"file_name": "a08_s03_e04_v02", "length": 59, "label": 7}, {"file_name": "a01_s10_e00_v02", "length": 28, "label": 1}, {"file_name": "a01_s02_e02_v02", "length": 23, "label": 1}, {"file_name": "a09_s03_e01_v02", "length": 42, "label": 8}, {"file_name": "a05_s06_e00_v02", "length": 23, "label": 5}, {"file_name": "a05_s01_e02_v02", "length": 31, "label": 5}, {"file_name": "a02_s02_e04_v02", "length": 28, "label": 2}, {"file_name": "a06_s07_e03_v02", "length": 21, "label": 6}, {"file_name": "a04_s02_e04_v02", "length": 23, "label": 4}, {"file_name": "a04_s05_e02_v02", "length": 29, "label": 4}, {"file_name": "a02_s07_e01_v02", "length": 31, "label": 2}, {"file_name": "a04_s02_e06_v02", "length": 28, "label": 4}, {"file_name": "a03_s07_e03_v02", "length": 11, "label": 3}, {"file_name": "a12_s08_e01_v02", "length": 14, "label": 10}, {"file_name": "a05_s01_e03_v02", "length": 31, "label": 5}, {"file_name": "a02_s09_e02_v02", "length": 43, "label": 2}, 
{"file_name": "a05_s08_e03_v02", "length": 26, "label": 5}, {"file_name": "a04_s06_e00_v02", "length": 18, "label": 4}, {"file_name": "a09_s01_e02_v02", "length": 67, "label": 8}, {"file_name": "a12_s09_e00_v02", "length": 21, "label": 10}, {"file_name": "a04_s09_e02_v02", "length": 16, "label": 4}, {"file_name": "a03_s03_e03_v02", "length": 43, "label": 3}, {"file_name": "a08_s07_e03_v02", "length": 54, "label": 7}, {"file_name": "a08_s09_e02_v02", "length": 76, "label": 7}, {"file_name": "a08_s09_e00_v02", "length": 71, "label": 7}, {"file_name": "a06_s06_e04_v02", "length": 16, "label": 6}, {"file_name": "a01_s07_e04_v02", "length": 21, "label": 1}, {"file_name": "a08_s05_e04_v02", "length": 45, "label": 7}, {"file_name": "a05_s04_e01_v02", "length": 26, "label": 5}, {"file_name": "a04_s07_e00_v02", "length": 23, "label": 4}, {"file_name": "a05_s08_e01_v02", "length": 21, "label": 5}, {"file_name": "a11_s06_e03_v02", "length": 17, "label": 9}, {"file_name": "a01_s04_e03_v02", "length": 34, "label": 1}, {"file_name": "a11_s06_e04_v02", "length": 8, "label": 9}, {"file_name": "a12_s07_e03_v02", "length": 16, "label": 10}, {"file_name": "a01_s02_e04_v02", "length": 21, "label": 1}, {"file_name": "a04_s04_e05_v02", "length": 132, "label": 4}, {"file_name": "a03_s01_e03_v02", "length": 36, "label": 3}, {"file_name": "a12_s02_e02_v02", "length": 38, "label": 10}, {"file_name": "a03_s06_e01_v02", "length": 17, "label": 3}, {"file_name": "a05_s02_e03_v02", "length": 28, "label": 5}, {"file_name": "a03_s02_e04_v02", "length": 23, "label": 3}, {"file_name": "a08_s02_e03_v02", "length": 113, "label": 7}, {"file_name": "a08_s03_e02_v02", "length": 67, "label": 7}, {"file_name": "a09_s01_e01_v02", "length": 55, "label": 8}, {"file_name": "a02_s01_e01_v02", "length": 30, "label": 2}, {"file_name": "a08_s06_e00_v02", "length": 86, "label": 7}, {"file_name": "a12_s08_e02_v02", "length": 16, "label": 10}, {"file_name": "a02_s08_e00_v02", "length": 26, "label": 2}, {"file_name": 
"a01_s08_e02_v02", "length": 33, "label": 1}, {"file_name": "a09_s04_e01_v02", "length": 74, "label": 8}, {"file_name": "a04_s01_e04_v02", "length": 26, "label": 4}, {"file_name": "a04_s05_e03_v02", "length": 31, "label": 4}, {"file_name": "a08_s10_e03_v02", "length": 61, "label": 7}, {"file_name": "a02_s05_e00_v02", "length": 28, "label": 2}, {"file_name": "a06_s04_e03_v02", "length": 24, "label": 6}, {"file_name": "a06_s09_e03_v02", "length": 21, "label": 6}, {"file_name": "a05_s03_e02_v02", "length": 21, "label": 5}, {"file_name": "a06_s03_e04_v02", "length": 12, "label": 6}, {"file_name": "a06_s01_e03_v02", "length": 16, "label": 6}, {"file_name": "a11_s03_e01_v02", "length": 23, "label": 9}, {"file_name": "a09_s02_e01_v02", "length": 33, "label": 8}, {"file_name": "a02_s02_e00_v02", "length": 42, "label": 2}, {"file_name": "a01_s01_e03_v02", "length": 39, "label": 1}, {"file_name": "a08_s06_e02_v02", "length": 83, "label": 7}, {"file_name": "a12_s01_e03_v02", "length": 41, "label": 10}, {"file_name": "a06_s05_e04_v02", "length": 16, "label": 6}, {"file_name": "a01_s04_e06_v02", "length": 24, "label": 1}, {"file_name": "a09_s09_e01_v02", "length": 41, "label": 8}, {"file_name": "a04_s10_e03_v02", "length": 16, "label": 4}, {"file_name": "a06_s09_e04_v02", "length": 16, "label": 6}, {"file_name": "a02_s04_e01_v02", "length": 31, "label": 2}, {"file_name": "a12_s10_e04_v02", "length": 14, "label": 10}, {"file_name": "a04_s03_e05_v02", "length": 42, "label": 4}, {"file_name": "a06_s03_e01_v02", "length": 25, "label": 6}, {"file_name": "a02_s03_e04_v02", "length": 62, "label": 2}, {"file_name": "a11_s09_e02_v02", "length": 25, "label": 9}, {"file_name": "a08_s08_e02_v02", "length": 53, "label": 7}, {"file_name": "a03_s02_e01_v02", "length": 36, "label": 3}, {"file_name": "a12_s02_e00_v02", "length": 50, "label": 10}, {"file_name": "a12_s08_e03_v02", "length": 13, "label": 10}, {"file_name": "a02_s09_e03_v02", "length": 31, "label": 2}, {"file_name": 
"a09_s02_e02_v02", "length": 46, "label": 8}, {"file_name": "a05_s09_e04_v02", "length": 21, "label": 5}, {"file_name": "a01_s04_e00_v02", "length": 26, "label": 1}, {"file_name": "a08_s04_e03_v02", "length": 121, "label": 7}, {"file_name": "a08_s05_e01_v02", "length": 59, "label": 7}, {"file_name": "a12_s09_e03_v02", "length": 16, "label": 10}, {"file_name": "a02_s04_e03_v02", "length": 31, "label": 2}, {"file_name": "a04_s03_e04_v02", "length": 49, "label": 4}, {"file_name": "a12_s06_e01_v02", "length": 16, "label": 10}, {"file_name": "a11_s04_e03_v02", "length": 32, "label": 9}, {"file_name": "a05_s03_e00_v02", "length": 22, "label": 5}, {"file_name": "a12_s07_e00_v02", "length": 18, "label": 10}, {"file_name": "a06_s03_e02_v02", "length": 16, "label": 6}, {"file_name": "a03_s03_e05_v02", "length": 33, "label": 3}, {"file_name": "a11_s08_e01_v02", "length": 22, "label": 9}, {"file_name": "a05_s05_e04_v02", "length": 17, "label": 5}, {"file_name": "a06_s10_e01_v02", "length": 17, "label": 6}, {"file_name": "a04_s03_e02_v02", "length": 108, "label": 4}, {"file_name": "a02_s03_e03_v02", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v02", "length": 36, "label": 8}, {"file_name": "a04_s08_e04_v02", "length": 36, "label": 4}, {"file_name": "a11_s08_e00_v02", "length": 35, "label": 9}, {"file_name": "a02_s01_e00_v02", "length": 39, "label": 2}, {"file_name": "a04_s02_e03_v02", "length": 45, "label": 4}, {"file_name": "a04_s02_e01_v02", "length": 113, "label": 4}, {"file_name": "a06_s08_e00_v02", "length": 19, "label": 6}, {"file_name": "a08_s08_e01_v02", "length": 49, "label": 7}, {"file_name": "a02_s03_e01_v02", "length": 45, "label": 2}, {"file_name": "a11_s02_e02_v02", "length": 33, "label": 9}, {"file_name": "a09_s07_e02_v02", "length": 29, "label": 8}, {"file_name": "a02_s05_e03_v02", "length": 21, "label": 2}, {"file_name": "a01_s07_e02_v02", "length": 23, "label": 1}, {"file_name": "a06_s05_e03_v02", "length": 15, "label": 6}, {"file_name": 
"a12_s05_e03_v02", "length": 33, "label": 10}, {"file_name": "a03_s05_e00_v02", "length": 20, "label": 3}, {"file_name": "a09_s03_e02_v02", "length": 58, "label": 8}, {"file_name": "a09_s04_e04_v02", "length": 138, "label": 8}, {"file_name": "a11_s10_e00_v02", "length": 21, "label": 9}, {"file_name": "a04_s04_e01_v02", "length": 35, "label": 4}, {"file_name": "a02_s08_e02_v02", "length": 21, "label": 2}, {"file_name": "a01_s05_e00_v02", "length": 27, "label": 1}, {"file_name": "a04_s01_e07_v02", "length": 34, "label": 4}, {"file_name": "a11_s06_e00_v02", "length": 27, "label": 9}, {"file_name": "a05_s02_e00_v02", "length": 36, "label": 5}, {"file_name": "a02_s02_e03_v02", "length": 29, "label": 2}, {"file_name": "a09_s05_e02_v02", "length": 51, "label": 8}, {"file_name": "a05_s06_e02_v02", "length": 16, "label": 5}, {"file_name": "a08_s01_e03_v02", "length": 80, "label": 7}, {"file_name": "a08_s09_e01_v02", "length": 62, "label": 7}, {"file_name": "a02_s08_e04_v02", "length": 36, "label": 2}, {"file_name": "a06_s05_e02_v02", "length": 21, "label": 6}, {"file_name": "a01_s02_e03_v02", "length": 24, "label": 1}, {"file_name": "a03_s09_e02_v02", "length": 26, "label": 3}, {"file_name": "a04_s08_e00_v02", "length": 31, "label": 4}, {"file_name": "a12_s03_e04_v02", "length": 46, "label": 10}, {"file_name": "a08_s04_e01_v02", "length": 126, "label": 7}, {"file_name": "a12_s04_e03_v02", "length": 35, "label": 10}, {"file_name": "a04_s09_e03_v02", "length": 26, "label": 4}, {"file_name": "a12_s05_e00_v02", "length": 31, "label": 10}, {"file_name": "a11_s05_e04_v02", "length": 25, "label": 9}, {"file_name": "a05_s06_e03_v02", "length": 30, "label": 5}, {"file_name": "a09_s06_e02_v02", "length": 39, "label": 8}, {"file_name": "a12_s02_e03_v02", "length": 27, "label": 10}, {"file_name": "a11_s03_e03_v02", "length": 21, "label": 9}, {"file_name": "a11_s07_e04_v02", "length": 17, "label": 9}, {"file_name": "a04_s01_e00_v02", "length": 43, "label": 4}, {"file_name": 
"a03_s08_e03_v02", "length": 14, "label": 3}, {"file_name": "a04_s10_e00_v02", "length": 21, "label": 4}, {"file_name": "a08_s03_e00_v02", "length": 116, "label": 7}, {"file_name": "a02_s08_e03_v02", "length": 21, "label": 2}, {"file_name": "a01_s09_e03_v02", "length": 24, "label": 1}, {"file_name": "a01_s01_e04_v02", "length": 29, "label": 1}, {"file_name": "a01_s07_e00_v02", "length": 21, "label": 1}, {"file_name": "a02_s03_e00_v02", "length": 46, "label": 2}, {"file_name": "a01_s02_e00_v02", "length": 23, "label": 1}, {"file_name": "a03_s09_e04_v02", "length": 21, "label": 3}, {"file_name": "a01_s06_e02_v02", "length": 21, "label": 1}, {"file_name": "a03_s07_e02_v02", "length": 17, "label": 3}, {"file_name": "a03_s05_e04_v02", "length": 39, "label": 3}, {"file_name": "a08_s07_e01_v02", "length": 104, "label": 7}, {"file_name": "a04_s07_e03_v02", "length": 21, "label": 4}, {"file_name": "a08_s04_e04_v02", "length": 124, "label": 7}, {"file_name": "a08_s08_e00_v02", "length": 58, "label": 7}, {"file_name": "a02_s09_e00_v02", "length": 37, "label": 2}, {"file_name": "a06_s03_e00_v02", "length": 24, "label": 6}, {"file_name": "a09_s09_e04_v02", "length": 36, "label": 8}, {"file_name": "a05_s04_e04_v02", "length": 21, "label": 5}, {"file_name": "a09_s04_e03_v02", "length": 61, "label": 8}, {"file_name": "a01_s09_e04_v02", "length": 28, "label": 1}, {"file_name": "a05_s10_e00_v02", "length": 26, "label": 5}, {"file_name": "a09_s08_e02_v02", "length": 36, "label": 8}, {"file_name": "a11_s07_e01_v02", "length": 15, "label": 9}, {"file_name": "a06_s01_e00_v02", "length": 21, "label": 6}, {"file_name": "a12_s08_e04_v02", "length": 14, "label": 10}, {"file_name": "a08_s09_e04_v02", "length": 56, "label": 7}, {"file_name": "a12_s10_e02_v02", "length": 16, "label": 10}, {"file_name": "a04_s01_e01_v02", "length": 83, "label": 4}, {"file_name": "a01_s08_e01_v02", "length": 26, "label": 1}, {"file_name": "a09_s07_e00_v02", "length": 31, "label": 8}, {"file_name": 
"a04_s09_e00_v02", "length": 26, "label": 4}, {"file_name": "a08_s02_e02_v02", "length": 134, "label": 7}, {"file_name": "a09_s09_e02_v02", "length": 57, "label": 8}, {"file_name": "a09_s02_e03_v02", "length": 46, "label": 8}, {"file_name": "a11_s09_e01_v02", "length": 14, "label": 9}, {"file_name": "a03_s10_e01_v02", "length": 11, "label": 3}, {"file_name": "a11_s03_e02_v02", "length": 36, "label": 9}, {"file_name": "a11_s08_e04_v02", "length": 16, "label": 9}, {"file_name": "a06_s08_e02_v02", "length": 16, "label": 6}, {"file_name": "a12_s01_e00_v02", "length": 21, "label": 10}, {"file_name": "a02_s06_e04_v02", "length": 21, "label": 2}, {"file_name": "a06_s07_e01_v02", "length": 21, "label": 6}, {"file_name": "a05_s10_e03_v02", "length": 21, "label": 5}, {"file_name": "a09_s05_e04_v02", "length": 66, "label": 8}, {"file_name": "a03_s06_e00_v02", "length": 23, "label": 3}, {"file_name": "a12_s02_e01_v02", "length": 40, "label": 10}, {"file_name": "a08_s10_e02_v02", "length": 56, "label": 7}, {"file_name": "a08_s02_e00_v02", "length": 111, "label": 7}, {"file_name": "a06_s10_e03_v02", "length": 21, "label": 6}, {"file_name": "a11_s04_e02_v02", "length": 33, "label": 9}, {"file_name": "a08_s09_e03_v02", "length": 66, "label": 7}, {"file_name": "a12_s06_e04_v02", "length": 11, "label": 10}, {"file_name": "a01_s07_e01_v02", "length": 27, "label": 1}, {"file_name": "a05_s02_e04_v02", "length": 22, "label": 5}, {"file_name": "a09_s08_e00_v02", "length": 41, "label": 8}, {"file_name": "a02_s04_e04_v02", "length": 33, "label": 2}, {"file_name": "a06_s07_e00_v02", "length": 15, "label": 6}, {"file_name": "a04_s09_e01_v02", "length": 21, "label": 4}, {"file_name": "a09_s01_e00_v02", "length": 42, "label": 8}, {"file_name": "a08_s10_e01_v02", "length": 91, "label": 7}, {"file_name": "a11_s10_e02_v02", "length": 56, "label": 9}, {"file_name": "a09_s10_e02_v02", "length": 41, "label": 8}, {"file_name": "a03_s07_e04_v02", "length": 11, "label": 3}, {"file_name": 
"a05_s08_e00_v02", "length": 26, "label": 5}, {"file_name": "a05_s05_e03_v02", "length": 25, "label": 5}, {"file_name": "a11_s09_e03_v02", "length": 11, "label": 9}, {"file_name": "a12_s04_e04_v02", "length": 36, "label": 10}, {"file_name": "a04_s01_e03_v02", "length": 30, "label": 4}, {"file_name": "a04_s10_e02_v02", "length": 21, "label": 4}, {"file_name": "a06_s10_e04_v02", "length": 21, "label": 6}, {"file_name": "a01_s08_e00_v02", "length": 21, "label": 1}, {"file_name": "a03_s10_e02_v02", "length": 28, "label": 3}, {"file_name": "a03_s07_e01_v02", "length": 11, "label": 3}, {"file_name": "a05_s04_e03_v02", "length": 22, "label": 5}, {"file_name": "a01_s01_e02_v02", "length": 31, "label": 1}, {"file_name": "a05_s10_e04_v02", "length": 21, "label": 5}, {"file_name": "a06_s08_e03_v02", "length": 21, "label": 6}, {"file_name": "a02_s04_e02_v02", "length": 33, "label": 2}, {"file_name": "a04_s01_e09_v02", "length": 33, "label": 4}, {"file_name": "a12_s01_e04_v02", "length": 37, "label": 10}, {"file_name": "a02_s01_e02_v02", "length": 28, "label": 2}, {"file_name": "a12_s10_e00_v02", "length": 21, "label": 10}, {"file_name": "a11_s02_e00_v02", "length": 40, "label": 9}, {"file_name": "a02_s09_e01_v02", "length": 40, "label": 2}, {"file_name": "a02_s04_e00_v02", "length": 46, "label": 2}, {"file_name": "a12_s01_e02_v02", "length": 27, "label": 10}, {"file_name": "a04_s02_e05_v02", "length": 61, "label": 4}, {"file_name": "a03_s01_e04_v02", "length": 36, "label": 3}, {"file_name": "a01_s03_e04_v02", "length": 46, "label": 1}, {"file_name": "a02_s06_e01_v02", "length": 16, "label": 2}, {"file_name": "a12_s07_e04_v02", "length": 11, "label": 10}, {"file_name": "a12_s03_e05_v02", "length": 33, "label": 10}, {"file_name": "a08_s10_e04_v02", "length": 66, "label": 7}, {"file_name": "a02_s03_e02_v02", "length": 58, "label": 2}, {"file_name": "a05_s06_e04_v02", "length": 21, "label": 5}, {"file_name": "a05_s10_e01_v02", "length": 21, "label": 5}, {"file_name": 
"a09_s10_e01_v02", "length": 49, "label": 8}, {"file_name": "a08_s08_e04_v02", "length": 61, "label": 7}, {"file_name": "a06_s01_e02_v02", "length": 11, "label": 6}, {"file_name": "a01_s01_e01_v02", "length": 28, "label": 1}, {"file_name": "a06_s08_e04_v02", "length": 21, "label": 6}, {"file_name": "a09_s06_e03_v02", "length": 47, "label": 8}, {"file_name": "a06_s09_e01_v02", "length": 16, "label": 6}, {"file_name": "a08_s06_e01_v02", "length": 116, "label": 7}, {"file_name": "a02_s01_e04_v02", "length": 38, "label": 2}, {"file_name": "a11_s01_e00_v02", "length": 31, "label": 9}, {"file_name": "a05_s05_e02_v02", "length": 17, "label": 5}, {"file_name": "a03_s03_e00_v02", "length": 41, "label": 3}, {"file_name": "a01_s04_e04_v02", "length": 34, "label": 1}, {"file_name": "a06_s01_e04_v02", "length": 21, "label": 6}, {"file_name": "a09_s05_e05_v02", "length": 48, "label": 8}, {"file_name": "a01_s10_e01_v02", "length": 21, "label": 1}, {"file_name": "a03_s09_e00_v02", "length": 26, "label": 3}, {"file_name": "a08_s10_e00_v02", "length": 67, "label": 7}, {"file_name": "a05_s10_e02_v02", "length": 21, "label": 5}, {"file_name": "a04_s10_e01_v02", "length": 23, "label": 4}, {"file_name": "a05_s03_e04_v02", "length": 26, "label": 5}, {"file_name": "a05_s07_e02_v02", "length": 36, "label": 5}, {"file_name": "a12_s02_e04_v02", "length": 37, "label": 10}, {"file_name": "a04_s02_e07_v02", "length": 47, "label": 4}, {"file_name": "a06_s02_e03_v02", "length": 13, "label": 6}, {"file_name": "a09_s01_e03_v02", "length": 56, "label": 8}, {"file_name": "a08_s04_e00_v02", "length": 86, "label": 7}, {"file_name": "a02_s10_e01_v02", "length": 32, "label": 2}, {"file_name": "a11_s04_e01_v02", "length": 15, "label": 9}, {"file_name": "a03_s05_e01_v02", "length": 39, "label": 3}, {"file_name": "a06_s07_e04_v02", "length": 19, "label": 6}, {"file_name": "a09_s09_e03_v02", "length": 51, "label": 8}, {"file_name": "a02_s06_e02_v02", "length": 21, "label": 2}, {"file_name": 
"a05_s01_e04_v02", "length": 21, "label": 5}, {"file_name": "a11_s03_e04_v02", "length": 12, "label": 9}, {"file_name": "a04_s08_e02_v02", "length": 21, "label": 4}, {"file_name": "a04_s09_e04_v02", "length": 36, "label": 4}, {"file_name": "a08_s07_e00_v02", "length": 53, "label": 7}, {"file_name": "a04_s01_e05_v02", "length": 37, "label": 4}, {"file_name": "a12_s07_e01_v02", "length": 14, "label": 10}, {"file_name": "a02_s01_e03_v02", "length": 40, "label": 2}, {"file_name": "a09_s04_e00_v02", "length": 84, "label": 8}, {"file_name": "a09_s05_e01_v02", "length": 65, "label": 8}, {"file_name": "a09_s01_e04_v02", "length": 65, "label": 8}, {"file_name": "a12_s08_e00_v02", "length": 13, "label": 10}, {"file_name": "a04_s06_e03_v02", "length": 12, "label": 4}, {"file_name": "a05_s05_e00_v02", "length": 41, "label": 5}, {"file_name": "a11_s06_e01_v02", "length": 17, "label": 9}, {"file_name": "a01_s10_e02_v02", "length": 26, "label": 1}, {"file_name": "a04_s05_e01_v02", "length": 26, "label": 4}, {"file_name": "a08_s05_e06_v02", "length": 24, "label": 7}, {"file_name": "a02_s10_e04_v02", "length": 29, "label": 2}, {"file_name": "a11_s05_e00_v02", "length": 27, "label": 9}, {"file_name": "a04_s07_e01_v02", "length": 21, "label": 4}, {"file_name": "a03_s04_e01_v02", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v02", "length": 31, "label": 3}, {"file_name": "a06_s09_e02_v02", "length": 16, "label": 6}, {"file_name": "a03_s07_e00_v02", "length": 21, "label": 3}, {"file_name": "a11_s05_e05_v02", "length": 29, "label": 9}, {"file_name": "a08_s05_e05_v02", "length": 44, "label": 7}, {"file_name": "a06_s04_e02_v02", "length": 41, "label": 6}, {"file_name": "a12_s04_e01_v02", "length": 36, "label": 10}, {"file_name": "a09_s05_e00_v02", "length": 70, "label": 8}, {"file_name": "a04_s06_e02_v02", "length": 16, "label": 4}, {"file_name": "a04_s04_e04_v02", "length": 53, "label": 4}, {"file_name": "a09_s04_e02_v02", "length": 61, "label": 8}, {"file_name": 
"a02_s02_e01_v02", "length": 26, "label": 2}, {"file_name": "a06_s09_e00_v02", "length": 16, "label": 6}, {"file_name": "a05_s09_e00_v02", "length": 21, "label": 5}, {"file_name": "a05_s03_e01_v02", "length": 28, "label": 5}, {"file_name": "a02_s05_e04_v02", "length": 29, "label": 2}, {"file_name": "a01_s06_e01_v02", "length": 24, "label": 1}, {"file_name": "a01_s04_e05_v02", "length": 29, "label": 1}, {"file_name": "a12_s04_e02_v02", "length": 23, "label": 10}, {"file_name": "a03_s05_e02_v02", "length": 36, "label": 3}, {"file_name": "a01_s03_e02_v02", "length": 61, "label": 1}, {"file_name": "a05_s04_e05_v02", "length": 21, "label": 5}, {"file_name": "a01_s03_e00_v02", "length": 26, "label": 1}, {"file_name": "a08_s06_e03_v02", "length": 103, "label": 7}, {"file_name": "a05_s09_e02_v02", "length": 21, "label": 5}, {"file_name": "a01_s02_e01_v02", "length": 21, "label": 1}, {"file_name": "a01_s03_e01_v02", "length": 42, "label": 1}, {"file_name": "a04_s03_e01_v02", "length": 29, "label": 4}, {"file_name": "a06_s06_e00_v02", "length": 16, "label": 6}, {"file_name": "a12_s06_e02_v02", "length": 26, "label": 10}, {"file_name": "a12_s03_e01_v02", "length": 44, "label": 10}, {"file_name": "a08_s04_e02_v02", "length": 116, "label": 7}, {"file_name": "a06_s04_e04_v02", "length": 20, "label": 6}, {"file_name": "a12_s06_e03_v02", "length": 14, "label": 10}, {"file_name": "a08_s01_e04_v02", "length": 81, "label": 7}, {"file_name": "a04_s03_e00_v02", "length": 28, "label": 4}, {"file_name": "a12_s05_e02_v02", "length": 28, "label": 10}, {"file_name": "a08_s01_e00_v02", "length": 146, "label": 7}, {"file_name": "a01_s03_e03_v02", "length": 53, "label": 1}, {"file_name": "a04_s01_e08_v02", "length": 83, "label": 4}, {"file_name": "a01_s04_e02_v02", "length": 26, "label": 1}, {"file_name": "a06_s05_e00_v02", "length": 30, "label": 6}, {"file_name": "a01_s10_e04_v02", "length": 21, "label": 1}, {"file_name": "a08_s05_e00_v02", "length": 61, "label": 7}, {"file_name": 
"a09_s02_e00_v02", "length": 32, "label": 8}, {"file_name": "a12_s04_e05_v02", "length": 29, "label": 10}, {"file_name": "a06_s07_e02_v02", "length": 21, "label": 6}, {"file_name": "a08_s07_e02_v02", "length": 40, "label": 7}, {"file_name": "a11_s10_e01_v02", "length": 31, "label": 9}, {"file_name": "a02_s07_e00_v02", "length": 31, "label": 2}, {"file_name": "a06_s08_e01_v02", "length": 16, "label": 6}, {"file_name": "a01_s10_e03_v02", "length": 25, "label": 1}, {"file_name": "a11_s02_e04_v02", "length": 35, "label": 9}, {"file_name": "a02_s09_e04_v02", "length": 1, "label": 2}, {"file_name": "a12_s03_e03_v02", "length": 39, "label": 10}, {"file_name": "a05_s01_e01_v02", "length": 24, "label": 5}, {"file_name": "a05_s08_e02_v02", "length": 16, "label": 5}, {"file_name": "a12_s09_e02_v02", "length": 21, "label": 10}, {"file_name": "a09_s08_e01_v02", "length": 40, "label": 8}, {"file_name": "a01_s08_e04_v02", "length": 21, "label": 1}, {"file_name": "a09_s09_e00_v02", "length": 51, "label": 8}, {"file_name": "a03_s10_e03_v02", "length": 13, "label": 3}, {"file_name": "a09_s05_e03_v02", "length": 46, "label": 8}, {"file_name": "a09_s02_e04_v02", "length": 49, "label": 8}, {"file_name": "a08_s01_e01_v02", "length": 91, "label": 7}, {"file_name": "a09_s10_e00_v02", "length": 41, "label": 8}, {"file_name": "a12_s09_e01_v02", "length": 16, "label": 10}, {"file_name": "a05_s01_e00_v02", "length": 26, "label": 5}, {"file_name": "a06_s02_e01_v02", "length": 13, "label": 6}, {"file_name": "a08_s08_e03_v02", "length": 56, "label": 7}, {"file_name": "a04_s04_e03_v02", "length": 61, "label": 4}, {"file_name": "a12_s05_e04_v02", "length": 36, "label": 10}, {"file_name": "a02_s10_e02_v02", "length": 31, "label": 2}, {"file_name": "a06_s03_e03_v02", "length": 16, "label": 6}, {"file_name": "a05_s07_e04_v02", "length": 21, "label": 5}, {"file_name": "a02_s10_e00_v02", "length": 38, "label": 2}, {"file_name": "a08_s05_e03_v02", "length": 46, "label": 7}, {"file_name": 
"a12_s04_e00_v02", "length": 46, "label": 10}, {"file_name": "a03_s04_e02_v02", "length": 27, "label": 3}, {"file_name": "a06_s02_e02_v02", "length": 11, "label": 6}, {"file_name": "a03_s04_e03_v02", "length": 31, "label": 3}, {"file_name": "a11_s08_e03_v02", "length": 21, "label": 9}, {"file_name": "a09_s07_e03_v02", "length": 35, "label": 8}, {"file_name": "a05_s03_e03_v02", "length": 26, "label": 5}, {"file_name": "a09_s10_e03_v02", "length": 31, "label": 8}, {"file_name": "a11_s06_e02_v02", "length": 16, "label": 9}, {"file_name": "a05_s05_e01_v02", "length": 23, "label": 5}, {"file_name": "a01_s05_e01_v02", "length": 35, "label": 1}, {"file_name": "a04_s04_e02_v02", "length": 34, "label": 4}, {"file_name": "a11_s08_e02_v02", "length": 17, "label": 9}, {"file_name": "a11_s07_e03_v02", "length": 21, "label": 9}, {"file_name": "a04_s01_e06_v02", "length": 31, "label": 4}, {"file_name": "a06_s01_e01_v02", "length": 21, "label": 6}, {"file_name": "a12_s03_e02_v02", "length": 39, "label": 10}, {"file_name": "a08_s05_e02_v02", "length": 51, "label": 7}, {"file_name": "a03_s04_e00_v02", "length": 26, "label": 3}, {"file_name": "a11_s01_e03_v02", "length": 31, "label": 9}, {"file_name": "a03_s08_e01_v02", "length": 21, "label": 3}, {"file_name": "a11_s04_e00_v02", "length": 32, "label": 9}, {"file_name": "a04_s05_e00_v02", "length": 36, "label": 4}, {"file_name": "a12_s05_e01_v02", "length": 31, "label": 10}, {"file_name": "a02_s05_e02_v02", "length": 26, "label": 2}, {"file_name": "a06_s06_e01_v02", "length": 16, "label": 6}, {"file_name": "a03_s03_e02_v02", "length": 32, "label": 3}, {"file_name": "a11_s07_e02_v02", "length": 21, "label": 9}, {"file_name": "a11_s01_e02_v02", "length": 21, "label": 9}]
self.nw_ucla_root = 'data/NW-UCLA/all_sqe/'
self.time_steps = 52
self.bone = [(1, 2), (2, 3), (3, 3), (4, 3), (5, 3), (6, 5), (7, 6), (8, 7), (9, 3), (10, 9), (11, 10),
(12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19)]
self.label = []
for index in range(len(self.data_dict)):
info = self.data_dict[index]
self.label.append(int(info['label']) - 1)
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.normalization = normalization
self.use_mmap = use_mmap
self.repeat = repeat
self.load_data()
if normalization:
self.get_mean_map()
def load_data(self):
    """Read every skeleton JSON listed in self.data_dict into self.data.

    Each file at ``nw_ucla_root/<file_name>.json`` is expected to contain a
    'skeletons' entry; it is stored as a numpy array, one per clip.
    """
    self.data = []
    for entry in self.data_dict:
        path = self.nw_ucla_root + entry['file_name'] + '.json'
        with open(path, 'r') as fp:
            skeletons = json.load(fp)['skeletons']
        self.data.append(np.array(skeletons))
def get_mean_map(self):
    """Compute per-(channel, joint) normalization statistics.

    Produces ``self.mean_map`` and ``self.std_map`` with layout (C, 1, V, 1)
    from data laid out as N, C, T, V, M.

    NOTE(review): ``load_data`` builds ``self.data`` as a Python list, but the
    ``.shape`` / ``.mean`` calls below require an ndarray — confirm a
    conversion happens before this is invoked.
    """
    data = self.data
    N, C, T, V, M = data.shape
    # Mean over frames (axis 2), bodies (axis 4) and samples (axis 0),
    # keeping dims so the result broadcasts back over the data.
    self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)
    # Std over every (sample, frame, body) observation of each (channel, joint).
    self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))
def __len__(self):
    """Dataset length: number of clips multiplied by the repeat factor."""
    return self.repeat * len(self.data_dict)
def __iter__(self):
    """Iterator protocol hook: the dataset acts as its own iterator."""
    return self
def rand_view_transform(self, X, agx, agy, s):
    """Rotate the skeleton around the x/y axes (degrees) and scale it by s.

    X is any array whose last-axis triples are (x, y, z); the shape is
    preserved.
    """
    rad_x = math.radians(agx)
    rad_y = math.radians(agy)
    rot_x = np.asarray([[1, 0, 0],
                        [0, math.cos(rad_x), math.sin(rad_x)],
                        [0, -math.sin(rad_x), math.cos(rad_x)]])
    rot_y = np.asarray([[math.cos(rad_y), 0, -math.sin(rad_y)],
                        [0, 1, 0],
                        [math.sin(rad_y), 0, math.cos(rad_y)]])
    scale = np.asarray([[s, 0, 0],
                        [0, s, 0],
                        [0, 0, s]])
    # Flatten to (n_points, 3), apply the combined transform, restore shape.
    flat = np.reshape(X, (-1, 3))
    transformed = np.dot(flat, np.dot(rot_y, np.dot(rot_x, scale)))
    return np.reshape(transformed, X.shape)
def __getitem__(self, index):
    """Return one sample as (data, label, index).

    data has layout (C=3, T=time_steps, V=20, M=1). Training samples get a
    random view transform and random (sorted, with-replacement) frame
    sampling; evaluation samples use an identity view and uniform frame
    indices. Coordinates are min-max normalized to [-1, 1] after centering
    on joint 1 of the first frame.

    Fixes vs. original: ``np.int`` (removed in NumPy 1.24) replaced by the
    builtin ``int``; a duplicated, identical assignment removed.
    """
    label = self.label[index % len(self.data_dict)]
    value = self.data[index % len(self.data_dict)]
    if self.train_val == 'train':
        random.random()
        agx = random.randint(-60, 60)
        agy = random.randint(-60, 60)
        s = random.uniform(0.5, 1.5)
        # Center every joint on joint 1 of the first frame.
        center = value[0, 1, :]
        value = value - center
        scalerValue = self.rand_view_transform(value, agx, agy, s)
        scalerValue = np.reshape(scalerValue, (-1, 3))
        # Min-max normalize per coordinate axis, then map [0,1] -> [-1,1].
        scalerValue = (scalerValue - np.min(scalerValue, axis=0)) / (np.max(scalerValue, axis=0) - np.min(scalerValue, axis=0))
        scalerValue = scalerValue * 2 - 1
        scalerValue = np.reshape(scalerValue, (-1, 20, 3))
        data = np.zeros((self.time_steps, 20, 3))
        value = scalerValue[:, :, :]
        length = value.shape[0]
        # Oversample frame indices with replacement (list repeated 100x so
        # sample() can pick time_steps items), then sort to keep time order.
        random_idx = random.sample(list(np.arange(length)) * 100, self.time_steps)
        random_idx.sort()
        data[:, :, :] = value[random_idx, :, :]
    else:
        random.random()
        agx = 0
        agy = 0
        s = 1.0
        center = value[0, 1, :]
        value = value - center
        scalerValue = self.rand_view_transform(value, agx, agy, s)
        scalerValue = np.reshape(scalerValue, (-1, 3))
        scalerValue = (scalerValue - np.min(scalerValue, axis=0)) / (np.max(scalerValue, axis=0) - np.min(scalerValue, axis=0))
        scalerValue = scalerValue * 2 - 1
        scalerValue = np.reshape(scalerValue, (-1, 20, 3))
        data = np.zeros((self.time_steps, 20, 3))
        value = scalerValue[:, :, :]
        length = value.shape[0]
        # Uniformly spaced frame indices; np.int was removed in NumPy 1.24.
        idx = np.linspace(0, length - 1, self.time_steps).astype(int)
        data[:, :, :] = value[idx, :, :]  # T,V,C
    if 'bone' in self.data_path:
        # Bone modality: each joint becomes the vector to its parent joint.
        data_bone = np.zeros_like(data)
        for bone_idx in range(20):
            data_bone[:, self.bone[bone_idx][0] - 1, :] = data[:, self.bone[bone_idx][0] - 1, :] - data[:, self.bone[bone_idx][1] - 1, :]
        data = data_bone
    if 'motion' in self.data_path:
        # Motion modality: frame-to-frame differences (last frame left zero).
        data_motion = np.zeros_like(data)
        data_motion[:-1, :, :] = data[1:, :, :] - data[:-1, :, :]
        data = data_motion
    # (T, V, C) -> (C, T, V) -> (C, T, V, 1): single-person NW-UCLA layout.
    data = np.transpose(data, (2, 0, 1))
    C, T, V = data.shape
    data = np.reshape(data, (C, T, V, 1))
    return data, label, index
def top_k(self, score, top_k):
    """Fraction of samples whose true label ranks in the top-k scores.

    score is an (N, num_classes) array aligned with self.label.
    """
    ranked = score.argsort()
    hits = [lab in ranked[i, -top_k:] for i, lab in enumerate(self.label)]
    return sum(hits) * 1.0 / len(hits)
def import_class(name):
    """Resolve a dotted path such as 'pkg.mod.Class' to the named object."""
    parts = name.split('.')
    obj = __import__(parts[0])
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
SkeletonGCL | SkeletonGCL-main/feeders/tools.py | import random
import matplotlib.pyplot as plt
import numpy as np
import pdb
import torch
import torch.nn.functional as F
def valid_crop_resize(data_numpy, valid_frame_num, p_interval, window):
    """Crop the valid temporal span, then resize it to `window` frames.

    input: C,T,V,M. A single-element p_interval gives a deterministic center
    crop keeping that fraction of frames; a two-element interval gives a
    random crop whose length is drawn from it (lower-bounded at 64 frames).
    The crop is stretched/compressed to `window` frames with bilinear
    interpolation and returned as a numpy array of shape (C, window, V, M).
    """
    C, T, V, M = data_numpy.shape
    begin = 0
    end = valid_frame_num
    valid_size = end - begin

    # --- temporal crop ---
    if len(p_interval) == 1:
        p = p_interval[0]
        bias = int((1 - p) * valid_size / 2)
        data = data_numpy[:, begin + bias:end - bias, :, :]  # center crop
        cropped_length = data.shape[1]
    else:
        p = np.random.rand(1) * (p_interval[1] - p_interval[0]) + p_interval[0]
        # constrain the cropped length to at least 64 frames
        cropped_length = np.minimum(np.maximum(int(np.floor(valid_size * p)), 64), valid_size)
        bias = np.random.randint(0, valid_size - cropped_length + 1)
        data = data_numpy[:, begin + bias:begin + bias + cropped_length, :, :]
        if data.shape[1] == 0:
            print(cropped_length, bias, valid_size)

    # --- resize along time with bilinear interpolation (up- or down-sample) ---
    tensor = torch.tensor(data, dtype=torch.float)
    tensor = tensor.permute(0, 2, 3, 1).contiguous().view(C * V * M, cropped_length)
    tensor = tensor[None, None, :, :]
    tensor = F.interpolate(tensor, size=(C * V * M, window),
                           mode='bilinear', align_corners=False).squeeze()
    return tensor.contiguous().view(C, V, M, window).permute(0, 3, 1, 2).contiguous().numpy()
def downsample(data_numpy, step, random_sample=True):
    """Keep every `step`-th frame, with a random phase when random_sample.

    input: C,T,V,M
    """
    start = np.random.randint(step) if random_sample else 0
    return data_numpy[:, start::step, :, :]
def temporal_slice(data_numpy, step):
    """Fold the time axis into blocks of `step`, stacking them as extra bodies.

    input: C,T,V,M with T divisible by step; output: C, T//step, V, step*M.

    Fix: the original used ``T / step`` which is float division in Python 3
    and makes ``reshape`` raise TypeError; integer division is required.
    """
    C, T, V, M = data_numpy.shape
    return data_numpy.reshape(C, T // step, step, V, M).transpose(
        (0, 1, 3, 2, 4)).reshape(C, T // step, V, step * M)
def mean_subtractor(data_numpy, mean):
    """Subtract `mean` in-place from frames up to the last non-empty one.

    input: C,T,V,M. Returns None (a no-op) when mean == 0, otherwise the
    modified array. Note: subtraction runs from frame 0 to the last valid
    frame; the first-valid-frame index is not used for the lower bound.
    """
    # naive version
    if mean == 0:
        return
    C, T, V, M = data_numpy.shape
    occupied = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
    last = len(occupied) - occupied[::-1].argmax()
    data_numpy[:, :last, :, :] = data_numpy[:, :last, :, :] - mean
    return data_numpy
def auto_pading(data_numpy, size, random_pad=False):
    """Zero-pad the time axis up to `size` frames.

    Returns the input unchanged when it is already long enough; otherwise a
    new zero array with the clip placed at offset 0 (or a random offset when
    random_pad is set).
    """
    C, T, V, M = data_numpy.shape
    if T >= size:
        return data_numpy
    offset = random.randint(0, size - T) if random_pad else 0
    padded = np.zeros((C, size, V, M))
    padded[:, offset:offset + T, :, :] = data_numpy
    return padded
def random_choose(data_numpy, size, auto_pad=True):
    """Randomly crop `size` frames from the time axis.

    input: C,T,V,M. Randomly picks a contiguous segment, which is somewhat
    questionable since zero-padded regions may be selected. When the clip is
    shorter than `size`, it is randomly zero-padded (or returned as-is if
    auto_pad is False).
    """
    C, T, V, M = data_numpy.shape
    if T == size:
        return data_numpy
    if T < size:
        if not auto_pad:
            return data_numpy
        return auto_pading(data_numpy, size, random_pad=True)
    start = random.randint(0, T - size)
    return data_numpy[:, start:start + size, :, :]
def random_move(data_numpy,
                angle_candidate=[-10., -5., 0., 5., 10.],
                scale_candidate=[0.9, 1.0, 1.1],
                transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
                move_time_candidate=[1]):
    """Apply a smoothly time-varying random 2-D rotation/scale/translation.

    input: C,T,V,M. Random keyframe values are drawn from the candidate
    lists, linearly interpolated across frames, and applied to the x/y
    channels in place; the input array is modified and returned.

    NOTE: the list default arguments are mutable, which is usually an
    anti-pattern; it is benign here because they are only read.
    """
    # input: C,T,V,M
    C, T, V, M = data_numpy.shape
    move_time = random.choice(move_time_candidate)
    # Keyframe positions along the time axis (plus the final frame T).
    node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
    node = np.append(node, T)
    num_node = len(node)
    # One random angle/scale/translation per keyframe.
    A = np.random.choice(angle_candidate, num_node)
    S = np.random.choice(scale_candidate, num_node)
    T_x = np.random.choice(transform_candidate, num_node)
    T_y = np.random.choice(transform_candidate, num_node)
    a = np.zeros(T)
    s = np.zeros(T)
    t_x = np.zeros(T)
    t_y = np.zeros(T)
    # linspace: interpolate each parameter between consecutive keyframes
    # (angles converted from degrees to radians).
    for i in range(num_node - 1):
        a[node[i]:node[i + 1]] = np.linspace(
            A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
        s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
                                             node[i + 1] - node[i])
        t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
                                               node[i + 1] - node[i])
        t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
                                               node[i + 1] - node[i])
    # Per-frame 2x2 rotation-and-scale matrices, shape (2, 2, T).
    theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
                      [np.sin(a) * s, np.cos(a) * s]])
    # perform transformation: rotate/scale then translate the x/y channels.
    for i_frame in range(T):
        xy = data_numpy[0:2, i_frame, :, :]
        new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
        new_xy[0] += t_x[i_frame]
        new_xy[1] += t_y[i_frame]
        data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
    return data_numpy
def random_shift(data_numpy):
    """Move the valid (non-zero) frame span to a random temporal offset.

    input: C,T,V,M. Returns a new zero array with the non-empty frame range
    copied to a uniformly random position along the time axis.
    """
    C, T, V, M = data_numpy.shape
    shifted = np.zeros(data_numpy.shape)
    occupied = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
    first = occupied.argmax()
    last = len(occupied) - occupied[::-1].argmax()
    span = last - first
    offset = random.randint(0, T - span)
    shifted[:, offset:offset + span, :, :] = data_numpy[:, first:last, :, :]
    return shifted
def _rot(rot):
"""
rot: T,3
"""
cos_r, sin_r = rot.cos(), rot.sin() # T,3
zeros = torch.zeros(rot.shape[0], 1) # T,1
ones = torch.ones(rot.shape[0], 1) # T,1
r1 = torch.stack((ones, zeros, zeros),dim=-1) # T,1,3
rx2 = torch.stack((zeros, cos_r[:,0:1], sin_r[:,0:1]), dim = -1) # T,1,3
rx3 = torch.stack((zeros, -sin_r[:,0:1], cos_r[:,0:1]), dim = -1) # T,1,3
rx = torch.cat((r1, rx2, rx3), dim = 1) # T,3,3
ry1 = torch.stack((cos_r[:,1:2], zeros, -sin_r[:,1:2]), dim =-1)
r2 = torch.stack((zeros, ones, zeros),dim=-1)
ry3 = torch.stack((sin_r[:,1:2], zeros, cos_r[:,1:2]), dim =-1)
ry = torch.cat((ry1, r2, ry3), dim = 1)
rz1 = torch.stack((cos_r[:,2:3], sin_r[:,2:3], zeros), dim =-1)
r3 = torch.stack((zeros, zeros, ones),dim=-1)
rz2 = torch.stack((-sin_r[:,2:3], cos_r[:,2:3],zeros), dim =-1)
rz = torch.cat((rz1, rz2, r3), dim = 1)
rot = rz.matmul(ry).matmul(rx)
return rot
def random_rot(data_numpy, theta=0.3):
    """
    Apply one random 3-D rotation to the whole skeleton sequence.

    data_numpy: C,T,V,M
    Returns a torch tensor of the same shape (note: a tensor, not ndarray).
    """
    seq = torch.from_numpy(data_numpy)
    C, T, V, M = seq.shape
    # Fold joints and bodies together so each frame is a C x (V*M) matrix.
    seq = seq.permute(1, 0, 2, 3).contiguous().view(T, C, V*M)  # T,3,V*M
    # One set of Euler angles in [-theta, theta], replicated for every frame.
    angles = torch.zeros(3).uniform_(-theta, theta)
    angles = torch.stack([angles] * T, dim=0)
    rot_mat = _rot(angles)  # T,3,3
    seq = torch.matmul(rot_mat, seq)
    seq = seq.view(T, C, V, M).permute(1, 0, 2, 3).contiguous()
    return seq
def openpose_match(data_numpy):
    """Re-associate OpenPose person slots across frames by spatial proximity.

    Greedily matches each person track in frame t to the closest candidate in
    frame t+1 (highest-confidence tracks matched first), chains the per-frame
    matches into full-sequence tracks, then reorders the person axis by total
    confidence. Expects channels (x, y, score), i.e. C == 3.
    """
    C, T, V, M = data_numpy.shape
    assert (C == 3)
    # Per-frame, per-person confidence: sum of joint scores. Shape (T, M).
    score = data_numpy[2, :, :, :].sum(axis=1)
    # the rank of body confidence in each frame (shape: T-1, M)
    rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M)
    # data of frame 1
    xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1)
    # data of frame 2
    xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M)
    # square of distance between frame 1&2 (shape: T-1, M, M)
    distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0)
    # match pose
    # forward_map[t, m] = index (in frame t+1) matched to person m of frame t;
    # initialised to -1 so unmatched slots are detectable by the assert below.
    forward_map = np.zeros((T, M), dtype=int) - 1
    forward_map[0] = range(M)
    for m in range(M):
        # Process the m-th most confident person of every frame at once.
        choose = (rank == m)
        forward = distance[choose].argmin(axis=1)
        for t in range(T - 1):
            # Mark the chosen column as taken so later (less confident)
            # tracks cannot claim the same person in frame t+1.
            distance[t, :, forward[t]] = np.inf
        forward_map[1:][choose] = forward
    assert (np.all(forward_map >= 0))
    # string data
    # Compose the per-step matches so forward_map[t] maps frame-0 slots
    # directly to frame-t slots.
    for t in range(T - 1):
        forward_map[t + 1] = forward_map[t + 1][forward_map[t]]
    # generate data
    new_data_numpy = np.zeros(data_numpy.shape)
    for t in range(T):
        new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[
            t]].transpose(1, 2, 0)
    data_numpy = new_data_numpy
    # score sort
    # Reorder the person axis by descending whole-sequence confidence.
    trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0)
    rank = (-trace_score).argsort()
    data_numpy = data_numpy[:, :, :, rank]
    return data_numpy
| 8,189 | 33.851064 | 150 | py |
SkeletonGCL | SkeletonGCL-main/feeders/feeder_ntu.py | import numpy as np
import torch
from torch.utils.data import Dataset
from feeders import tools
class Feeder(Dataset):
    """Skeleton dataset reader for the NTU RGB+D ``.npz`` splits.

    Loads ``x_train``/``x_test`` arrays of shape (N, T, M*V*C), reshapes them
    to (N, C, T, V, M) and serves ``(data, label, index)`` triples, optionally
    applying temporal crop/resize, random rotation, and bone/velocity
    modality transforms.
    """

    def __init__(self, data_path, label_path=None, p_interval=1, split='train', random_choose=False, random_shift=False,
                 random_move=False, random_rot=False, window_size=-1, normalization=False, debug=False, use_mmap=False,
                 bone=False, vel=False):
        """
        :param data_path: path to the .npz archive with samples and one-hot labels
        :param label_path: kept for interface compatibility (labels come from data_path)
        :param split: training set or test set ('train'/'test')
        :param random_choose: If true, randomly choose a portion of the input sequence
        :param random_shift: If true, randomly pad zeros at the beginning or end of sequence
        :param random_move: random affine jitter flag (stored; not used in __getitem__ here)
        :param random_rot: rotate skeleton around xyz axis
        :param window_size: The length of the output sequence
        :param normalization: If true, normalize input sequence
        :param debug: If true, only use the first 100 samples
        :param use_mmap: If true, use mmap mode to load data, which can save the running memory
        :param bone: use bone modality or not
        :param vel: use motion (velocity) modality or not
        """
        self.debug = debug
        self.data_path = data_path
        self.label_path = label_path
        self.split = split
        self.random_choose = random_choose
        self.random_shift = random_shift
        self.random_move = random_move
        self.window_size = window_size
        self.normalization = normalization
        self.use_mmap = use_mmap
        self.p_interval = p_interval
        self.random_rot = random_rot
        self.bone = bone
        self.vel = vel
        self.load_data()
        if normalization:
            self.get_mean_map()

    def load_data(self):
        """Read the .npz split and reshape samples to (N, C, T, V, M)."""
        # data: N C V T M
        npz_data = np.load(self.data_path)
        if self.split == 'train':
            self.data = npz_data['x_train']
            # Labels are stored one-hot; recover the class indices.
            self.label = np.where(npz_data['y_train'] > 0)[1]
            self.sample_name = ['train_' + str(i) for i in range(len(self.data))]
        elif self.split == 'test':
            self.data = npz_data['x_test']
            self.label = np.where(npz_data['y_test'] > 0)[1]
            self.sample_name = ['test_' + str(i) for i in range(len(self.data))]
        else:
            raise NotImplementedError('data split only supports train/test')
        N, T, _ = self.data.shape
        # (N, T, M*V*C) -> (N, T, M=2, V=25, C=3) -> (N, C, T, V, M)
        self.data = self.data.reshape((N, T, 2, 25, 3)).transpose(0, 4, 1, 3, 2)

    def get_mean_map(self):
        """Precompute per-channel mean/std maps used for normalization."""
        data = self.data
        N, C, T, V, M = data.shape
        self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)
        self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))

    def __len__(self):
        return len(self.label)

    def __iter__(self):
        # NOTE(review): returns self without a matching __next__, so the
        # iterator protocol is incomplete; kept unchanged for backward
        # compatibility (DataLoader does not rely on it).
        return self

    def __getitem__(self, index):
        data_numpy = self.data[index]
        label = self.label[index]
        data_numpy = np.array(data_numpy)
        # Number of frames that contain any non-zero value.
        valid_frame_num = np.sum(data_numpy.sum(0).sum(-1).sum(-1) != 0)
        # reshape Tx(MVC) to CTVM
        data_numpy = tools.valid_crop_resize(data_numpy, valid_frame_num, self.p_interval, self.window_size)
        if self.random_rot:
            data_numpy = tools.random_rot(data_numpy)
        if self.bone:
            # Bone modality: vector from each joint to its parent joint.
            from .bone_pairs import ntu_pairs
            bone_data_numpy = np.zeros_like(data_numpy)
            for v1, v2 in ntu_pairs:
                bone_data_numpy[:, :, v1 - 1] = data_numpy[:, :, v1 - 1] - data_numpy[:, :, v2 - 1]
            data_numpy = bone_data_numpy
        if self.vel:
            # Velocity modality: first-order temporal difference, last frame zeroed.
            data_numpy[:, :-1] = data_numpy[:, 1:] - data_numpy[:, :-1]
            data_numpy[:, -1] = 0
        return data_numpy, label, index

    def top_k(self, score, top_k):
        """Fraction of samples whose true label is among the top-k scores.

        :param score: array of shape (num_samples, num_classes).
        :param top_k: how many highest-scoring classes count as a hit.
        """
        rank = score.argsort()
        hit_top_k = [lbl in rank[i, -top_k:] for i, lbl in enumerate(self.label)]
        return sum(hit_top_k) * 1.0 / len(hit_top_k)
def import_class(name):
    """Resolve a dotted path such as 'pkg.mod.Class' to the named object."""
    parts = name.split('.')
    obj = __import__(parts[0])
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
STDEN | STDEN-main/stden_train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.stden_supervisor import STDENSupervisor
import numpy as np
import torch
def main(args):
    """Train an STDEN model from a YAML configuration file.

    :param args: parsed CLI namespace; ``args.config_filename`` points to a
        YAML config whose ``data`` section contains ``graph_pkl_filename``.
    """
    with open(args.config_filename) as f:
        # safe_load avoids arbitrary object construction on untrusted YAML
        # and, unlike plain yaml.load() with no Loader, still works on
        # PyYAML >= 5.1 (where the Loader argument became required).
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        adj_mx = load_graph_data(graph_pkl_filename)

        supervisor = STDENSupervisor(adj_mx=adj_mx, **supervisor_config)
        supervisor.train()
if __name__ == '__main__':
    # Command-line entry point: parse arguments, fix RNG seeds for
    # reproducibility, then launch training.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
    parser.add_argument('-r', '--random_seed', type=int, default=2021, help="Random seed for reproduction.")
    args = parser.parse_args()
    # Seed both torch and numpy so training runs are repeatable.
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    main(args)
| 1,156 | 29.447368 | 108 | py |
STDEN | STDEN-main/stden_eval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.stden_supervisor import STDENSupervisor
import numpy as np
import torch
def main(args):
    """Evaluate a trained STDEN model as described by a YAML configuration.

    Runs per-horizon test metrics and optionally saves predictions and
    latent features (controlled by ``save_pred`` / ``save_latent``).
    """
    with open(args.config_filename) as f:
        # safe_load avoids arbitrary object construction on untrusted YAML
        # and, unlike plain yaml.load() with no Loader, still works on
        # PyYAML >= 5.1 (where the Loader argument became required).
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        adj_mx = load_graph_data(graph_pkl_filename)

        supervisor = STDENSupervisor(adj_mx=adj_mx, **supervisor_config)
        horizon = supervisor_config['model'].get('horizon')
        extract_latent = supervisor_config['model'].get('save_latent')
        # Evaluate every prediction step from 1 to the full horizon.
        supervisor.eval_more(dataset='test',
                             save=args.save_pred,
                             seq_len=np.arange(1, horizon+1, 1),
                             extract_latent=extract_latent)
if __name__ == '__main__':
    # Command-line entry point: parse arguments, fix RNG seeds for
    # reproducibility, then run evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
    parser.add_argument('-r', '--random_seed', type=int, default=2021, help="Random seed for reproduction.")
    parser.add_argument('--save_pred', action='store_true', help='Save the prediction.')
    args = parser.parse_args()
    # Seed both torch and numpy so evaluation runs are repeatable.
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    main(args)
| 1,577 | 34.863636 | 108 | py |
STDEN | STDEN-main/model/diffeq_solver.py | import torch
import torch.nn as nn
import time
from torchdiffeq import odeint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DiffeqSolver(nn.Module):
    """Thin wrapper around ``torchdiffeq.odeint`` that integrates a latent
    ODE forward in time and reports the solver's cost."""

    def __init__(self, odefunc, method, latent_dim,
                 odeint_rtol = 1e-4, odeint_atol = 1e-5):
        nn.Module.__init__(self)
        self.ode_method = method
        self.odefunc = odefunc
        self.latent_dim = latent_dim
        self.rtol = odeint_rtol
        self.atol = odeint_atol

    def forward(self, first_point, time_steps_to_pred):
        """
        Decode the trajectory through the ODE solver.
        :param time_steps_to_pred: horizon
        :param first_point: (n_traj_samples, batch_size, num_nodes * latent_dim)
        :return: pred_y of shape (horizon, n_traj_samples, batch_size,
            num_nodes * latent_dim) and a (num_func_evals, wall_time) tuple.
        """
        n_samples, batch = first_point.size()[0], first_point.size()[1]
        # Merge the sample and batch dimensions so odeint sees a 2-D state,
        # which reduces the solver's bookkeeping cost.
        flat_y0 = first_point.reshape(n_samples * batch, -1)

        tic = time.time()
        self.odefunc.nfe = 0
        # traj shape: (horizon, n_traj_samples * batch_size, num_nodes * latent_dim)
        traj = odeint(self.odefunc,
                      flat_y0,
                      time_steps_to_pred,
                      rtol=self.rtol,
                      atol=self.atol,
                      method=self.ode_method)
        elapsed = time.time() - tic

        # Split the merged dimension back out.
        traj = traj.reshape(traj.size()[0], n_samples, batch, -1)
        return traj, (self.odefunc.nfe, elapsed)
| 1,877 | 37.326531 | 119 | py |
STDEN | STDEN-main/model/ode_func.py | import numpy as np
import torch
import torch.nn as nn
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily creates, caches and registers weight/bias parameters for a
    layer, keyed by shape (weights) or length (biases)."""

    def __init__(self, rnn_network: nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}
        self._biases_dict = {}
        self._type = layer_type

    def get_weights(self, shape):
        # Create the parameter on first request, then serve it from the cache.
        if shape not in self._params_dict:
            weight = nn.Parameter(torch.empty(*shape, device=device))
            nn.init.xavier_normal_(weight)
            self._params_dict[shape] = weight
            # Register on the owning module so optimizers can see it.
            self._rnn_network.register_parameter(
                '{}_weight_{}'.format(self._type, str(shape)), weight)
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        if length not in self._biases_dict:
            bias = nn.Parameter(torch.empty(length, device=device))
            nn.init.constant_(bias, bias_start)
            self._biases_dict[length] = bias
            self._rnn_network.register_parameter(
                '{}_biases_{}'.format(self._type, str(length)), bias)
        return self._biases_dict[length]
class ODEFunc(nn.Module):
    """ODE dynamics function: maps (t, state) to d(state)/dt.

    Depending on ``filter_type`` the gradient is produced by a plain MLP
    ("unkP"), a negated graph-conv net ("IncP"), or — by default — a gated
    diffusion form ``-sigmoid(gconv(y)) * net(y)``.
    """

    def __init__(self, num_units, latent_dim, adj_mx, gcn_step, num_nodes,
                 gen_layers=1, nonlinearity='tanh', filter_type="default"):
        """
        :param num_units: dimensionality of the hidden layers
        :param latent_dim: dimensionality used for ODE (input and output). Analog of a continous latent state
        :param adj_mx: graph adjacency matrix used to build the random-walk supports
        :param gcn_step: number of diffusion (Chebyshev-style) steps in each graph conv
        :param num_nodes: number of graph nodes
        :param gen_layers: hidden layers in each ode func.
        :param nonlinearity: 'tanh' or anything else for relu
        :param filter_type: "unkP", "IncP" or default (diffusion)
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super(ODEFunc, self).__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        self._num_nodes = num_nodes
        self._num_units = num_units  # hidden dimension
        self._latent_dim = latent_dim
        self._gen_layers = gen_layers
        # Number of function evaluations; reset externally by the solver.
        self.nfe = 0
        self._filter_type = filter_type
        if(self._filter_type == "unkP"):
            # "Unknown physics": learn the gradient with a plain MLP.
            ode_func_net = utils.create_net(latent_dim, latent_dim, n_units=num_units)
            utils.init_network_weights(ode_func_net)
            self.gradient_net = ode_func_net
        else:
            # Graph-based variants: forward and backward random-walk supports.
            self._gcn_step = gcn_step
            self._gconv_params = LayerParams(self, 'gconv')
            self._supports = []
            supports = []
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
            for support in supports:
                self._supports.append(self._build_sparse_matrix(support))

    @staticmethod
    def _build_sparse_matrix(L):
        # Convert a scipy sparse matrix to a torch sparse COO tensor.
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, t_local, y, backwards = False):
        """
        Perform one step in solving ODE. Given current data point y and current time point t_local, returns gradient dy/dt at this time point
        t_local: current time point
        y: value at the current time point, shape (B, num_nodes * latent_dim)
        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * latent_dim)`.
        """
        self.nfe += 1
        grad = self.get_ode_gradient_nn(t_local, y)
        if backwards:
            grad = -grad
        return grad

    def get_ode_gradient_nn(self, t_local, inputs):
        """Dispatch the gradient computation according to the filter type."""
        if(self._filter_type == "unkP"):
            grad = self._fc(inputs)
        elif (self._filter_type == "IncP"):
            grad = - self.ode_func_net(inputs)
        else: # default is diffusion process
            # theta shape: (B, num_nodes * latent_dim)
            theta = torch.sigmoid(self._gconv(inputs, self._latent_dim, bias_start=1.0))
            grad = - theta * self.ode_func_net(inputs)
        return grad

    def ode_func_net(self, inputs):
        # Stack of graph convolutions with the configured nonlinearity;
        # projects back down to latent_dim at the end.
        c = inputs
        for i in range(self._gen_layers):
            c = self._gconv(c, self._num_units)
            c = self._activation(c)
        c = self._gconv(c, self._latent_dim)
        c = self._activation(c)
        return c

    def _fc(self, inputs):
        # Apply the MLP per node, then flatten back to (B, num_nodes * latent_dim).
        batch_size = inputs.size()[0]
        grad = self.gradient_net(inputs.view(batch_size * self._num_nodes, self._latent_dim))
        return grad.reshape(batch_size, self._num_nodes * self._latent_dim) # (batch_size, num_nodes, latent_dim)

    @staticmethod
    def _concat(x, x_):
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _gconv(self, inputs, output_size, bias_start=0.0):
        """Diffusion graph convolution over the precomputed supports.

        Accumulates the Chebyshev-style recursion x_{k} = 2 * A x_{k-1} - x_{k-2}
        for each support, then mixes the stacked terms with a learned weight
        matrix obtained lazily from ``self._gconv_params``.
        """
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        # state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        # inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs.size(2)
        x = inputs
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)
        if self._gcn_step == 0:
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)
                for k in range(2, self._gcn_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1
        num_matrices = len(self._supports) * self._gcn_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)
        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,912 | 40.644578 | 135 | py |
STDEN | STDEN-main/model/stden_supervisor.py | import os
import time
from random import SystemRandom
import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter
from lib import utils
from model.stden_model import STDENModel
from lib.metrics import masked_mae_loss, masked_mape_loss, masked_rmse_loss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class STDENSupervisor:
    """Training/evaluation supervisor for STDEN.

    Wires together the dataset loaders, the STDEN model, tensorboard logging
    and checkpointing, and exposes ``train`` / ``evaluate`` / ``eval_more``.
    """

    def __init__(self, adj_mx, **kwargs):
        # Config is split into three sections of the YAML file.
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)
        # logging.
        self._log_dir = utils.get_log_dir(kwargs)
        self._writer = SummaryWriter('runs/' + self._log_dir)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
        # data set
        self._data = utils.load_dataset(**self._data_kwargs)
        self.standard_scaler = self._data['scaler']
        self._logger.info('Scaler mean: {:.6f}, std {:.6f}.'.format(self.standard_scaler.mean, self.standard_scaler.std))
        self.num_edges = (adj_mx > 0.).sum()
        self.input_dim = int(self._model_kwargs.get('input_dim', 1))
        self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
        self.output_dim = int(self._model_kwargs.get('output_dim', 1))
        self.use_curriculum_learning = bool(
            self._model_kwargs.get('use_curriculum_learning', False))
        self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder
        # setup model
        stden_model = STDENModel(adj_mx, self._logger, **self._model_kwargs)
        self.stden_model = stden_model.cuda() if torch.cuda.is_available() else stden_model
        self._logger.info("Model created")
        # 'load' selects an existing experiment checkpoint directory; 0 means new run.
        self.experimentID = self._train_kwargs.get('load', 0)
        if self.experimentID == 0:
            # Make a new experiment ID
            self.experimentID = int(SystemRandom().random()*100000)
        self.ckpt_path = os.path.join("ckpt/", "experiment_" + str(self.experimentID))
        self._epoch_num = self._train_kwargs.get('epoch', 0)
        if self._epoch_num > 0:
            self._logger.info('Loading model...')
            self.load_model()

    def save_model(self, epoch):
        """Save config plus model weights to ckpt/<experiment>/epo<epoch>.tar."""
        model_dir = self.ckpt_path
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        config = dict(self._kwargs)
        config['model_state_dict'] = self.stden_model.state_dict()
        config['epoch'] = epoch
        model_path = os.path.join(model_dir, 'epo{}.tar'.format(epoch))
        torch.save(config, model_path)
        self._logger.info("Saved model at {}".format(epoch))
        return model_path

    def load_model(self):
        """Restore model weights for ``self._epoch_num`` from the checkpoint dir."""
        # Run one forward pass first so lazily-created parameters exist
        # before load_state_dict is called.
        self._setup_graph()
        model_path = os.path.join(self.ckpt_path, 'epo{}.tar'.format(self._epoch_num))
        assert os.path.exists(model_path), 'Weights at epoch %d not found' % self._epoch_num
        checkpoint = torch.load(model_path, map_location='cpu')
        self.stden_model.load_state_dict(checkpoint['model_state_dict'])
        self._logger.info("Loaded model at {}".format(self._epoch_num))

    def _setup_graph(self):
        """Run a single no-grad forward pass to materialize dynamic parameters."""
        with torch.no_grad():
            self.stden_model.eval()
            val_iterator = self._data['val_loader'].get_iterator()
            for _, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)
                output = self.stden_model(x)
                break

    def train(self, **kwargs):
        """Public training entry point; merges in the 'train' config section."""
        self._logger.info('Model mode: train')
        kwargs.update(self._train_kwargs)
        return self._train(**kwargs)

    def _train(self, base_lr,
               steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
               test_every_n_epochs=10, epsilon=1e-8, **kwargs):
        """Main training loop with LR scheduling, early stopping and checkpointing.

        :param base_lr: initial Adam learning rate
        :param steps: MultiStepLR milestones (epoch indices)
        :param patience: epochs without val improvement before early stop
        :param epochs: maximum number of epochs
        :param lr_decay_ratio: LR decay factor at each milestone
        :param log_every: log a summary message every N epochs
        :param save_model: whether to checkpoint on val improvement
        :param test_every_n_epochs: also evaluate on test every N epochs
        :param epsilon: Adam epsilon
        """
        # steps is used in learning rate - will see if need to use it?
        min_val_loss = float('inf')
        wait = 0
        optimizer = torch.optim.Adam(self.stden_model.parameters(), lr=base_lr, eps=epsilon)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                            gamma=lr_decay_ratio)
        self._logger.info('Start training ...')
        # this will fail if model is loaded with a changed batch_size
        num_batches = self._data['train_loader'].num_batch
        self._logger.info("num_batches: {}".format(num_batches))
        batches_seen = num_batches * self._epoch_num
        # used for nfe
        c = []
        res, keys = [], []
        for epoch_num in range(self._epoch_num, epochs):
            self.stden_model.train()
            train_iterator = self._data['train_loader'].get_iterator()
            losses = []
            start_time = time.time()
            c.clear() #nfe
            for i, (x, y) in enumerate(train_iterator):
                if(i >= num_batches):
                    break
                optimizer.zero_grad()
                x, y = self._prepare_data(x, y)
                output, fe = self.stden_model(x, y, batches_seen)
                if batches_seen == 0:
                    # this is a workaround to accommodate dynamically registered parameters
                    optimizer = torch.optim.Adam(self.stden_model.parameters(), lr=base_lr, eps=epsilon)
                loss = self._compute_loss(y, output)
                self._logger.debug("FE: number - {}, time - {:.3f} s, err - {:.3f}".format(*fe, loss.item()))
                c.append([*fe, loss.item()])
                self._logger.debug(loss.item())
                losses.append(loss.item())
                batches_seen += 1 # global step in tensorboard
                loss.backward()
                # gradient clipping
                torch.nn.utils.clip_grad_norm_(self.stden_model.parameters(), self.max_grad_norm)
                optimizer.step()
                del x, y, output, loss # del make these memory no-labeled trash
                torch.cuda.empty_cache() # empty_cache() recycle no-labeled trash
            # used for nfe
            res.append(pd.DataFrame(c, columns=['nfe', 'time', 'err']))
            keys.append(epoch_num)
            self._logger.info("epoch complete")
            lr_scheduler.step()
            self._logger.info("evaluating now!")
            val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
            end_time = time.time()
            self._writer.add_scalar('training loss',
                                    np.mean(losses),
                                    batches_seen)
            if (epoch_num % log_every) == log_every - 1:
                message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                           (end_time - start_time))
                self._logger.info(message)
            if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
                test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
                message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
                                           (end_time - start_time))
                self._logger.info(message)
            # Early stopping / checkpointing on validation loss.
            if val_loss < min_val_loss:
                wait = 0
                if save_model:
                    model_file_name = self.save_model(epoch_num)
                    self._logger.info(
                        'Val loss decrease from {:.4f} to {:.4f}, '
                        'saving to {}'.format(min_val_loss, val_loss, model_file_name))
                min_val_loss = val_loss
            elif val_loss >= min_val_loss:
                wait += 1
                if wait == patience:
                    self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                    break
        # Optionally dump per-iteration NFE/time/error statistics.
        if bool(self._model_kwargs.get('nfe', False)):
            res = pd.concat(res, keys=keys)
            # self._logger.info("res.shape: ", res.shape)
            res.index.names = ['epoch', 'iter']
            filter_type = self._model_kwargs.get('filter_type', 'unknown')
            atol = float(self._model_kwargs.get('odeint_atol', 1e-5))
            rtol = float(self._model_kwargs.get('odeint_rtol', 1e-5))
            nfe_file = os.path.join(
                self._data_kwargs.get('dataset_dir', 'data'),
                'nfe_{}_a{}_r{}.pkl'.format(filter_type, int(atol*1e5), int(rtol*1e5)))
            res.to_pickle(nfe_file)
            # res.to_csv(nfe_file)

    def _prepare_data(self, x, y):
        """Convert numpy batches to correctly-shaped tensors on the device."""
        x, y = self._get_x_y(x, y)
        x, y = self._get_x_y_in_correct_dims(x, y)
        return x.to(device), y.to(device)

    def _get_x_y(self, x, y):
        """
        :param x: shape (batch_size, seq_len, num_edges, input_dim)
        :param y: shape (batch_size, horizon, num_edges, input_dim)
        :returns x shape (seq_len, batch_size, num_edges, input_dim)
                 y shape (horizon, batch_size, num_edges, input_dim)
        """
        x = torch.from_numpy(x).float()
        y = torch.from_numpy(y).float()
        self._logger.debug("X: {}".format(x.size()))
        self._logger.debug("y: {}".format(y.size()))
        # Time-major layout expected by the model.
        x = x.permute(1, 0, 2, 3)
        y = y.permute(1, 0, 2, 3)
        return x, y

    def _get_x_y_in_correct_dims(self, x, y):
        """
        :param x: shape (seq_len, batch_size, num_edges, input_dim)
        :param y: shape (horizon, batch_size, num_edges, input_dim)
        :return: x: shape (seq_len, batch_size, num_edges * input_dim)
                 y: shape (horizon, batch_size, num_edges * output_dim)
        """
        batch_size = x.size(1)
        self._logger.debug("size of x {}".format(x.size()))
        x = x.view(self.seq_len, batch_size, self.num_edges * self.input_dim)
        y = y[..., :self.output_dim].view(self.horizon, batch_size,
                                          self.num_edges * self.output_dim)
        return x, y

    def _compute_loss(self, y_true, y_predicted):
        """Masked MAE loss in the original (inverse-transformed) scale."""
        y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return masked_mae_loss(y_predicted, y_true)

    def _compute_loss_eval(self, y_true, y_predicted):
        """Return (mae, mape, rmse) as python floats, in the original scale."""
        y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return masked_mae_loss(y_predicted, y_true).item(), masked_mape_loss(y_predicted, y_true).item(), masked_rmse_loss(y_predicted, y_true).item()

    def evaluate(self, dataset='val', batches_seen=0, save=False):
        """
        Computes mae rmse mape loss and the predict if save
        :return: mean L1Loss
        """
        with torch.no_grad():
            self.stden_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            mae_losses = []
            mape_losses = []
            rmse_losses = []
            y_dict = None
            if(save):
                y_truths = []
                y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)
                output, fe = self.stden_model(x)
                mae, mape, rmse = self._compute_loss_eval(y, output)
                mae_losses.append(mae)
                mape_losses.append(mape)
                rmse_losses.append(rmse)
                if(save):
                    y_truths.append(y.cpu())
                    y_preds.append(output.cpu())
            mean_loss = {
                'mae': np.mean(mae_losses),
                'mape': np.mean(mape_losses),
                'rmse': np.mean(rmse_losses)
            }
            self._logger.info('Evaluation: - mae - {:.4f} - mape - {:.4f} - rmse - {:.4f}'.format(mean_loss['mae'], mean_loss['mape'], mean_loss['rmse']))
            self._writer.add_scalar('{} loss'.format(dataset), mean_loss['mae'], batches_seen)
            if(save):
                y_preds = np.concatenate(y_preds, axis=1)
                y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
                y_truths_scaled = []
                y_preds_scaled = []
                # self._logger.debug("y_preds shape: {}, y_truth shape {}".format(y_preds.shape, y_truths.shape))
                for t in range(y_preds.shape[0]):
                    y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                    y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                    y_truths_scaled.append(y_truth)
                    y_preds_scaled.append(y_pred)
                y_preds_scaled = np.stack(y_preds_scaled)
                y_truths_scaled = np.stack(y_truths_scaled)
                y_dict = {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
                # save_dir = self._data_kwargs.get('dataset_dir', 'data')
                # save_path = os.path.join(save_dir, 'pred.npz')
                # np.savez(save_path, prediction=y_preds_scaled, turth=y_truths_scaled)
            return mean_loss['mae'], y_dict

    def eval_more(self, dataset='val', save=False, seq_len=[3, 6, 9, 12], extract_latent=False):
        """
        Computes mae rmse mape loss and the prediction if `save` is set True.

        Reports metrics separately for each horizon step listed in ``seq_len``
        (1-based); optionally saves predictions and the model's latent features
        as compressed .npz files in the dataset directory.
        """
        self._logger.info('Model mode: Evaluation')
        with torch.no_grad():
            self.stden_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            mae_losses = []
            mape_losses = []
            rmse_losses = []
            if(save):
                y_truths = []
                y_preds = []
            if(extract_latent):
                latents = []
            # used for nfe
            c = []
            for _, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)
                output, fe = self.stden_model(x)
                mae, mape, rmse = [], [], []
                for seq in seq_len:
                    _mae, _mape, _rmse = self._compute_loss_eval(y[seq-1], output[seq-1])
                    mae.append(_mae)
                    mape.append(_mape)
                    rmse.append(_rmse)
                mae_losses.append(mae)
                mape_losses.append(mape)
                rmse_losses.append(rmse)
                c.append([*fe, np.mean(mae)])
                if(save):
                    y_truths.append(y.cpu())
                    y_preds.append(output.cpu())
                if(extract_latent):
                    latents.append(self.stden_model.latent_feat.cpu())
            mean_loss = {
                'mae': np.mean(mae_losses, axis=0),
                'mape': np.mean(mape_losses, axis=0),
                'rmse': np.mean(rmse_losses, axis=0)
            }
            for i, seq in enumerate(seq_len):
                self._logger.info('Evaluation seq {}: - mae - {:.4f} - mape - {:.4f} - rmse - {:.4f}'.format(
                    seq, mean_loss['mae'][i], mean_loss['mape'][i], mean_loss['rmse'][i]))
            if(save):
                # shape (horizon, num_sapmles, feat_dim)
                y_preds = np.concatenate(y_preds, axis=1)
                y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
                y_preds_scaled = self.standard_scaler.inverse_transform(y_preds)
                y_truths_scaled = self.standard_scaler.inverse_transform(y_truths)
                save_dir = self._data_kwargs.get('dataset_dir', 'data')
                save_path = os.path.join(save_dir, 'pred_{}_{}.npz'.format(self.experimentID, self._epoch_num))
                np.savez_compressed(save_path, prediction=y_preds_scaled, turth=y_truths_scaled)
            if(extract_latent):
                # concatenate on batch dimension
                latents = np.concatenate(latents, axis=1)
                # Shape of latents (horizon, num_samples, self.num_edges * self.output_dim)
                save_dir = self._data_kwargs.get('dataset_dir', 'data')
                filter_type = self._model_kwargs.get('filter_type', 'unknown')
                save_path = os.path.join(save_dir, '{}_latent_{}_{}.npz'.format(filter_type, self.experimentID, self._epoch_num))
                np.savez_compressed(save_path, latent=latents)
            # Optionally dump per-iteration NFE/time/error statistics.
            if bool(self._model_kwargs.get('nfe', False)):
                res = pd.DataFrame(c, columns=['nfe', 'time', 'err'])
                res.index.name = 'iter'
                filter_type = self._model_kwargs.get('filter_type', 'unknown')
                atol = float(self._model_kwargs.get('odeint_atol', 1e-5))
                rtol = float(self._model_kwargs.get('odeint_rtol', 1e-5))
                nfe_file = os.path.join(
                    self._data_kwargs.get('dataset_dir', 'data'),
                    'nfe_{}_a{}_r{}.pkl'.format(filter_type, int(atol*1e5), int(rtol*1e5)))
                res.to_pickle(nfe_file)
| 17,713 | 41.684337 | 154 | py |
STDEN | STDEN-main/model/stden_model.py | import time
import torch
import torch.nn as nn
from torch.nn.modules.rnn import GRU
from model.ode_func import ODEFunc
from model.diffeq_solver import DiffeqSolver
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in ``model``."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
class EncoderAttrs:
    """Mixin that extracts shared graph/encoder hyper-parameters from kwargs.

    ``rnn_units`` is mandatory (no default); everything else falls back to a
    sensible default.
    """

    def __init__(self, adj_mx, **model_kwargs):
        # Graph structure.
        self.adj_mx = adj_mx
        self.num_nodes = adj_mx.shape[0]
        self.num_edges = (adj_mx > 0.).sum()
        # Model hyper-parameters.
        get = model_kwargs.get
        self.gcn_step = int(get('gcn_step', 2))
        self.filter_type = get('filter_type', 'default')
        self.num_rnn_layers = int(get('num_rnn_layers', 1))
        self.rnn_units = int(get('rnn_units'))
        self.latent_dim = int(get('latent_dim', 4))
class STDENModel(nn.Module, EncoderAttrs):
    """Spatio-temporal differential-equation network (seq2seq).

    Pipeline: a recognition RNN encodes the observed sequence into the
    posterior of an initial node-level latent state z0; an ODE solver
    integrates z0 over the prediction horizon; a decoder maps the latent
    node states back to edge-level outputs via the graph gradient.
    """
    def __init__(self, adj_mx, logger, **model_kwargs):
        nn.Module.__init__(self)
        EncoderAttrs.__init__(self, adj_mx, **model_kwargs)
        self._logger = logger
        ####################################################
        # recognition net
        ####################################################
        self.encoder_z0 = Encoder_z0_RNN(adj_mx, **model_kwargs)
        ####################################################
        # ode solver
        ####################################################
        self.n_traj_samples = int(model_kwargs.get('n_traj_samples', 1))
        self.ode_method = model_kwargs.get('ode_method', 'dopri5')
        self.atol = float(model_kwargs.get('odeint_atol', 1e-4))
        self.rtol = float(model_kwargs.get('odeint_rtol', 1e-3))
        self.num_gen_layer = int(model_kwargs.get('gen_layers', 1))
        self.ode_gen_dim = int(model_kwargs.get('gen_dim', 64))
        ode_set_str = "ODE setting --latent {} --samples {} --method {} \
            --atol {:6f} --rtol {:6f} --gen_layer {} --gen_dim {}".format(\
            self.latent_dim, self.n_traj_samples, self.ode_method, \
            self.atol, self.rtol, self.num_gen_layer, self.ode_gen_dim)
        odefunc = ODEFunc(self.ode_gen_dim, # hidden dimension
                          self.latent_dim,
                          adj_mx,
                          self.gcn_step,
                          self.num_nodes,
                          filter_type=self.filter_type
                          ).to(device)
        self.diffeq_solver = DiffeqSolver(odefunc,
                                          self.ode_method,
                                          self.latent_dim,
                                          odeint_rtol=self.rtol,
                                          odeint_atol=self.atol
                                          )
        self._logger.info(ode_set_str)
        self.save_latent = bool(model_kwargs.get('save_latent', False))
        self.latent_feat = None # used to extract the latent feature
        ####################################################
        # decoder
        ####################################################
        self.horizon = int(model_kwargs.get('horizon', 1))
        self.out_feat = int(model_kwargs.get('output_dim', 1))
        self.decoder = Decoder(
            self.out_feat,
            adj_mx,
            self.num_nodes,
            self.num_edges,
        ).to(device)
    ##########################################
    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_edges * input_dim)
        :param labels: shape (horizon, batch_size, num_edges * output_dim)
        :param batches_seen: batches seen till now
        :return: outputs: (self.horizon, batch_size, self.num_edges * self.output_dim)
        """
        perf_time = time.time()
        # shape: [1, batch, num_nodes * latent_dim]
        first_point_mu, first_point_std = self.encoder_z0(inputs)
        self._logger.debug("Recognition complete with {:.1f}s".format(time.time() - perf_time))
        # sample 'n_traj_samples' trajectory
        perf_time = time.time()
        means_z0 = first_point_mu.repeat(self.n_traj_samples, 1, 1)
        sigma_z0 = first_point_std.repeat(self.n_traj_samples, 1, 1)
        # Reparameterized draw of z0 from the encoder posterior.
        first_point_enc = utils.sample_standard_gaussian(means_z0, sigma_z0)
        # Integration times normalized to [0, 1) over the horizon.
        time_steps_to_predict = torch.arange(start=0, end=self.horizon, step=1).float().to(device)
        time_steps_to_predict = time_steps_to_predict / len(time_steps_to_predict)
        # Shape of sol_ys (horizon, n_traj_samples, batch_size, self.num_nodes * self.latent_dim)
        sol_ys, fe = self.diffeq_solver(first_point_enc, time_steps_to_predict)
        self._logger.debug("ODE solver complete with {:.1f}s".format(time.time() - perf_time))
        if(self.save_latent):
            # Shape of latent_feat (horizon, batch_size, self.num_nodes * self.latent_dim)
            self.latent_feat = torch.mean(sol_ys.detach(), axis=1)
        perf_time = time.time()
        outputs = self.decoder(sol_ys)
        self._logger.debug("Decoder complete with {:.1f}s".format(time.time() - perf_time))
        if batches_seen == 0:
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        # fe is the solver's function-evaluation count, forwarded to callers.
        return outputs, fe
class Encoder_z0_RNN(nn.Module, EncoderAttrs):
    """Recognition network: encodes an observed edge-level sequence into the
    mean/std of the initial node-level latent state z0.

    Fix: ``torch.transpose`` takes the two dimensions as separate integer
    arguments. The original code called ``torch.transpose(last_output, (-2, -1))``
    (one tuple argument), which raises a TypeError at runtime.
    """
    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        EncoderAttrs.__init__(self, adj_mx, **model_kwargs)
        self.recg_type = model_kwargs.get('recg_type', 'gru') # gru
        if(self.recg_type == 'gru'):
            # gru settings
            self.input_dim = int(model_kwargs.get('input_dim', 1))
            self.gru_rnn = GRU(self.input_dim, self.rnn_units).to(device)
        else:
            raise NotImplementedError("The recognition net only support 'gru'.")
        # hidden to z0 settings
        # (num_edges, num_nodes) map from edge features back to node features;
        # every incident edge contributes with weight 0.5.
        self.inv_grad = utils.graph_grad(adj_mx).transpose(-2, -1)
        self.inv_grad[self.inv_grad != 0.] = 0.5
        self.hiddens_to_z0 = nn.Sequential(
           nn.Linear(self.rnn_units, 50),
           nn.Tanh(),
           nn.Linear(50, self.latent_dim * 2),)
        utils.init_network_weights(self.hiddens_to_z0)

    def forward(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_edges * input_dim)
        :return: mean, std: shape (n_samples=1, batch_size, num_nodes * latent_dim)
        """
        if(self.recg_type == 'gru'):
            # shape of outputs: (seq_len, batch, num_senor * rnn_units)
            seq_len, batch_size = inputs.size(0), inputs.size(1)
            # Fold the edge axis into the batch axis so the GRU runs per edge.
            inputs = inputs.reshape(seq_len, batch_size, self.num_edges, self.input_dim)
            inputs = inputs.reshape(seq_len, batch_size * self.num_edges, self.input_dim)
            outputs, _ = self.gru_rnn(inputs)
            last_output = outputs[-1]
            # (batch_size, num_edges, rnn_units)
            last_output = torch.reshape(last_output, (batch_size, self.num_edges, -1))
            # Fix: dims passed separately, not as a tuple -> (batch_size, rnn_units, num_edges)
            last_output = torch.transpose(last_output, -2, -1)
            # (batch_size, num_nodes, rnn_units)
            last_output = torch.matmul(last_output, self.inv_grad).transpose(-2, -1)
        else:
            raise NotImplementedError("The recognition net only support 'gru'.")
        mean, std = utils.split_last_dim(self.hiddens_to_z0(last_output))
        mean = mean.reshape(batch_size, -1) # (batch_size, num_nodes * latent_dim)
        std = std.reshape(batch_size, -1)   # (batch_size, num_nodes * latent_dim)
        std = std.abs()  # std must be non-negative for the Gaussian sampler
        assert(not torch.isnan(mean).any())
        assert(not torch.isnan(std).any())
        return mean.unsqueeze(0), std.unsqueeze(0) # for n_sample traj
class Decoder(nn.Module):
    """Maps node-level latent states to edge-level predictions via the graph
    gradient operator, averaging over latent dimensions and trajectory samples.
    """
    def __init__(self, output_dim, adj_mx, num_nodes, num_edges):
        super(Decoder, self).__init__()
        self.num_nodes = num_nodes
        self.num_edges = num_edges
        # (num_nodes, num_edges) incidence operator. NOTE(review): attribute
        # name "grap_grad" is a typo kept as-is — renaming would change the
        # object's public attribute surface.
        self.grap_grad = utils.graph_grad(adj_mx)
        self.output_dim = output_dim
    def forward(self, inputs):
        """
        :param inputs: (horizon, n_traj_samples, batch_size, num_nodes * latent_dim)
        :return outputs: (horizon, batch_size, num_edges * output_dim), average result of n_traj_samples.
        """
        assert(len(inputs.size()) == 4)
        horizon, n_traj_samples, batch_size = inputs.size()[:3]
        # (h, s, b, latent_dim, num_nodes) after the transpose below.
        inputs = inputs.reshape(horizon, n_traj_samples, batch_size, self.num_nodes, -1).transpose(-2, -1)
        latent_dim = inputs.size(-2)
        # transform z with shape `(..., num_nodes)` to f with shape `(..., num_edges)`.
        outputs = torch.matmul(inputs, self.grap_grad)
        # NOTE(review): this reshape splits num_edges into (num_edges, output_dim),
        # which only works when output_dim == 1 — confirm against callers.
        outputs = outputs.reshape(horizon, n_traj_samples, batch_size, latent_dim, self.num_edges, self.output_dim)
        # Average over latent dimensions (axis 3) and trajectory samples (axis 1).
        outputs = torch.mean(
            torch.mean(outputs, axis=3),
            axis=1
        )
        outputs = outputs.reshape(horizon, batch_size, -1)
        return outputs
| 8,910 | 42.048309 | 115 | py |
STDEN | STDEN-main/lib/utils.py | import logging
import numpy as np
import os
import time
import scipy.sparse as sp
import sys
import torch
import torch.nn as nn
class DataLoader(object):
    """Minimal batched iterator over a pair of aligned arrays.

    :param xs: input samples, indexed along axis 0.
    :param ys: target samples, aligned with ``xs``.
    :param batch_size: number of samples per batch.
    :param pad_with_last_sample: pad with the last sample to make number of
        samples divisible to batch_size.
    :param shuffle: apply one random permutation to (xs, ys) at construction.
    """
    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            # Repeat the final sample until len(xs) is a multiple of batch_size.
            pad_count = (batch_size - (len(xs) % batch_size)) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], pad_count, axis=0)], axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], pad_count, axis=0)], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        if shuffle:
            order = np.random.permutation(self.size)
            xs, ys = xs[order], ys[order]
        self.xs = xs
        self.ys = ys

    def get_iterator(self):
        """Reset the cursor and return a generator of (x_batch, y_batch)."""
        self.current_ind = 0

        def _wrapper():
            while self.current_ind < self.num_batch:
                lo = self.batch_size * self.current_ind
                hi = min(self.size, lo + self.batch_size)
                yield (self.xs[lo: hi, ...], self.ys[lo: hi, ...])
                self.current_ind += 1

        return _wrapper()
class StandardScaler:
    """Z-score standardization with a fixed mean and standard deviation."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values to standardized values."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Map standardized values back to the raw scale."""
        return data * self.std + self.mean
def calculate_random_walk_matrix(adj_mx):
    """Return the row-normalized random-walk matrix D^-1 * A as sparse COO."""
    adj = sp.coo_matrix(adj_mx)
    degrees = np.array(adj.sum(1))
    inv_deg = np.power(degrees, -1).flatten()
    # Isolated nodes have degree 0 -> 1/0 = inf; zero them out instead.
    inv_deg[np.isinf(inv_deg)] = 0.
    return sp.diags(inv_deg).dot(adj).tocoo()
def config_logging(log_dir, log_filename='info.log', level=logging.INFO):
    """Configure the root logger with a file handler under ``log_dir`` and a
    stdout handler. Creates ``log_dir`` if it does not exist."""
    file_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Create the log directory if necessary; ignore "already exists".
    try:
        os.makedirs(log_dir)
    except OSError:
        pass
    to_file = logging.FileHandler(os.path.join(log_dir, log_filename))
    to_file.setFormatter(file_fmt)
    to_file.setLevel(level=level)
    console_fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    to_console = logging.StreamHandler(sys.stdout)
    to_console.setFormatter(console_fmt)
    to_console.setLevel(level=level)
    logging.basicConfig(handlers=[to_file, to_console], level=level)
def get_logger(log_dir, name, log_filename='info.log', level=logging.INFO):
    """Return the named logger, attached to ``log_dir/log_filename`` and stdout.

    Fix: ``logging.getLogger(name)`` returns the same object on every call,
    and the original implementation appended a fresh file/console handler
    pair each time, so repeated calls duplicated every log line. Handlers
    are now attached only once per logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        # Add file handler and stdout handler
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler = logging.FileHandler(os.path.join(log_dir, log_filename))
        file_handler.setFormatter(formatter)
        # Add console handler.
        console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(console_formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
    logger.info('Log directory: %s', log_dir)
    return logger
def get_log_dir(kwargs):
    """Return (and create) the run's log directory.

    Uses kwargs['train']['log_dir'] when configured; otherwise builds a
    descriptive run id from the model/data hyperparameters plus a timestamp
    under kwargs['log_base_dir'].
    """
    log_dir = kwargs['train'].get('log_dir')
    if log_dir is None:
        batch_size = kwargs['data'].get('batch_size')
        filter_type = kwargs['model'].get('filter_type')
        gcn_step = kwargs['model'].get('gcn_step')
        horizon = kwargs['model'].get('horizon')
        latent_dim = kwargs['model'].get('latent_dim')
        n_traj_samples = kwargs['model'].get('n_traj_samples')
        ode_method = kwargs['model'].get('ode_method')
        seq_len = kwargs['model'].get('seq_len')
        rnn_units = kwargs['model'].get('rnn_units')
        recg_type = kwargs['model'].get('recg_type')
        # Abbreviate the filter type for the run id.
        if filter_type == 'unkP':
            filter_type_abbr = 'UP'
        elif filter_type == 'IncP':
            filter_type_abbr = 'NV'
        else:
            filter_type_abbr = 'DF'
        run_id = 'STDEN_%s-%d_%s-%d_L-%d_N-%d_M-%s_bs-%d_%d-%d_%s/' % (
            recg_type, rnn_units, filter_type_abbr, gcn_step, latent_dim, n_traj_samples, ode_method, batch_size, seq_len, horizon, time.strftime('%m%d%H%M%S'))
        base_dir = kwargs.get('log_base_dir')
        log_dir = os.path.join(base_dir, run_id)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    return log_dir
def load_dataset(dataset_dir, batch_size, val_batch_size=None, **kwargs):
    """Load train/val/test arrays, z-score all splits with the *train* split's
    statistics, and wrap them in DataLoaders.

    Returns a dict with 'x_*'/'y_*' arrays, '*_loader' iterators and the
    fitted 'scaler'.
    NOTE(review): in the 'BJ' branch only x_* keys are re-assigned — the
    archive is assumed to already contain y_* arrays; confirm against data.
    """
    if('BJ' in dataset_dir):
        data = dict(np.load(os.path.join(dataset_dir, 'flow.npz'))) # convert readonly NpzFile to writable dict Object
        for category in ['train', 'val', 'test']:
            data['x_' + category] = data['x_' + category] #[..., :4] # ignore the time index
    else:
        data = {}
        for category in ['train', 'val', 'test']:
            cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
            data['x_' + category] = cat_data['x']
            data['y_' + category] = cat_data['y']
    scaler = StandardScaler(mean=data['x_train'].mean(), std=data['x_train'].std())
    # Data format
    for category in ['train', 'val', 'test']:
        data['x_' + category] = scaler.transform(data['x_' + category])
        data['y_' + category] = scaler.transform(data['y_' + category])
    data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True)
    data['val_loader'] = DataLoader(data['x_val'], data['y_val'], val_batch_size, shuffle=False)
    data['test_loader'] = DataLoader(data['x_test'], data['y_test'], val_batch_size, shuffle=False)
    data['scaler'] = scaler
    return data
def load_graph_data(pkl_filename):
    """Load the adjacency matrix from disk.

    Despite the parameter name, the file is read with ``np.load``
    (a ``.npy``/``.npz`` archive), not unpickled.
    """
    return np.load(pkl_filename)
def graph_grad(adj_mx):
    """Fetch the graph gradient (signed incidence) operator.

    Returns a (num_nodes, num_edges) tensor with +1 at the source node and
    -1 at the destination node of every positive edge, edges enumerated in
    row-major order of ``adj_mx``.
    """
    n = adj_mx.shape[0]
    grad = torch.zeros(n, (adj_mx > 0.).sum())
    edge = 0
    for src in range(n):
        for dst in range(n):
            if adj_mx[src, dst] != 0:
                grad[src, edge] = 1.
                grad[dst, edge] = -1.
                edge += 1
    return grad
def init_network_weights(net, std = 0.1):
    """Initialize every ``nn.Linear`` in ``net``: N(0, std) weights, zero bias.

    Just for nn.Linear net.
    """
    for layer in net.modules():
        if isinstance(layer, nn.Linear):
            nn.init.normal_(layer.weight, mean=0, std=std)
            nn.init.constant_(layer.bias, val=0)
def split_last_dim(data):
    """Split ``data`` into two equal halves along its last dimension."""
    half = data.size()[-1] // 2
    return data[..., :half], data[..., half:]
def get_device(tensor):
    """Return the device of ``tensor``: its CUDA device index when on GPU,
    otherwise ``torch.device('cpu')``."""
    if tensor.is_cuda:
        return tensor.get_device()
    return torch.device("cpu")
def sample_standard_gaussian(mu, sigma):
    """Reparameterized sample: mu + sigma * eps with eps ~ N(0, 1),
    drawn on the same device as ``mu``."""
    device = get_device(mu)
    standard_normal = torch.distributions.normal.Normal(
        torch.Tensor([0.]).to(device), torch.Tensor([1.]).to(device))
    eps = standard_normal.sample(mu.size()).squeeze(-1)
    return eps * sigma.float() + mu.float()
def create_net(n_inputs, n_outputs, n_layers = 0,
               n_units = 100, nonlinear = nn.Tanh):
    """Build an MLP: input -> n_layers hidden blocks -> output, with
    ``nonlinear`` activations between linear layers and a linear output."""
    layers = [nn.Linear(n_inputs, n_units)]
    for _ in range(n_layers):
        layers += [nonlinear(), nn.Linear(n_units, n_units)]
    layers += [nonlinear(), nn.Linear(n_units, n_outputs)]
    return nn.Sequential(*layers)
STDEN | STDEN-main/lib/metrics.py | import torch
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error over nonzero ground-truth entries.

    Entries of ``y_true`` below 1e-4 are treated as zero and masked out;
    the mask is re-normalized so the result equals the mean over valid
    positions only.

    Fix: operate on a clone of ``y_true`` — the original zeroed small
    entries in place, silently mutating the caller's tensor.
    """
    y_true = y_true.clone()
    y_true[y_true < 1e-4] = 0
    mask = (y_true != 0).float()
    mask /= mask.mean() # assign the sample weights of zeros to nonzero-values
    loss = torch.abs(y_pred - y_true)
    loss = loss * mask
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    loss[loss != loss] = 0
    return loss.mean()
def masked_mape_loss(y_pred, y_true):
    """Mean absolute percentage error over nonzero ground-truth entries.

    Divisions by masked-out zeros yield inf/nan, which are zeroed below.

    Fix: operate on a clone of ``y_true`` — the original zeroed small
    entries in place, silently mutating the caller's tensor.
    """
    y_true = y_true.clone()
    y_true[y_true < 1e-4] = 0
    mask = (y_true != 0).float()
    mask /= mask.mean()
    loss = torch.abs((y_pred - y_true) / y_true)
    loss = loss * mask
    # inf * 0 and 0/0 produce nan; replace with 0 so they don't poison the mean
    loss[loss != loss] = 0
    return loss.mean()
def masked_rmse_loss(y_pred, y_true):
    """Root mean squared error over nonzero ground-truth entries.

    Fix: operate on a clone of ``y_true`` — the original zeroed small
    entries in place, silently mutating the caller's tensor.
    """
    y_true = y_true.clone()
    y_true[y_true < 1e-4] = 0
    mask = (y_true != 0).float()
    mask /= mask.mean()
    loss = torch.pow(y_pred - y_true, 2)
    loss = loss * mask
    loss[loss != loss] = 0
    return torch.sqrt(loss.mean())
| 896 | 28.9 | 88 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/gaze_prediction_and_evaluation.py | """
The code for computing the saliency metrics is adapted from
https://github.com/tarunsharma1/saliency_metrics/blob/master/salience_metrics.py
"""
import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda import BDDA
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score
# Command-line interface: data locations, optimizer hyperparameters, grid
# geometry of the gaze map, and optional LSTM/ConvLSTM temporal modules.
parser = argparse.ArgumentParser(description='Feature Training and Test')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--best', default='', type=str, metavar='PATH', help='path to best checkpoint (default: none)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    metavar='N',
                    help='mini-batch size (default: 128), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--no_train', action='store_true', default=False)
parser.add_argument('--gridheight', default=16, type=int, metavar='N',
                    help='number of rows in grid')
parser.add_argument('--gridwidth', default=16, type=int, metavar='N',
                    help='number of columns in grid ')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--traingrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for training images')
parser.add_argument('--valgrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for validation images')
parser.add_argument('--testgrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for test images')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visalization of predicted gaze maps and target')
parser.add_argument('--threshhold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
parser.add_argument('--lstm', default=False, action='store_true', help='use lstm module')
parser.add_argument('--convlstm', default=False, action='store_true', help='use convlstm module')
parser.add_argument('--sequence', default=6, type=int, metavar='N', help='sequence length for lstm module')
def main():
    """Entry point: build the model, optionally train with per-epoch
    validation and best-checkpoint saving, then evaluate on the test set.

    Fix: in the ``args.best`` reload branch, the "no checkpoint found"
    message printed ``args.resume`` instead of ``args.best`` (copy-paste
    error), reporting the wrong path to the user.
    """
    args = parser.parse_args()
    dim = args.gridwidth*args.gridheight
    th = 1/dim  # per-cell uniform probability, used by BDDA to binarize grids
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    model = network.Net(args.gridheight, args.gridwidth)
    if args.lstm:
        model = network.LstmNet(args.gridheight, args.gridwidth)
    if args.convlstm:
        model = network.ConvLSTMNet(args.gridheight, args.gridwidth, args.sequence)
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
    # define loss function (criterion) and optimizer
    criterion = nn.BCEWithLogitsLoss().cuda(args.gpu)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                betas=(0.9, 0.999), eps=1e-08,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'], False)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Data loading code
    if not args.no_train:
        traindir = os.path.join(args.data, 'training')
        valdir = os.path.join(args.data, 'validation')
        train_dataset = BDDA("training", args.traingrid, traindir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
        val_dataset = BDDA("validation", args.valgrid, valdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle= True,
            num_workers=args.workers, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, pin_memory=True)
    testdir = os.path.join(args.data,'test')
    test_dataset = BDDA("test", args.testgrid, testdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    best_loss = 1000000
    if not args.no_train:
        for epoch in range(args.start_epoch, args.epochs):
            adjust_learning_rate(optimizer, epoch, args)
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args)
            # evaluate on validation set
            loss1 = validate(val_loader, model, criterion, args)
            # remember best acc@1 and save checkpoint
            is_best = loss1 < best_loss
            best_loss = min(loss1, best_loss)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
            }, is_best, args.best)
    # reload the best checkpoint (if any) before running the test pass
    if args.best:
        if os.path.isfile(args.best):
            print("=> loading checkpoint '{}'".format(args.best))
            checkpoint = torch.load(args.best)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'], False)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.best, checkpoint['epoch']))
        else:
            # Fix: report the path that was actually checked (args.best).
            print("=> no checkpoint found at '{}'".format(args.best))
    test(test_loader, model, criterion, args)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist ``state`` to ``filename``, but only when it is the best so far."""
    if not is_best:
        return
    torch.save(state, filename)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch: forward, loss, backward, optimizer step per
    batch, with timing statistics and periodic console logging."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        losses.update(loss.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  .format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
def adjust_learning_rate(optimizer, epoch, args):
    """Set every param group's LR to the initial LR decayed by a factor of
    10 every 10 epochs."""
    decayed = args.lr * (0.1 ** (epoch // 10))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def validate(val_loader, model, criterion, args):
    """Run one pass over the validation set and return the average loss.

    Fix: the original returned ``loss`` — the loss of the *last batch only* —
    while main() uses the return value to pick the best checkpoint. Return
    the epoch-average ``losses.avg`` instead so checkpoint selection compares
    full-epoch performance.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            losses.update(loss.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Validation: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      .format(
                       i, len(val_loader), batch_time=batch_time, loss=losses))
    return losses.avg
def test(test_loader, model, criterion, args):
    """Evaluate on the test set.

    Per batch: predict the gaze grid, expand it to a 36x64 heatmap
    (upsample + Gaussian smoothing + softmax + min-max normalization),
    then compute pixel-level metrics (BCE loss, KL divergence, CC) and
    object-level detection metrics against YOLOv5 bounding boxes — an
    object counts as "attended" when the max heatmap value inside its box
    exceeds a threshold (0.15 for ground truth, args.threshhold for the
    prediction). Also saves side-by-side visualizations per image.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    kld_losses = AverageMeter()
    cc_losses = AverageMeter()
    model.eval()
    tp = 0
    fp = 0
    fn = 0
    all_count = 0
    hm_max_values = []  # per-object max heatmap values, scores for AUC
    gt = []             # per-object binary ground-truth attendance labels
    i = 0
    heightfactor = 576//args.gridheight
    widthfactor = 1024//args.gridwidth
    smoothing = GaussianSmoothing(1, 5, 1).cuda(args.gpu)
    with torch.no_grad():
        end = time.time()
        for i, (input, target, gaze_gt, img_names) in enumerate(test_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
                gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            output = torch.sigmoid(output)
            heatmap = grid2heatmap(output,[heightfactor,widthfactor],[args.gridheight,args.gridwidth],args)
            heatmap = F.interpolate(heatmap, size=[36, 64], mode='bilinear', align_corners=False)
            heatmap = smoothing(heatmap)
            # pad back the 2 px lost on each side by the 5x5 smoothing kernel
            heatmap = F.pad(heatmap, (2, 2, 2, 2), mode='constant')
            heatmap = heatmap.view(heatmap.size(0),-1)
            heatmap = F.softmax(heatmap,dim=1)
            # normalize
            heatmap -= heatmap.min(1, keepdim=True)[0]
            heatmap /= heatmap.max(1, keepdim=True)[0]
            heatmap = heatmap.view(-1,1,36,64)
            for j in range(heatmap.size(0)):
                img_name = img_names[j]
                heatmap_img = heatmap[j] # predicted gaze map
                gt_img = gaze_gt[j] # original gaze map
                ##### compute object-level metrics
                filename = os.path.join(args.yolo5bb, img_name+".txt")
                if os.path.exists(filename):
                    with open(filename) as f:
                        # one YOLO-format line per detected object:
                        # class x_center y_center width height (all relative)
                        for linestring in f:
                            all_count += 1
                            line = linestring.split()
                            width = float(line[3])
                            height = float(line[4])
                            x_center = float(line[1])
                            y_center = float(line[2])
                            x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
                            # find maximum pixel value within object bounding box
                            gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
                            gt_obj_max = torch.max(gt_obj)
                            heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
                            heatmap_obj_max = torch.max(heatmap_obj)
                            # object is recognized if maximum pixel value is higher than th
                            gt_obj_recogn = gt_obj_max > 0.15
                            hm_obj_recogn = heatmap_obj_max > args.threshhold
                            hm_max_values.append(heatmap_obj_max)
                            if gt_obj_recogn:
                                gt.append(1)
                            else:
                                gt.append(0)
                            if (hm_obj_recogn and gt_obj_recogn):
                                tp +=1
                            elif (hm_obj_recogn and not gt_obj_recogn):
                                fp += 1
                            elif (not hm_obj_recogn and gt_obj_recogn):
                                fn += 1
                visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
            kld = kl(heatmap, gaze_gt)
            c = cc(heatmap,gaze_gt)
            losses.update(loss.item(), input.size(0))
            kld_losses.update(kld, input.size(0))
            cc_losses.update(c, input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'KL {kl.val:.4f} ({kl.avg:.4f})\t'
                      'CC {cc.val:.4f} ({cc.avg:.4f})\t'
                      .format(
                       i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
    print('Test: [{0}/{1}]\t'
          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'KL {kl.val:.4f} ({kl.avg:.4f})\t'
          'CC {cc.val:.4f} ({cc.avg:.4f})\t'
          .format(
           i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
    # Aggregate object-level metrics over the whole test set.
    precision = tp/(tp+fp)
    recall = tp/(tp+fn)
    tn = all_count-tp-fp-fn
    acc = (tp+tn)/all_count
    f1 = 2*precision*recall/(precision+recall)
    print('Object-level results:')
    print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
    print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
    print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
    """
    Convert a relative (YOLO-style) bounding box into absolute pixel borders
    for the given image size.
    :param x_center_rel: relative x value of bb center
    :param y_center_rel: relative y value of bb center
    :param width_rel: relative width
    :param height_rel: relative height
    :return: [x_min, x_max, y_min, y_max] floored to ints, clipped below at 0
    """
    half_w = 0.5 * width_rel * img_width
    half_h = 0.5 * height_rel * img_height
    cx = x_center_rel * img_width
    cy = y_center_rel * img_height
    borders = [cx - half_w, cx + half_w, cy - half_h, cy + half_h]
    return [max(int(math.floor(v)), 0) for v in borders]
def grid2heatmap(grid, size, num_grid, args):
    """
    Rearrange and expand gridvector of size (gridheight*gridwidth) to size (576 x 1024) by duplicating values
    :param grid: output vector, shape (batch, gridheight*gridwidth)
    :param size: (H,W) of one expanded grid cell
    :param num_grid: (H,W) = grid dimension
    :param args: parser arguments (args.gpu selects the CUDA device)
    :return: tensor of shape (batch, 1, 576, 1024) on the GPU

    Fix: removed the dead assignment ``test = new_heatmap[...]`` that sliced
    the heatmap on every iteration and discarded the result.
    """
    new_heatmap = torch.zeros(grid.size(0),size[0]*num_grid[0],size[1]*num_grid[1])
    for i, item in enumerate(grid):
        idx = torch.nonzero(item)
        if idx.nelement() == 0:
            print('Empty')
            continue
        for x in idx:
            # Fill the (size[0] x size[1]) cell for flat grid index x with its value.
            new_heatmap[i,x//num_grid[1]*size[0]:(x//num_grid[1]+1)*size[0],x%num_grid[1]*size[1]:(x%num_grid[1]+1)*size[1]] = item[x]
    output = new_heatmap.unsqueeze(1).cuda(args.gpu)
    return output
def cc(s_map_all, gt_all):
    """Batch-averaged Pearson correlation coefficient between predicted and
    ground-truth saliency maps. Inputs have shape (batch, 1, H, W)."""
    eps = 1e-07
    batch = s_map_all.size()[0]
    total = 0
    for k in range(0, batch):
        pred = s_map_all[k, :, :, :].squeeze()
        true = gt_all[k, :, :, :].squeeze()
        # z-score each map, then correlate on CPU
        a = ((pred - torch.mean(pred)) / (eps + torch.std(pred))).cpu()
        b = ((true - torch.mean(true)) / (eps + torch.std(true))).cpu()
        denom = torch.sqrt(torch.sum(a * a) * torch.sum(b * b)) + eps
        total += torch.sum(a * b) / denom
    return total / batch
def kl(s_map_all, gt_all):
    """Batch-averaged KL divergence KL(gt || s_map); each map is normalized
    to sum to 1 per sample. Returns 0 for inputs with fewer than 4 dims."""
    eps = torch.tensor(1e-07)
    batch = s_map_all.size()[0]
    total = 0
    if len(s_map_all.size()) > 3:
        for k in range(0, batch):
            pred = s_map_all[k, :, :, :].squeeze()
            true = gt_all[k, :, :, :].squeeze()
            # normalize to probability maps, then accumulate on CPU
            pred = (pred / (torch.sum(pred) * 1.0 + eps)).to('cpu')
            true = (true / (torch.sum(true) * 1.0 + eps)).to('cpu')
            total += torch.sum(true * torch.log(eps + true / (pred + eps)))
    return total / batch
def normalizeData(data):
    """Min-max scale ``data`` to the range [0, 1]."""
    lo = torch.min(data)
    hi = torch.max(data)
    return (data - lo) / (hi - lo)
def visualization(heatmap, gt, path, nr):
    """Save the predicted and ground-truth gaze maps as PNGs named
    ``<nr>_pred.png`` and ``<nr>_gt.png`` inside ``path``."""
    pred_img = torchvision.transforms.functional.to_pil_image(heatmap)
    gt_img = torchvision.transforms.functional.to_pil_image(gt)
    pred_img.save(os.path.join(path, '%s_pred.png' % nr))
    gt_img.save(os.path.join(path, '%s_gt.png' % nr))
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def kullback_leibler_divergence(y_true, y_pred, eps=1e-7):
    """
    Kullback-Leiber divergence (sec 4.2.3 of [1]). Assumes shape (b, 1, h, w) for all tensors.
    :param y_true: groundtruth.
    :param y_pred: prediction.
    :param eps: regularization epsilon.
    :return: loss value (one symbolic value per batch element).
    """
    # Normalize both maps to per-sample probability distributions.
    pred = y_pred / (eps + torch.sum(y_pred, dim=[1, 2, 3], keepdim=True))
    true = y_true / (eps + torch.sum(y_true, dim=[1, 2, 3], keepdim=True))
    return torch.sum(true * torch.log(eps + true / (eps + pred)), dim=[1, 2, 3])
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a
    1d, 2d or 3d tensor. Filtering is performed seperately for each channel
    in the input using a depthwise convolution.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors. Output will
            have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Allow scalar kernel_size/sigma to apply to every dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            # NOTE(review): a standard Gaussian uses exp(-(x-mean)**2 / (2*std**2));
            # here the divisor (2*std) is squared together with the numerator,
            # giving an effective std of std*sqrt(2). The kernel is renormalized
            # below, so only the spread differs — confirm before changing, as it
            # would alter evaluation numerics.
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
                      torch.exp(-((mgrid - mean) / (2 * std)) ** 2)
        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )
    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        # No padding: the output shrinks by kernel_size-1 per spatial dim;
        # the caller pads afterwards.
        return self.conv(input, weight=self.weight, groups=self.groups)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 22,026 | 36.717466 | 134 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/extract_features.py | """
The following code is adapted from the file detect.py of https://github.com/ultralytics/yolov5 (Release 5.0)
"""
import os
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
class SaveOutput:
    """Callable forward-hook sink that accumulates module outputs in call order."""
    def __init__(self):
        # Captured outputs, oldest first.
        self.outputs = list()
    def __call__(self, module, module_in, module_out):
        # Signature matches torch forward hooks: (module, inputs, output).
        self.outputs.append(module_out)
    def clear(self):
        # Rebind to a fresh list, discarding everything captured so far.
        self.outputs = list()
# Module-level plumbing for forward-hook feature extraction.
save_output = SaveOutput()  # ready-made hook instance (not referenced in the code visible here)
hook_handles = []  # intended for handles returned by register_forward_hook, for later removal
activation = {}  # maps a layer tag (e.g. 'after22') to its detached output tensor
def get_activation(name):
    """Build a forward hook that stores a layer's detached output tensor in the
    module-level ``activation`` dict under ``name``."""
    def _hook(model, input, output):
        # Detach so the stored tensor does not keep the autograd graph alive.
        activation[name] = output.detach()
    return _hook
def detect(opt):
    """Run YOLOv5 inference over opt.source, saving detections and — the local
    addition to the upstream detect.py — the activations of backbone layer 22
    as one .pt tensor per frame in opt.features.
    """
    source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    save_img = not opt.nosave and not source.endswith('.txt') # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))
    # Directories
    save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu' # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device) # load FP32 model
    stride = int(model.stride.max()) # model stride
    imgsz = check_img_size(imgsz, s=stride) # check img_size
    names = model.module.names if hasattr(model, 'module') else model.names # get class names
    if half:
        model.half() # to FP16
    # Second-stage classifier (disabled: classify is hard-coded False)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)
    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        count = 0  # NOTE(review): never read afterwards; appears to be leftover
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float() # uint8 to fp16/32
        img /= 255.0 # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        # prepare feature extraction: hook fills activation['after22'] during forward
        model.model[22].register_forward_hook(get_activation('after22')) # 22 is before last BottleneckCSP
        pred = model(img, augment=opt.augment)[0]
        # save extracted features captured by the hook, keyed by the frame's file stem
        imagename = (path.split('/')[-1]).split('.')[0]
        tensor = activation['after22'].data.cpu()
        torch.save(tensor, os.path.join(opt.features, imagename +'.pt'))
        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms,
                                   max_det=opt.max_det)
        t2 = time_synchronized()
        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred): # detections per image
            if webcam: # batch_size >= 1
                p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
            p = Path(p) # to Path
            save_path = str(save_dir / p.name) # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
            s += '%gx%g ' % img.shape[2:] # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
            imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum() # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
                    print('s', s)
                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt: # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    if save_img or opt.save_crop or view_img: # Add bbox to image
                        c = int(cls) # integer class
                        label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
                        plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness)
                        if opt.save_crop:
                            save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
            # Print time (inference + NMS)
            print(f'{s}Done. ({t2 - t1:.3f}s)')
            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1) # 1 millisecond
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else: # 'video' or 'stream'
                    if vid_path != save_path: # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release() # release previous video writer
                        if vid_cap: # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else: # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer.write(im0)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    print(f'Done. ({time.time() - t0:.3f}s)')
# Command-line entry point: parse options, verify dependencies, run detect().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    # Local addition: destination folder for the hooked layer-22 .pt feature tensors.
    parser.add_argument('--features', metavar='DIR', help='path to folder where to save features')
    opt = parser.parse_args()
    print(opt)
    check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
    with torch.no_grad():
        if opt.update: # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect(opt=opt)
                strip_optimizer(opt.weights)
        else:
            detect(opt=opt)
| 10,476 | 45.358407 | 118 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/network.py | """
The convolutional LSTM is adapted from
https://github.com/yaorong0921/Driver-Intention-Prediction/blob/master/models/convolution_lstm.py
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class Net(nn.Module):
    """Single-frame gaze-grid head: 1x1 conv over 512-channel features,
    adaptive average pooling to 6x10, then a linear layer producing one
    logit per grid cell (gridheight * gridwidth outputs)."""
    def __init__(self, gridwidth, gridheight):
        super().__init__()
        self.conv1 = nn.Conv2d(512, 16, (1, 1), stride=(1, 1))
        self.pool = nn.AdaptiveAvgPool2d((6,10))
        self.fc3 = nn.Linear(960, gridheight*gridwidth)
    def forward(self, x):
        # Drop singleton dims left over from feature loading, ensure float dtype.
        feats = torch.squeeze(x).float()
        feats = self.pool(self.conv1(feats))
        flat = torch.flatten(feats, 1)
        return self.fc3(flat)
class LstmNet(nn.Module):
    """Sequence gaze-grid head: per-frame conv+pool features are flattened,
    run through an LSTM, and the final timestep's hidden state is mapped to
    grid logits."""
    def __init__(self, gridwidth, gridheight):
        super().__init__()
        self.conv1 = nn.Conv2d(512, 16, (1, 1), stride=(1, 1))
        self.pool = nn.AdaptiveAvgPool2d((6,10))
        self.lstm = nn.LSTM(
            input_size=16*6*10,
            hidden_size=256,
            num_layers=1,
            batch_first=True)
        self.fc3 = nn.Linear(256, gridheight*gridwidth)
    def forward(self, x):
        x = torch.squeeze(x).float()
        n_batch, n_steps, chans, height, width = x.size()
        # Fold time into the batch axis so the conv processes every frame at once.
        per_frame = self.pool(self.conv1(x.view(n_batch * n_steps, chans, height, width)))
        seq = per_frame.view(n_batch, n_steps, -1)
        seq_out, _ = self.lstm(seq)
        # Only the last timestep feeds the classifier.
        return self.fc3(seq_out[:, -1, :])
class ConvLSTMCell(nn.Module):
    """One convolutional LSTM cell with Hadamard peephole connections.
    Gate pre-activations come from per-gate input (Wx*) and hidden (Wh*)
    convolutions; peephole weights Wci/Wcf/Wco are zero tensors created
    lazily by init_hidden(), so init_hidden() must run before forward().
    """
    def __init__(self, input_channels, hidden_channels, kernel_size):
        super(ConvLSTMCell, self).__init__()
        assert hidden_channels % 2 == 0
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.num_features = 4
        # 'same' padding: preserves spatial size for odd kernel sizes.
        self.padding = int((kernel_size - 1) / 2)
        # Input->gate convolutions carry the bias; hidden->gate ones do not.
        self.Wxi = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
        self.Whi = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
        self.Wxf = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
        self.Whf = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
        self.Wxc = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
        self.Whc = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
        self.Wxo = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
        self.Who = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
        # Peephole weights; filled in by init_hidden() once the spatial shape is known.
        self.Wci = None
        self.Wcf = None
        self.Wco = None
    def forward(self, x, h, c):
        """One LSTM step: returns (new_hidden, new_cell) for input x and state (h, c)."""
        ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)  # input gate
        cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)  # forget gate
        cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))  # new cell state
        co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)  # output gate
        ch = co * torch.tanh(cc)  # new hidden state
        return ch, cc
    def init_hidden(self, batch_size, hidden, shape):
        """Create zero (h, c) states and, on the first call, the peephole weights.
        NOTE(review): all zeros are created on CPU; with a CUDA input the
        peephole multiplications would hit a device mismatch — confirm usage.
        """
        if self.Wci is None:
            self.Wci = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
            self.Wcf = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
            self.Wco = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
        else:
            # Peepholes are shape-bound after first use; reject differing inputs.
            assert shape[0] == self.Wci.size()[2], 'Input Height Mismatched!'
            assert shape[1] == self.Wci.size()[3], 'Input Width Mismatched!'
        return (Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])),
                Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])))
class ConvLSTM(nn.Module):
    """Stack of ConvLSTMCell layers unrolled for a fixed number of steps.
    The SAME input tensor is fed at every step; effective_step selects which
    step outputs of the last layer are returned.
    NOTE(review): effective_step=[1] is a mutable default argument — it is
    never mutated here, but consider a tuple to be safe.
    """
    # input_channels corresponds to the first input feature map
    # hidden state is a list of succeeding lstm layers.
    def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=[1]):
        super(ConvLSTM, self).__init__()
        # Layer i consumes the hidden channels of layer i-1 (layer 0: the input).
        self.input_channels = [input_channels] + hidden_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = len(hidden_channels)
        self.step = step
        self.effective_step = effective_step
        self._all_layers = []
        for i in range(self.num_layers):
            name = 'cell{}'.format(i)
            cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
            # setattr registers the cell as a submodule under 'cell{i}'.
            setattr(self, name, cell)
            self._all_layers.append(cell)
    def forward(self, input):
        """Unroll the stack for self.step steps on a constant input.
        Returns (outputs, (h, c)): outputs holds the last layer's hidden state
        at each step listed in self.effective_step; (h, c) is the final state.
        """
        internal_state = []
        outputs = []
        for step in range(self.step):
            x = input
            for i in range(self.num_layers):
                # all cells are initialized in the first step
                name = 'cell{}'.format(i)
                if step == 0:
                    bsize, _, height, width = x.size()
                    (h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
                                                             shape=(height, width))
                    internal_state.append((h, c))
                # do forward
                (h, c) = internal_state[i]
                x, new_c = getattr(self, name)(x, h, c)
                internal_state[i] = (x, new_c)
            # only record effective steps
            if step in self.effective_step:
                outputs.append(x)
        return outputs, (x, new_c)
class ConvLSTMNet(nn.Module):
    """Gaze-grid head built on a convolutional LSTM over per-frame features:
    1x1 conv to 128 channels, pooling to 6x10, ConvLSTM unrolled for seqlen
    steps, and a linear layer over the last effective step."""
    def __init__(self, gridheight, gridwidth, seqlen):
        super().__init__()
        self.conv1 = nn.Conv2d(512, 128, (1, 1), stride=(1, 1))
        self.pool = nn.AdaptiveAvgPool2d((6,10))
        # Only the final unroll step is kept as an effective output.
        self.convlstm = ConvLSTM(input_channels=128, hidden_channels=[16], kernel_size=3, step=seqlen,
                        effective_step=[seqlen-1])
        self.fc3 = nn.Linear(960, gridheight*gridwidth)
    def forward(self, x):
        x = torch.squeeze(x).float()
        n_batch, n_steps, chans, height, width = x.size()
        # Fold time into the batch axis before the per-frame conv + pooling.
        frames = x.view(n_batch * n_steps, chans, height, width)
        pooled = self.pool(self.conv1(frames))
        lstm_outputs, _ = self.convlstm(pooled)
        last = lstm_outputs[0].view(n_batch, n_steps, -1)
        return self.fc3(last[:, -1, :])
| 6,594 | 38.491018 | 119 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/bdda.py | import os
import numpy as np
import math
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
class VideoRecord(object):
    """Thin wrapper over one CSV row: image id followed by grid-cell values."""
    def __init__(self, row):
        self._data = row
    @property
    def img_id(self):
        # First column holds the image file name (image index starts with 1).
        return (self._data[0])
    @property
    def grids(self):
        # Remaining columns are the per-cell gaze values, parsed as floats.
        return [float(item) for item in self._data[1:]]
class BDDA(Dataset):
    """
    BDDA feature class.
    Serves pre-extracted YOLOv5 feature tensors (.pt files) with binarized
    gaze-grid targets. In LSTM mode each item is a stacked sequence of
    seqlen consecutive frame features; in the 'test' subset the ground-truth
    gaze map image is returned as well.
    """
    def __init__(self, subset, file, feature_path, threshold, gazemap_path, lstm, seqlen):
        """
        Args:
            subset: 'training' / 'test'; controls noise augmentation and returns.
            file: CSV with one image id plus grid values per line.
            feature_path: directory holding the per-frame .pt feature tensors.
            threshold: grid values above this become 1.0, others 0.0.
            gazemap_path: directory of ground-truth gaze map images (test only).
            lstm: if truthy, items are sequences of seqlen features.
            seqlen: sequence length used when lstm is set.
        """
        self.subset = subset
        self.file = file
        self.feature_path = feature_path
        self.gazemap_path = gazemap_path
        self.threshold = threshold
        # NOTE(review): mean/std are never used in this class — confirm before removing.
        self.mean = torch.zeros(1024)
        self.std = torch.ones(1024)
        self.lstm = lstm
        self.seqlen = seqlen
        self._parse_list()
        # Resize ground-truth gaze maps to the model's 36x64 output grid.
        self.transform = torchvision.transforms.Compose(
            [torchvision.transforms.Resize([36,64]),
             torchvision.transforms.ToTensor()])
    def _parse_list(self):
        """Read the CSV, keep entries whose feature file exists and whose grid is
        not all-NaN; in LSTM mode also build a per-clip sorted frame index."""
        self.img_list = []
        tmp = [x.strip().split(',') for x in open(self.file)]
        img_list = [VideoRecord(item) for item in tmp]
        if self.lstm:
            self.img_dict = {}
            clips = list(set([x.split('_')[0] for x in open(self.file)]))
            for clip in clips:
                self.img_dict[clip] = []
            for item in img_list:
                # Image ids look like '<clip>_<framenr>.<ext>'.
                img_name = item.img_id.split('.')[0]
                feature_name = img_name + ".pt"
                clip = item.img_id.split('.')[0].split('_')[0]
                img_nr = item.img_id.split('.')[0].split('_')[1]
                grid = item.grids
                feature_path = os.path.join(self.feature_path,feature_name)
                if os.path.exists(feature_path) and not all(math.isnan(y) for y in grid):
                    self.img_list.append(item)
                    self.img_dict[clip].append(img_nr)
                else:
                    print('error loading feature:', feature_path)
            # NOTE(review): frame numbers sort as strings — confirm ids are zero-padded.
            for key in self.img_dict:
                self.img_dict[key].sort()
            print('video number in %s: %d'%(self.subset,(len(self.img_list))))
        else:
            for item in img_list:
                img_name = item.img_id.split('.')[0]
                feature_name = img_name + ".pt"
                grid = item.grids
                feature_path = os.path.join(self.feature_path,feature_name)
                if os.path.exists(feature_path) and not all(math.isnan(y) for y in grid):
                    self.img_list.append(item)
                else:
                    print('error loading feature:', feature_path)
            print('video number in %s: %d'%(self.subset,(len(self.img_list))))
    def _normalizeData(self, data):
        # Min-max scale a tensor into [0, 1].
        return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
    def __len__(self):
        # Number of usable samples found by _parse_list().
        return len(self.img_list)
    def __getitem__(self, index):
        """
        Returns (feature, grid) for training, or
        (feature, grid, gaze_gt, img_name) for the 'test' subset. In LSTM
        mode `feature` is a stack of the seqlen frames ending at `index`.
        """
        if self.lstm:
            record = self.img_list[index]
            img_name = record.img_id.split('.')[0]
            feature_name = img_name + ".pt"
            clip = record.img_id.split('.')[0].split('_')[0]
            img_nr = record.img_id.split('.')[0].split('_')[1]
            dict_idx = self.img_dict[clip].index(img_nr)
            feature_path = os.path.join(self.feature_path,feature_name)
            feature = torch.load(feature_path)
            # create list with previous features, last one is original
            feature_list = []
            first = dict_idx-(self.seqlen-1)
            duplicate = 0
            if first < 0:
                duplicate = abs(first) # if there are not enough previous features, we duplicate original to get seqlen
                first = 0
            for idx in range(first, dict_idx+1):
                feature_name2 = clip+'_'+self.img_dict[clip][idx]+ ".pt"
                feature_path2 = os.path.join(self.feature_path,feature_name2)
                feature2 = torch.load(feature_path2)
                feature_list.append(feature2)
            if duplicate:
                for i in range(duplicate):
                    feature_list.append(feature)
            feature = torch.stack(feature_list)
        else:
            record = self.img_list[index]
            img_name = record.img_id.split('.')[0]
            feature_name = img_name + ".pt"
            feature_path = os.path.join(self.feature_path,feature_name)
            feature = torch.load(feature_path)
            if self.subset == 'training':
                # Additive Gaussian noise on the raw features as augmentation.
                feature = feature + torch.randn(512,12,20)
        # set grid values <= 1/gridsize to 0, others to 1
        grid = np.array(record.grids)
        grid[grid>self.threshold] = 1.0
        grid[grid<=self.threshold] = 0.0
        grid = grid.astype(np.float32)
        if self.subset == 'test':
            name = record.img_id.split('_')
            gaze_file = name[0] + '_pure_hm_' + name[1]
            # Crop away letterbox bars, then resize/normalize into [0, 1].
            gaze_gt = Image.open(os.path.join(self.gazemap_path, gaze_file)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
            gaze_gt = self.transform(gaze_gt)
            gaze_gt = self._normalizeData(gaze_gt)
            return feature, grid, gaze_gt, img_name
        else:
            return feature, grid
| 5,490 | 31.684524 | 134 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/More files/evaluation_otherModel.py | import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda_otherModels import BDDA
import numpy as np
from PIL import Image
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score
# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser(description='Evalutaion of given Predictions')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    metavar='N',
                    help='mini-batch size (default: 128), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--predictions', metavar='DIR', help='path to predicted gaze maps folder')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visalization of predicted gaze maps and target')
parser.add_argument('--threshhold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
# Fix: main() reads args.lstm, args.convlstm and args.sequence, but these
# options were never declared, so every run crashed with AttributeError.
parser.add_argument('--lstm', action='store_true', help='dataset serves LSTM feature sequences')
parser.add_argument('--convlstm', action='store_true', help='dataset serves ConvLSTM feature sequences')
parser.add_argument('--sequence', default=8, type=int, metavar='N',
                    help='sequence length for recurrent features (default: 8 — TODO confirm)')
def main():
    """Parse CLI args, build the BDDA test dataset and run the evaluation loop."""
    args = parser.parse_args()
    dim = 256
    th = 1/dim  # grid-cell activation threshold handed to the dataset
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    testdir = os.path.join(args.data,'test')
    # Fix: the parser historically did not declare --lstm/--convlstm/--sequence,
    # so reading them directly raised AttributeError; getattr() keeps this
    # working with sensible defaults regardless of the parser's option set.
    use_seq = getattr(args, 'lstm', False) or getattr(args, 'convlstm', False)
    seq_len = getattr(args, 'sequence', 1)
    test_dataset = BDDA("test", testdir, th, args.gazemaps, use_seq, seq_len)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    test(test_loader, args)
def test(test_loader, args):
    """Evaluate externally predicted gaze heatmaps against ground truth.

    Computes batch-averaged KL divergence and correlation coefficient, plus
    object-level precision/recall/F1/accuracy/AUC over YOLOv5 bounding boxes,
    and saves prediction/target visualizations.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()  # never updated below; printed averages stay 0
    kld_losses = AverageMeter()
    cc_losses = AverageMeter()
    # Object-level confusion counts.
    tp = 0
    fp = 0
    fn = 0
    all_count = 0
    hm_max_values = []  # per-object max heatmap values (scores for AUC)
    gt = []  # per-object binary labels (object attended in ground truth)
    i = 0
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.Resize([36,64]),
         torchvision.transforms.ToTensor()])
    with torch.no_grad():
        end = time.time()
        for i, (gaze_gt, img_names) in enumerate(test_loader):
            if args.gpu is not None:
                gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
            # Load the pre-computed prediction heatmaps for this batch.
            first = True
            for img in img_names:
                heatfile = img+ '.jpg'
                heatmap = Image.open(os.path.join(args.predictions ,heatfile))#.convert('L')#.crop((0,96,1024,672)) #left,top,right,bottom
                heatmap = transform(heatmap)
                heatmap = normalizeData(heatmap)
                if first:
                    heatmap_batch = heatmap[None]
                    first = False
                else:
                    heatmap_batch = torch.cat((heatmap_batch, heatmap[None]), 0)
            heatmap = heatmap_batch
            for j in range(heatmap.size(0)):
                img_name = img_names[j]
                heatmap_img = heatmap[j] # predicted gaze map
                gt_img = gaze_gt[j] # original gaze map
                ##### compute object-level metrics
                filename = os.path.join(args.yolo5bb, img_name+".txt")
                if os.path.exists(filename):
                    with open(filename) as f:
                        for linestring in f:
                            all_count += 1
                            line = linestring.split()
                            width = float(line[3])
                            height = float(line[4])
                            x_center = float(line[1])
                            y_center = float(line[2])
                            x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
                            # find maximum pixel value within object bounding box
                            gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
                            gt_obj_max = torch.max(gt_obj)
                            heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
                            heatmap_obj_max = torch.max(heatmap_obj)
                            print(heatmap_obj_max)
                            # object is recognized if maximum pixel value is higher than th
                            gt_obj_recogn = gt_obj_max > 0.15
                            hm_obj_recogn = heatmap_obj_max > args.threshhold
                            hm_max_values.append(heatmap_obj_max)
                            if gt_obj_recogn:
                                gt.append(1)
                            else:
                                gt.append(0)
                            if (hm_obj_recogn and gt_obj_recogn):
                                tp +=1
                            elif (hm_obj_recogn and not gt_obj_recogn):
                                fp += 1
                            elif (not hm_obj_recogn and gt_obj_recogn):
                                fn += 1
                visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
            kld = kl(heatmap, gaze_gt)
            c = cc(heatmap,gaze_gt)
            # Fix: this previously read `input.size(0)`, but no variable named
            # `input` exists — it resolved to the builtin function and raised
            # AttributeError on every batch. The batch size is heatmap.size(0).
            kld_losses.update(kld, heatmap.size(0))
            cc_losses.update(c, heatmap.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'KL {kl.val:.4f} ({kl.avg:.4f})\t'
                      'CC {cc.val:.4f} ({cc.avg:.4f})\t'
                      .format(
                       i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
    print('Test: [{0}/{1}]\t'
          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'KL {kl.val:.4f} ({kl.avg:.4f})\t'
          'CC {cc.val:.4f} ({cc.avg:.4f})\t'
          .format(
           i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
    # Object-level summary metrics (NOTE(review): divides by zero when tp+fp,
    # tp+fn or all_count is 0 — acceptable for non-empty test sets).
    precision = tp/(tp+fp)
    recall = tp/(tp+fn)
    tn = all_count-tp-fp-fn
    acc = (tp+tn)/all_count
    f1 = 2*precision*recall/(precision+recall)
    print('Object-level results:')
    print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
    print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
    print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
    """
    Compute absolute bounding boxes values for given image size and given relative parameters
    :param x_center_rel: relative x value of bb center
    :param y_center_rel: relative y value of bb center
    :param width_rel: relative width
    :param height_rel: relative height
    :return: absolute values of bb borders [x_min, x_max, y_min, y_max], clamped at 0
    """
    center_x = x_center_rel * img_width
    center_y = y_center_rel * img_height
    half_w = 0.5 * (width_rel * img_width)
    half_h = 0.5 * (height_rel * img_height)
    # Floor each edge to pixel coordinates, then clamp negatives to the image border.
    edges = (center_x - half_w, center_x + half_w, center_y - half_h, center_y + half_h)
    return [max(0, int(math.floor(v))) for v in edges]
def cc(s_map_all,gt_all):
    """Mean Pearson correlation coefficient between predicted and ground-truth
    maps over a batch of (B, C, H, W) tensors; eps guards zero variance."""
    eps = 1e-07
    batch = s_map_all.size()[0]
    total = 0
    for idx in range(batch):
        pred = s_map_all[idx, :, :, :].squeeze()
        target = gt_all[idx, :, :, :].squeeze()
        # Standardize both maps before correlating.
        pred_norm = (pred - torch.mean(pred)) / (eps + torch.std(pred))
        target_norm = (target - torch.mean(target)) / (eps + torch.std(target))
        a = pred_norm.cpu()
        b = target_norm.cpu()
        total += torch.sum(a * b) / (torch.sqrt(torch.sum(a * a) * torch.sum(b * b)) + eps)
    return total / batch
def kl(s_map_all, gt_all):
    """Mean KL divergence KL(gt || prediction) over a batch of (B, C, H, W)
    maps; each map is normalized to sum to one, with eps guarding the
    normalization and the log. Returns 0 for inputs with fewer than 4 dims."""
    eps = torch.tensor(1e-07)
    batch = s_map_all.size()[0]
    total = 0
    if len(s_map_all.size()) > 3:
        for idx in range(batch):
            pred = s_map_all[idx, :, :, :].squeeze()
            target = gt_all[idx, :, :, :].squeeze()
            # Turn both maps into probability distributions.
            pred = pred / (torch.sum(pred) * 1.0 + eps)
            target = target / (torch.sum(target) * 1.0 + eps)
            target = target.to('cpu')
            pred = pred.to('cpu')
            total += torch.sum(target * torch.log(eps + target / (pred + eps)))
    return total / batch
def normalizeData(data):
    """Min-max scale a tensor into [0, 1]."""
    lo, hi = torch.min(data), torch.max(data)
    return (data - lo) / (hi - lo)
def visualization(heatmap, gt, path, nr):
    """Save the predicted and ground-truth maps as '<nr>_pred.png' and
    '<nr>_gt.png' inside *path*."""
    pred_img = torchvision.transforms.functional.to_pil_image(heatmap)
    gt_img = torchvision.transforms.functional.to_pil_image(gt)
    pred_img.save(os.path.join(path, '%s_pred.png'%nr))
    gt_img.save(os.path.join(path, '%s_gt.png'%nr))
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""
    def __init__(self):
        self.reset()
    def reset(self):
        # Zero out all statistics.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0
    def update(self, val, n=1):
        # Record a new observation, optionally weighted by n samples.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 9,879 | 34.285714 | 138 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/More files/compute_BDDA_baseline.py | import os
from PIL import Image
import numpy as np
import math
import argparse
import os
import numpy as np
import math
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
# CLI: the only input is the folder of ground-truth gaze-map images.
parser = argparse.ArgumentParser(description='Create average baseline for given gaze map images')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
def main():
    """Average all ground-truth gaze maps into one normalized baseline map and
    dump it to avgBaseline.txt (rows of floats, via numpy.savetxt)."""
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.Resize([36,64]),
         torchvision.transforms.ToTensor()])
    args = parser.parse_args()
    count = 0
    # Running pixel-wise sum; renamed from `sum` to stop shadowing the builtin.
    total = None
    for root, dirs, files in os.walk(args.gazemaps):
        for item in files:
            # Fix: join against the walk's current `root`, not the top folder,
            # so files inside subdirectories resolve to their real path.
            # Crop away the top/bottom letterbox bars before resizing.
            gt = Image.open(os.path.join(root,item)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
            gt = np.array(transform(gt))
            gt = normalizeData(gt)
            if np.isnan(np.sum(gt)):
                # Constant images normalize to NaN (0/0); skip them.
                continue
            if total is None:
                total = gt
            else:
                total += gt
            count += 1
            if count%500 == 0:
                print("Count: %d"%count)
    # Fix: previously `sum` was referenced unconditionally and raised
    # NameError when no valid image was found; bail out gracefully instead.
    if total is None:
        print("No valid gaze maps found in %s" % args.gazemaps)
        return
    total = normalizeData(total)
    with open("avgBaseline.txt", "w") as a_file:
        for row in total:
            np.savetxt(a_file, row)
def normalizeData(s_map):
    """Min-max scale an array into [0, 1]."""
    lo = np.min(s_map)
    span = (np.max(s_map) - lo) * 1.0
    return (s_map - lo) / span
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 1,331 | 22.368421 | 110 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/More files/flops_counter.py | '''
Copyright (C) 2019 Sovrasov V. - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
* You should have received a copy of the MIT license with
* this file. If not visit https://opensource.org/licenses/MIT
'''
# this script can be used to evaluate model complexity with the following lines:
#===========================================================================================
#flops, params = get_model_complexity_info(model, (input-channel,input-height,input-width), as_strings=True, print_per_layer_stat=True)
#print('{:<30} {:<8}'.format('Computational complexity: ', flops))
#print('{:<30} {:<8}'.format('Number of parameters: ', params))
#===========================================================================================
import sys
import torch
import torch.nn as nn
import numpy as np
def get_model_complexity_info(model, input_res,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None, ost=sys.stdout):
    """Estimate the MAC count and parameter count of *model* for one input of
    shape input_res (a batch dimension of 1 is added internally).
    Returns (flops, params), formatted as strings when as_strings is True.
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 2
    # Patch the flops-counting API onto the model, then run one forward pass.
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        # Caller-supplied factory builds a dict of keyword inputs for the model.
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        try:
            # Match the model's own dtype/device for the dummy batch.
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(flops_model.parameters()).dtype,
                                             device=next(flops_model.parameters()).device)
        except StopIteration:
            # Model has no parameters: fall back to a default-typed CPU batch.
            batch = torch.ones(()).new_empty((1, *input_res))
        _ = flops_model(batch)
    flops_count = flops_model.compute_average_flops_cost()
    params_count = get_model_parameters_number(flops_model)
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost)
    flops_model.stop_flops_count()
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
    """Format a MAC count as a string; auto-scale to G/M/K when *units* is
    None, otherwise use the requested unit (unknown units yield raw Macs)."""
    divisors = {'GMac': 10.**9, 'MMac': 10.**6, 'KMac': 10.**3}
    if units is None:
        # Pick the largest unit whose integer quotient is non-zero.
        for unit_name in ('GMac', 'MMac', 'KMac'):
            if flops // int(divisors[unit_name]) > 0:
                return str(round(flops / divisors[unit_name], precision)) + ' ' + unit_name
        return str(flops) + ' Mac'
    if units in divisors:
        return str(round(flops / divisors[units], precision)) + ' ' + units
    return str(flops) + ' Mac'
def params_to_string(params_num, units=None, precision=2):
    """Format a parameter count; auto-scale to 'M'/'k' when *units* is None,
    otherwise force 'M' or 'K' (unknown units yield the raw number).

    Fix: the auto-scaling branch previously hard-coded a precision of 2,
    silently ignoring the ``precision`` argument; it now honours it.
    """
    if units is None:
        if params_num // 10 ** 6 > 0:
            return str(round(params_num / 10 ** 6, precision)) + ' M'
        elif params_num // 10 ** 3:
            return str(round(params_num / 10 ** 3, precision)) + ' k'
        else:
            return str(params_num)
    else:
        if units == 'M':
            return str(round(params_num / 10.**6, precision)) + ' ' + units
        elif units == 'K':
            return str(round(params_num / 10.**3, precision)) + ' ' + units
        else:
            return str(params_num)
def print_model_with_flops(model, total_flops, total_params, units='GMac',
                           precision=3, ost=sys.stdout):
    """Print *model* with per-module parameter and MAC shares by temporarily
    swapping each submodule's extra_repr for a flops-reporting version, then
    restoring the original afterwards."""
    def accumulate_params(self):
        # Parameter count of this module including all children.
        return get_model_parameters_number(self)
    def accumulate_flops(self):
        # Supported leaf modules carry their own counter; containers sum children.
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum
    def flops_repr(self):
        # One line per module: params, share of params, MACs, share of MACs,
        # followed by the module's original extra_repr text.
        accumulated_params_num = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        if total_params == 0:
            # Avoid division by zero for parameter-less models.
            return ', '.join([params_to_string(accumulated_params_num, units='M', precision=precision),
                              '{:.3%} Params'.format(0),
                              flops_to_string(accumulated_flops_cost, units=units, precision=precision),
                              '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                              self.original_extra_repr()])
        else:
            return ', '.join([params_to_string(accumulated_params_num, units='M', precision=precision),
                              '{:.3%} Params'.format(accumulated_params_num / total_params),
                              flops_to_string(accumulated_flops_cost, units=units, precision=precision),
                              '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                              self.original_extra_repr()])
    def add_extra_repr(m):
        # Bind helpers onto the module and swap in the reporting extra_repr,
        # keeping the original for later restoration.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr
    def del_extra_repr(m):
        # Restore the original extra_repr and drop the helper bindings.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    model.apply(add_extra_repr)
    print(model, file=ost)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Total number of parameters in *model* (trainable or not)."""
    return sum(p.numel() for p in model.parameters())
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting API (start/stop/reset/compute) to a module.

    Counters are reset and the mask variables initialised before the same
    module object is returned, so the result can be used immediately.
    """
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
    net_main_module.reset_flops_count()
    # Adding variables necessary for masked flops computation
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
def compute_average_flops_cost(self):
    """Mean flops consumption per image since the last reset.

    Available as a bound method after ``add_flops_counting_methods()`` has
    been called on the network: sums the ``__flops__`` counters of all
    supported submodules and divides by the number of processed batches.
    """
    total_flops = sum(m.__flops__ for m in self.modules()
                      if is_supported_instance(m))
    return total_flops / self.__batch_counter__
def start_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Activates the computation of mean flops consumption per image.
    Call it before you run the network.
    """
    # Count batches on the root module and flops on every supported submodule.
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Remove both the batch-counter hook and the per-module flops hooks.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Resets statistics computed so far.
    """
    # Zero the batch counter and the flops counter of every supported module.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
    """Attach *mask* (stored as ``__mask__``) to every Conv2d inside *module*."""
    def _set_mask(submodule):
        # Only Conv2d layers consume the mask in conv_flops_counter_hook.
        if isinstance(submodule, torch.nn.Conv2d):
            submodule.__mask__ = mask
    module.apply(_set_mask)
def remove_flops_mask(module):
    # Resetting the mask variables (back to None) removes any mask set before.
    module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules that contribute no flops; counter unchanged
    (still touches ``__flops__`` so a missing counter surfaces as an error)."""
    module.__flops__ = module.__flops__ + 0
def upsample_flops_counter_hook(module, input, output):
    """One flop per element of ``output[0]``.

    NOTE(review): counts only the first output slice — kept identical to the
    original behaviour; confirm against the expected output layout.
    """
    first = output[0]
    elements = first.shape[0]
    for dim in first.shape[1:]:
        elements *= dim
    module.__flops__ += int(elements)
def relu_flops_counter_hook(module, input, output):
    """One flop per output element for elementwise activations."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """MACs for a fully connected layer: input elements x output features."""
    first_input = input[0]
    # pytorch checks dimensions, so here we don't care much
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(first_input.shape) * out_features)
def pool_flops_counter_hook(module, input, output):
    """One flop per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
def bn_flops_counter_hook(module, input, output):
    """Flops for batch norm: one per input element, doubled when affine
    (extra scale-and-shift pass).

    Fix: removed a stray no-op expression statement (`module.affine`) that
    preceded the real check.
    """
    input = input[0]
    batch_flops = np.prod(input.shape)
    if module.affine:
        batch_flops *= 2
    module.__flops__ += int(batch_flops)
def deconv_flops_counter_hook(conv_module, input, output):
    """MACs for ConvTranspose2d: kernel work at every *input* position,
    plus bias additions over the output map."""
    # Can have multiple inputs, getting the first one
    input = input[0]
    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]
    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel
    # Transposed convolution slides the kernel over the input positions.
    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # Fix: was `output_height * output_height`, which miscounted the bias
        # term for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops
    conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
    """MACs for Conv1d/2d/3d, with optional spatial mask support."""
    # Can have multiple inputs, getting the first one
    input = input[0]
    batch_size = input.shape[0]
    output_dims = list(output.shape[2:])
    kernel_dims = list(conv_module.kernel_size)
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = np.prod(kernel_dims) * in_channels * filters_per_channel
    active_elements_count = batch_size * np.prod(output_dims)
    if conv_module.__mask__ is not None:
        # (b, 1, h, w) — only masked-in positions count as active.
        # Fix: the original referenced undefined `output_height`/`output_width`
        # here, raising NameError whenever a mask was set.
        flops_mask = conv_module.__mask__.expand(batch_size, 1, output_dims[0], output_dims[1])
        active_elements_count = flops_mask.sum()
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        bias_flops = out_channels * active_elements_count
    overall_flops = overall_conv_flops + bias_flops
    conv_module.__flops__ += int(overall_flops)
def batch_counter_hook(module, input, output):
    """Forward hook on the root module: accumulate the processed batch size.

    Fix: dropped the redundant `pass` that preceded the warning in the
    empty-input branch.
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module, assuming batch size is 1.')
    module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
    # Initialise (or reset) the running count of samples seen by `module`.
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    # Register the batch-counting forward hook once, keeping its handle so it
    # can be removed later.
    if hasattr(module, '__batch_counter_handle__'):
        return
    handle = module.register_forward_hook(batch_counter_hook)
    module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
    """Detach the batch-counter forward hook from *module*, if installed."""
    try:
        handle = module.__batch_counter_handle__
    except AttributeError:
        return
    handle.remove()
    del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    # Initialise (or reset) the flops counter on modules we know how to count.
    if is_supported_instance(module):
        module.__flops__ = 0
# Maps module types to the forward hook that accumulates their flops.
# NOTE(review): entries mix `torch.nn.*` and bare `nn.*`; `nn` is presumably
# bound to torch.nn by an import outside this view — confirm.
MODULES_MAPPING = {
    # convolutions
    torch.nn.Conv1d: conv_flops_counter_hook,
    torch.nn.Conv2d: conv_flops_counter_hook,
    torch.nn.Conv3d: conv_flops_counter_hook,
    # activations
    torch.nn.ReLU: relu_flops_counter_hook,
    torch.nn.PReLU: relu_flops_counter_hook,
    torch.nn.ELU: relu_flops_counter_hook,
    torch.nn.LeakyReLU: relu_flops_counter_hook,
    torch.nn.ReLU6: relu_flops_counter_hook,
    torch.nn.SiLU : relu_flops_counter_hook,
    # poolings
    torch.nn.MaxPool1d: pool_flops_counter_hook,
    torch.nn.AvgPool1d: pool_flops_counter_hook,
    torch.nn.AvgPool2d: pool_flops_counter_hook,
    torch.nn.MaxPool2d: pool_flops_counter_hook,
    torch.nn.MaxPool3d: pool_flops_counter_hook,
    torch.nn.AvgPool3d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
    # BNs
    torch.nn.BatchNorm1d: bn_flops_counter_hook,
    torch.nn.BatchNorm2d: bn_flops_counter_hook,
    torch.nn.BatchNorm3d: bn_flops_counter_hook,
    # FC
    torch.nn.Linear: linear_flops_counter_hook,
    # Upscale
    torch.nn.Upsample: upsample_flops_counter_hook,
    # Deconvolution
    torch.nn.ConvTranspose2d: deconv_flops_counter_hook,
}
def is_supported_instance(module):
    """Return True when *module*'s exact type has a registered flops hook.

    Uses ``type`` (not ``isinstance``) deliberately: subclasses are matched
    only if they are registered explicitly in MODULES_MAPPING.
    """
    # Fix: replaced the `if cond: return True / return False` anti-idiom.
    return type(module) in MODULES_MAPPING
def add_flops_counter_hook_function(module):
    # Register the type-specific flops hook exactly once per supported module.
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            return
        handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
        module.__flops_handle__ = handle
    else:
        # NOTE(review): also fires for container modules (e.g. Sequential),
        # which legitimately have no flops of their own.
        print('missing module', module)
def remove_flops_counter_hook_function(module):
    # Detach and forget the hook installed by add_flops_counter_hook_function.
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
    # Supported modules get a `__mask__` slot; None means "no mask applied".
    if is_supported_instance(module):
        module.__mask__ = None
| 14,874 | 33.512761 | 139 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/More files/evaluation_BDDA_baseline.py | import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda_otherModels import BDDA
import numpy as np
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score
# Command-line interface for the baseline evaluation script.
parser = argparse.ArgumentParser(description='Baseline Evaluation')
# Data-loading / batching options.
parser.add_argument('--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    metavar='N',
                    help='mini-batch size (default: 128), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
# Input/output paths.
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--baseline', metavar='DIR', help='path to txt file with baseline')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visalization of predicted gaze maps and target')
# NOTE(review): '--threshhold' spelling (sic) is part of the public CLI;
# renaming it would break existing invocations.
parser.add_argument('--threshhold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
def main():
    """Entry point: build the test dataset/loader and run the evaluation.

    NOTE(review): this parser does not define `data`, `testgrid`, `lstm`,
    `convlstm` or `sequence`, so the `args.*` accesses below raise
    AttributeError as written — these options were presumably defined in a
    fuller variant of this script; confirm before running.
    """
    dim = 256
    th = 1/dim  # per-pixel threshold derived from the map resolution
    args = parser.parse_args()
    testdir = os.path.join(args.data,'test')
    # NOTE(review): the BDDA class visible in bdda_otherModels takes
    # (file, threshold, gazemap_path); this 7-argument call suggests a
    # different BDDA variant is expected — verify the import.
    test_dataset = BDDA("test", args.testgrid, testdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    test(test_loader, args)
def test(test_loader, args):
    """Evaluate the static baseline gaze map against ground truth.

    Computes KL divergence and CC per batch plus object-level
    precision/recall/F1/accuracy/AUC over YOLOv5 bounding boxes.
    NOTE(review): `losses` is never updated, so the printed 'Loss' is always
    0; `tp+fp` or `tp+fn` equal to 0 would raise ZeroDivisionError; the
    baseline file is re-read from disk on every batch (could be hoisted).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    kld_losses = AverageMeter()
    cc_losses = AverageMeter()
    tp = 0
    fp = 0
    fn = 0
    all_count = 0
    hm_max_values = []  # per-object max predicted value (AUC scores)
    gt = []  # per-object binary ground-truth labels
    i = 0
    #smoothing = GaussianSmoothing(1, 5, 1).cuda(args.gpu)
    with torch.no_grad():
        end = time.time()
        for i, (gaze_gt, img_names) in enumerate(test_loader):
            if args.gpu is not None:
                gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
            # The "prediction" is the same static baseline map for every image.
            heatmap = torch.from_numpy(np.loadtxt(args.baseline)).unsqueeze(0)
            heatmap = heatmap.unsqueeze(0).repeat(gaze_gt.size(0), 1, 1, 1)
            for j in range(heatmap.size(0)):
                img_name = img_names[j]
                heatmap_img = heatmap[j] # predicted gaze map
                gt_img = gaze_gt[j] # original gaze map
                filename = os.path.join(args.yolo5bb, img_name+".txt")
                if os.path.exists(filename):
                    with open(filename) as f:
                        # One YOLO-format box per line: cls cx cy w h (relative).
                        for linestring in f:
                            all_count += 1
                            line = linestring.split()
                            width = float(line[3])
                            height = float(line[4])
                            x_center = float(line[1])
                            y_center = float(line[2])
                            x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
                            # find maximum pixel value within object bounding box
                            gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
                            gt_obj_max = torch.max(gt_obj)
                            heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
                            heatmap_obj_max = torch.max(heatmap_obj)
                            # object is recognized if maximum pixel value is higher than th
                            gt_obj_recogn = gt_obj_max > 0.15
                            hm_obj_recogn = heatmap_obj_max > args.threshhold
                            hm_max_values.append(heatmap_obj_max)
                            if gt_obj_recogn:
                                gt.append(1)
                            else:
                                gt.append(0)
                            if (hm_obj_recogn and gt_obj_recogn):
                                tp +=1
                            elif (hm_obj_recogn and not gt_obj_recogn):
                                fp += 1
                            elif (not hm_obj_recogn and gt_obj_recogn):
                                fn += 1
                visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
            kld = kl(heatmap, gaze_gt)
            c = cc(heatmap,gaze_gt)
            kld_losses.update(kld, heatmap.size(0))
            cc_losses.update(c, heatmap.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'KL {kl.val:.4f} ({kl.avg:.4f})\t'
                  'CC {cc.val:.4f} ({cc.avg:.4f})\t'
                  .format(
                   i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
    precision = tp/(tp+fp)
    recall = tp/(tp+fn)
    tn = all_count-tp-fp-fn
    acc = (tp+tn)/all_count
    f1 = 2*precision*recall/(precision+recall)
    print('Object-level results:')
    print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
    print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
    print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
    """Convert relative YOLO box coordinates into absolute pixel borders.

    :param x_center_rel: relative x value of the box centre.
    :param y_center_rel: relative y value of the box centre.
    :param width_rel: relative box width.
    :param height_rel: relative box height.
    :param img_width: target image width in pixels.
    :param img_height: target image height in pixels.
    :return: [x_min, x_max, y_min, y_max], each floored and clamped to >= 0.
    """
    box_w = width_rel * img_width
    box_h = height_rel * img_height
    cx = x_center_rel * img_width
    cy = y_center_rel * img_height
    borders = [
        int(math.floor(cx - 0.5 * box_w)),
        int(math.floor(cx + 0.5 * box_w)),
        int(math.floor(cy - 0.5 * box_h)),
        int(math.floor(cy + 0.5 * box_h)),
    ]
    # Negative coordinates are clamped to the image border.
    return [max(border, 0) for border in borders]
def cc(s_map_all,gt_all):
    """Mean Pearson correlation coefficient between predicted and ground-truth
    maps, averaged over the batch dimension."""
    eps = 1e-07
    batch = s_map_all.size()[0]
    total = 0
    for idx in range(0, batch):
        pred = s_map_all[idx, :, :, :].squeeze()
        target = gt_all[idx, :, :, :].squeeze()
        # Standardise both maps, then correlate on CPU.
        pred_norm = ((pred - torch.mean(pred)) / (eps + torch.std(pred))).cpu()
        target_norm = ((target - torch.mean(target)) / (eps + torch.std(target))).cpu()
        denom = torch.sqrt(torch.sum(pred_norm * pred_norm) * torch.sum(target_norm * target_norm)) + eps
        total += torch.sum(pred_norm * target_norm) / denom
    return total / batch
def kl(s_map_all, gt_all):
    """Mean KL divergence KL(gt || prediction) over a batch of gaze maps.

    Both maps are normalised to probability distributions first.  For inputs
    with three or fewer dimensions the loop is skipped and 0 is returned,
    matching the original behaviour.
    """
    eps = torch.tensor(1e-07)
    batch = s_map_all.size()[0]
    total = 0
    if len(s_map_all.size()) > 3:
        for idx in range(0, batch):
            pred = s_map_all[idx, :, :, :].squeeze()
            target = gt_all[idx, :, :, :].squeeze()
            pred = pred / (torch.sum(pred) * 1.0 + eps)
            target = target / (torch.sum(target) * 1.0 + eps)
            target = target.to('cpu')
            pred = pred.to('cpu')
            total += torch.sum(target * torch.log(eps + target / (pred + eps)))
    return total / batch
def normalizeData(data):
    """Min-max normalise *data* into the range [0, 1]."""
    lo, hi = torch.min(data), torch.max(data)
    return (data - lo) / (hi - lo)
def visualization(heatmap, gt, path, nr):
    """Save the predicted heatmap and the ground truth as PNGs.

    Files are written to ``<path>/<nr>_pred.png`` and ``<path>/<nr>_gt.png``.
    NOTE(review): `to_pil_image` assumes CHW float tensors in [0, 1] (or
    uint8) — confirm the callers' value range.
    """
    heatmap = torchvision.transforms.functional.to_pil_image(heatmap)
    gt = torchvision.transforms.functional.to_pil_image(gt)
    heatmap.save(os.path.join(path, '%s_pred.png'%nr))
    gt.save(os.path.join(path, '%s_gt.png'%nr))
class AverageMeter(object):
    """Tracks the most recent value together with a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
# Run the evaluation when executed as a script.
if __name__ == '__main__':
    main()
| 8,598 | 34.097959 | 128 | py |
driver-gaze-yolov5 | driver-gaze-yolov5-main/More files/bdda_otherModels.py | import os
import numpy as np
import math
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
class VideoRecord(object):
    """Wraps one comma-separated record: first field is the image id, the
    remaining fields are numeric grid values."""

    def __init__(self, row):
        self._data = row

    @property
    def img_id(self):
        # First column holds the image identifier (image index starts with 1).
        return self._data[0]

    @property
    def grids(self):
        # Remaining columns parsed as floats.
        return [float(item) for item in self._data[1:]]
class BDDA(Dataset):
    """
    BDDA feature class: yields (normalised 36x64 gaze map, image name) pairs
    read from an index file of comma-separated records.
    """
    def __init__(self, file, threshold, gazemap_path):
        """
        Args:
            file: path to the index file, one comma-separated record per line.
            threshold: stored on the instance; unused in the visible code.
            gazemap_path: directory holding '<vid>_pure_hm_<frame>' map images.
        """
        self.file = file
        self.gazemap_path = gazemap_path
        self.threshold = threshold
        # NOTE(review): mean/std are initialised but never used in this class.
        self.mean = torch.zeros(1024)
        self.std = torch.ones(1024)
        self._parse_list()
        # Downscale the cropped map to 36x64 and convert to a [0, 1] tensor.
        self.transform = torchvision.transforms.Compose(
            [torchvision.transforms.Resize([36,64]),
            torchvision.transforms.ToTensor()])
    def _parse_list(self):
        # Each index line becomes a VideoRecord of [image_id, grid values...].
        self.img_list = []
        tmp = [x.strip().split(',') for x in open(self.file)]
        img_list = [VideoRecord(item) for item in tmp]
        self.img_list = img_list
    def _normalizeData(self, data):
        # Min-max normalisation into [0, 1].
        return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
    def __len__(self):
        return len(self.img_list)
    def __getitem__(self, index):
        """
        Load the gaze map for record *index*; returns (map tensor, image name).
        """
        record = self.img_list[index]
        img_name = record.img_id.split('.')[0]
        name = record.img_id.split('_')
        # '<video>_<frame>' -> '<video>_pure_hm_<frame>' gaze-map filename.
        gaze_file = name[0] + '_pure_hm_' + name[1]
        # Grayscale-load and crop away the top/bottom bands before resizing.
        gaze_gt = Image.open(os.path.join(self.gazemap_path, gaze_file)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
        gaze_gt = self.transform(gaze_gt)
        gaze_gt = self._normalizeData(gaze_gt)
        return gaze_gt, img_name
| 1,891 | 24.226667 | 130 | py |
NMTGMinor | NMTGMinor-master/preprocess_classify.py | #!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import h5py as h5
import numpy as np
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# Command-line options for the preprocessing/classification pipeline.
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="text",
                    help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
                    help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
                    help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
                    help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
                    help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
                    help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
                    help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
                    help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
                    help="Save data format: binary or raw. Binary should be used to load faster")
# Training / validation corpora ('|' separates multiple files or languages).
parser.add_argument('-train_src', required=True,
                    help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
                    help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
                    help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
                    help="Path to the validation target data")
parser.add_argument('-train_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-valid_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-save_data', required=True,
                    help="Output file for the prepared data")
# Vocabulary options.
parser.add_argument('-src_vocab_size', type=int, default=9999999,
                    help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
                    help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
                    help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
                    help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
                    help="Path to an existing target vocabulary")
# Sequence-length limits and truncation.
parser.add_argument('-src_seq_length', type=int, default=10000,
                    help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
                    help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
                    help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
                    help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS Token Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC BOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS Token Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT BOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD Token. Default is <blank>.')
parser.add_argument('-shuffle', type=int, default=1,
                    help="Shuffle data")
# Task flags and misc.
parser.add_argument('-asr', action='store_true',
                    help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
                    help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
                    help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
                    help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
                    help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
# NOTE(review): the help texts for -load_bpe_voc and -sort_by_target look
# copy-pasted from -lower ('lowercase data') — fix upstream.
parser.add_argument('-load_bpe_voc', action='store_true', help='lowercase data')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='lowercase data')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
                    help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
                    help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
                    help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
                    help="Print out information during preprocessing")
opt = parser.parse_args()
# Seed torch for reproducible shuffling downstream.
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
    """Build a vocabulary over *filenames* and prune it to at most *size* entries.

    'source' vocabularies are seeded with the four special tokens taken from
    the global `opt`; 'target' vocabularies start empty (label dictionary).
    Any other *name* aborts the process.
    """
    if name == "source":
        special_tokens = [opt.src_pad_token, opt.src_unk_token,
                          opt.src_bos_token, opt.src_eos_token]
        vocab = onmt.Dict(special_tokens, lower=opt.lower)
    elif name == "target":
        vocab = onmt.Dict(lower=opt.lower)
    else:
        print("Warning: check the name")
        exit(-1)
    for filename in filenames:
        print("Generating vocabulary from file %s ... " % filename)
        onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
    size_before_prune = vocab.size()
    vocab = vocab.prune(size)
    print('Created dictionary of size %d (pruned from %d)' %
          (vocab.size(), size_before_prune))
    return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
    """Load a vocabulary from *vocab_file* if given, else build one from *data_files*.

    NOTE(review): on the default (non load_bpe_voc) loading path an
    ``onmt.Dict()`` is created without special tokens and without the `lower`
    flag — confirm that is intended for pre-existing vocabularies.
    """
    vocab = None
    if vocab_file is not None:
        # If given, load existing word dictionary.
        print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
        if not opt.load_bpe_voc:
            vocab = onmt.Dict()
        else:
            if name == "target":
                # note: no need for special tokens for the target (labels)
                vocab = onmt.Dict(lower=opt.lower)
            elif name == "source":
                vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
                                   opt.src_bos_token, opt.src_eos_token],
                                  lower=opt.lower)
            else:
                print("Warning: name should be source or target")
                exit(-1)
        vocab.loadFile(vocab_file)
        print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
    if vocab is None:
        # No file supplied: build the vocabulary from the training data.
        print('Building ' + name + ' vocabulary...')
        gen_word_vocab = make_vocab(name, data_files, vocab_size, tokenizer, num_workers=num_workers, )
        vocab = gen_word_vocab
    print()
    return vocab
def save_vocabulary(name, vocab, file):
    # Persist *vocab* to *file*, logging the destination first.
    print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
    vocab.writeFile(file)
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
                          add_bos=True, data_type='int64', num_workers=1, verbose=False):
    """Binarize a parallel src/tgt corpus with the supplied dictionaries.

    Returns (src data, tgt data, src sizes, tgt sizes).  The target side is
    framed with the BOS token (optional) and the EOS token from the global
    `opt`.  NOTE(review): max_src_length/max_tgt_length only appear in the
    final report — no sentences are actually filtered here ("ignored" is
    always 0).
    """
    src, tgt = [], []
    src_sizes = []
    tgt_sizes = []
    print("[INFO] Binarizing file %s ..." % src_file)
    binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
                                            bos_word=None, eos_word=None,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose)
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    print("[INFO] Binarizing file %s ..." % tgt_file)
    binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                            bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose)
    src = binarized_src['data']
    src_sizes = binarized_src['sizes']
    tgt = binarized_tgt['data']
    tgt_sizes = binarized_tgt['sizes']
    # currently we don't ignore anything :D
    ignored = 0
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
                  max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
                  input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
                  asr_format="h5", output_format="raw"):
    """Binarize speech features (src) and optional transcripts (tgt).

    NOTE(review): `count`, `n_unk_words`, `input_type` and `reshape` are
    accepted but unused in this body, and no length filtering is performed
    ("ignored" stays 0 apart from the size-mismatch warning).
    """
    src, tgt = [], []
    src_sizes = []
    tgt_sizes = []
    count, ignored = 0, 0
    n_unk_words = 0
    print('[INFO] Processing %s ...' % src_file)
    binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
                                                  output_format=output_format, concat=concat,
                                                  stride=stride, fp16=fp16, prev_context=prev_context,
                                                  num_workers=num_workers)
    src = binarized_src['data']
    src_sizes = binarized_src['sizes']
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    # NOTE(review): tgt_bos_word is computed but not passed to the binarizer
    # below (bos_word=None) — confirm this is intentional.
    if tgt_file is not None:
        print("[INFO] Binarizing file %s ..." % tgt_file)
        # don't use bos_word and eos_word here
        binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                                bos_word=None, eos_word=None,
                                                data_type=data_type,
                                                num_workers=num_workers, verbose=verbose)
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
        ignored = 0
        if len(src_sizes) != len(tgt_sizes):
            print("Warning: data size mismatched.")
    else:
        # No transcript file: audio-only dataset.
        tgt = None
        tgt_sizes = None
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes
def main():
    """Preprocessing entry point.

    Builds vocabularies, binarizes training and validation data (either
    text-to-text MT or audio ASR depending on ``opt.asr``), and saves the
    result in the on-disk format selected by ``opt.format``
    (raw/bin .pt file, scp/wav path lists + memory-indexed targets, or
    fully memory-indexed mmap/mmem).
    """
    dicts = {}
    # maybe not necessary
    tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
    # We can load the dictionary from another project to ensure consistency
    if opt.load_dict:
        dicts = torch.load(opt.load_dict)
    # construct set of languages from the training languages
    src_langs = opt.train_src_lang.split("|")
    # tgt_langs = opt.train_tgt_lang.split("|")
    langs = src_langs
    langs = sorted(list(set(langs)))
    # Assign each language a stable integer id (sorted for determinism).
    if not opt.load_dict:
        dicts['langs'] = dict()
        for lang in langs:
            idx = len(dicts['langs'])
            dicts['langs'][lang] = idx
    print(dicts['langs'])
    start = time.time()
    src_train_files = opt.train_src.split("|")
    tgt_train_files = opt.train_tgt.split("|")
    # the target "dictionary" contains a list of labels
    if opt.asr:
        # ASR: the source side is audio, so only a target vocabulary is built.
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    else:
        dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
                                  opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Vocabulary generated after %s" % elapse)
    if opt.asr:
        # ---------------- ASR branch: binarize audio + text targets ----------------
        print('Preparing for acoustic classification model ...')
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")
        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        past_src_files = opt.past_train_src.split("|")
        n_input_files = len(src_input_files)
        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()
        # Optional "past" (previous-segment) audio context: one past file per input file.
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            train['past_src'] = list()
            train['past_src_sizes'] = list()
        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
                                                                     dicts['tgt'], tokenizer,
                                                                     max_src_length=opt.src_seq_length,
                                                                     max_tgt_length=opt.tgt_seq_length,
                                                                     input_type=opt.input_type,
                                                                     stride=opt.stride, concat=opt.concat,
                                                                     prev_context=opt.previous_context,
                                                                     fp16=opt.fp16,
                                                                     asr_format=opt.asr_format,
                                                                     output_format=opt.format,
                                                                     num_workers=opt.num_threads)
            n_samples = len(src_data)
            if n_input_files == 1:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            # processing the previous segment
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                # Past audio has no target file, so tgt outputs are discarded.
                past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
                                                                    input_type=opt.input_type,
                                                                    stride=opt.stride, concat=opt.concat,
                                                                    prev_context=opt.previous_context,
                                                                    fp16=opt.fp16,
                                                                    asr_format=opt.asr_format,
                                                                    output_format=opt.format,
                                                                    num_workers=opt.num_threads)
                train['past_src'] += past_src_data
                train['past_src_sizes'] += past_src_sizes
            train['src'] += src_data
            train['tgt'] += tgt_data
            train['src_sizes'] += src_sizes
            train['tgt_sizes'] += tgt_sizes
            train['src_lang'] += src_lang_data
            train['tgt_lang'] += tgt_lang_data
        # train = dict()
        # train['src'], train['tgt'] =
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        past_src_files = opt.past_valid_src.split("|")
        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        n_input_files = len(src_input_files)
        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()
        # NOTE(review): gates on opt.past_train_src (not opt.past_valid_src)
        # even though past_src_files now comes from past_valid_src — confirm intended.
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()
        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            # Validation uses relaxed length caps (at least 1024).
            src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
                                                                     dicts['tgt'], tokenizer,
                                                                     max_src_length=max(1024, opt.src_seq_length),
                                                                     max_tgt_length=max(1024, opt.tgt_seq_length),
                                                                     input_type=opt.input_type,
                                                                     stride=opt.stride, concat=opt.concat,
                                                                     prev_context=opt.previous_context,
                                                                     fp16=opt.fp16,
                                                                     asr_format=opt.asr_format,
                                                                     output_format=opt.format)
            n_samples = len(src_data)
            if n_input_files == 1:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            # validation past file
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
                                                                    input_type=opt.input_type,
                                                                    stride=opt.stride, concat=opt.concat,
                                                                    prev_context=opt.previous_context,
                                                                    fp16=opt.fp16,
                                                                    asr_format=opt.asr_format,
                                                                    output_format=opt.format,
                                                                    num_workers=opt.num_threads)
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes
            valid['src'] += src_data
            valid['tgt'] += tgt_data
            valid['src_sizes'] += src_sizes
            valid['tgt_sizes'] += tgt_sizes
            valid['src_lang'] += src_lang_data
            valid['tgt_lang'] += tgt_lang_data
    else:
        # ---------------- Text MT branch: binarize source + target text ----------------
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")
        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        n_input_files = len(src_input_files)
        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()
        start = time.time()
        print('Binarizing data to train translation models...')
        for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs):
            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
                                                                             dicts['src'], dicts['tgt'], tokenizer,
                                                                             max_src_length=opt.src_seq_length,
                                                                             max_tgt_length=opt.tgt_seq_length,
                                                                             add_bos=(not opt.no_bos),
                                                                             data_type=opt.data_type,
                                                                             num_workers=opt.num_threads,
                                                                             verbose=opt.verbose)
            n_samples = len(src_data)
            if n_input_files == 1:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            train['src'] += src_data
            train['tgt'] += tgt_data
            train['src_sizes'] += src_sizes
            train['tgt_sizes'] += tgt_sizes
            train['src_lang'] += src_lang_data
            train['tgt_lang'] += tgt_lang_data
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        n_input_files = len(src_input_files)
        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()
        for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs):
            # Validation uses relaxed length caps (at least 1024).
            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
                                                                             dicts['src'], dicts['tgt'], tokenizer,
                                                                             max_src_length=max(1024,
                                                                                                opt.src_seq_length),
                                                                             max_tgt_length=max(1024,
                                                                                                opt.tgt_seq_length),
                                                                             add_bos=(not opt.no_bos),
                                                                             data_type=opt.data_type,
                                                                             num_workers=opt.num_threads,
                                                                             verbose=opt.verbose)
            n_samples = len(src_data)
            if n_input_files == 1:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            valid['src'] += src_data
            valid['tgt'] += tgt_data
            valid['src_sizes'] += src_sizes
            valid['tgt_sizes'] += tgt_sizes
            valid['src_lang'] += src_lang_data
            valid['tgt_lang'] += tgt_lang_data
    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Binarization finished after %s" % elapse)
    if opt.src_vocab is None and opt.asr == False and opt.lm == False:
        save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
    if opt.tgt_vocab is None:
        save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
    # SAVE DATA
    if opt.format in ['raw', 'bin']:
        # Everything pickled into a single .train.pt file.
        print('Saving data to \'' + opt.save_data + '.train.pt\'...')
        save_data = {'dicts': dicts,
                     'type': opt.src_type,
                     'train': train,
                     'valid': valid}
        torch.save(save_data, opt.save_data + '.train.pt')
        print("Done")
    elif opt.format in ['scp', 'scpmem', 'wav']:
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        assert opt.asr, "ASR data format is required for this memory indexed format"
        torch.save(dicts, opt.save_data + '.dict.pt')
        # binarize the training set first
        for set_ in ['tgt', 'src_lang', 'tgt_lang']:
            if train[set_] is None:
                continue
            if opt.data_type == 'int64':
                dtype = np.int64
            else:
                dtype = np.int32
            train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
            # add item from training data to the indexed data
            for tensor in train[set_]:
                train_data.add_item(tensor)
            train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
            del train_data
            if valid[set_] is None:
                continue
            valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
            # add item from training data to the indexed data
            for tensor in valid[set_]:
                valid_data.add_item(tensor)
            valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
            del valid_data
        # Size arrays are stored as plain .npy files for fast batching.
        for set_ in ['src_sizes', 'tgt_sizes']:
            if train[set_] is not None:
                np_array = np.asarray(train[set_])
                np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
            else:
                print("Training %s not found " % set_)
            if valid[set_] is not None:
                np_array = np.asarray(valid[set_])
                np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
            else:
                print("Validation %s not found " % set_)
        if 'past_src' in train and len(train['past_src']) > 0:
            set_ = 'past_src_sizes'
            if train[set_] is not None:
                np_array = np.asarray(train[set_])
                np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
            else:
                print("Training %s not found " % set_)
            if valid[set_] is not None:
                np_array = np.asarray(valid[set_])
                np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
            else:
                print("Validation %s not found " % set_)
        # Finally save the audio path
        save_data = {'train': train['src'],
                     'valid': valid['src']}
        # remember to take into account the past information
        if 'past_src' in train and len(train['past_src']) > 0:
            save_data['train_past'] = train['past_src']
            save_data['valid_past'] = valid['past_src']
        if opt.format in ['wav']:
            torch.save(save_data, opt.save_data + '.wav_path.pt')
        else:
            torch.save(save_data, opt.save_data + '.scp_path.pt')
        print("Done")
    elif opt.format in ['mmap', 'mmem', 'scp']:
        # NOTE(review): 'scp' here is unreachable — it is already consumed
        # by the ['scp', 'scpmem', 'wav'] branch above.
        print('Saving data to memory indexed data files')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        if opt.asr:
            print("ASR data format isn't compatible with memory indexed format")
            raise AssertionError
        # save dicts in this format
        torch.save(dicts, opt.save_data + '.dict.pt')
        # binarize the training set first
        for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang']:
            if train[set_] is None:
                continue
            if opt.data_type == 'int64':
                dtype = np.int64
            else:
                dtype = np.int32
            train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
            # add item from training data to the indexed data
            for tensor in train[set_]:
                train_data.add_item(tensor)
            train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
            del train_data
            if valid[set_] is None:
                continue
            valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
            # add item from training data to the indexed data
            for tensor in valid[set_]:
                valid_data.add_item(tensor)
            valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
            del valid_data
        for set_ in ['src_sizes', 'tgt_sizes']:
            if train[set_] is not None:
                np_array = np.asarray(train[set_])
                np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
            else:
                print("Training %s not found " % set_)
            if valid[set_] is not None:
                np_array = np.asarray(valid[set_])
                np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
            else:
                print("Validation %s not found " % set_)
    else:
        raise NotImplementedError
# Script entry point (e.g. ``python preprocess.py ...``).
if __name__ == "__main__":
    main()
def safe_readline(f):
    """Read one line from *f*, tolerating a start inside a multi-byte char.

    If the current file offset falls in the middle of a multi-byte encoded
    character, ``readline`` raises ``UnicodeDecodeError``; in that case we
    retreat one byte at a time until we land on a character boundary and
    retry.
    """
    offset = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # Step back one byte to search for where this character begins.
            offset -= 1
            f.seek(offset)
| 33,644 | 42.024297 | 118 | py |
NMTGMinor | NMTGMinor-master/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata for installing NMTGMinor via pip/setuptools.
setup(name='NMTGMinor',
      version='0.1',
      author='quanpn90',
      author_email='ngoc.pham@kit.edu',
      url='https://github.com/quanpn90/NMTGMinor',
      license='MIT',
      # Top-level command-line scripts installed onto the user's PATH.
      scripts=[
          'flask_online.py',
          'online.py',
          'preprocess.py',
          'train.py',
          'translate_distributed.py',
          'translate.py',
      ],
      packages=find_packages(),
      install_requires=['torch', 'torchaudio', 'soundfile'])
| 532 | 25.65 | 60 | py |
NMTGMinor | NMTGMinor-master/classify.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.predictor import Predictor
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-sub_model', required=False, default="",
help='Path to (secondary) model .pt file')
parser.add_argument('-pretrained_classifier', required=False, default="",
help='Path to external classifier model .pt file')
parser.add_argument('-streaming', action="store_true",
help="""Use streaming mode (for model with streaming)""")
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-sub_src', required=False, default="",
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-past_src', required=False, default="",
help='Past Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-no_repeat_ngram_size', type=int, default=0,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-no_buffering', action='store_true',
help='To remove buffering for transformer models (slower but more memory)')
parser.add_argument('-src_align_right', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-dynamic_quantile', type=int, default=0,
help='To use int8 in decoding (for linear and LSTM layers only).')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
parser.add_argument('-global_search', action='store_true',
help='Using the global beam search for streaming')
parser.add_argument('-dynamic_max_len', action='store_true',
help='Using the fast decoder')
parser.add_argument('-dynamic_max_len_scale', type=float, default=5.0,
help='Using the fast decoder')
parser.add_argument('-dynamic_min_len_scale', type=float, default=0.0,
help='Using the fast decoder')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence will potential enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def report_score(name, score_total, words_total):
    """Print the average per-word score and the perplexity for *name*.

    The tiny epsilon keeps the division safe when no words were produced.
    """
    avg = score_total / (words_total + 1e-9)
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (name, avg, name, math.exp(-avg)))
def addone(f):
    """Yield every item of *f*, followed by a single None sentinel
    marking the end of the stream."""
    yield from f
    yield None
def get_sentence_from_tokens(tokens, input_type):
    """Reassemble *tokens* into a sentence string.

    Word-level tokens are joined with spaces; character-level tokens are
    concatenated directly.  Any other *input_type* is rejected.
    """
    if input_type == 'word':
        return " ".join(tokens)
    if input_type == 'char':
        return "".join(tokens)
    raise NotImplementedError
def main():
    """classify.py entry point.

    Reads audio inputs (scp paths), groups them into batches whose padded
    size stays under ``-batch_size`` frames (see ``_is_oversized``), runs
    the Predictor on each batch and writes per-class scores via
    ``get_result``.  Text input is not implemented.
    """
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Always pick n_best
    opt.n_best = opt.beam_size
    if opt.output == "stdout":
        outF = sys.stdout
    else:
        outF = open(opt.output, 'w')
    pred_score_total, pred_words_total, gold_score_total, gold_words_total = 0, 0, 0, 0
    src_batches = []
    src_batch, tgt_batch = [], []
    count = 0
    tgtF = open(opt.tgt) if opt.tgt else None
    in_file = None
    if opt.src == "stdin":
        in_file = sys.stdin
        opt.batch_size = 1
    elif opt.encoder_type == "audio" and opt.asr_format == "h5":
        in_file = h5.File(opt.src, 'r')
    elif opt.encoder_type == "audio" and opt.asr_format == "scp":
        # import kaldiio
        # from kaldiio import ReadHelper
        from onmt.data.audio_utils import ArkLoader
        audio_data = open(opt.src)
        scp_reader = ArkLoader()
    else:
        in_file = open(opt.src)
    # if opt.streaming:
    #     if opt.batch_size != 1:
    #         opt.batch_size = 1
    #         print("Warning: Streaming only works with batch size 1")
    #
    #     if opt.global_search:
    #         print(" Using global search algorithm ")
    #         from onmt.inference.global_translator import GlobalStreamTranslator
    #         translator = GlobalStreamTranslator(opt)
    #     else:
    #         translator = StreamTranslator(opt)
    # else:
    #     if opt.fast_translate:
    #         translator = FastTranslator(opt)
    #
    #     # TODO: load sub model
    #     else:
    #         translator = onmt.Translator(opt)
    predictor = Predictor(opt)
    # Audio processing for the source batch
    if opt.encoder_type == "audio":
        """
        For Audio we will have to group samples by the total number of frames in the source
        """
        past_audio_data = open(opt.past_src) if opt.past_src else None
        past_src_batches = list()
        s_prev_context = []
        t_prev_context = []
        i = 0
        # One concat setting per ensemble member, e.g. "4|1|4".
        concats = opt.concat.split("|")
        n_models = len(opt.model.split("|"))
        if len(concats) == 1:
            concats = concats * n_models
        assert len(concats) == n_models, "The number of models must match the number of concat configs"
        for j, _ in enumerate(concats):
            src_batches.append(list())  # We assign different inputs for each model in the ensemble
            if past_audio_data:
                past_src_batches.append(list())
        sub_src = open(opt.sub_src) if opt.sub_src else None
        sub_src_batch = list()
        while True:
            try:
                # NOTE(review): this loop reads `audio_data`, which is only
                # defined when -asr_format is "scp"; h5 input would raise
                # NameError here — confirm intended.
                scp_path = next(audio_data).strip().split()[1]
                line = scp_reader.load_mat(scp_path)
            except StopIteration:
                break
            if opt.stride != 1:
                line = line[0::opt.stride]
                # NOTE(review): `past_line` is read before any assignment on
                # the first iteration (and is never loaded from
                # past_audio_data) — this path raises NameError; verify.
                if past_line: past_line = past_line[0::opt.stride]
            line = torch.from_numpy(line)
            past_line = torch.from_numpy(past_line) if past_audio_data else None
            original_line = line
            src_length = line.size(0)
            """
            Handling different concatenation size for different models, to make ensembling possible
            """
            if _is_oversized(src_batches[0], src_length, opt.batch_size):
                # If adding a new sentence will make the batch oversized
                # Then do translation now, and then free the list
                print("Batch sizes :", len(src_batches[0]), len(tgt_batch))
                pred_score = predictor.predict(src_batches)
                count = get_result(pred_score, predictor, count, outF)
                # count, pred_score, pred_words, gold_score, goldWords = \
                #     translate_batch(opt, tgtF, count, outF, translator,
                #                     src_batches[0], tgt_batch, pred_batch,
                #                     pred_score,
                #                     pred_length, gold_score,
                #                     num_gold_words,
                #                     all_gold_scores, opt.input_type)
                # pred_score_total += pred_score
                # pred_words_total += pred_words
                # gold_score_total += gold_score
                # gold_words_total += goldWords
                src_batch, tgt_batch, sub_src_batch = [], [], []
                for j, _ in enumerate(src_batches):
                    src_batches[j] = []
                    if past_audio_data: past_src_batches[j] = []
            # handling different concatenation settings (for example 4|1|4)
            for j, concat_ in enumerate(concats):
                concat = int(concat_)
                line = original_line
                # TODO: move this block to function
                if concat != 1:
                    # Zero-pad so the frame count divides evenly, then fold
                    # `concat` consecutive frames into one wider frame.
                    add = (concat - line.size()[0] % concat) % concat
                    z = torch.FloatTensor(add, line.size()[1]).zero_()
                    line = torch.cat((line, z), 0)
                    line = line.reshape((line.size()[0] // concat, line.size()[1] * concat))
                    if past_audio_data:
                        add = (concat - past_line.size()[0] % concat) % concat
                        z = torch.FloatTensor(add, past_line.size()[1]).zero_()
                        past_line = torch.cat((past_line, z), 0)
                        past_line = past_line.reshape((past_line.size()[0] // concat, past_line.size()[1] * concat))
                src_batches[j].append(line)
                if past_audio_data: past_src_batches[j].append(past_line)
            # read the "sub" input which is text based
            # this is done for ensemble between a speech model and a text based model
            if opt.sub_src:
                sline = sub_src.readline().strip()
                if opt.input_type == 'word':
                    src_tokens = sline.split()
                elif opt.input_type == 'char':
                    src_tokens = list(sline.strip())
                sub_src_batch += [src_tokens]
        # catch the last batch
        if len(src_batches[0]) != 0:
            print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
            pred_score = predictor.predict(src_batches)
            count = get_result(pred_score, predictor, count, outF)
            src_batch, tgt_batch = [], []
            for j, _ in enumerate(src_batches):
                src_batches[j] = []
                if past_audio_data: past_src_batches[j] = []
    # Text processing for MT
    else:
        raise NotImplementedError
    if tgtF:
        tgtF.close()
def get_result(pred_score, predictor, count, outF):
    """Format the per-class probabilities of each sample, print each line
    and append it to *outF*; returns the updated running sample counter.

    *pred_score* is a per-sample list of per-class scores; class index i is
    mapped to its label via the predictor's target dictionary.
    """
    idx2label = predictor.tgt_dict.idxToLabel
    for scores in pred_score:
        count += 1
        pieces = ["PRED %d " % count]
        for i, score in enumerate(scores):
            pieces.append("%s: %.2f ; " % (idx2label[i], score * 100))
        out_string = "".join(pieces)
        print(out_string)
        outF.write(out_string + '\n')
        outF.flush()
    return count
#print("PRED SCORE", pred_score[b])
#
# pred_score_total = sum(score[0].item() for score in pred_score)
# pred_words_total = sum(len(x[0]) for x in pred_batch)
# gold_score_total = 0
# gold_words_total = 0
# if tgtF is not None:
# gold_score_total = sum(gold_score).item()
# gold_words_total = num_gold_words
#
# for b in range(len(pred_batch)):
#
# count += 1
#
# if not opt.print_nbest:
# outF.write(get_sentence_from_tokens(pred_batch[b][0], input_type) + '\n')
# outF.flush()
# else:
# for n in range(opt.n_best):
# idx = n
# output_sent = get_sentence_from_tokens(pred_batch[b][idx], input_type)
# out_str = "%s ||| %.4f" % (output_sent, pred_score[b][idx])
# outF.write(out_str + '\n')
# outF.flush()
#
# if opt.verbose:
# if opt.encoder_type == "text":
# src_sent = " ".join(src_batch[b])
# print('SRC %d: %s' % (count, src_sent))
# print('PRED %d: %s' % (count, get_sentence_from_tokens(pred_batch[b][0], input_type)))
# print("PRED SCORE: %.4f" % pred_score[b][0])
#
# if tgtF is not None:
# tgt_sent = get_sentence_from_tokens(tgt_batch[b], input_type)
# if translator.tgt_dict.lower:
# tgt_sent = tgt_sent.lower()
# print('GOLD %d: %s ' % (count, tgt_sent))
# print("GOLD SCORE: %.4f" % gold_score[b])
# print()
# if opt.print_nbest:
# print('\n BEST HYP:')
# for n in range(opt.n_best):
# idx = n
# out_str = "%s ||| %.4f" % (" ".join(pred_batch[b][idx]), pred_score[b][idx])
# print(out_str)
# print('')
#
# return count, pred_score_total, pred_words_total, gold_score_total, gold_words_total
# Script entry point (e.g. ``python classify.py ...``).
if __name__ == "__main__":
    main()
| 17,820 | 39.687215 | 119 | py |
NMTGMinor | NMTGMinor-master/eval_autoencoder.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from ae.Evaluator import Evaluator
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-autoencoder', required=True,
help='Path to model .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-src_img_dir', default="",
help='Source image directory')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=2048,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-representation', type=str, default="EncoderHiddenState",
help="Representation for Autoencoder")
parser.add_argument('-auto_encoder_hidden_size', type=int, default=100,
help="Hidden size of autoencoder")
parser.add_argument('-auto_encoder_drop_out', type=float, default=0,
help="Use drop_out in autoencoder")
def reportScore(name, scoreTotal, wordsTotal):
    """Print the average per-word score and perplexity for *name*.

    Fix: guard the division with a tiny epsilon so an empty output
    (wordsTotal == 0) no longer raises ZeroDivisionError — consistent with
    classify.py's report_score, which already uses the same guard.
    """
    denom = wordsTotal + 1e-9
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
        name, scoreTotal / denom,
        name, math.exp(-scoreTotal / denom)))
def addone(f):
    """Iterate over *f* and emit one trailing None as an end-of-stream
    marker, so consumers can detect exhaustion without StopIteration."""
    for item in f:
        yield item
    yield None
def lenPenalty(s, l, alpha):
    """Length-normalize score *s* by dividing by l**alpha (GNMT-style
    length penalty)."""
    # math.pow (not **) keeps the original float semantics for all inputs.
    return s / math.pow(l, alpha)
def getSentenceFromTokens(tokens, input_type):
    """Turn a token list back into a sentence: space-joined for word-level
    input, directly concatenated for character-level input."""
    if input_type == 'word':
        return " ".join(tokens)
    if input_type == 'char':
        return "".join(tokens)
    raise NotImplementedError
def _write_representation(opt, srcBatch, tgtBatch, r, outF):
    """Write the evaluator output `r` for one batch to `outF`, dispatching on
    opt.representation.  Decoder-side representations are written against the
    target tokens (with an explicit "EOS" appended to each sentence); the
    joint encoder/decoder representation is written as an alignment matrix.
    Factored out of main() where this dispatch was duplicated four times."""
    if opt.representation == "EncoderHiddenState":
        outputResults(srcBatch, r, outF)
    elif opt.representation in ("DecoderHiddenState", "Probabilities"):
        for i in range(len(tgtBatch)):
            tgtBatch[i].append("EOS")
        outputResults(tgtBatch, r, outF)
    elif opt.representation == "EncoderDecoderHiddenState":
        for i in range(len(tgtBatch)):
            tgtBatch[i].append("EOS")
        outputAlignment(srcBatch, tgtBatch, r, outF)


def main():
    """Batch the input (audio features from HDF5, or text), run the evaluator
    on each batch and write the requested representation to the output file."""
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Output stream: stdout or a file path.
    if opt.output == "stdout":
        outF = sys.stdout
    else:
        outF = open(opt.output, 'w')
    srcBatch, tgtBatch = [], []
    count = 0
    tgtF = open(opt.tgt) if opt.tgt else None
    evaluator = Evaluator(opt)
    # Input stream: stdin forces batch_size 1; audio features come from HDF5.
    if (opt.src == "stdin"):
        inFile = sys.stdin
        opt.batch_size = 1
    elif (opt.encoder_type == "audio"):
        inFile = h5.File(opt.src, 'r')
    else:
        inFile = open(opt.src)
    if (opt.encoder_type == "audio"):
        for i in range(len(inFile)):
            # Load utterance i, optionally subsampling frames by opt.stride.
            if (opt.stride == 1):
                line = torch.from_numpy(np.array(inFile[str(i)]))
            else:
                line = torch.from_numpy(np.array(inFile[str(i)])[0::opt.stride])
            if (opt.concat != 1):
                # Zero-pad so the frame count is divisible by opt.concat, then
                # merge every `concat` consecutive frames into one wider frame.
                add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
                z = torch.FloatTensor(add, line.size()[1]).zero_()
                line = torch.cat((line, z), 0)
                # BUG FIX: use floor division here. True division yields a
                # float, which torch rejects as a reshape size under Python 3
                # (the sibling translate.py already uses `//`).
                line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))
            if line is not None:
                srcBatch += [line]
                if tgtF:
                    if opt.input_type == 'word':
                        tgtTokens = tgtF.readline().split() if tgtF else None
                    elif opt.input_type == 'char':
                        tgtTokens = list(tgtF.readline().strip()) if tgtF else None
                    else:
                        raise NotImplementedError("Input type unknown")
                    tgtBatch += [tgtTokens]
                # Keep accumulating until a full batch is collected.
                if len(srcBatch) < opt.batch_size:
                    continue
            else:
                # at the end of file, check last batch
                if len(srcBatch) == 0:
                    break
            r = evaluator.evalASR(srcBatch, tgtBatch)
            _write_representation(opt, srcBatch, tgtBatch, r, outF)
            srcBatch, tgtBatch = [], []
        # Flush the final (possibly partial) batch left over after the loop.
        if len(srcBatch) != 0:
            r = evaluator.evalASR(srcBatch, tgtBatch)
            _write_representation(opt, srcBatch, tgtBatch, r, outF)
    else:
        for line in addone(inFile):
            if line is not None:
                if opt.input_type == 'word':
                    srcTokens = line.split()
                elif opt.input_type == 'char':
                    srcTokens = list(line.strip())
                else:
                    raise NotImplementedError("Input type unknown")
                srcBatch += [srcTokens]
                if tgtF:
                    if opt.input_type == 'word':
                        tgtTokens = tgtF.readline().split() if tgtF else None
                    elif opt.input_type == 'char':
                        tgtTokens = list(tgtF.readline().strip()) if tgtF else None
                    else:
                        raise NotImplementedError("Input type unknown")
                    tgtBatch += [tgtTokens]
                if len(srcBatch) < opt.batch_size:
                    continue
            else:
                # addone() yields a trailing None: flush the final batch.
                if len(srcBatch) == 0:
                    break
            r = evaluator.eval(srcBatch, tgtBatch)
            _write_representation(opt, srcBatch, tgtBatch, r, outF)
            srcBatch, tgtBatch = [], []
    if tgtF:
        tgtF.close()
def outputResults(srcBatch, r, outF):
    """Distribute the flat result tensor `r` round-robin over the batch's
    sentences (one value per token, token position j across all sentences
    before moving to j+1) and write one space-separated line per sentence."""
    num_sents = len(srcBatch)
    rows = [[] for _ in range(num_sents)]
    x = 0
    j = 0
    while x < r.size(0):
        for i in range(num_sents):
            if j < len(srcBatch[i]):
                rows[i].append(str(r[x].item()))
                x += 1
        j += 1
    for row in rows:
        for value in row:
            outF.write(value)
            outF.write(' ')
        outF.write("\n")
    outF.flush()
def outputAlignment(srcBatch, tgtBatch, r, outF):
    """Write, for each sentence pair b, every source/target position pair as
    "i-j#value " entries (value taken from r[i, j, b]), one line per pair."""
    for b, src_sent in enumerate(srcBatch):
        tgt_len = len(tgtBatch[b])
        for i in range(len(src_sent)):
            for j in range(tgt_len):
                outF.write("%i-%i#%f " % (i, j, r[i, j, b]))
        outF.write("\n")
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| 9,790 | 35.808271 | 102 | py |
NMTGMinor | NMTGMinor-master/translate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-sub_model', required=False, default="",
help='Path to (secondary) model .pt file')
parser.add_argument('-pretrained_classifier', required=False, default="",
help='Path to external classifier model .pt file')
parser.add_argument('-streaming', action="store_true",
help="""Use streaming mode (for model with streaming)""")
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-vocab_id_list', default="",
help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-sub_src', required=False, default="",
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-past_src', required=False, default="",
help='Past Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-src_atb', default='nothingness',
help='Source language')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-tgt_atb', default='nothingness',
help='Target language')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="scp", required=False,
help="Format of asr data (only scp supported for now)")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-prefix_string', default='',
help="""Prefix string for all of the translation""")
parser.add_argument('-anti_prefix_string', default='',
help="""Prefix string for all of the translation""")
parser.add_argument('-prefix_tgt', default='',
help="""Prefix file that contains prefix string for each of the translation
(must use either this or prefix_string, not both""")
parser.add_argument('-force_bos', action="store_true",
help="""Force the first token in the prefix to be bos""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-min_sent_length', type=int, default=0,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-no_repeat_ngram_size', type=int, default=0,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-no_buffering', action='store_true',
help='To remove buffering for transformer models (slower but more memory)')
parser.add_argument('-src_align_right', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-dynamic_quantile', type=int, default=0,
help='To use int8 in decoding (for linear and LSTM layers only).')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
parser.add_argument('-global_search', action='store_true',
help='Using the global beam search for streaming')
parser.add_argument('-dynamic_max_len', action='store_true',
help='Using the fast decoder')
parser.add_argument('-dynamic_max_len_scale', type=float, default=5.0,
help='Using the fast decoder')
parser.add_argument('-dynamic_min_len_scale', type=float, default=0.0,
help='Using the fast decoder')
parser.add_argument('-external_tokenizer', default="",
help="External tokenizer from Huggingface. Currently supports barts.")
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence will potential enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def report_score(name, score_total, words_total):
    """Print the average model score and perplexity for *name*.

    The epsilon in the denominator guards against an empty word count; if
    math.exp overflows (very negative average score), the score is clamped
    to -100 and reported with the corresponding perplexity.
    """
    denom = words_total + 1e-9
    try:
        print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
            name, score_total / denom,
            name, math.exp(-score_total / denom)))
    except OverflowError:
        print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
            name, -100 / denom,
            name, math.exp(-100 / denom)))
def addone(f):
    """Re-yield each item of *f*, followed by a single None end marker."""
    yield from f
    yield None
def len_penalty(s, l, alpha):
    """Return score *s* divided by the length penalty term l ** alpha."""
    return s / math.pow(l, alpha)
def get_sentence_from_tokens(tokens, ids, input_type, external_tokenizer=None):
    """Render a decoded hypothesis as one sentence string.

    When an external (Huggingface-style) tokenizer is supplied, decode the
    token *ids* with it and strip surrounding whitespace; otherwise join the
    *tokens* themselves ('word' -> space-separated, 'char' -> concatenated).
    """
    if external_tokenizer is not None:
        return external_tokenizer.decode(ids, True, True).strip()
    if input_type == 'word':
        return " ".join(tokens)
    if input_type == 'char':
        return "".join(tokens)
    raise NotImplementedError
def main():
    """Drive batched decoding: read the source input (plain text, kaldi-scp
    audio features, or raw wav segments), group it into batches, run the
    translator on each batch via translate_batch(), and accumulate the
    score/word totals for the final PRED/GOLD report."""
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Always pick n_best
    opt.n_best = opt.beam_size
    # --- output stream selection ---
    if opt.output == "stdout":
        outF = sys.stdout
    else:
        outF = open(opt.output, 'w')
    pred_score_total, pred_words_total, gold_score_total, gold_words_total = 0, 0, 0, 0
    src_batches = []
    src_batch, tgt_batch, past_src_batch = [], [], []
    count = 0
    tgtF = open(opt.tgt) if opt.tgt else None
    # --- input stream selection: stdin forces batch size 1; scp/wav inputs
    # are lists of feature/audio file references read lazily below ---
    in_file = None
    if opt.src == "stdin":
        in_file = sys.stdin
        opt.batch_size = 1
    elif opt.encoder_type == "audio" and opt.asr_format == "scp":
        # import kaldiio
        # from kaldiio import ReadHelper
        from onmt.data.audio_utils import ArkLoader
        audio_data = open(opt.src)
        scp_reader = ArkLoader()
    elif opt.asr_format == 'wav':
        audio_data = open(opt.src)
    else:
        in_file = open(opt.src)
    sub_src = None
    # --- translator construction (streaming vs. fast batch decoding) ---
    if opt.streaming:
        if opt.batch_size != 1:
            opt.batch_size = 1
            print("Warning: Streaming only works with batch size 1")
        if opt.global_search:
            print(" Using global search algorithm ")
            from onmt.inference.global_translator import GlobalStreamTranslator
            translator = GlobalStreamTranslator(opt)
        else:
            translator = StreamTranslator(opt)
    else:
        translator = FastTranslator(opt)
    # Reuse the translator's target-side external tokenizer when available.
    if hasattr(translator, 'tgt_external_tokenizer'):
        external_tokenizer = translator.tgt_external_tokenizer
    else:
        external_tokenizer = None
    # if "mbart-large-50" in opt.external_tokenizer.lower():
    #     print("[INFO] Using the external MBART50 tokenizer...")
    #
    #     from transformers import MBart50TokenizerFast
    #     external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang=opt.src_lang)
    #
    # elif "bart" in opt.external_tokenizer.lower():
    #     print("[INFO] Using the external BART tokenizer...")
    #
    #     from transformers import BartTokenizer
    #     external_tokenizer = BartTokenizer.from_pretrained(opt.external_tokenizer)
    #
    # elif "m2m100" in opt.external_tokenizer.lower():
    #     print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
    #     from transformers import M2M100Tokenizer
    #     external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.src_lang)
    #
    # elif opt.external_tokenizer is None or len(opt.external_tokenizer) == 0:
    #     external_tokenizer = None
    # else:
    #     raise NotImplementedError
    # --- optional decoding prefixes: one global string, or one per line ---
    prefix = None
    prefix_reader = None
    if len(opt.prefix_string) > 0:
        assert len(opt.prefix_tgt) <= 0
        prefix = [opt.prefix_string]
    elif len(opt.prefix_tgt) > 0:
        prefix = list()
        prefix_reader = open(opt.prefix_tgt)
    anti_prefix = None
    if len(opt.anti_prefix_string) > 0:
        anti_prefix = opt.anti_prefix_string
    # Audio processing for the source batch
    if opt.encoder_type == "audio" and opt.asr_format in ['scp', 'kaldi']:
        """
        For Audio we will have to group samples by the total number of frames in the source
        """
        past_audio_data = open(opt.past_src) if opt.past_src else None
        past_src_batches = list()
        s_prev_context = []
        t_prev_context = []
        i = 0
        # One concat setting per ensemble member; a single value is broadcast.
        concats = opt.concat.split("|")
        n_models = len(opt.model.split("|"))
        if len(concats) == 1:
            concats = concats * n_models
        assert len(concats) == n_models, "The number of models must match the number of concat configs"
        for j, _ in enumerate(concats):
            src_batches.append(list())  # We assign different inputs for each model in the ensemble
            if past_audio_data:
                past_src_batches.append(list())
        sub_src = open(opt.sub_src) if opt.sub_src else None
        sub_src_batch = list()
        while True:
            # Read the next feature matrix (and its past-context matrix).
            try:
                scp_path = next(audio_data).strip().split()[1]
                line = scp_reader.load_mat(scp_path)
                if past_audio_data:
                    scp_path = next(past_audio_data).strip().split()[1]
                    past_line = scp_reader.load_mat(scp_path)
                else:
                    past_line = None
            except StopIteration:
                break
            if opt.stride != 1:
                line = line[0::opt.stride]
                if past_line: past_line = past_line[0::opt.stride]
            line = torch.from_numpy(line)
            past_line = torch.from_numpy(past_line) if past_audio_data else None
            original_line = line
            src_length = line.size(0)
            """
            Handling different concatenation size for different models, to make ensembling possible
            """
            # Batch is full (in padded frames): translate and reset buffers.
            if _is_oversized(src_batches[0], src_length, opt.batch_size):
                # If adding a new sentence will make the batch oversized
                # Then do translation now, and then free the list
                if past_audio_data:
                    print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch),
                          len(past_src_batches[0]))
                else:
                    print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
                pred_batch, pred_ids, \
                    pred_score, pred_length, gold_score, num_gold_words, all_gold_scores = translator.translate(
                        src_batches, tgt_batch,
                        sub_src_data=sub_src_batch, past_src_data=past_src_batches,
                        type='asr',
                        prefix=prefix, anti_prefix=anti_prefix)
                print("Result:", len(pred_batch))
                count, pred_score, pred_words, gold_score, goldWords = \
                    translate_batch(opt, tgtF, count, outF, translator,
                                    src_batches[0], tgt_batch, pred_batch, pred_ids,
                                    pred_score,
                                    pred_length, gold_score,
                                    num_gold_words,
                                    all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
                pred_score_total += pred_score
                pred_words_total += pred_words
                gold_score_total += gold_score
                gold_words_total += goldWords
                src_batch, tgt_batch, sub_src_batch = [], [], []
                for j, _ in enumerate(src_batches):
                    src_batches[j] = []
                    if past_audio_data: past_src_batches[j] = []
                # only refresh when prefix reader is not None
                if prefix is not None and prefix_reader is not None:
                    prefix = []
            # handling different concatenation settings (for example 4|1|4)
            for j, concat_ in enumerate(concats):
                concat = int(concat_)
                line = original_line
                # TODO: move this block to function
                if concat != 1:
                    add = (concat - line.size()[0] % concat) % concat
                    z = torch.FloatTensor(add, line.size()[1]).zero_()
                    line = torch.cat((line, z), 0)
                    line = line.reshape((line.size()[0] // concat, line.size()[1] * concat))
                    if past_audio_data:
                        add = (concat - past_line.size()[0] % concat) % concat
                        z = torch.FloatTensor(add, past_line.size()[1]).zero_()
                        past_line = torch.cat((past_line, z), 0)
                        past_line = past_line.reshape((past_line.size()[0] // concat, past_line.size()[1] * concat))
                src_batches[j].append(line)
                if past_audio_data: past_src_batches[j].append(past_line)
            # Read the matching gold target line, optionally prepending
            # previous-sentence context separated by " # ".
            if tgtF:
                # ~ tgt_tokens = tgtF.readline().split() if tgtF else None
                tline = tgtF.readline().strip()
                if opt.previous_context > 0:
                    t_prev_context.append(tline)
                    for i in range(1, opt.previous_context + 1):
                        if i < len(s_prev_context):
                            tline = t_prev_context[-i - 1] + " # " + tline
                    if len(t_prev_context) > opt.previous_context:
                        t_prev_context = t_prev_context[-1 * opt.previous_context:]
                if opt.input_type == 'word':
                    tgt_tokens = tline.split() if tgtF else None
                elif opt.input_type == 'char':
                    tgt_tokens = list(tline.strip()) if tgtF else None
                else:
                    raise NotImplementedError("Input type unknown")
                tgt_batch += [tgt_tokens]
            # read the "sub" input which is text based
            # this is done for ensemble between a speech model and a text based model
            if opt.sub_src:
                sline = sub_src.readline().strip()
                if opt.input_type == 'word':
                    src_tokens = sline.split()
                elif opt.input_type == 'char':
                    src_tokens = list(sline.strip())
                sub_src_batch += [src_tokens]
            if prefix is not None and prefix_reader is not None:
                prefix.append(prefix_reader.readline().strip())
        # catch the last batch
        if len(src_batches[0]) != 0:
            print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
            pred_batch, pred_ids, pred_score, pred_length, \
                gold_score, num_gold_words, all_gold_scores = translator.translate(
                src_batches,
                tgt_batch,
                past_src_data=past_src_batches,
                sub_src_data=sub_src_batch,
                type='asr', prefix=prefix, anti_prefix=anti_prefix)
            print("Result:", len(pred_batch))
            count, pred_score, pred_words, gold_score, goldWords \
                = translate_batch(opt, tgtF, count, outF, translator,
                                  src_batches[0], tgt_batch, pred_batch, pred_ids,
                                  pred_score,
                                  pred_length, gold_score,
                                  num_gold_words,
                                  all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
            pred_score_total += pred_score
            pred_words_total += pred_words
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch, sub_src_batch = [], [], []
            for j, _ in enumerate(src_batches):
                src_batches[j] = []
                if past_audio_data: past_src_batches[j] = []
            if prefix is not None and prefix_reader is not None:
                prefix = []
    # Text processing for MT
    elif opt.asr_format == 'wav':
        # Raw-wav input: each line is "<id> <path> [<start> <end>]" in seconds.
        from onmt.utils import safe_readaudio
        past_audio_data = open(opt.past_src) if opt.past_src else None
        past_src_batches = list()
        s_prev_context = []
        t_prev_context = []
        i = 0
        n_models = len(opt.model.split("|"))
        for j in range(n_models):
            src_batches.append(list())  # We assign different inputs for each model in the ensemble
            if past_audio_data:
                past_src_batches.append(list())
        sub_src = open(opt.sub_src) if opt.sub_src else None
        sub_src_batch = list()
        while True:
            try:
                line = next(audio_data).strip().split()
                if len(line) == 2:
                    wav_path = line[1]
                    start = 0
                    end = 0
                else:
                    wav_path, start, end = line[1], float(line[2]), float(line[3])
                line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
                if past_audio_data:
                    past_line = next(past_audio_data).strip().split()
                    if len(past_line) == 2:
                        wav_path = past_line[1]
                        start = 0
                        end = 0
                    else:
                        wav_path, start, end = past_line[1], float(past_line[2]), float(past_line[3])
                    past_line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
                else:
                    past_line = None
            except StopIteration:
                break
            original_line = line
            src_length = line.size(0)
            """
            Handling different concatenation size for different models, to make ensembling possible
            """
            # Batch is full (in padded samples): translate and reset buffers.
            if _is_oversized(src_batches[0], src_length, opt.batch_size):
                # If adding a new sentence will make the batch oversized
                # Then do translation now, and then free the list
                if past_audio_data:
                    print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch),
                          len(past_src_batches[0]))
                else:
                    print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
                pred_batch, pred_ids, pred_score, pred_length, \
                    gold_score, num_gold_words, all_gold_scores = translator.translate(
                    src_batches, tgt_batch, sub_src_data=sub_src_batch, past_src_data=past_src_batches, type='asr',
                    prefix=prefix, anti_prefix=anti_prefix)
                print("Result:", len(pred_batch))
                count, pred_score, pred_words, gold_score, goldWords = \
                    translate_batch(opt, tgtF, count, outF, translator,
                                    src_batches[0], tgt_batch, pred_batch, pred_ids,
                                    pred_score,
                                    pred_length, gold_score,
                                    num_gold_words,
                                    all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
                pred_score_total += pred_score
                pred_words_total += pred_words
                gold_score_total += gold_score
                gold_words_total += goldWords
                src_batch, tgt_batch, sub_src_batch = [], [], []
                for j, _ in enumerate(src_batches):
                    src_batches[j] = []
                    if past_audio_data: past_src_batches[j] = []
                if prefix is not None and prefix_reader is not None:
                    prefix = []
            # handling different concatenation settings (for example 4|1|4)
            for j in range(n_models):
                src_batches[j].append(line)
                if past_audio_data: past_src_batches[j].append(past_line)
            if tgtF:
                # ~ tgt_tokens = tgtF.readline().split() if tgtF else None
                tline = tgtF.readline().strip()
                if opt.previous_context > 0:
                    t_prev_context.append(tline)
                    for i in range(1, opt.previous_context + 1):
                        if i < len(s_prev_context):
                            tline = t_prev_context[-i - 1] + " # " + tline
                    if len(t_prev_context) > opt.previous_context:
                        t_prev_context = t_prev_context[-1 * opt.previous_context:]
                if opt.input_type == 'word':
                    tgt_tokens = tline.split() if tgtF else None
                elif opt.input_type == 'char':
                    tgt_tokens = list(tline.strip()) if tgtF else None
                else:
                    raise NotImplementedError("Input type unknown")
                tgt_batch += [tgt_tokens]
            # read the "sub" input which is text based
            # this is done for ensemble between a speech model and a text based model
            if opt.sub_src:
                sline = sub_src.readline().strip()
                if opt.input_type == 'word':
                    src_tokens = sline.split()
                elif opt.input_type == 'char':
                    src_tokens = list(sline.strip())
                sub_src_batch += [src_tokens]
            if prefix is not None and prefix_reader is not None:
                prefix.append(prefix_reader.readline().strip())
        # catch the last batch
        if len(src_batches[0]) != 0:
            print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
            pred_batch, pred_ids, pred_score, pred_length, \
                gold_score, num_gold_words, all_gold_scores = translator.translate(
                src_batches,
                tgt_batch,
                past_src_data=past_src_batches,
                sub_src_data=sub_src_batch, type='asr', prefix=prefix, anti_prefix=anti_prefix)
            print("Result:", len(pred_batch))
            count, pred_score, pred_words, gold_score, goldWords \
                = translate_batch(opt, tgtF, count, outF, translator,
                                  src_batches[0], tgt_batch, pred_batch, pred_ids,
                                  pred_score,
                                  pred_length, gold_score,
                                  num_gold_words,
                                  all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
            pred_score_total += pred_score
            pred_words_total += pred_words
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch = [], []
            for j, _ in enumerate(src_batches):
                src_batches[j] = []
                if past_audio_data: past_src_batches[j] = []
            if prefix is not None and prefix_reader is not None:
                prefix = []
    else:
        # --- plain-text input: one sentence per line, batched by count ---
        past_text_data = open(opt.past_src) if opt.past_src else None
        for line in addone(in_file):
            if line is not None:
                if opt.input_type == 'word':
                    src_tokens = line.split()
                elif opt.input_type == 'char':
                    src_tokens = list(line.strip())
                else:
                    raise NotImplementedError("Input type unknown")
                # An empty line marks a document break in streaming mode.
                if line.strip() == "":
                    if opt.streaming:
                        print("Found a document break")
                        translator.reset_stream()
                    continue
                src_batch += [src_tokens]
                if tgtF:
                    # ~ tgt_tokens = tgtF.readline().split() if tgtF else None
                    if opt.input_type == 'word':
                        tgt_tokens = tgtF.readline().split() if tgtF else None
                    elif opt.input_type == 'char':
                        tgt_tokens = list(tgtF.readline().strip()) if tgtF else None
                    else:
                        raise NotImplementedError("Input type unknown")
                    tgt_batch += [tgt_tokens]
                if past_text_data:
                    if opt.input_type == 'word':
                        past_src_tokens = past_text_data.readline().split()
                    elif opt.input_type == 'char':
                        past_src_tokens = list(past_text_data.readline().strip())
                    else:
                        raise NotImplementedError("Input type unknown")
                    past_src_batch += [past_src_tokens]
                if prefix is not None and prefix_reader is not None:
                    prefix.append(prefix_reader.readline().strip())
                if len(src_batch) < opt.batch_size:
                    continue
            else:
                # at the end of file, check last batch
                if len(src_batch) == 0:
                    break
            # actually done beam search from the model
            pred_batch, pred_ids, pred_score, pred_length, \
                gold_score, num_gold_words, all_gold_scores = translator.translate(
                src_batch,
                tgt_batch,
                past_src_batch,
                prefix=prefix, anti_prefix=anti_prefix)
            # convert output tensor to words
            count, pred_score, pred_words, gold_score, goldWords = translate_batch(opt, tgtF, count, outF, translator,
                                                                                   src_batch, tgt_batch,
                                                                                   pred_batch, pred_ids,
                                                                                   pred_score, pred_length,
                                                                                   gold_score, num_gold_words,
                                                                                   all_gold_scores, opt.input_type,
                                                                                   external_tokenizer=external_tokenizer)
            pred_score_total += pred_score
            pred_words_total += pred_words
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch, past_src_batch = [], [], []
            if prefix is not None and prefix_reader is not None:
                prefix = []
    # --- final report and cleanup ---
    if opt.verbose:
        report_score('PRED', pred_score_total, pred_words_total)
        if tgtF: report_score('GOLD', gold_score_total, gold_words_total)
    if tgtF:
        tgtF.close()
    if opt.dump_beam:
        json.dump(translator.beam_accum, open(opt.dump_beam, 'w'))
    if prefix_reader is not None:
        prefix_reader.close()
    if sub_src is not None:
        sub_src.close()
def translate_batch(opt, tgtF, count, outF, translator, src_batch, tgt_batch,
                    pred_batch, pred_ids, pred_score, pred_length,
                    gold_score,
                    num_gold_words, all_gold_scores, input_type, external_tokenizer=None):
    """Convert one batch of beam-search output into text and write it to outF.

    Optionally re-ranks each n-best list by the length-penalized score,
    writes either the best hypothesis or the full n-best list, and (with
    -verbose) prints source/prediction/gold details.

    Returns the updated running sentence `count` plus this batch's
    prediction score total, prediction word total, gold score total and
    gold word total.
    """
    # In n-best mode the raw scores are printed as-is, so skip normalization.
    if opt.print_nbest:
        opt.normalize = False
    if opt.normalize and not opt.fast_translate:
        # Re-rank each sentence's n-best list by the length-penalized score
        # (length measured in characters of the joined hypothesis).
        pred_batch_ = []
        pred_score_ = []
        for bb, ss, ll in zip(pred_batch, pred_score, pred_length):
            length = [len(i) for i in [''.join(b_) for b_ in bb]]
            ss_ = [len_penalty(s_, max(l_, 1), opt.alpha) for b_, s_, l_ in zip(bb, ss, length)]
            sidx = numpy.argsort(ss_)[::-1]
            pred_batch_.append([bb[s] for s in sidx])
            pred_score_.append([ss_[s] for s in sidx])
        pred_batch = pred_batch_
        pred_score = pred_score_
    # Batch totals: best-hypothesis scores and word counts.
    pred_score_total = sum(score[0].item() for score in pred_score)
    pred_words_total = sum(len(x[0]) for x in pred_batch)
    gold_score_total = 0
    gold_words_total = 0
    if tgtF is not None:
        gold_score_total = sum(gold_score).item()
        gold_words_total = num_gold_words
    for b in range(len(pred_batch)):
        count += 1
        if not opt.print_nbest:
            outF.write(
                get_sentence_from_tokens(pred_batch[b][0], pred_ids[b][0], input_type, external_tokenizer) + '\n')
            outF.flush()
        else:
            # Write the whole n-best list, one "<sent> ||| <score>" per line.
            for n in range(opt.n_best):
                output_sent = get_sentence_from_tokens(pred_batch[b][n], pred_ids[b][n], input_type,
                                                       external_tokenizer)
                out_str = "%s ||| %.4f" % (output_sent, pred_score[b][n])
                outF.write(out_str + '\n')
                outF.flush()
        if opt.verbose:
            if opt.encoder_type == "text":
                src_sent = " ".join(src_batch[b])
                print('SRC %d: %s' % (count, src_sent))
            print('PRED %d: %s' % (
                count, get_sentence_from_tokens(pred_batch[b][0], pred_ids[b][0], input_type, external_tokenizer)))
            print("PRED SCORE: %.4f" % pred_score[b][0])
            if tgtF is not None:
                # BUG FIX: the original call get_sentence_from_tokens(tgt_batch[b], input_type)
                # passed input_type in the `ids` slot and omitted the required
                # input_type argument, raising TypeError whenever this branch
                # ran. Gold sentences have no token ids, so pass None and
                # rely on the token-join path.
                tgt_sent = get_sentence_from_tokens(tgt_batch[b], None, input_type)
                if translator.tgt_dict.lower:
                    tgt_sent = tgt_sent.lower()
                print('GOLD %d: %s ' % (count, tgt_sent))
                print("GOLD SCORE: %.4f" % gold_score[b])
            print()
            if opt.print_nbest:
                print('\n BEST HYP:')
                for n in range(opt.n_best):
                    out_str = "%s ||| %.4f" % (" ".join(pred_batch[b][n]), pred_score[b][n])
                    print(out_str)
                print('')
    return count, pred_score_total, pred_words_total, gold_score_total, gold_words_total
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| 35,674 | 43.04321 | 121 | py |
NMTGMinor | NMTGMinor-master/verify_wav2vec2_feat.py | #!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-scp', default='output.scp',
help="""Path to output the feature paths""")
# parser.add_argument('-ark_output', default='output.ark',
# help="""Path to output the features""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence will potential enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def verify_ark(utts, features, padding_mask, scp_data):
    """Check that freshly computed wav2vec features match precomputed ones.

    :param utts: list of utterance ids, one per batch entry
    :param features: float tensor of shape (bsz, seq_len, feat_size)
    :param padding_mask: (bsz, seq_len) mask, nonzero/True on padded frames
    :param scp_data: list of precomputed numpy feature matrices, one per entry
    :raises AssertionError: when sizes disagree or features differ beyond 1e-5
    """
    features = features.cpu()
    bsz, seq_len, feat_size = features.size()
    # Cast the mask to long first: `1 - mask` is not supported for bool
    # tensors in recent PyTorch (fairseq padding masks are bool).  The
    # sibling write_codes() already performs the same cast.
    lengths = (1 - padding_mask.long()).sum(dim=1)
    # The longest utterance must span the whole padded time axis.
    assert torch.max(lengths).item() == seq_len
    assert len(utts) == bsz
    for i in range(bsz):
        # Compare only the unpadded frames of each utterance.
        feature_ = features[i, 0:lengths[i]].numpy()
        precomputed_feature_ = scp_data[i]
        np.testing.assert_allclose(
            feature_,
            precomputed_feature_,
            atol=1e-5, rtol=1e-5)
def build_data(src_sents):
    """Wrap raw waveform tensors into a single-batch onmt.Dataset ('wav' type)."""
    # NOTE(review): WavDataset is never referenced below; the import is kept in
    # case importing the module registers the 'wav' data type as a side
    # effect — confirm before removing.
    from onmt.data.wav_dataset import WavDataset
    # Single dummy source language id; no targets are needed for extraction.
    src_lang_data = [torch.Tensor([0])]
    return onmt.Dataset(src_sents, None,
                        src_langs=src_lang_data, tgt_langs=None,
                        batch_size_words=sys.maxsize,
                        max_src_len=sys.maxsize,
                        data_type='wav',
                        batch_size_sents=sys.maxsize,
                        src_align_right=False,
                        past_src_data=None)
if __name__ == '__main__':
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Load only the wav2vec 2.0 feature extractor front-end.
    from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecExtractor
    model = FairseqWav2VecExtractor(opt.model)
    # if opt.fp16:
    #     model = model.half()
    if opt.cuda:
        model = model.cuda()
    model.eval()
    print(model.wav2vec_encoder.feature_extractor)
    # Two parallel inputs: the wav segment list (opt.src) and the scp file
    # (opt.scp) pointing at precomputed feature matrices to verify against.
    audio_data = open(opt.src)
    scp_data = open(opt.scp)
    from onmt.data.audio_utils import ArkLoader
    scp_reader = ArkLoader()
    from onmt.utils import safe_readaudio
    i = 0
    n_models = len(opt.model.split("|"))
    src_batch = list()
    src_utts = list()
    src_scp = list()
    while True:
        try:
            # Each line: <utt-id> <wav-path> [<start-sec> <end-sec>]
            line = next(audio_data).strip().split()
            utt = line[0]
            if len(line) == 2:
                wav_path = line[1]
                start = 0
                end = 0
            else:
                wav_path, start, end = line[1], float(line[2]), float(line[3])
            # read the wav samples
            line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
            # read the scp data
            scp_path = next(scp_data).strip().split()[1]
            scp_line = scp_reader.load_mat(scp_path)
        except StopIteration:
            break
        src_length = line.size(0)
        """
        Read features output from wav2vec model and write into scp/ark file just like Kaldi w/ logmel features
        """
        if _is_oversized(src_batch, src_length, opt.batch_size):
            # If adding a new sentence will make the batch oversized
            # Then do translation now, and then free the list
            print("Batch sizes :", len(src_batch))
            dataset = build_data(src_batch)
            batch = dataset.get_batch(0)
            batch.cuda()
            with autocast(enabled=opt.fp16):
                features, padding_mask = model(batch)
                # write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
                verify_ark(src_utts, features, padding_mask, src_scp)
            src_batch = []
            src_utts = []
            src_scp = []
        # The new utterance starts (or continues) the next batch.
        src_batch.append(line)
        src_utts.append(utt)
        src_scp.append(scp_line)
# catch the last batch
if len(src_batch) != 0:
print("Batch sizes :", len(src_batch), )
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
with autocast(enabled=opt.fp16):
features, padding_mask = model(batch)
verify_ark(src_utts, features, padding_mask, src_scp)
src_batch = []
src_utts = []
src_scp = []
ark_out.close()
scp_out.close()
| 8,036 | 32.348548 | 119 | py |
NMTGMinor | NMTGMinor-master/rematch_language_embedding.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import copy
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.constants import add_tokenidx
from options import backward_compatible
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model_src', required=True,
help='Path to model .pt file')
parser.add_argument('-model_tgt', required=True,
help='Path to model .pt file')
parser.add_argument('-model_out', required=True,
help='Path to model .pt file')
# Re-map the rows of a model's language-embedding table from the language
# indices of its own (source) dictionary to the indices of a second (target)
# checkpoint's dictionary, then save the converted model.
opt = parser.parse_args()
# first, we load the model src
print(opt.model_src)
checkpoint = torch.load(opt.model_src, map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
src_dicts = checkpoint['dicts']
# update special tokens
onmt.constants = add_tokenidx(model_opt, onmt.constants, src_dicts)
model = build_model(model_opt, checkpoint['dicts'])
model.load_state_dict(checkpoint['model'])
# now load the 2nd model
# Only the dictionaries of the target checkpoint are used: they define the
# new language -> index mapping.
print(opt.model_tgt)
checkpoint = torch.load(opt.model_tgt, map_location=lambda storage, loc: storage)
# model_opt = checkpoint['opt']
# model_opt = backward_compatible(model_opt)
tgt_dicts = checkpoint['dicts']
# tgt_model = build_model(model_opt, checkpoint['dicts'])
# check the embedding
# Copy every embedding row from its old index (source dict) to its new index
# (target dict), then write the permuted table back into the model.
# NOTE(review): assumes every language of the source dict also exists in the
# target dict — a missing key raises KeyError here; confirm the inputs.
lang_emb = copy.deepcopy(model.encoder.language_embedding.weight.data)
new_emb = copy.deepcopy(lang_emb)
for key in src_dicts['langs']:
    old_idx = src_dicts['langs'][key]
    new_idx = tgt_dicts['langs'][key]
    print(key, old_idx, "->", new_idx)
    new_emb[new_idx].copy_(lang_emb[old_idx])
model.encoder.language_embedding.weight.data.copy_(new_emb)
model_state_dict = model.state_dict()
# Save with the *target* dictionaries so the converted model is a drop-in
# checkpoint for the target setup.
save_checkpoint = {
    'model': model_state_dict,
    'dicts': tgt_dicts,
    'opt': model_opt,
    'epoch': -1,
    'iteration': -1,
    'batchOrder': None,
    'optim': None
}
print("Saving converted model to %s" % opt.model_out)
torch.save(save_checkpoint, opt.model_out)
| 2,213 | 26 | 81 | py |
NMTGMinor | NMTGMinor-master/extract_wav2vec2_codebook.py | #!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-scp_output', default='output.scp',
help="""Path to output the feature paths""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence will potential enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def write_codes(utts, codes, padding_mask, out_scp, opt):
    """Write one line of quantizer codes per utterance to ``out_scp``.

    For every unpadded time step, the ids of the codebook groups are collapsed
    into a single id by taking their product, and the resulting ids are
    written space-separated, one utterance per line.

    :param utts: utterance ids, one per batch entry
    :param codes: long tensor of shape (bsz, seq_len, groups)
    :param padding_mask: (bsz, seq_len) mask of padded frames, or None
    :param out_scp: writable text file object
    :param opt: command-line options (unused; kept for interface parity)
    """
    codes = codes.cpu()
    bsz, seq_len, groups = codes.size()
    # Without a padding mask every utterance spans the full time axis.
    if padding_mask is None:
        lengths = torch.LongTensor(bsz).fill_(seq_len)
    else:
        lengths = (1 - padding_mask.long()).sum(dim=1).long()
    assert len(utts) == bsz
    for i, seg_name in enumerate(utts):
        # Collapse the codebook groups into one id per step via their product.
        per_step = codes[i, 0:lengths[i], :].prod(dim=-1, keepdim=False)
        line = " ".join(str(c) for c in per_step.tolist())
        out_scp.write(line + "\n")
def build_data(src_sents):
    """Build a single-batch onmt.Dataset over raw waveform tensors."""
    # NOTE(review): WavDataset is not used below; the import is preserved in
    # case the module import has registration side effects — confirm before
    # removing.
    from onmt.data.wav_dataset import WavDataset
    # One dummy source language id; extraction needs no target side.
    lang_ids = [torch.Tensor([0])]
    return onmt.Dataset(src_sents, None,
                        src_langs=lang_ids, tgt_langs=None,
                        batch_size_words=sys.maxsize,
                        max_src_len=sys.maxsize,
                        data_type='wav',
                        batch_size_sents=sys.maxsize,
                        src_align_right=False,
                        past_src_data=None)
if __name__ == '__main__':
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    print("Loading Wav2vec 2.0 model ...")
    # The quantizer wrapper exposes discrete codebook ids instead of features.
    from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecQuantizer
    model = FairseqWav2VecQuantizer(opt.model)
    print("Done")
    if opt.cuda:
        model = model.cuda()
    model.eval()
    scp_out = open(opt.scp_output, 'w')
    audio_data = open(opt.src)
    from onmt.utils import safe_readaudio
    i = 0
    n_models = len(opt.model.split("|"))
    src_batch = list()
    src_utts = list()
    while True:
        try:
            # Each line: <utt-id> <wav-path> [<start-sec> <end-sec>]
            line = next(audio_data).strip().split()
            utt = line[0]
            if len(line) == 2:
                wav_path = line[1]
                start = 0
                end = 0
            else:
                wav_path, start, end = line[1], float(line[2]), float(line[3])
            line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
        except StopIteration:
            break
        src_length = line.size(0)
        """
        Read features output from wav2vec model and write into scp/ark file just like Kaldi w/ logmel features
        """
        if _is_oversized(src_batch, src_length, opt.batch_size):
            # If adding a new sentence will make the batch oversized
            # Then do translation now, and then free the list
            print("Batch sizes :", len(src_batch))
            dataset = build_data(src_batch)
            batch = dataset.get_batch(0)
            batch.cuda()
            with torch.no_grad():
                with autocast(enabled=opt.fp16):
                    codes, padding_mask = model(batch)
                    write_codes(src_utts, codes, padding_mask, scp_out, opt)
            src_batch = []
            src_utts = []
        # The new utterance starts (or continues) the next batch.
        src_batch.append(line)
        src_utts.append(utt)
# catch the last batch
if len(src_batch) != 0:
print("Batch sizes :", len(src_batch), )
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
with autocast(enabled=opt.fp16):
codes, padding_mask = model(batch)
write_codes(src_utts, codes, padding_mask, scp_out, opt)
src_batch = []
src_utts = []
ark_out.close()
scp_out.close()
| 7,425 | 32.151786 | 119 | py |
NMTGMinor | NMTGMinor-master/average_checkpoints_auto.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import os, sys
from onmt.model_factory import build_model, build_language_model, build_classifier, optimize_model
from copy import deepcopy
from onmt.utils import checkpoint_paths, normalize_gradients
import glob
from onmt.constants import add_tokenidx
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-type', default='seq2seq', help="""Type of models""")
parser.add_argument('-lm', action='store_true',
help='Language model (default is seq2seq model')
parser.add_argument('-sort_by_date', action='store_true',
help='Sort the model files by date')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-top', type=int, default=10,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def custom_build_model(opt, dict, lm=False, type='seq2seq', constants=None):
    """Build a seq2seq / language-model / classifier model and optimize it.

    :param opt: model options taken from a checkpoint
    :param dict: vocabulary dictionaries from the checkpoint
    :param lm: build a language model instead of a seq2seq model
    :param type: model family, 'seq2seq' or 'classifier'
    :param constants: optional constants forwarded to build_model
    :return: the constructed model, after optimize_model() has been applied
    :raises NotImplementedError: if ``type`` is not recognized
    """
    if type == 'seq2seq':
        if not lm:
            model = build_model(opt, dict, False, constants)
        else:
            model = build_language_model(opt, dict)
    elif type == 'classifier':
        model = build_classifier(opt, dict)
    else:
        # Previously an unknown type fell through and crashed with
        # UnboundLocalError on `model`; fail with a clear message instead.
        raise NotImplementedError("Unknown model type: %s" % type)

    optimize_model(model)

    return model
def main():
    """Average the parameters of the top-k checkpoints in a directory.

    Checkpoints are collected either via checkpoint_paths() naming or by file
    modification time, loaded one at a time, and combined with an arithmetic
    ('mean') or geometric ('gmean') average.  The result is saved as a new
    checkpoint at opt.output.
    """
    opt = parser.parse_args()

    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    path = opt.models
    # Collect candidate checkpoint files.
    if not opt.sort_by_date:
        existed_save_files = checkpoint_paths(path)
    else:
        existed_save_files = glob.glob(path + "/" + "*.pt")
        existed_save_files.sort(key=os.path.getmtime)
        print("\n".join(existed_save_files))

    models = existed_save_files
    # Only average the top-k checkpoints.
    models = models[:opt.top]

    n_models = len(models)

    # Checkpoint for the main (accumulator) model.
    checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)
    # Drop optimizer state to save memory; only parameters are averaged.
    if 'optim' in checkpoint:
        del checkpoint['optim']
    main_checkpoint = checkpoint

    best_checkpoint = main_checkpoint

    model_opt = checkpoint['opt']
    dicts = checkpoint['dicts']

    onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
    constants = onmt.constants

    # Only create the model object; no pretrained encoder/decoder states.
    model_opt.enc_state_dict = None
    model_opt.dec_state_dict = None

    print(model_opt.layers)

    main_model = custom_build_model(model_opt, checkpoint['dicts'], lm=opt.lm, type=opt.type, constants=constants)

    print("Loading main model from %s ..." % models[0])
    try:
        main_model.load_state_dict(checkpoint['model'])
    except RuntimeError:
        # Fall back to a non-strict load so checkpoints with extra or missing
        # buffers can still be averaged.  (The original retried with
        # strict=True — the default — which simply raised the same error.)
        main_model.load_state_dict(checkpoint['model'], strict=False)

    if opt.cuda:
        main_model = main_model.cuda()

    # Accumulate the parameters of the remaining checkpoints into main_model.
    for i in range(1, len(models)):
        model = models[i]
        checkpoint = torch.load(model, map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        model_opt.enc_state_dict = None
        model_opt.dec_state_dict = None

        # Delete optim information to save GPU memory.
        if 'optim' in checkpoint:
            del checkpoint['optim']

        current_model = custom_build_model(model_opt, checkpoint['dicts'], lm=opt.lm, type=opt.type)
        current_model.eval()

        print("Loading model from %s ..." % models[i])
        try:
            current_model.load_state_dict(checkpoint['model'])
        except RuntimeError:
            # Same non-strict fallback as for the main model.
            current_model.load_state_dict(checkpoint['model'], strict=False)

        if opt.cuda:
            current_model = current_model.cuda()

        if opt.method == 'mean':
            # Sum the parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.add_(param.data)
        elif opt.method == 'gmean':
            # Take the geometric mean of parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.mul_(param.data)
        else:
            raise NotImplementedError

    # Normalizing
    if opt.method == 'mean':
        for main_param in main_model.parameters():
            main_param.data.div_(n_models)
    elif opt.method == 'gmean':
        for main_param in main_model.parameters():
            main_param.data.pow_(1. / n_models)

    # Saving
    model_state_dict = main_model.state_dict()

    save_checkpoint = {
        'model': model_state_dict,
        'dicts': dicts,
        'opt': model_opt,
        'epoch': -1,
        'iteration': -1,
        'batchOrder': None,
        'optim': None
    }

    print("Saving averaged model to %s" % opt.output)
    torch.save(save_checkpoint, opt.output)


if __name__ == "__main__":
    main()
| 5,784 | 27.925 | 114 | py |
NMTGMinor | NMTGMinor-master/autoencoder.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
from onmt.modules.loss import NMTLossFunc
from onmt.model_factory import build_model, init_model_parameters
from ae.Autoencoder import Autoencoder
from ae.Trainer import AETrainer
parser = argparse.ArgumentParser(description='train.py')
onmt.markdown.add_md_help_argument(parser)
from options import make_parser
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
parser.add_argument('-representation', type=str, default="EncoderHiddenState",
help="Representation for Autoencoder")
parser.add_argument('-auto_encoder_hidden_size', type=int, default=100,
help="Hidden size of autoencoder")
parser.add_argument('-auto_encoder_drop_out', type=float, default=0,
help="Use drop_out in autoencoder")
parser.add_argument('-auto_encoder_type', type=str, default="Baseline",
help="Use drop_out in autoencoder")
opt = parser.parse_args()
print(opt)
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
print("WARNING: You have a CUDA device, should run with -gpus 0")
torch.manual_seed(opt.seed)
def main():
    """Train an autoencoder on the representations of a pretrained NMT model.

    Loads the dataset (raw ``.train.pt`` or binary indexed format), builds the
    NMT model plus the autoencoder wrapper, restores the NMT weights from
    ``opt.load_from`` (mandatory) and hands everything to ``AETrainer``.
    """
    if opt.data_format == 'raw':
        start = time.time()
        print("Loading data from '%s'" % opt.data)

        if opt.data.endswith(".train.pt"):
            print("Loading data from '%s'" % opt.data)
            dataset = torch.load(opt.data)
        else:
            print("Loading data from %s" % opt.data + ".train.pt")
            dataset = torch.load(opt.data + ".train.pt")

        elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
        print("Done after %s" % elapse)

        trainData = onmt.Dataset(dataset['train']['src'],
                                 dataset['train']['tgt'], opt.batch_size_words,
                                 data_type=dataset.get("type", "text"),
                                 batch_size_sents=opt.batch_size_sents,
                                 multiplier=opt.batch_size_multiplier)
        validData = onmt.Dataset(dataset['valid']['src'],
                                 dataset['valid']['tgt'], opt.batch_size_words,
                                 data_type=dataset.get("type", "text"),
                                 batch_size_sents=opt.batch_size_sents)

        dicts = dataset['dicts']
        if "src" in dicts:
            print(' * vocabulary size. source = %d; target = %d' %
                  (dicts['src'].size(), dicts['tgt'].size()))
        else:
            print(' * vocabulary size. target = %d' %
                  (dicts['tgt'].size()))

        print(' * number of training sentences. %d' %
              len(dataset['train']['src']))
        print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)

    elif opt.data_format == 'bin':
        from onmt.data.indexed_dataset import IndexedInMemoryDataset

        dicts = torch.load(opt.data + ".dict.pt")

        train_path = opt.data + '.train'
        train_src = IndexedInMemoryDataset(train_path + '.src')
        train_tgt = IndexedInMemoryDataset(train_path + '.tgt')

        trainData = onmt.Dataset(train_src,
                                 train_tgt, opt.batch_size_words,
                                 batch_size_sents=opt.batch_size_sents,
                                 multiplier=opt.batch_size_multiplier)

        valid_path = opt.data + '.valid'
        valid_src = IndexedInMemoryDataset(valid_path + '.src')
        valid_tgt = IndexedInMemoryDataset(valid_path + '.tgt')

        validData = onmt.Dataset(valid_src,
                                 valid_tgt, opt.batch_size_words,
                                 batch_size_sents=opt.batch_size_sents)

    else:
        raise NotImplementedError

    print('Building model...')
    model = build_model(opt, dicts)
    autoencoder = Autoencoder(model, opt)

    # Sum-reduced MSE between the autoencoder output and its target
    # representation.  (`size_average=False` is deprecated in modern PyTorch;
    # reduction='sum' is the equivalent spelling.)
    loss_function = nn.MSELoss(reduction='sum')

    nParams = sum([p.nelement() for p in autoencoder.parameters()])
    print('* number of parameters: %d' % nParams)

    # The pretrained NMT model is mandatory: the autoencoder trains on its
    # representations.
    checkpoint = None
    if opt.load_from:
        checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
    else:
        raise NotImplementedError

    if checkpoint is not None:
        print('Loading model from checkpoint at %s' % opt.load_from)
        model.load_state_dict(checkpoint['model'])
        # Free the checkpoint tensors as soon as the weights are copied.
        del checkpoint['model']
        del checkpoint['optim']
        del checkpoint

    if len(opt.gpus) > 1 or opt.virtual_gpu > 1:
        raise NotImplementedError("Warning! Multi-GPU training is not fully tested and potential bugs can happen.")
    else:
        trainer = AETrainer(autoencoder, model, loss_function, trainData, validData, dicts, opt)
        trainer.run(save_file=False)


if __name__ == "__main__":
    main()
| 5,533 | 34.703226 | 115 | py |
NMTGMinor | NMTGMinor-master/sample_lm.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
from onmt.model_factory import build_model
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def main():
    # NOTE(review): despite the file name (sample_lm.py), this script averages
    # the parameters of several checkpoints — confirm whether sampling code
    # was intended here.
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # opt.model should be a string of models, split by |
    models = opt.models.split("|")
    # print(models)
    n_models = len(models)
    print("Loading main model from %s ..." % models[0])
    checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)
    # Drop optimizer state: only the parameters are needed for averaging.
    if 'optim' in checkpoint:
        del checkpoint['optim']
    main_checkpoint = checkpoint
    model_opt = checkpoint['opt']
    dicts = checkpoint['dicts']
    main_model = build_model(model_opt, checkpoint['dicts'])
    main_model.load_state_dict(checkpoint['model'])
    if opt.cuda:
        main_model = main_model.cuda()
    # Accumulate the remaining models' parameters into main_model.
    for i in range(1, len(models)):
        model = models[i]
        print("Loading model from %s ..." % models[i])
        checkpoint = torch.load(model, map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        # delete optim information to save GPU memory
        if 'optim' in checkpoint:
            del checkpoint['optim']
        current_model = build_model(model_opt, checkpoint['dicts'])
        current_model.load_state_dict(checkpoint['model'])
        if opt.cuda:
            current_model = current_model.cuda()
        if opt.method == 'mean':
            # Sum the parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.add_(param.data)
        elif opt.method == 'gmean':
            # Take the geometric mean of parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.mul_(param.data)
        else:
            raise NotImplementedError
    # Normalizing
    if opt.method == 'mean':
        for main_param in main_model.parameters():
            main_param.data.div_(n_models)
    elif opt.method == 'gmean':
        for main_param in main_model.parameters():
            main_param.data.pow_(1./n_models)
    # Saving
    model_state_dict = main_model.state_dict()
    # Save the averaged parameters as a fresh checkpoint (no training state).
    save_checkpoint = {
        'model': model_state_dict,
        'dicts': dicts,
        'opt': model_opt,
        'epoch': -1,
        'iteration' : -1,
        'batchOrder' : None,
        'optim': None
    }
    print("Saving averaged model to %s" % opt.output)
    torch.save(save_checkpoint, opt.output)
if __name__ == "__main__":
    main()
| 3,478 | 26.393701 | 96 | py |
NMTGMinor | NMTGMinor-master/rescore.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
parser = argparse.ArgumentParser(description='rescore.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=2048,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def reportScore(name, scoreTotal, wordsTotal):
    """Print the average log-score and the perplexity for a scored corpus."""
    # The epsilon keeps the division safe when the corpus is empty.
    avg = scoreTotal / (wordsTotal + 1e-9)
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (name, avg, name, math.exp(-avg)))
def addone(f):
    """Yield every item of *f*, then a single None sentinel marking the end."""
    yield from f
    yield None
def lenPenalty(s, l, alpha):
    """Normalize score *s* by length *l* raised to *alpha* (length penalty)."""
    return s / math.pow(l, alpha)
def getSentenceFromTokens(tokens, input_type):
    """Join tokens into a sentence: spaces for word level, nothing for char level.

    :raises NotImplementedError: for any other ``input_type``
    """
    if input_type == 'word':
        return " ".join(tokens)
    if input_type == 'char':
        return "".join(tokens)
    raise NotImplementedError
def main():
    """Driver for n-best rescoring.

    Reads source input (text, h5 audio, or kaldi scp audio) plus an n-best
    target file (`hyp ||| score` per line), batches them, rescores each batch
    with the model, and writes `hyp ||| old_score new_score` lines via
    translateBatch().
    """
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Always pick n_best
    opt.n_best = opt.beam_size

    if opt.output == "stdout":
        outF = sys.stdout
    else:
        outF = open(opt.output, 'w')

    predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0
    srcBatch, tgtBatch, tgtScores = [], [], []
    count = 0
    tgtF = open(opt.tgt) if opt.tgt else None

    # Select the input source for the source side.
    inFile = None
    if opt.src == "stdin":
        inFile = sys.stdin
        opt.batch_size = 1
    elif opt.encoder_type == "audio" and opt.asr_format == "h5":
        inFile = h5.File(opt.src, 'r')
    elif opt.encoder_type == "audio" and opt.asr_format == "scp":
        import kaldiio
        from kaldiio import ReadHelper
        audio_data = iter(ReadHelper('scp:' + opt.src))
    else:
        inFile = open(opt.src)

    # initialize the rescorer (with models) and stuff
    rescorer = onmt.Rescorer(opt)

    # BUG FIX: this block previously called `translator.initBeamAccum()` on an
    # undefined name, before any model object existed. It now targets the
    # rescorer, after construction. NOTE(review): confirm Rescorer exposes
    # initBeamAccum like the Translator did.
    if opt.dump_beam != "":
        import json
        rescorer.initBeamAccum()

    if opt.encoder_type == "audio":
        s_prev_context = []
        t_prev_context = []

        i = 0
        while True:
            # Fetch the next utterance, depending on the ASR container format.
            if opt.asr_format == "h5":
                if i == len(inFile):
                    break
                line = np.array(inFile[str(i)])
                i += 1
            elif opt.asr_format == "scp":
                try:
                    _, line = next(audio_data)
                except StopIteration:
                    break

            if opt.stride != 1:
                line = line[0::opt.stride]
            line = torch.from_numpy(line)
            if opt.concat != 1:
                # Zero-pad so the frame count divides by `concat`, then stack
                # consecutive frames to downsample the sequence.
                add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
                z = torch.FloatTensor(add, line.size()[1]).zero_()
                line = torch.cat((line, z), 0)
                line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))

            if opt.previous_context > 0:
                s_prev_context.append(line)
                # BUG FIX: the inner loop used `i`, clobbering the h5
                # utterance index above; it now uses its own variable.
                for j in range(1, opt.previous_context + 1):
                    if j < len(s_prev_context):
                        line = torch.cat((torch.cat((s_prev_context[-j - 1], torch.zeros(1, line.size()[1]))), line))
                if len(s_prev_context) > opt.previous_context:
                    s_prev_context = s_prev_context[-1 * opt.previous_context:]

            srcBatch += [line]

            if tgtF:
                tline = tgtF.readline().strip()
                # BUG FIX: the hypothesis text was extracted into `twords` but
                # then ignored, so tokens included the "||| score" tail and
                # tgtScores was never filled (translateBatch indexes it).
                parts = tline.split("|||")
                twords = parts[0].strip()
                tscore = parts[1].strip() if len(parts) > 1 else "0"
                if opt.input_type == 'word':
                    tgt_tokens = twords.split()
                elif opt.input_type == 'char':
                    tgt_tokens = list(twords)
                else:
                    raise NotImplementedError("Input type unknown")
                tgtBatch += [tgt_tokens]
                tgtScores += [tscore]

            if len(srcBatch) < opt.batch_size:
                continue

            print("Batch size:", len(srcBatch), len(tgtBatch))
            goldScore, numGoldWords, allGoldScores = rescorer.rescore_asr(
                srcBatch, tgtBatch)
            # BUG FIX: previously printed len(predBatch), an undefined name.
            print("Result:", len(goldScore))
            # BUG FIX: the call now matches translateBatch's signature (it used
            # to pass an undefined `translator` and omit tgtScores).
            count = translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores,
                                   goldScore, numGoldWords,
                                   allGoldScores, opt.input_type)
            # BUG FIX: was `srcBatch, tgtBatch, tgtScores = [], []` — a
            # 2-element tuple unpacked into 3 names (ValueError).
            srcBatch, tgtBatch, tgtScores = [], [], []

        # Flush the final partial batch, if any.
        if len(srcBatch) != 0:
            print("Batch size:", len(srcBatch), len(tgtBatch))
            # BUG FIX: was `translator.rescore_asr` on an undefined name.
            goldScore, numGoldWords, allGoldScores = rescorer.rescore_asr(srcBatch, tgtBatch)
            print("Result:", len(goldScore))
            count = translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores,
                                   goldScore, numGoldWords,
                                   allGoldScores, opt.input_type)
            srcBatch, tgtBatch, tgtScores = [], [], []
    else:
        for line in addone(inFile):
            if line is not None:
                if opt.input_type == 'word':
                    srcTokens = line.split()
                elif opt.input_type == 'char':
                    srcTokens = list(line.strip())
                else:
                    raise NotImplementedError("Input type unknown")
                # for each source sentence, we read in n target hypotheses
                for n in range(opt.n_best):
                    # duplicate the srcTokens once per hypothesis
                    srcBatch += [srcTokens]
                    tgtline = tgtF.readline()
                    tgt_text = tgtline.strip().split(' ||| ')[0]
                    tgt_score = tgtline.strip().split(' ||| ')[1]
                    if opt.input_type == 'word':
                        tgt_tokens = tgt_text.split() if tgtF else None
                    elif opt.input_type == 'char':
                        tgt_tokens = list(tgt_text.strip()) if tgtF else None
                    else:
                        raise NotImplementedError("Input type unknown")
                    tgtBatch += [tgt_tokens]
                    tgtScores += [tgt_score]

                if len(srcBatch) < opt.batch_size * opt.n_best:
                    continue
            else:
                # at the end of file, check last batch
                if len(srcBatch) == 0:
                    break

            goldScore, numGoldWords, allGoldScores = rescorer.rescore(srcBatch, tgtBatch)
            # convert output tensor to words
            count = translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores,
                                   goldScore, numGoldWords,
                                   allGoldScores, opt.input_type)
            # BUG FIX: tgtScores is reset together with the batches so that
            # per-index alignment with srcBatch/tgtBatch survives batch 2+.
            srcBatch, tgtBatch, tgtScores = [], [], []

    if tgtF:
        tgtF.close()
def translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores, goldScore,
                   numGoldWords, allGoldScores, input_type):
    """Write one rescored line per hypothesis to *outF* and return the updated
    running hypothesis counter.

    Each output line is "hyp ||| old_score new_score". In verbose mode the
    source sentence is echoed once per beam group (every opt.beam_size
    hypotheses). tgtF, numGoldWords and allGoldScores are accepted for
    interface compatibility but unused here.
    """
    for b, tgt_tokens in enumerate(tgtBatch):
        hyp_sent = getSentenceFromTokens(tgt_tokens, input_type)
        # tgtScores[b] is the original score kept as a string; goldScore[b]
        # is the freshly computed model score.
        rescored_line = "%s ||| %s %.4f" % (hyp_sent, tgtScores[b], goldScore[b])
        outF.write(rescored_line + '\n')
        outF.flush()

        if opt.verbose:
            # A new source sentence starts every beam_size hypotheses.
            if count % opt.beam_size == 0:
                src_sent = getSentenceFromTokens(srcBatch[b], input_type)
                print('SRC SENT %d: %s ' % (count // opt.beam_size + 1, src_sent))
                print('')
            print(rescored_line)
            print('')

        count += 1
    return count
# Script entry point: run the rescoring driver defined above.
if __name__ == "__main__":
    main()
| 13,352 | 38.979042 | 117 | py |
NMTGMinor | NMTGMinor-master/options.py | import argparse
def make_parser(parser):
    """Attach all NMTGMinor training options to *parser* and return it.

    Options are grouped roughly as: data, checkpoint/model I/O, architecture,
    optimization and regularization, GPU/runtime, speech/wav2vec, multilingual
    factorization, pretrained encoder/decoder, and special tokens.

    :param parser: an ``argparse.ArgumentParser`` (or compatible) instance.
    :return: the same parser, with all arguments registered.
    """
    # Data options
    parser.add_argument('-data', required=True,
                        help='Path to the *-train.pt file from preprocess.py')
    parser.add_argument('-data_format', required=False, default='raw',
                        help='Default data format: raw')
    parser.add_argument('-data_cache_size', type=int, default=32,
                        help="""Caching for dataset (if implemented)""")
    parser.add_argument('-multi_dataset', action='store_true',
                        help='Reading multiple datasets (sharing the same dictionary)')
    parser.add_argument('-override_dict_from_checkpoint', action='store_true',
                        help='The dictionary will be overidden from checkpoint instead of reading from data.')
    parser.add_argument('-gem_training', action='store_true',
                        help='Gradient Episodic Memory training')
    parser.add_argument('-train_sets', default=[], nargs='+', type=int,
                        help="Sets of training data. For example 0 1 2")
    parser.add_argument('-valid_sets', default=[], nargs='+', type=int,
                        help="Sets of validation data.")
    parser.add_argument('-train_set_orders', default=[], nargs='+', type=int,
                        help="The order of the training data for gradient episodic memory. For example 0 0 1 1 (must match the number of datasets).")
    parser.add_argument('-run_validation_before_training', action='store_true',
                        help='Run validation before training')
    parser.add_argument('-estimate_fisher_information', action='store_true',
                        help='Only estimate Fisher Information')
    parser.add_argument('-load_fisher', default='', type=str,
                        help="""Load the fisher information from a checkpoint.""")
    parser.add_argument('-ewc_importance', type=float, default=0.0,
                        help='Importance of EWC penalty')
    parser.add_argument('-ewc_delay', type=int, default=0,
                        help='EWC penalty only applies after this delay (steps)')
    parser.add_argument('-ewc_normalize', action='store_true',
                        help='EWC penalty being normalized')
    parser.add_argument('-ewc_decay_every', type=int, default=10000,
                        help='EWC scale reduced after these steps')
    # FIX: help text was copy-pasted from -ewc_decay_every.
    parser.add_argument('-ewc_decay_scale', type=int, default=10,
                        help='Factor by which the EWC scale is reduced at each decay')
    parser.add_argument('-patch_vocab_multiplier', type=int, default=1,
                        help='Pad vocab so that the size divides by this multiplier')
    parser.add_argument('-buffer_size', type=int, default=16,
                        help='The iterator fills the data buffer with this size')
    parser.add_argument('-num_workers', type=int, default=0,
                        help='Number of extra workers for data fetching. 0=uses the main process. ')
    parser.add_argument('-pin_memory', action="store_true",
                        help='The data loader pins memory into the GPU to reduce the bottleneck between GPU-CPU')
    parser.add_argument('-bayes_by_backprop', action='store_true',
                        help="""Using Bayes-By-Backprop models in training""")
    parser.add_argument('-neg_log_sigma1', type=float, default=0,
                        help='Coefficient for the KL divergence term')
    parser.add_argument('-neg_log_sigma2', type=float, default=6,
                        help='Coefficient for the KL divergence term')
    parser.add_argument('-prior_pi', type=float, default=0.5,
                        help='Coefficient for the KL divergence term')
    # MODEL UTIL
    parser.add_argument('-save_model', default='model',
                        help="""Model filename (the model will be saved as
                        <save_model>_epochN_PPL.pt where PPL is the
                        validation perplexity""")
    parser.add_argument('-load_from', default='', type=str,
                        help="""If training from a checkpoint then this is the
                        path to the pretrained model.""")
    parser.add_argument('-load_encoder_from', default='', type=str,
                        help="""Load encoder weight from a pretrained model.""")
    parser.add_argument('-load_decoder_from', default='', type=str,
                        help="""Load encoder weight from a pretrained model.""")
    parser.add_argument('-streaming', action='store_true',
                        help="""Using streaming in training""")
    parser.add_argument('-stream_context', default='global', type=str,
                        help="""Using streaming in training""")
    # MODEL CONFIG
    parser.add_argument('-model', default='transformer',
                        help="Translation model. [transformer|relative_transformer ]")
    parser.add_argument('-layers', type=int, default=2,
                        help='Number of layers in the Transformer encoder/decoder')
    parser.add_argument('-encoder_layers', type=int, default=-1,
                        help='Number of layers in the LSTM encoder if different')
    parser.add_argument('-max_pos_length', type=int, default=2048,
                        help='Maximum distance length for relative self-attention')
    parser.add_argument('-max_src_length', type=int, default=320000,
                        help='Maximum source length for training')
    parser.add_argument('-max_tgt_length', type=int, default=320000,
                        help='Maximum target length for training')
    parser.add_argument('-learnable_position_encoding', action='store_true',
                        help="""Use embeddings as learnable position encoding.""")
    parser.add_argument('-rotary_position_encoding', action='store_true',
                        help="""Use rotary position encoding.""")
    parser.add_argument('-pos_emb_type', default='absolute',
                        help="Position embedding type. [absolute| relative_k| relative_kv]")
    parser.add_argument('-fix_norm_output_embedding', action='store_true',
                        help="""Normalize the output embedding""")
    parser.add_argument('-reconstruct', action='store_true',
                        help='Apply reconstruction with an additional decoder')
    parser.add_argument('-mirror_loss', action='store_true',
                        help='Using mirror loss')
    # Transforer Model options
    parser.add_argument('-use_language_embedding', action='store_true',
                        help="""Language embedding to add into the word embeddings""")
    parser.add_argument('-language_embedding_type', default='sum', type=str,
                        help="""Language embedding combination type: sum|concat. (Concat uses more parameters)""")
    parser.add_argument('-model_size', type=int, default=512,
                        help='Size of embedding / transformer hidden')
    parser.add_argument('-inner_size', type=int, default=2048,
                        help='Size of inner feed forward layer')
    parser.add_argument('-attribute_size', type=int, default=1,
                        help='Number of attributes')
    parser.add_argument('-n_heads', type=int, default=8,
                        help='Number of heads for multi-head attention')
    parser.add_argument('-checkpointing', type=int, default=0,
                        help='Number of checkpointed layers in the Transformer')
    parser.add_argument('-attn_dropout', type=float, default=0.1,
                        help='Dropout probability; applied on multi-head attention.')
    parser.add_argument('-emb_dropout', type=float, default=0.1,
                        help='Dropout probability; applied on top of embedding.')
    parser.add_argument('-variational_dropout', action='store_true',
                        help='Apply variational dropout (same network per timestep)')
    parser.add_argument('-weight_norm', action='store_true',
                        help='Apply weight normalization on linear modules')
    parser.add_argument('-death_rate', type=float, default=0.0,
                        help='Stochastic layer death rate')
    parser.add_argument('-death_rate_decoder', type=float, default=0.0,
                        help='Stochastic layer death rate')
    parser.add_argument('-stochastic_sublayer', action='store_true',
                        help='Apply stochastic death rate for each sub-layer')
    parser.add_argument('-activation_layer', default='linear_relu_linear', type=str,
                        help='The activation layer in each transformer block '
                             'linear_relu_linear|linear_swish_linear|maxout')
    parser.add_argument('-time', default='positional_encoding', type=str,
                        help='Type of time representation positional_encoding|gru|lstm')
    parser.add_argument('-residual_type', default='regular',
                        help='Type of residual type. regular|gated')
    # Optimization options
    parser.add_argument('-encoder_type', default='text',
                        help="Type of encoder to use. Options are [text|img].")
    parser.add_argument('-input_size', type=int, default=2048,
                        help='Size of input features')
    parser.add_argument('-init', default='normal',
                        help="How to init the weight. normal or uniform/xavier.")
    parser.add_argument('-init_embedding', default='normal',
                        help="How to init the embedding matrices. Xavier or Normal.")
    parser.add_argument('-batch_size_frames', type=int, default=204800,
                        help='Maximum batch size in frame dimension')
    parser.add_argument('-batch_size_words', type=int, default=2048,
                        help='Maximum batch size in word dimension')
    parser.add_argument('-batch_size_sents', type=int, default=99999999,
                        help='Maximum number of sentences in a batch')
    parser.add_argument('-batch_size_update', type=int, default=-1,
                        help='Maximum number of words per update')
    parser.add_argument('-update_frequency', type=int, default=1,
                        help='Maximum number of batches per update (will override the batch_size_update')
    parser.add_argument('-batch_size_multiplier', type=int, default=1,
                        help='Maximum number of words per update')
    parser.add_argument('-max_position_length', type=int, default=1024,
                        help='Maximum length for positional embedding')
    parser.add_argument('-max_memory_size', type=int, default=1024,
                        help='Maximum memory size for buffering in transformer XL')
    parser.add_argument('-extra_context_size', type=int, default=32,
                        help='Extra context size in transformer Xl')
    parser.add_argument('-epochs', type=int, default=13,
                        help='Number of training epochs')
    parser.add_argument('-param_init', type=float, default=0.1,
                        help="""Parameters are initialized over uniform distribution
                        with support (-param_init, param_init)""")
    parser.add_argument('-optim', default='adam',
                        help="Optimization method. [sgd|adagrad|adadelta|adam]")
    parser.add_argument('-zeror_optim', action="store_true",
                        help="""Use Zero redundancy optimizer""")
    parser.add_argument('-max_grad_norm', type=float, default=5,
                        help="""If the norm of the gradient vector exceeds this,
                        renormalize it to have the norm equal to max_grad_norm""")
    # Dropout
    parser.add_argument('-dropout', type=float, default=0.3,
                        help='Dropout probability; general values for ffn and residual if set negatively')
    parser.add_argument('-ffn_dropout', type=float, default=-1,
                        help='Dropout probability; applied at the FFN.')
    parser.add_argument('-residual_dropout', type=float, default=-1,
                        help='Dropout probability; applied at the residual connection.')
    parser.add_argument('-word_dropout', type=float, default=0.0,
                        help='Dropout probability; applied on embedding indices.')
    parser.add_argument('-switchout', type=float, default=0.0,
                        help='Switchout algorithm')
    # Loss function
    parser.add_argument('-label_smoothing', type=float, default=0.0,
                        help='Label smoothing value for loss functions.')
    parser.add_argument('-true_zero_grad', action="store_true",
                        help='truly set grad to zero instead of None.')
    parser.add_argument('-normalize_gradient', action="store_true",
                        help="""Normalize the gradients by number of tokens before updates""")
    # learning rate
    parser.add_argument('-learning_rate', type=float, default=1.0,
                        help="""Starting learning rate. If adagrad/adadelta/adam is
                        used, then this is the global learning rate. Recommended
                        settings: sgd = 1, adagrad = 0.1,
                        adadelta = 1, adam = 0.001""")
    parser.add_argument('-learning_rate_decay', type=float, default=1,
                        help="""If update_learning_rate, decay learning rate by
                        this much if (i) perplexity does not decrease on the
                        validation set or (ii) epoch has gone past
                        start_decay_at""")
    parser.add_argument('-start_decay_at', type=int, default=99999,
                        help="""Start decaying every epoch after and including this
                        epoch""")
    parser.add_argument('-warmup_steps', type=int, default=4096,
                        help="""Number of steps to increase the lr in noam""")
    parser.add_argument('-max_steps', type=int, default=100000,
                        help="""Number of steps to train the model""")
    parser.add_argument('-noam_step_interval', type=int, default=1,
                        help="""How many steps before updating the parameters""")
    # FIX: help text was copy-pasted from -noam_step_interval.
    parser.add_argument('-max_step', type=int, default=4000000,
                        help="""Absolute upper bound on the training step counter""")
    # FIX: help text was copy-pasted from -noam_step_interval.
    parser.add_argument('-starting_step', type=int, default=-1,
                        help="""Step counter to start from when resuming (-1: keep the checkpoint value)""")
    parser.add_argument('-factorizing_step', type=int, default=0,
                        help="""How many steps before using the factorized parameters""")
    parser.add_argument('-reset_optim', action='store_true',
                        help='Reset the optimizer running variables')
    parser.add_argument('-beta1', type=float, default=0.9,
                        help="""beta_1 value for adam""")
    parser.add_argument('-beta2', type=float, default=0.997,
                        help="""beta_2 value for adam""")
    parser.add_argument('-weight_decay', type=float, default=0.0,
                        help="""weight decay (L2 penalty)""")
    parser.add_argument('-amsgrad', action='store_true',
                        help='Using AMSGRad for adam')
    parser.add_argument('-update_method', default='regular',
                        help="Type of update rule to use. Options are [regular|noam].")
    # pretrained word vectors
    parser.add_argument('-tie_weights', action='store_true',
                        help='Tie the weights of the encoder and decoder layer')
    parser.add_argument('-join_embedding', action='store_true',
                        help='Jointly train the embedding of encoder and decoder in one weight')
    parser.add_argument('-batch_ensemble', type=int, default=0,
                        help='To use batch ensemble algorithm')
    parser.add_argument('-save_metrics', default='ppl',
                        help="Type of update rule to use. Options are [perplexity|ppl|accuracy|acc].")
    # GPU
    parser.add_argument('-gpus', default=[], nargs='+', type=int,
                        help="Use CUDA on the listed devices.")
    parser.add_argument('-fp16', action='store_true',
                        help='Use half precision training')
    parser.add_argument('-seed', default=-1, type=int,
                        help="Seed for deterministic runs.")
    parser.add_argument('-log_interval', type=int, default=100,
                        help="Print stats at this interval.")
    parser.add_argument('-save_every', type=int, default=-1,
                        help="Save every this interval.")
    # FIX: help text was copy-pasted from -save_every.
    parser.add_argument('-keep_save_files', type=int, default=5,
                        help="Number of most recent checkpoint files to keep.")
    parser.add_argument('-copy_generator', action='store_true',
                        help='Use the copy_generator')
    parser.add_argument('-verbose', action='store_true',
                        help='Show more information about training (for Nerds)')
    # FAST IMPLEMENTATION
    parser.add_argument('-fast_xentropy', action="store_true",
                        help="""Fast cross entropy loss""")
    parser.add_argument('-fast_xattention', action="store_true",
                        help="""Fast cross attention between encoder decoder""")
    parser.add_argument('-fast_self_attention', action="store_true",
                        help="""Fast self attention between encoder decoder""")
    parser.add_argument('-fast_feed_forward', action="store_true",
                        help="""Fast cross attention between encoder decoder""")
    parser.add_argument('-macaron', action='store_true',
                        help='Macaron style network with 2 FFN per block.')
    parser.add_argument('-fused_ffn', action="store_true",
                        help="""Fast feedforward""")
    parser.add_argument('-favor_attention', action="store_true",
                        help="""Use Favor+ Attention for faster self-attention""")
    # for FUSION
    parser.add_argument('-lm_checkpoint', default='', type=str,
                        help="""If training from a checkpoint then this is the
                        path to the pretrained model.""")
    parser.add_argument('-fusion', action='store_true',
                        help='Use fusion training with language model')
    parser.add_argument('-lm_seq_length', type=int, default=128,
                        help='Sequence length for the language model')
    # for Speech
    parser.add_argument('-reshape_speech', type=int, default=0,
                        help="Reshaping the speech data (0 is ignored, done at preprocessing).")
    parser.add_argument('-concat', type=int, default=4,
                        help="Concatenate frames to downsample.")
    parser.add_argument('-input_feature_size', type=int, default=40,
                        help="Input feature size.")
    parser.add_argument('-augment_speech', action='store_true',
                        help='Use f/t augmentation for speech')
    parser.add_argument('-wav2vec_spec_augment', action='store_true',
                        help='Use f/t augmentation for wav2vec')
    parser.add_argument('-upsampling', action='store_true',
                        help='In case the data is downsampled during preprocess. This option will upsample the '
                             'samples again')
    parser.add_argument('-cnn_downsampling', action='store_true',
                        help='Use CNN for downsampling instead of reshaping')
    parser.add_argument('-zero_encoder', action='store_true',
                        help='Zero-out encoders during training')
    parser.add_argument('-ctc_loss', type=float, default=0.0,
                        help='CTC Loss as additional loss function with this weight')
    parser.add_argument('-bottleneck_size', type=int, default=64,
                        help="Bottleneck size for the LFV vector).")
    parser.add_argument('-conv_kernel', type=int, default=31,
                        help="Kernels for convolution in conformer).")
    parser.add_argument('-no_batch_norm', action='store_true',
                        help="Remove Batch Norm to avoid NaN errors that can happen with spec augmentation.).")
    parser.add_argument('-depthwise_conv', action='store_true',
                        help='Use depthwise convolution in the encoder block')
    parser.add_argument('-no_ffn', action='store_true',
                        help='No feedforward network in the speech encoder')
    parser.add_argument('-multilingual_factorized_weights', action='store_true',
                        help='Factorize the weights in the model for multilingual')
    parser.add_argument('-multilingual_factorized_weights_decoder', action='store_true',
                        help='Factorize the weights in the model decoder for multilingual')
    parser.add_argument('-fast_factorize', action='store_true',
                        help='Fast Factorize the weights in the model for multilingual (Batch Ensemble style)')
    parser.add_argument('-mfw_rank', type=int, default=1,
                        help="Rank of the mfw vectors.")
    parser.add_argument('-mfw_multiplicative', action='store_true',
                        help='Use another multiplicative weights W = W^ * M + A')
    parser.add_argument('-mfw_no_bias', action='store_true',
                        help='Use another multiplicative weights W = W^ * M + A')
    parser.add_argument('-mfw_activation', type=str, default="none",
                        help="Using activation function for the MFW so W = f(W^ * M + A'). "
                             "Currently accepting gelu/silu")
    parser.add_argument('-mfw_atb_rank_scale', type=float, default=1.0,
                        help="Rank of the mfw atb vectors.")
    parser.add_argument('-freezing_steps', type=int, default=0,
                        help="Number of steps for freezing the mfw vectors.")
    parser.add_argument('-multilingual_partitioned_weights', action='store_true',
                        help='Partition the weights in the multilingual models')
    parser.add_argument('-mpw_factor_size', type=int, default=8,
                        help="Size of the language factor vector")
    parser.add_argument('-multilingual_layer_norm', action='store_true',
                        help='New norm for each language')
    parser.add_argument('-multilingual_linear_projection', action='store_true',
                        help='New linear projection for each language')
    parser.add_argument('-sub_encoder', type=int, default=4,
                        help='New linear projection for each language')
    parser.add_argument('-weight_drop', type=float, default=0.0,
                        help='dropout rate for the main weights of the MFW model')
    parser.add_argument('-multilingual_adapter', action='store_true',
                        help='New norm for each language')
    parser.add_argument('-adapter_bottleneck_size', type=int, default=1024,
                        help='New norm for each language')
    parser.add_argument('-ffn_activation', default='silu', type=str,
                        help='The activation layer in each transformer block '
                             'relu|gelu|silu|swish')
    parser.add_argument('-ffn_glu', action='store_true',
                        help='Gated Linear Unit application at the FFN')
    # for Reversible Transformer
    parser.add_argument('-src_reversible', action='store_true',
                        help='Using reversible models for encoder')
    parser.add_argument('-tgt_reversible', action='store_true',
                        help='Using reversible models for decoder')
    parser.add_argument('-debugging', action='store_true',
                        help='Using reversible models for decoder')
    # FIX: help strings were empty for the two distributed-training options.
    parser.add_argument('-master_addr', default='localhost', type=str,
                        help="""Master address for distributed (multi-GPU) training""")
    parser.add_argument('-master_port', default='8888', type=str,
                        help="""Master port for distributed (multi-GPU) training""")
    # For pretraining
    # pretrained encoder
    parser.add_argument('-enc_pretrained_model', default="", type=str,
                        help=""" the name of trained model""")
    parser.add_argument('-enc_stacked_pretrained_model', default="", type=str,
                        help=""" the name of trained model""")
    parser.add_argument('-enc_pretrain_hidden_size', type=int, default=768,
                        help='Size of bert hidden')
    parser.add_argument('-s4_config_file', default="", type=str,
                        help=""" the name of src pretrained model configuration.""")
    parser.add_argument('-enc_config_file', default="", type=str,
                        help=""" the name of src pretrained model configuration.""")
    parser.add_argument('-enc_state_dict', default="", type=str,
                        help=""" the state_dict of the pretrained model for src language """)
    parser.add_argument('-enc_pretrain_word_dropout', type=float, default=0.0,
                        help="""word dropout appled on bert""")
    parser.add_argument('-enc_pretrain_emb_dropout', type=float, default=0.0,
                        help="""dropout applied on bert embedding""")
    parser.add_argument('-enc_pretrain_attn_dropout', type=float, default=0.1,
                        help="""dropout on bert attention, corresponds to attention_probs_dropout_prob""")
    parser.add_argument('-enc_pretrain_hidden_dropout', type=float, default=0.0,
                        help="""dropout applied on bert hidden, corresponds to hidden_dropout_prob""")
    parser.add_argument('-checkpointing_ffn', action='store_true',
                        help='use gradient checkpointing on FFN layers')
    parser.add_argument('-checkpointing_cross_attn', action='store_true',
                        help='use gradient checkpointing on Cross Attn layers')
    parser.add_argument('-checkpointing_self_attn', action='store_true',
                        help='use gradient checkpointing on (wav2vec) self attn layers')
    # pretrained decoder
    parser.add_argument('-dec_pretrained_model', default="", type=str,
                        help=""" the name of trained model""")
    parser.add_argument('-dec_pretrain_hidden_size', type=int, default=768,
                        help='Size of bert hidden')
    parser.add_argument('-dec_config_file', default="", type=str,
                        help=""" the name of tgt pretrained model configuration.""")
    parser.add_argument('-dec_state_dict', default="", type=str,
                        help=""" the state_dict of the pretrained model""")
    parser.add_argument('-dec_pretrain_word_dropout', type=float, default=0.0,
                        help="""word dropout appled on bert""")
    parser.add_argument('-dec_pretrain_emb_dropout', type=float, default=0.1,
                        help="""dropout applied on bert embedding""")
    parser.add_argument('-dec_pretrain_attn_dropout', type=float, default=0.1,
                        help="""dropout on bert attention, corresponds to attention_probs_dropout_prob""")
    parser.add_argument('-dec_pretrain_hidden_dropout', type=float, default=0.1,
                        help="""dropout applied on bert hidden, corresponds to hidden_dropout_prob""")
    parser.add_argument('-dec_gradient_checkpointing', action='store_true',
                        help='use gradient checkpointing on decoder')
    parser.add_argument('-enc_gradient_checkpointing', action='store_true',
                        help='use gradient checkpointing on encoder')
    parser.add_argument('-find_unused_parameters', action='store_true',
                        help='find unused parameters for torch DistributedDataParallel')
    # special tokens
    parser.add_argument('-src_pad_word', type=str, default="<blank>",
                        help='SRC PAD Token. Default is <blank>.')
    parser.add_argument('-src_unk_word', type=str, default="<unk>",
                        help='SRC Unk Token. Default is <unk>.')
    parser.add_argument('-src_bos_word', type=str, default="<s>",
                        help='SRC BOS Token Default is <s>.')
    parser.add_argument('-src_eos_word', type=str, default="</s>",
                        help='SRC BOS Token. Default is </s>.')
    parser.add_argument('-tgt_pad_word', type=str, default="<blank>",
                        help='SRC PAD Token. Default is <blank>.')
    parser.add_argument('-tgt_unk_word', type=str, default="<unk>",
                        help='SRC Unk Token. Default is <unk>.')
    parser.add_argument('-tgt_bos_word', type=str, default="<s>",
                        help='SRC BOS Token Default is <s>.')
    parser.add_argument('-tgt_eos_word', type=str, default="</s>",
                        help='SRC BOS Token. Default is </s>.')
    parser.add_argument('-rezero', action='store_true',
                        help='use ReZero residual mechanism')
    parser.add_argument('-post_norm', action='store_true',
                        help='use post-layer norm')
    parser.add_argument('-absolute_position_encoding', action='store_true',
                        help='use absolute position encoding for the Translator')
    parser.add_argument('-decoder_late_emb_scale', action='store_true',
                        help='only scale the embedding very late at the decoder. This option is here'
                             'to fix the problem of the multilingual model w/ relative position.')
    parser.add_argument('-encoder_early_emb_scale', action='store_true',
                        help='only scale the embedding early in the encoder. This option is here'
                             'to fix the problem of the multilingual model w/ relative position.')
    parser.add_argument('-sa_f', type=int, default=8,
                        help="""word dropout appled on bert""")
    parser.add_argument('-sa_t', type=int, default=64,
                        help="""word dropout appled on bert""")
    parser.add_argument('-no_input_scale', action='store_true',
                        help='Do not scale the embeddings of the speech (the features) before transformer.')
    parser.add_argument('-mpc', action='store_true',
                        help='Using masked predictive coding for speech models')
    parser.add_argument('-load_pretrained_classifier', default='', type=str,
                        help="""If training from a checkpoint then this is the
                        path to the pretrained model.""")
    parser.add_argument('-wav2vec2_pretrained_model', default='wav2vec2-large-lv60', type=str,
                        help="""Wav2vec2 model from HuggingFace. """)
    parser.add_argument('-wav2vec2_quantize', action='store_true',
                        help='Keep the quantization part of Wav2vec 2.0')
    parser.add_argument('-wav2vec2_dual_output', action='store_true',
                        help='Use both wav2vec quantized and continuous outputs for decoder')
    parser.add_argument('-wav2vec2_relative_attention', action='store_true',
                        help='Add relative attention to Wav2vec 2.0 ')
    parser.add_argument('-freeze_encoder', action='store_true',
                        help='Freeze the whole wav2vec weights.')
    parser.add_argument('-freeze_encoder_self_attn', action='store_true',
                        help='Freeze the wav2vec self-attention weight.')
    parser.add_argument('-freeze_encoder_ffn', action='store_true',
                        help='Freeze the wav2vec self-attention weight.')
    parser.add_argument('-freeze_decoder', action='store_true',
                        help='Freeze the whole mbart decoder weights.')
    parser.add_argument('-freeze_decoder_self_attn', action='store_true',
                        help='Freeze the wav2vec self-attention weight.')
    parser.add_argument('-freeze_decoder_ffn', action='store_true',
                        help='Freeze the wav2vec self-attention weight.')
    parser.add_argument('-freeze_cross_attention', action='store_true',
                        help='Freeze the cross attention.')
    parser.add_argument('-freeze_embedding', action='store_true',
                        help='Freeze the embedding.')
    parser.add_argument('-virtual_adversarial_training_mode', type=int, default=0,
                        help='Virtual Adversarial Training. 0=disabled. 1=kl_loss. 2=ce. 3=kl_loss + ce.')
    parser.add_argument('-wav2vec_adapter', type=int, default=0,
                        help='Adapter for wav2vec model')
    parser.add_argument('-decoder_adapter', type=int, default=0,
                        help='Adapter for wav2vec model')
    parser.add_argument('-mutual_modality_training', type=float, default=0,
                        help='Coefficient for the Mutual Modality Training term')
    parser.add_argument('-contrastive_loss_coeff', type=float, default=0.0,
                        help='Coefficient for the Mutual Modality Training term')
    parser.add_argument('-predict_language', action='store_true',
                        help='Freeze the embedding.')
    return parser
def backward_compatible(opt):
    """Patch an options namespace loaded from an older checkpoint.

    New options are added to the code base over time; checkpoints saved
    before an option existed have no attribute for it.  This fills in the
    historical default for every missing attribute, leaving attributes
    that are already present untouched, and returns the same ``opt``.
    """
    # Options whose legacy default is a plain constant, keyed by name.
    legacy_defaults = {
        'predict_language': False,
        'model': 'recurrent',
        'layer_norm': 'slow',
        'attention_out': 'default',
        'residual_type': 'regular',
        'input_size': 40,
        'init_embedding': 'normal',
        'ctc_loss': 0,
        'encoder_layers': -1,
        'fusion': False,
        'cnn_downsampling': False,
        'switchout': 0.0,
        'variational_dropout': False,
        'copy_generator': False,
        'upsampling': False,
        'double_position': False,
        'max_pos_length': 0,
        'learnable_position_encoding': False,
        'use_language_embedding': False,
        'language_embedding_type': "sum",
        'asynchronous': False,
        'bidirectional': False,
        'fix_norm_output_embedding': False,
        'mirror_loss': False,
        'max_memory_size': 0,
        'stream_context': 'local',
        'extra_context_size': 0,
        'experimental': False,
        'reconstruct': False,
        'unidirectional': False,
        'lsh_src_attention': False,
        'src_reversible': False,
        'tgt_reversible': False,
        'fast_xentropy': False,
        'fast_xattention': False,
        'fast_self_attention': False,
        'fast_feed_forward': False,
        # 'fused_ffn' was checked twice in the original chain with the
        # same default; a single entry is equivalent.
        'fused_ffn': False,
        'concat': 4,
        'input_feature_size': 40,
        'bayes_by_backprop': False,
        'add_position_encoding': False,
        'batch_ensemble': 0,
        'multilingual_factorized_weights': False,
        'multilingual_factorized_weights_decoder': False,
        'mfw_rank': 1,
        'mfw_no_bias': False,
        'lfv_multilingual': False,
        'nce_noise': 0,
        'mfw_multiplicative': False,
        'fast_factorize': False,
        'macaron': False,
        'depthwise_conv': False,
        'no_batch_norm': False,
        'no_ffn': False,
        'multilingual_partitioned_weights': False,
        'mpw_factor_size': 1,
        'multilingual_layer_norm': False,
        'multilingual_linear_projection': False,
        'weight_drop': 0.0,
        'multilingual_adapter': False,
        'adapter_bottleneck_size': 0.0,
        'mfw_activation': "none",
        'src_pad_word': '<blank>',
        'src_unk_word': '<unk>',
        'src_bos_word': '<s>',
        'src_eos_word': '</s>',
        'tgt_pad_word': '<blank>',
        'tgt_unk_word': '<unk>',
        'tgt_bos_word': '<s>',
        'tgt_eos_word': '</s>',
        'enc_pretrained_model': '',
        'dec_pretrained_model': '',
        'rezero': False,
        'sa_f': 8,
        'sa_t': 64,
        'ffn_activation': 'relu',
        'ffn_glu': False,
        'absolute_position_encoding': False,
        'rotary_position_encoding': False,
        'decoder_late_emb_scale': False,
        'encoder_early_emb_scale': False,
        'no_input_scale': False,
        'stochastic_sublayer': False,
        'post_norm': False,
        'favor_attention': False,
        'wav2vec_spec_augment': False,
        'wav2vec_adapter': 0,
        'wav2vec2_quantize': False,
        'wav2vec2_dual_output': False,
        'decoder_adapter': 0,
        'freeze_encoder': False,
        'freeze_embedding': False,
        'freeze_decoder': False,
        'freeze_cross_attention': False,
        'enc_stacked_pretrained_model': "",
        'mfw_atb_rank_scale': 0.125,
        'wav2vec2_relative_attention': False,
    }
    for attr, default in legacy_defaults.items():
        if not hasattr(opt, attr):
            setattr(opt, attr, default)

    # Derived defaults: these historically fell back to the generic
    # dropout value rather than a constant.
    if not hasattr(opt, 'ffn_dropout'):
        opt.ffn_dropout = opt.dropout
    if not hasattr(opt, 'residual_dropout'):
        opt.residual_dropout = opt.dropout

    return opt
NMTGMinor | NMTGMinor-master/extend_weight.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import os, sys
from onmt.model_factory import build_model, build_language_model, build_classifier, optimize_model
from copy import deepcopy
from onmt.utils import checkpoint_paths, normalize_gradients
import glob
import torch.nn as nn
# Command-line interface for the weight-extension script.
# NOTE(review): description says 'translate.py' and -output is declared but
# never used by main() (the result is written next to -model) — looks
# copy-pasted from another script; confirm before relying on -output.
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
                    help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
                    help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
                    help="Device to run on")
# NOTE(review): help text below is copy-pasted from -gpu; the flag actually
# sets the number of languages the factorized weights are extended to.
parser.add_argument('-n_languages', type=int, default=10,
                    help="Device to run on")
def custom_build_model(opt, dict, lm=False, type='seq2seq'):
    """Build a model of the requested kind and optimize it before returning.

    ``type`` 'seq2seq' builds either a translation model or, when ``lm``
    is true, a language model; ``type`` 'classifier' builds a classifier.
    (``dict``/``type`` shadow builtins but are part of the keyword
    interface, so they keep their names.)
    """
    if type == 'seq2seq':
        model = build_language_model(opt, dict) if lm else build_model(opt, dict)
    elif type == 'classifier':
        model = build_classifier(opt, dict)
    optimize_model(model)
    return model
def main():
    """Load a checkpoint and grow its multilingual factorized-weight
    tensors so they can hold up to ``opt.n_languages`` languages, then
    save the result as ``<model>.extend<N>``."""
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # checkpoint for main model; map_location keeps everything on CPU
    checkpoint = torch.load(opt.model, map_location=lambda storage, loc: storage)
    if 'optim' in checkpoint:
        del checkpoint['optim']
    model_opt = checkpoint['opt']
    dicts = checkpoint['dicts']
    # extending the weights
    def is_factorize_params(p_name):
        # True when p_name is a multilingual factorized-weight tensor:
        # r_*/s_* additive rank factors and rm_*/sm_* multiplicative
        # factors of the feed-forward and attention projections.
        # NOTE(review): ".r_o"/".s_o" are tested both here and in the
        # attention group below — redundant but harmless.
        # feed forward neural net
        if p_name.endswith(".r_i") or p_name.endswith(".s_i") \
                or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
                or p_name.endswith(".r_p") or p_name.endswith(".s_p"):
            return True
        # if p_name.endswith(".sub_r_i") or p_name.endswith(".sub_s_i") \
        #         or p_name.endswith(".sub_r_o") or p_name.endswith(".sub_s_o") \
        #         or p_name.endswith(".sub_r_p") or p_name.endswith(".sub_s_p"):
        #     return True
        if p_name.endswith(".rm_i") or p_name.endswith(".sm_i") or \
                p_name.endswith(".rm_o") or p_name.endswith(".sm_o") or \
                p_name.endswith(".rm_p") or p_name.endswith(".sm_p"):
            return True
        if p_name.endswith(".r_q") or p_name.endswith(".s_q") \
                or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
                or p_name.endswith(".r_kv") or p_name.endswith(".s_kv"):
            return True
        if p_name.endswith(".rm_q") or p_name.endswith(".sm_q") \
                or p_name.endswith(".rm_o") or p_name.endswith(".sm_o") \
                or p_name.endswith(".rm_kv") or p_name.endswith(".sm_kv"):
            return True
        return False
    # Saving
    model_state_dict = checkpoint['model']
    for name in model_state_dict:
        if is_factorize_params(name):
            param = model_state_dict[name]
            # dim 0 is assumed to index languages — TODO confirm
            sizes = list(param.size())
            print(name)
            # initialize it
            if name.endswith("r_i") or name.endswith("r_o") or name.endswith("r_kv") or name.endswith("r_q") or name.endswith("r_p") or \
                    name.endswith("s_i") or name.endswith("s_o") or name.endswith("s_kv") or name.endswith("s_q") or name.endswith(
                    "s_p"):
                # Additive factors: new language rows get N(0, 0.02) init,
                # existing rows are copied over unchanged.
                std = 0.02
                prev_n_languages = sizes[0]
                sizes[0] = max(opt.n_languages, sizes[0])
                # new parameter
                p = param.new_zeros(sizes)
                nn.init.normal_(p, 0.0, std)
                p[0:prev_n_languages].copy_(param)
            elif name.endswith("rm_i") or name.endswith("rm_o") or name.endswith("rm_kv") or name.endswith("rm_q") or name.endswith("rm_p") or \
                    name.endswith("sm_i") or name.endswith("sm_o") or name.endswith("sm_kv") or name.endswith("sm_q") or name.endswith(
                    "sm_p"):
                # Multiplicative factors: constant init (sqrt(1/rank) in
                # the "fast" per-language case, 1 in the shared case).
                rank = sizes[1]
                fast = (sizes[0] > 1)
                prev_n_languages = sizes[0]
                if fast:
                    # new parameter
                    sizes[0] = max(opt.n_languages, sizes[0])
                    p = param.new_zeros(sizes)
                else:
                    sizes[0] = 1
                    p = param.new_zeros(sizes)
                # NOTE(review): this assignment is dead — `sizes` is not
                # used again after `p` has been allocated above.
                sizes[0] = 1
                constant = math.sqrt(1.0 / rank) if fast else 1
                nn.init.constant_(p, constant)
                if fast:
                    p[0:prev_n_languages].copy_(param)
                else:
                    p.copy_(param)
            model_state_dict[name] = p
    save_checkpoint = {
        'model': model_state_dict,
        'dicts': dicts,
        'opt': model_opt,
        'epoch': -1,
        'iteration': -1,
        'batchOrder': None,
        'optim': None
    }
    output = opt.model + ".extend" + str(opt.n_languages)
    # NOTE(review): message says "averaged model" but this script extends,
    # not averages — likely copy-pasted wording.
    print("Saving averaged model to %s" % output)
    torch.save(save_checkpoint, output)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 5,225 | 31.259259 | 144 | py |
NMTGMinor | NMTGMinor-master/preprocess_triangle.py | #!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import numpy as np
import warnings
import os
from os.path import dirname, abspath
import gc
warnings.filterwarnings("ignore", category=UserWarning)
# Command-line options.  `opt` is a module-level global that the helper
# functions below (make_vocab, init_vocab, save_dataset, ...) read directly.
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-multi_dataset', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-multi_mirror', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-resume', action='store_true',
                    help="If the dataset is created, ignored and create the next one")
parser.add_argument('-config', help="Read options from this file")
# Input format / feature options.
parser.add_argument('-src_type', default="text",
                    help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
                    help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
                    help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
                    help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
                    help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
                    help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
                    help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
                    help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
                    help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-external_tokenizer', default="",
                    help="External tokenizer from Huggingface. Currently supports barts.")
# Dataset paths (multiple corpora can be separated with '|').
parser.add_argument('-train_src', required=True,
                    help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
                    help="Path to the training target data")
parser.add_argument('-aux_train_tgt', default="",
                    help="Path to the training source data")
parser.add_argument('-valid_src', required=True,
                    help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
                    help="Path to the validation target data")
parser.add_argument('-aux_valid_tgt', default="",
                    help="Path to the training source data")
# Language / attribute tags, '|'-separated, aligned with the dataset lists.
parser.add_argument('-train_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-train_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-train_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-valid_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-valid_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-save_data', required=True,
                    help="Output file for the prepared data")
# Vocabulary options.
parser.add_argument('-src_vocab_size', type=int, default=9999999,
                    help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
                    help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
                    help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
                    help="Path to an existing target vocabulary")
# NOTE(review): help below looks copy-pasted; -load_dict actually points at
# a full dicts .pt file from another run (see main()).
parser.add_argument('-load_dict',
                    help="Path to an existing target vocabulary")
parser.add_argument('-src_seq_length', type=int, default=10000,
                    help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
                    help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
                    help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
                    help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS Token Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC BOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS Token Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT BOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD Token. Default is <blank>.')
# Task / misc flags.
parser.add_argument('-shuffle', type=int, default=1,
                    help="Shuffle data")
parser.add_argument('-asr', action='store_true',
                    help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
                    help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
                    help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
                    help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
                    help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
# NOTE(review): help strings for -load_bpe_voc and -sort_by_target are
# copy-pasted from -lower and do not describe the flags.
parser.add_argument('-load_bpe_voc', action='store_true', help='lowercase data')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='lowercase data')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
                    help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
                    help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
                    help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
                    help="Print out information during preprocessing")
# Parse once at import time; the rest of the module reads this global.
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
    """Build an onmt.Dict over *filenames*, pruned to at most *size* entries.

    *name* selects which side's special tokens seed the dictionary
    ("source" or "target"); any other value aborts the script.
    """
    if name == "source":
        specials = [opt.src_pad_token, opt.src_unk_token,
                    opt.src_bos_token, opt.src_eos_token]
    elif name == "target":
        specials = [opt.tgt_pad_token, opt.tgt_unk_token,
                    opt.tgt_bos_token, opt.tgt_eos_token]
    else:
        print("Warning: check the name")
        exit(-1)

    vocab = onmt.Dict(specials, lower=opt.lower)
    for filename in filenames:
        print("Generating vocabulary from file %s ... " % filename)
        onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)

    original_size = vocab.size()
    vocab = vocab.prune(size)
    print('Created dictionary of size %d (pruned from %d)' %
          (vocab.size(), original_size))
    return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
    """Return a vocabulary: loaded from *vocab_file* when given, otherwise
    freshly built from *data_files* via make_vocab."""
    vocab = None
    if vocab_file is not None:
        # An existing dictionary file takes precedence over building one.
        print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
        if opt.load_bpe_voc:
            # Seed the dictionary with the special tokens for this side.
            if name == "target":
                vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
                                   opt.tgt_bos_token, opt.tgt_eos_token],
                                  lower=opt.lower)
            elif name == "source":
                vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
                                   opt.src_bos_token, opt.src_eos_token],
                                  lower=opt.lower)
            else:
                print("Warning: name should be source or target")
                exit(-1)
        else:
            vocab = onmt.Dict()
        vocab.loadFile(vocab_file)
        print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')

    if vocab is None:
        # No vocabulary file given: generate one from the training data.
        print('Building ' + name + ' vocabulary...')
        vocab = make_vocab(name, data_files, vocab_size, tokenizer,
                           num_workers=num_workers)
    print()
    return vocab
def save_vocabulary(name, vocab, file):
    """Announce and write *vocab* to *file* via its writeFile method."""
    message = 'Saving ' + name + ' vocabulary to \'' + file + '\'...'
    print(message)
    vocab.writeFile(file)
def save_dataset(path, data, format, dicts, src_type):
    """Persist one prepared dataset under *path*.

    Each dataset is comprised of the following components:
      src: tensors for the source vectors, or the scp_path (in ASR case)
      tgt: tensors for the target vectors
      src_lang / tgt_lang: tensors for the language ids (simplified)

    format 'raw'/'bin': everything pickled into one data.pt file.
    format 'scp'/'scpmem'/'wav' (ASR): targets stored as mmap-indexed
        files, source stored only as scp paths.
    format 'mmap'/'mmem': both sides stored as mmap-indexed files.

    ``dicts`` is currently unused (dictionaries are saved by the caller).
    """
    if format in ['raw', 'bin']:
        print('Saving data to ' + os.path.join(path, 'data.pt') + '...')
        # Bug fix: honor the src_type parameter instead of the global
        # opt.src_type (they were inconsistent with the rest of the
        # signature; callers pass opt.src_type anyway).
        save_data = {'type': src_type,
                     'data': data}
        torch.save(save_data, os.path.join(path, 'data.pt'))
        print("Done")

    # for ASR only
    elif format in ['scp', 'scpmem', 'wav']:
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        assert opt.asr, "ASR data format is required for this memory indexed format"

        # binarize the training set first
        for set_ in ['tgt', 'aux_tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if set_ not in data or data[set_] is None:
                continue
            dtype = np.int64 if opt.data_type == 'int64' else np.int32

            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
            # add item from training data to the indexed data
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
            del indexed_data

        for set_ in ['src_sizes', 'tgt_sizes']:
            if data[set_] is not None:
                np_array = np.asarray(data[set_])
                # Bug fix: format the filename before joining; the old code
                # applied % to the whole joined path, which breaks when
                # `path` itself contains a '%' character.
                np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
            else:
                print("Training %s not found " % set_)

        # Finally save the audio path
        torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
        if 'prev_src' in data and data['prev_src'] is not None:
            torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
        print("Done")

    # Bug fix: test the `format` parameter like the other branches did,
    # not the global opt.format.
    elif format in ['mmap', 'mmem']:
        print('Saving data to memory indexed data files')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        if opt.asr:
            print("ASR data format isn't compatible with memory indexed format")
            raise AssertionError

        # binarize the training set first
        for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if set_ not in data or data[set_] is None:
                continue
            dtype = np.int64 if opt.data_type == 'int64' else np.int32

            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
            # add item from training data to the indexed data
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
            del indexed_data

        for set_ in ['src_sizes', 'tgt_sizes']:
            if data[set_] is not None:
                np_array = np.asarray(data[set_])
                np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
            else:
                print("Set %s not found " % set_)
def make_lm_data(tgt_file, tgt_dicts, max_tgt_length=1000, input_type='word', data_type='int32'):
    """Read *tgt_file* line by line, convert each non-empty line to index
    ids with *tgt_dicts*, and concatenate everything (prefixed with one
    EOS token) into a single 1-D tensor for language-model training.

    ``max_tgt_length`` is currently unused but kept for interface
    compatibility.  Empty lines are skipped with a warning.
    """
    count = 0
    print('Processing %s ...' % (tgt_file))
    # NOTE(review): fill_() expects a number, but opt.tgt_eos_token is the
    # EOS *string* (default "</s>") — this looks like it should be the EOS
    # index in tgt_dicts; confirm before relying on the -lm path.
    eos = torch.LongTensor(1).fill_(opt.tgt_eos_token)
    tensors = [eos]

    # Use a context manager so the file is closed even on error
    # (the original left the handle open if an exception was raised).
    with open(tgt_file) as tgtf:
        for tline in tgtf:
            tline = tline.strip()
            # source and/or target are empty
            if tline == "":
                print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
                continue

            if input_type == 'word':
                tgt_words = tline.split()
            elif input_type == 'char':
                tgt_words = split_line_by_char(tline)

            tensor = tgt_dicts.convertToIdx(tgt_words,
                                            opt.tgt_unk_token,
                                            None,
                                            opt.tgt_eos_token,
                                            type=data_type)
            tensors.append(tensor)
            count = count + 1
            if count % opt.report_every == 0:
                print('... %d sentences prepared' % count)

    # concatenate all tensors into one
    tensor = torch.cat(tensors, dim=-1)
    return tensor
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
                          add_bos=True, data_type='int64', num_workers=1, verbose=False,
                          external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=None,
                          early_save=False, savedir="", mirror=False, mirror_savedir=""):
    """Binarize a parallel corpus.

    Returns (src, tgt, src_sizes, tgt_sizes).  When ``early_save`` is set
    the binarized data is written directly to mmap-indexed files under
    ``savedir`` (optionally symlink-mirrored into ``mirror_savedir`` with
    src/tgt swapped) and all four return values are None.

    Fixes vs. the previous version: the final progress print crashed with
    ``len(None)`` when ``early_save`` was used, and ``lang_list`` had a
    mutable default.
    """
    # Avoid the shared mutable-default-argument pitfall.
    lang_list = [] if lang_list is None else lang_list
    if type(lang_list) is dict:
        lang_list = sorted(list(lang_list.keys()))

    print("[INFO] Binarizing file %s ..." % src_file)
    binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
                                            bos_word=None, eos_word=None,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=src_lang, lang_list=lang_list, target=False
                                            )

    if early_save:
        os.makedirs(savedir, exist_ok=True)
        if mirror:
            os.makedirs(mirror_savedir, exist_ok=True)
        src_len = len(binarized_src['data'])
        print("Saving source data to %s .... with %d entries" % (savedir, src_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32

        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "src"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_src['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "src"))
        # Free the (potentially huge) source tensors before binarizing tgt.
        del binarized_src['data']
        gc.collect()
        np_array = np.asarray(binarized_src['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "src_sizes"), np_array)
        del binarized_src
        del indexed_data
        del np_array
        gc.collect()

        if mirror:
            # Mirror dataset reuses the same files with src/tgt swapped.
            print("Saving mirrrored target data to %s .... with %d entries" % (mirror_savedir, src_len))
            source = os.path.join(savedir, "data.%s.bin" % "src")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "src")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "src_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "tgt_sizes")
            os.symlink(os.path.abspath(source), target)

    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None

    print("[INFO] Binarizing file %s ..." % tgt_file)
    binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                            bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=tgt_lang, lang_list=lang_list, target=True
                                            )

    if early_save:
        tgt_len = len(binarized_tgt['data'])
        assert tgt_len == src_len, "Number of samples doesn't match between source and target!!!"
        print("Saving target data to %s .... with %d samples" % (savedir, tgt_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32

        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "tgt"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_tgt['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "tgt"))
        del binarized_tgt['data']
        del indexed_data
        gc.collect()
        np_array = np.asarray(binarized_tgt['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "tgt_sizes"), np_array)
        del binarized_tgt
        del np_array
        gc.collect()

        if mirror:
            print("Saving mirrrored source data to %s .... with %d entries" % (mirror_savedir, src_len))
            source = os.path.join(savedir, "data.%s.bin" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "tgt_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "src_sizes")
            os.symlink(os.path.abspath(source), target)

        src, tgt, src_sizes, tgt_sizes = None, None, None, None
    else:
        src = binarized_src['data']
        src_sizes = binarized_src['sizes']
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']

    # currently we don't ignore anything :D
    ignored = 0
    # Bug fix: when early_save is set, src is None here — report the count
    # captured before the data was freed instead of calling len(None).
    n_prepared = src_len if early_save else len(src)
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (n_prepared, ignored, max_src_length, max_tgt_length))

    return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
                  max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
                  input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
                  asr_format="scp", output_format="raw",
                  external_tokenizer=None, src_lang=None, tgt_lang=None, aux_tgt_file=None, lang_list=None):
    """Binarize an ASR corpus: speech features from *src_file*, optional
    text targets from *tgt_file* and *aux_tgt_file*.

    Returns (src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes);
    the target entries are None when the corresponding file is None.

    Fixes vs. the previous version: the size-mismatch check evaluated
    ``len(tgt_sizes)`` before testing ``tgt_file is not None`` (crashing
    with len(None)), its warning passed only one argument to a two-slot
    format string, and ``lang_list`` had a mutable default.
    """
    # Avoid the shared mutable-default-argument pitfall.
    lang_list = [] if lang_list is None else lang_list
    ignored = 0

    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None

    if tgt_file is not None:
        print("[INFO] Binarizing file %s ..." % tgt_file)
        binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                                bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                                data_type=data_type,
                                                num_workers=num_workers, verbose=verbose,
                                                external_tokenizer=external_tokenizer,
                                                lang=tgt_lang, lang_list=lang_list, target=True)
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
    else:
        tgt = None
        tgt_sizes = None

    if aux_tgt_file is not None:
        print("[INFO] Binarizing auxiliary target file %s ..." % aux_tgt_file)
        # NOTE(review): unlike the main target, this call does not pass
        # target=True — confirm whether that is intentional.
        aux_binarized_tgt = Binarizer.binarize_file(aux_tgt_file, tgt_dicts, tokenizer,
                                                    bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                                    data_type=data_type,
                                                    num_workers=num_workers, verbose=verbose,
                                                    external_tokenizer=external_tokenizer,
                                                    lang=tgt_lang, lang_list=lang_list)
        aux_tgt = aux_binarized_tgt['data']
        aux_tgt_sizes = aux_binarized_tgt['sizes']
    else:
        aux_tgt = None
        aux_tgt_sizes = None

    print('[INFO] Processing %s ...' % src_file)
    # speech binarizer has to be 1 thread at the moment
    binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
                                                  output_format=output_format, concat=concat,
                                                  stride=stride, fp16=fp16, prev_context=prev_context,
                                                  num_workers=num_workers, verbose=verbose)
    src = binarized_src['data']
    src_sizes = binarized_src['sizes']

    # Bug fix: guard on tgt_file first so tgt_sizes is never None here,
    # and pass both lengths as a tuple to the format string.
    if tgt_file is not None and len(src_sizes) != len(tgt_sizes):
        print("Warning: data size mismatched. Src: %d . Tgt: %d" % (len(src_sizes), len(tgt_sizes)))

    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))

    return src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes
def main():
    """End-to-end preprocessing driver.

    Builds (or extends) the language/attribute/word dictionaries, binarizes
    training and validation data for one of three modes (language model, ASR,
    or text translation) and saves everything to disk in the format selected
    by ``opt.format``.  Reads the module-level ``opt`` namespace produced by
    the argument parser; returns None.
    """
    dicts = {}
    tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)

    # We can load the dictionary from another project to ensure consistency
    if opt.load_dict is not None and len(opt.load_dict) > 0:
        print("[INFO] Loading dictionary from ... %s" % opt.load_dict)
        dicts = torch.load(opt.load_dict)

    # construct set of languages from the training languages
    src_langs = opt.train_src_lang.split("|")
    tgt_langs = opt.train_tgt_lang.split("|")
    langs = (src_langs + tgt_langs)
    langs = sorted(list(set(langs)))

    if len(opt.train_src_atbs) > 0:
        src_atbs = opt.train_src_atbs.split("|")
        tgt_atbs = opt.train_tgt_atbs.split("|")
        atbs = (src_atbs + tgt_atbs)
        atbs = sorted(list(set(atbs)))
    else:
        atbs = []

    if not opt.load_dict:
        # Fresh language/attribute dictionaries; ids assigned in sorted order.
        dicts['langs'] = dict()
        for lang in langs:
            idx = len(dicts['langs'])
            dicts['langs'][lang] = idx
        dicts['atbs'] = dict()
        for atb in atbs:
            idx = len(dicts['atbs'])
            dicts['atbs'][atb] = idx
    else:
        # Extend dictionaries loaded from disk without renumbering old entries.
        if 'langs' not in dicts:
            dicts['langs'] = dict()
        else:
            print(dicts['langs'])
            print("Adding languages to existing dictionary ...")
        for lang in langs:
            idx = len(dicts['langs'])
            if lang not in dicts['langs']:
                dicts['langs'][lang] = idx
        if 'atbs' not in dicts:
            dicts['atbs'] = dict()
        else:
            print("Adding attributes to existing dictionary ...")
        for atb in atbs:
            idx = len(dicts['atbs'])
            if atb not in dicts['atbs']:
                dicts['atbs'][atb] = idx

    print("Languages: ", dicts['langs'])
    print("Attributes: ", dicts['atbs'])

    start = time.time()
    src_train_files = opt.train_src.split("|")
    tgt_train_files = opt.train_tgt.split("|")
    # for ASR and LM we only need to build vocab for the 'target' language
    if opt.asr or opt.lm:
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elif opt.join_vocab:
        dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = dicts['src']
    else:
        dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
                                  opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)

    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Vocabulary generated after %s" % elapse)

    if opt.lm:
        # Language-model mode: target side only, no source data or sizes.
        print('Preparing training language model ...')
        train = dict()
        train['tgt'] = make_lm_data(opt.train_tgt,
                                    dicts['tgt'])
        train['src'] = None

        valid = dict()
        valid['tgt'] = make_lm_data(opt.valid_tgt,
                                    dicts['tgt'])
        valid['src'] = None

        train['src_sizes'] = None
        train['tgt_sizes'] = None
        valid['src_sizes'] = None
        valid['tgt_sizes'] = None
    elif opt.asr:
        # ASR mode: audio source + text target.
        print('Preparing training acoustic model ...')
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")

        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")
        src_atbs = opt.train_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
        tgt_atbs = opt.train_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)

        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(src_atbs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        assert len(tgt_input_files) == len(tgt_atbs)

        past_src_files = opt.past_train_src.split("|")

        idx = 0
        n_input_files = len(src_input_files)

        # Training data ###################################################################
        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_atb'], train['tgt_atb'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()
        data = dict()

        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            train['past_src'] = list()
            train['past_src_sizes'] = list()

        for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = "train.%i.%s-%s" % (idx, src_lang, tgt_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if opt.multi_dataset and opt.resume:
                if os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue

            src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
                                                                     dicts['tgt'], tokenizer,
                                                                     max_src_length=opt.src_seq_length,
                                                                     max_tgt_length=opt.tgt_seq_length,
                                                                     input_type=opt.input_type,
                                                                     stride=opt.stride, concat=opt.concat,
                                                                     prev_context=opt.previous_context,
                                                                     fp16=opt.fp16,
                                                                     add_bos=not opt.no_bos,
                                                                     asr_format=opt.asr_format,
                                                                     output_format=opt.format,
                                                                     num_workers=opt.num_threads,
                                                                     external_tokenizer=opt.external_tokenizer,
                                                                     tgt_lang=tgt_lang, verbose=opt.verbose,
                                                                     lang_list=dicts['langs'])

            n_samples = len(src_data)
            src_atb_data, tgt_atb_data = None, None
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default its 0
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]

            # processing the previous segment
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
                                                                    input_type=opt.input_type,
                                                                    stride=opt.stride, concat=opt.concat,
                                                                    prev_context=opt.previous_context,
                                                                    add_bos=not opt.no_bos,
                                                                    fp16=opt.fp16,
                                                                    asr_format=opt.asr_format,
                                                                    output_format=opt.format,
                                                                    num_workers=opt.num_threads,
                                                                    external_tokenizer=opt.external_tokenizer,
                                                                    tgt_lang=tgt_lang, verbose=opt.verbose,
                                                                    lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # BUGFIX: was `prev_src_data`, a name that is never defined (NameError).
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes

            # Finalizing Training data ###################################################################
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if len(atbs) > 0:
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data

                print("Saving training set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))

                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "train.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)

                # save data immediately
                # TODO: save the prev src as well
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data
                if len(atbs) > 0:
                    train['src_atb'] += src_atb_data
                    train['tgt_atb'] += tgt_atb_data

        # Validation data ###################################################################
        print('Preparing validation ...')

        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        past_src_files = opt.past_valid_src.split("|")

        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")
        src_atbs = opt.valid_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
        tgt_atbs = opt.valid_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)

        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)

        idx = 0
        n_input_files = len(src_input_files)
        data = dict()

        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()
        valid['src_atb'], valid['tgt_atb'] = list(), list()

        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()

        for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = "valid.%i.%s-%s" % (idx, src_lang, tgt_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if opt.multi_dataset and opt.resume:
                if os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue

            # NOTE(review): unlike the training call, num_workers is not passed
            # here (binarization runs single-threaded) — confirm intent.
            src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
                                                                     dicts['tgt'], tokenizer,
                                                                     max_src_length=max(1024, opt.src_seq_length),
                                                                     max_tgt_length=max(1024, opt.tgt_seq_length),
                                                                     input_type=opt.input_type,
                                                                     stride=opt.stride, concat=opt.concat,
                                                                     prev_context=opt.previous_context,
                                                                     fp16=opt.fp16,
                                                                     add_bos=not opt.no_bos,
                                                                     asr_format=opt.asr_format,
                                                                     output_format=opt.format,
                                                                     external_tokenizer=opt.external_tokenizer,
                                                                     tgt_lang=tgt_lang, verbose=opt.verbose,
                                                                     lang_list=dicts['langs'])

            n_samples = len(src_data)
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default its 0
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]

            # validation past file
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
                                                                    input_type=opt.input_type,
                                                                    stride=opt.stride, concat=opt.concat,
                                                                    prev_context=opt.previous_context,
                                                                    fp16=opt.fp16,
                                                                    add_bos=not opt.no_bos,
                                                                    asr_format=opt.asr_format,
                                                                    output_format=opt.format,
                                                                    num_workers=opt.num_threads,
                                                                    external_tokenizer=opt.external_tokenizer,
                                                                    tgt_lang=tgt_lang, verbose=opt.verbose,
                                                                    lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes

            # Finalizing Validation data ... #########################
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if len(atbs) > 0:
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data

                print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))

                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)

                # save data immediately
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
                del data
                data = dict()
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data
                if len(atbs) > 0:
                    valid['src_atb'] += src_atb_data
                    valid['tgt_atb'] += tgt_atb_data
    else:
        # Text translation mode.
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")

        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")

        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)

        past_src_files = opt.past_train_src.split("|")

        n_input_files = len(src_input_files)
        idx = 0
        data = dict()

        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()

        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            train['past_src'] = list()
            train['past_src_sizes'] = list()

        start = time.time()
        print('Binarizing data to train translation models...')

        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            # In mirror mode each pair occupies two dataset slots (fwd + bwd).
            dataset_idx = idx if not opt.multi_mirror else 2 * idx
            data_name = "train.%i.%s-%s" % (dataset_idx, src_lang, tgt_lang)
            mirrored_data_name = "train.%i.%s-%s" % (dataset_idx + 1, tgt_lang, src_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
            if opt.multi_dataset:
                if opt.resume and os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue
                else:
                    os.makedirs(dataset_path, exist_ok=True)

            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
                                                                             dicts['src'], dicts['tgt'], tokenizer,
                                                                             max_src_length=opt.src_seq_length,
                                                                             max_tgt_length=opt.tgt_seq_length,
                                                                             add_bos=(not opt.no_bos),
                                                                             data_type=opt.data_type,
                                                                             num_workers=opt.num_threads,
                                                                             verbose=opt.verbose,
                                                                             external_tokenizer=opt.external_tokenizer,
                                                                             src_lang=src_lang,
                                                                             tgt_lang=tgt_lang,
                                                                             lang_list=dicts['langs'],
                                                                             early_save=opt.multi_dataset,
                                                                             savedir=dataset_path,
                                                                             mirror=opt.multi_mirror,
                                                                             mirror_savedir=mirrored_dataset_path)

            # TODO: check
            # if n_input_files == 1:
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                assert src_data is not None
                n_samples = len(src_data)
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]

            # processing the previous segment
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
                                                                            dicts['src'], dicts['src'], tokenizer,
                                                                            max_src_length=opt.src_seq_length,
                                                                            max_tgt_length=opt.tgt_seq_length,
                                                                            add_bos=(not opt.no_bos),
                                                                            data_type=opt.data_type,
                                                                            num_workers=opt.num_threads,
                                                                            verbose=opt.verbose,
                                                                            external_tokenizer=opt.external_tokenizer,
                                                                            src_lang=src_lang,
                                                                            tgt_lang=tgt_lang,
                                                                            lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # BUGFIX: was `prev_src_data`, a name that is never defined (NameError).
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes

            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data

                print("Saving training set %i %s-%s to disk ..." % (dataset_idx, src_lang, tgt_lang))

                # take basedir from opt.save_data
                path = dataset_path
                os.makedirs(path, exist_ok=True)

                # save data immediately
                # TODO: save the prev src as well
                save_dataset(path, data, opt.format, dicts, opt.src_type)

                if opt.multi_mirror:
                    # The mirrored dataset simply swaps source and target.
                    mdata = dict()
                    mdata['src'] = tgt_data
                    mdata['tgt'] = src_data
                    mdata['tgt_sizes'] = src_sizes
                    mdata['src_sizes'] = tgt_sizes
                    mdata['tgt_lang'] = src_lang_data
                    mdata['src_lang'] = tgt_lang_data

                    print("Saving training set %i %s-%s to disk ..." % (dataset_idx + 1, tgt_lang, src_lang))

                    # take basedir from opt.save_data
                    path = mirrored_dataset_path
                    os.makedirs(path, exist_ok=True)

                    # save data immediately
                    # TODO: save the prev src as well
                    save_dataset(path, mdata, opt.format, dicts, opt.src_type)

                idx = idx + 1
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data

        print('Preparing validation ...')

        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        past_src_files = opt.past_valid_src.split("|")

        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")

        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)

        n_input_files = len(src_input_files)
        idx = 0
        data = dict()

        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()

        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()

        # BUGFIX: enumerate the pairs so `i` indexes the matching past file;
        # previously `i` was a stale leftover from the training loop above.
        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
                                                                             dicts['src'], dicts['tgt'], tokenizer,
                                                                             max_src_length=max(1024,
                                                                                                opt.src_seq_length),
                                                                             max_tgt_length=max(1024,
                                                                                                opt.tgt_seq_length),
                                                                             add_bos=(not opt.no_bos),
                                                                             data_type=opt.data_type,
                                                                             num_workers=opt.num_threads,
                                                                             verbose=opt.verbose,
                                                                             external_tokenizer=opt.external_tokenizer,
                                                                             src_lang=src_lang,
                                                                             tgt_lang=tgt_lang,
                                                                             lang_list=dicts['langs']
                                                                             )

            n_samples = len(src_data)
            # TODO: this has to be changed
            # if n_input_files == 1:
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]

            # validation past file
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
                                                                            dicts['src'], dicts['src'], tokenizer,
                                                                            max_src_length=max(1024,
                                                                                               opt.src_seq_length),
                                                                            max_tgt_length=max(1024,
                                                                                               opt.tgt_seq_length),
                                                                            add_bos=(not opt.no_bos),
                                                                            data_type=opt.data_type,
                                                                            num_workers=opt.num_threads,
                                                                            verbose=opt.verbose,
                                                                            external_tokenizer=opt.external_tokenizer,
                                                                            src_lang=src_lang,
                                                                            tgt_lang=tgt_lang,
                                                                            lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes

            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data

                print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))

                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)

                # save data immediately
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data

    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Binarization finished after %s" % elapse)

    if opt.src_vocab is None and not opt.asr and not opt.lm:
        save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
    if opt.tgt_vocab is None:
        save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')

    if opt.multi_dataset:
        # SAVE DATA
        print("Saving dictionary to %s" % (opt.save_data + '.dict.pt'))
        torch.save(dicts, opt.save_data + '.dict.pt')

        if opt.src_vocab is None and not opt.asr and not opt.lm:
            save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
        if opt.tgt_vocab is None:
            save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
        print("Finished.")
    else:
        if opt.format in ['raw', 'bin']:
            print('Saving data to \'' + opt.save_data + '.train.pt\'...')

            save_data = {'dicts': dicts,
                         'type': opt.src_type,
                         'train': train,
                         'valid': valid}
            torch.save(save_data, opt.save_data + '.train.pt')
            print("Done")
        elif opt.format in ['scp', 'scpmem', 'wav']:
            print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
            from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder

            assert opt.asr, "ASR data format is required for this memory indexed format"

            torch.save(dicts, opt.save_data + '.dict.pt')

            # binarize the training set first
            for set_ in ['tgt', 'src_lang', 'tgt_lang']:
                if train[set_] is None:
                    continue

                if opt.data_type == 'int64':
                    dtype = np.int64
                else:
                    dtype = np.int32

                train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)

                # add item from training data to the indexed data
                for tensor in train[set_]:
                    train_data.add_item(tensor)

                train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
                del train_data

                if valid[set_] is None:
                    continue

                valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)

                # add item from validation data to the indexed data
                for tensor in valid[set_]:
                    valid_data.add_item(tensor)

                valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
                del valid_data

            for set_ in ['src_sizes', 'tgt_sizes']:
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)

                if valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)

            if 'past_src' in train and len(train['past_src']) > 0:
                set_ = 'past_src_sizes'
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)

                if valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)

            # Finally save the audio path
            save_data = {'train': train['src'],
                         'valid': valid['src']}

            # remember to take into account the past information
            if 'past_src' in train and len(train['past_src']) > 0:
                save_data['train_past'] = train['past_src']
                save_data['valid_past'] = valid['past_src']

            if opt.format in ['wav']:
                torch.save(save_data, opt.save_data + '.wav_path.pt')
            else:
                torch.save(save_data, opt.save_data + '.scp_path.pt')
            print("Done")
        elif opt.format in ['mmap', 'mmem']:
            print('Saving data to memory indexed data files')
            from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder

            # save dicts in this format
            torch.save(dicts, opt.save_data + '.dict.pt')

            # binarize the training set first
            for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
                if set_ not in train or train[set_] is None:
                    continue

                if opt.data_type == 'int64':
                    dtype = np.int64
                else:
                    dtype = np.int32

                train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)

                # add item from training data to the indexed data
                for tensor in train[set_]:
                    train_data.add_item(tensor)

                train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
                del train_data

                # BUGFIX: guard membership as well — 'past_src' may not exist in valid.
                if set_ not in valid or valid[set_] is None:
                    continue

                valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)

                # add item from validation data to the indexed data
                for tensor in valid[set_]:
                    valid_data.add_item(tensor)

                valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
                del valid_data

            for set_ in ['src_sizes', 'tgt_sizes']:
                # BUGFIX: was `if set_ not in train or train[set_] is not None:`,
                # which raises KeyError when the key is missing.
                if set_ in train and train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)

                # BUGFIX: validation sizes were never saved in this branch,
                # although the scp/wav branch above does save them.
                if set_ in valid and valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)

            if 'past_src' in train and len(train['past_src']) > 0:
                set_ = 'past_src_sizes'
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)

                if valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
        else:
            raise NotImplementedError
# Script entry point: run the full preprocessing pipeline.
if __name__ == "__main__":
    main()
def safe_readline(f):
    """Read one line from *f*, tolerating offsets that fall mid-character.

    If the current file position lands inside a multi-byte encoded
    character, ``readline`` raises UnicodeDecodeError; back up one byte
    at a time until the read starts on a character boundary.
    """
    offset = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # step back one byte and retry from the new position
            offset -= 1
            f.seek(offset)
| 64,908 | 43.397401 | 119 | py |
NMTGMinor | NMTGMinor-master/extract_wav2vec2_tdnn.py | #!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
# Command-line interface for the wav2vec2 feature-extraction script.
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)

# Model options
parser.add_argument('-model', required=True,
                    help='Path to model .pt file')
parser.add_argument('-lm', required=False,
                    help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
                    help='A Vocabulary list (1 word per line). Only are these words generated during translation.')
parser.add_argument('-autoencoder', required=False,
                    help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
                    help="Input type: word/char")

# Input data options
parser.add_argument('-src', required=True,
                    help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
                    help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
                    help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
                    help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
                    help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
                    help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
                    help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
                    help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
                    help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
                    help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
                    help='True target sequence (optional)')

# Output options (Kaldi-style scp index + ark payload)
parser.add_argument('-scp_output', default='output.scp',
                    help="""Path to output the feature paths""")
parser.add_argument('-ark_output', default='output.ark',
                    help="""Path to output the features""")

# Runtime options
parser.add_argument('-batch_size', type=int, default=30,
                    help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
                    help="Device to run on")
parser.add_argument('-fp16', action='store_true',
                    help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence will potential enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def write_ark(utts, features, padding_mask, out_ark, out_scp, opt):
    """Write a batch of extracted features to Kaldi-style ark/scp files.

    :param utts: utterance ids, one per batch element
    :param features: [bsz x seq_len x feat_size] feature tensor
    :param padding_mask: [bsz x seq_len] mask where 1 marks padded frames
    :param out_ark: open binary handle for the feature payload
    :param out_scp: open text handle for the feature index
    :param opt: command-line options (currently unused, kept for API stability)
    """
    # Hoisted out of the per-utterance loop (was re-imported every iteration).
    from onmt.data.kaldiio.io import write_ark_file

    features = features.cpu()
    bsz = features.size(0)
    # number of real (non-padded) frames per utterance
    lengths = (1 - padding_mask).sum(dim=1)
    assert len(utts) == bsz

    for i in range(bsz):
        feature_ = features[i, 0:lengths[i]].numpy()
        # if opt.fp16:
        #     feature_ = feature_.astype(np.float16)
        write_ark_file(out_ark, out_scp, {utts[i]: feature_})
def build_data(src_sents):
    """Wrap a list of raw waveform tensors into a single-batch onmt.Dataset.

    Batch limits are set to sys.maxsize so the whole list becomes one batch;
    the (single) source language id defaults to 0 and there is no target side.

    :param src_sents: list of 1-D audio tensors
    :return: onmt.Dataset containing exactly one batch
    """
    # (removed an unused `WavDataset` import that was never referenced)
    src_data = src_sents
    data_type = 'wav'
    tgt_data = None

    src_lang_data = [torch.Tensor([0])]
    tgt_lang_data = None

    return onmt.Dataset(src_data, tgt_data,
                        src_langs=src_lang_data, tgt_langs=tgt_lang_data,
                        batch_size_words=sys.maxsize,
                        max_src_len=sys.maxsize,
                        data_type=data_type,
                        batch_size_sents=sys.maxsize,
                        src_align_right=False,
                        past_src_data=None)
# Script entry point: stream audio paths from opt.src, batch them by total
# (padded) size, run the wav2vec2 extractor, and dump features to ark/scp.
if __name__ == '__main__':
    opt = parser.parse_args()

    # A non-negative -gpu index enables CUDA.
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecExtractor
    model = FairseqWav2VecExtractor(opt.model)

    # if opt.fp16:
    #     model = model.half()

    if opt.cuda:
        model = model.cuda()
    model.eval()

    # Output handles: ark holds the raw features, scp holds the index.
    ark_out = open(opt.ark_output, 'wb')
    scp_out = open(opt.scp_output, 'w')
    audio_data = open(opt.src)

    from onmt.utils import safe_readaudio

    i = 0  # NOTE(review): unused counter
    n_models = len(opt.model.split("|"))  # NOTE(review): computed but never used below

    src_batch = list()
    src_utts = list()

    while True:
        try:
            # Each input line: "<utt-id> <wav-path> [<start-sec> <end-sec>]"
            line = next(audio_data).strip().split()
            utt = line[0]
            if len(line) == 2:
                wav_path = line[1]
                start = 0
                end = 0
            else:
                wav_path, start, end = line[1], float(line[2]), float(line[3])
            # `line` is rebound to the loaded waveform tensor from here on.
            line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
        except StopIteration:
            break
        src_length = line.size(0)
        """
        Read features output from wav2vec model and write into scp/ark file just like Kaldi w/ logmel features
        """
        if _is_oversized(src_batch, src_length, opt.batch_size):
            # If adding a new sentence will make the batch oversized
            # Then do translation now, and then free the list
            print("Batch sizes :", len(src_batch))
            dataset = build_data(src_batch)
            batch = dataset.get_batch(0)
            batch.cuda()
            with autocast(enabled=opt.fp16):
                features, padding_mask = model(batch)
            write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
            src_batch = []
            src_utts = []
        src_batch.append(line)
        src_utts.append(utt)

    # catch the last batch
    if len(src_batch) != 0:
        print("Batch sizes :", len(src_batch), )
        dataset = build_data(src_batch)
        batch = dataset.get_batch(0)
        batch.cuda()
        with autocast(enabled=opt.fp16):
            features, padding_mask = model(batch)
        write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
        src_batch = []
        src_utts = []

    ark_out.close()
    scp_out.close()
| 7,353 | 32.733945 | 119 | py |
NMTGMinor | NMTGMinor-master/train_distributed.py | #!/usr/bin/env python
from __future__ import division
import pickle
import types
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import time, datetime
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
from onmt.data.wav_dataset import WavDataset
from options import make_parser
from collections import defaultdict
from onmt.constants import add_tokenidx
import os
import numpy as np
import warnings
import dill
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager, NamespaceProxy
from torch.multiprocessing import Pool, Process, set_start_method
def pickle_trick(obj, max_depth=10):
output = {}
if max_depth <= 0:
return output
try:
pickle.dumps(obj)
except (pickle.PicklingError, TypeError) as e:
failing_children = []
if hasattr(obj, "__dict__"):
for k, v in obj.__dict__.items():
result = pickle_trick(v, max_depth=max_depth - 1)
if result:
failing_children.append(result)
output = {
"fail": obj,
"err": e,
"depth": max_depth,
"failing_children": failing_children
}
return output
# Module-level alias so this script (and anything importing from it) can refer
# to the dataset class directly as `Dataset`.
Dataset = onmt.Dataset
#
# class MyManager(BaseManager):
# pass
#
#
# class MMapIndexedDatasetProxy(NamespaceProxy):
# _exposed_ = tuple(dir(MMapIndexedDataset))
#
# def __getattr__(self, name):
# result = super().__getattr__(name)
# if isinstance(result, types.MethodType):
# def wrapper(*args, **kwargs):
# return self._callmethod(name, args, kwargs) # Note the return here
# return wrapper
# return result
#
# def __len__(self):
# callmethod = object.__getattribute__(self, '_callmethod')
# return callmethod('__len__')
#
# def __getitem__(self, index):
# callmethod = object.__getattribute__(self, '_callmethod')
# return callmethod('__getitem__',(index,))
#
#
# MyManager.register('MMapIndexedDataset', MMapIndexedDataset, MMapIndexedDatasetProxy)
#
def numpy_to_torch(tensor_list):
    """Return a new list where every numpy array in *tensor_list* is replaced
    by a torch tensor (via ``torch.from_numpy``); other entries pass through
    unchanged.
    """
    return [torch.from_numpy(item) if isinstance(item, np.ndarray) else item
            for item in tensor_list]
def run_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants):
    """Worker entry point for standard sequence-to-sequence training.

    Builds a Trainer bound to device *gpu* and runs it over the given
    train/valid data, optionally resuming from *checkpoint*.
    """
    from onmt.train_utils.mp_trainer import Trainer

    Trainer(gpu, dicts, opt, constants).run(checkpoint=checkpoint,
                                            train_data=train_data,
                                            valid_data=valid_data)
def run_gem_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants):
    """Worker entry point for Gradient Episodic Memory training.

    Builds a GEMTrainer bound to device *gpu* and runs it, optionally
    resuming from *checkpoint*.
    """
    from onmt.train_utils.gem_trainer import GEMTrainer

    GEMTrainer(gpu, train_data, valid_data, dicts, opt, constants).run(checkpoint=checkpoint)
def main(gpu, opt):
    """Per-process training entry point.

    Loads the dataset(s) and dictionaries described by ``opt`` (either a
    single joined dataset or one dataset per sub-directory when
    ``opt.multi_dataset`` is set), builds the corresponding Dataset /
    StreamDataset objects, optionally restores a checkpoint, configures the
    distributed rendezvous environment, and dispatches to the appropriate
    trainer (`run_gem_process` or `run_process`).

    Bug fix vs. the previous revision: in the multi-dataset branch, the
    fallback when ``data.src_sizes.npy`` is missing assigned
    ``src_sizes, sizes = None, None``, leaving ``tgt_sizes`` unbound and
    crashing with a NameError at the subsequent ``onmt.Dataset`` call.
    Both occurrences now correctly set ``tgt_sizes``.
    """

    def lprint(*args, **kwargs):
        # Only rank 0 prints, to avoid duplicated logs across workers.
        if gpu == 0:
            print(*args, **kwargs, flush=True)

    if not opt.multi_dataset:
        if opt.data_format in ['bin', 'raw']:
            start = time.time()
            if opt.data.endswith(".train.pt"):
                lprint("Loading data from '%s'" % opt.data)
                dataset = torch.load(opt.data)
            else:
                lprint("Loading data from %s" % opt.data + ".train.pt")
                dataset = torch.load(opt.data + ".train.pt")
            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            lprint("Done after %s" % elapse)
            dicts = dataset['dicts']
            onmt.constants = add_tokenidx(opt, onmt.constants, dicts)

            # For backward compatibility: older datasets miss some keys,
            # the defaultdict makes them read as None.
            train_dict = defaultdict(lambda: None, dataset['train'])
            valid_dict = defaultdict(lambda: None, dataset['valid'])

            if train_dict['src_lang'] is not None:
                assert 'langs' in dicts
                train_src_langs = train_dict['src_lang']
                train_tgt_langs = train_dict['tgt_lang']
            else:
                # allocate new languages for the bilingual case
                dicts['langs'] = {'src': 0, 'tgt': 1}
                train_src_langs = list()
                train_tgt_langs = list()
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if train_dict['src_atb'] is not None:
                assert 'atbs' in dicts
                train_src_atbs = train_dict['src_atb']
                train_tgt_atbs = train_dict['tgt_atb']
            else:
                # allocate a dummy attribute when the dataset has none
                dicts['atbs'] = {'nothingness': 0}
                train_src_atbs = list()
                train_tgt_atbs = list()
                train_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
                train_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))

            if not opt.streaming:
                train_data = onmt.Dataset(numpy_to_torch(train_dict['src']), numpy_to_torch(train_dict['tgt']),
                                          train_dict['src_sizes'], train_dict['tgt_sizes'],
                                          train_src_langs, train_tgt_langs,
                                          train_src_atbs, train_tgt_atbs,
                                          batch_size_words=opt.batch_size_words,
                                          batch_size_frames=opt.batch_size_frames,
                                          data_type=dataset.get("type", "text"), sorting=True, cleaning=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          multiplier=opt.batch_size_multiplier,
                                          augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                          max_src_len=opt.max_src_length,
                                          max_tgt_len=opt.max_tgt_length,
                                          input_size=opt.input_size,
                                          upsampling=opt.upsampling,
                                          num_split=1,
                                          constants=onmt.constants)
            else:
                train_data = onmt.StreamDataset(train_dict['src'], train_dict['tgt'],
                                                train_src_langs, train_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=dataset.get("type", "text"), sorting=True,
                                                batch_size_sents=opt.batch_size_sents,
                                                multiplier=opt.batch_size_multiplier,
                                                augment=opt.augment_speech,
                                                upsampling=opt.upsampling)
            dicts['tgt_pad'] = train_data.tgt_pad

            if valid_dict['src_lang'] is not None:
                assert 'langs' in dicts
                valid_src_langs = valid_dict['src_lang']
                valid_tgt_langs = valid_dict['tgt_lang']
            else:
                # allocate new languages (bilingual case; dicts['langs'] was
                # set by the train branch above)
                valid_src_langs = list()
                valid_tgt_langs = list()
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if valid_dict['src_atb'] is not None:
                assert 'atbs' in dicts
                valid_src_atbs = valid_dict['src_atb']
                valid_tgt_atbs = valid_dict['tgt_atb']
            else:
                valid_src_atbs = list()
                valid_tgt_atbs = list()
                valid_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
                valid_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))

            if not opt.streaming:
                valid_data = onmt.Dataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
                                          valid_dict['src_sizes'], valid_dict['tgt_sizes'],
                                          valid_src_langs, valid_tgt_langs,
                                          valid_src_atbs, valid_tgt_atbs,
                                          batch_size_words=opt.batch_size_words,
                                          batch_size_frames=opt.batch_size_frames,
                                          data_type=dataset.get("type", "text"), sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          max_src_len=opt.max_src_length,
                                          max_tgt_len=opt.max_tgt_length,
                                          multiplier=opt.batch_size_multiplier,
                                          upsampling=opt.upsampling,
                                          input_size=opt.input_size,
                                          constants=onmt.constants)
            else:
                valid_data = onmt.StreamDataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
                                                valid_src_langs, valid_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=dataset.get("type", "text"), sorting=True,
                                                batch_size_sents=opt.batch_size_sents,
                                                upsampling=opt.upsampling)

            lprint(' * number of training sentences. %d' % len(dataset['train']['src']))
            lprint(' * maximum batch size (words per batch). %d' % opt.batch_size_words)

        # Loading asr (memory-mapped / scp / wav) data structures
        elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
            lprint("Loading memory mapped data files ....")
            start = time.time()
            from onmt.data.scp_dataset import SCPIndexDataset
            dicts = torch.load(opt.data + ".dict.pt")
            onmt.constants = add_tokenidx(opt, onmt.constants, dicts)

            if opt.data_format in ['scp', 'scpmem']:
                audio_data = torch.load(opt.data + ".scp_path.pt")
            elif opt.data_format in ['wav']:
                audio_data = torch.load(opt.data + ".wav_path.pt")

            # allocate languages if the dictionary does not carry them
            if 'langs' not in dicts:
                dicts['langs'] = {'src': 0, 'tgt': 1}
            else:
                lprint(dicts['langs'])

            train_path = opt.data + '.train'
            if opt.data_format in ['scp', 'scpmem']:
                train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)
                if 'train_past' in audio_data:
                    past_train_src = SCPIndexDataset(audio_data['train_past'],
                                                     concat=opt.concat, shared_object=train_src)
                else:
                    past_train_src = None
            elif opt.data_format in ['wav']:
                train_src = WavDataset(audio_data['train'], cache_size=opt.data_cache_size)
                past_train_src = None
            else:
                train_src = MMapIndexedDataset(train_path + '.src')
                past_train_src = None

            train_tgt = MMapIndexedDataset(train_path + '.tgt')

            # check the lang files if they exist (in the case of multi-lingual models)
            if os.path.exists(train_path + '.src_lang.bin'):
                assert 'langs' in dicts
                train_src_langs = MMapIndexedDataset(train_path + '.src_lang')
                train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')
            else:
                train_src_langs = list()
                train_tgt_langs = list()
                # Allocate a Tensor(1) for the bilingual case
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if os.path.exists(train_path + '.src_atb.bin'):
                assert 'atbs' in dicts
                train_src_atbs = MMapIndexedDataset(train_path + '.src_atb')
                train_tgt_atbs = MMapIndexedDataset(train_path + '.tgt_atb')
            else:
                dicts['atbs'] = {'nothingness': 0}
                train_src_atbs = list()
                train_tgt_atbs = list()
                train_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
                train_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))

            # check the length files if they exist
            if os.path.exists(train_path + '.src_sizes.npy'):
                train_src_sizes = np.load(train_path + '.src_sizes.npy')
                train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')
            else:
                train_src_sizes, train_tgt_sizes = None, None

            if os.path.exists(train_path + '.past_src_sizes.npy'):
                past_train_src_sizes = np.load(train_path + '.past_src_sizes.npy')
            else:
                past_train_src_sizes = None

            if opt.data_format in ['scp', 'scpmem']:
                data_type = 'audio'
            elif opt.data_format in ['wav']:
                data_type = 'wav'
            else:
                data_type = 'text'

            if not opt.streaming:
                train_data = onmt.Dataset(train_src,
                                          train_tgt,
                                          train_src_sizes, train_tgt_sizes,
                                          train_src_langs, train_tgt_langs,
                                          train_src_atbs, train_tgt_atbs,
                                          batch_size_words=opt.batch_size_words,
                                          batch_size_frames=opt.batch_size_frames,
                                          data_type=data_type, sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          multiplier=opt.batch_size_multiplier,
                                          augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                          cleaning=True, verbose=True,
                                          input_size=opt.input_size,
                                          past_src_data=past_train_src,
                                          past_src_data_sizes=past_train_src_sizes,
                                          max_src_len=opt.max_src_length,
                                          max_tgt_len=opt.max_tgt_length,
                                          constants=onmt.constants)
            else:
                train_data = onmt.StreamDataset(train_src,
                                                train_tgt,
                                                train_src_langs, train_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=data_type, sorting=False,
                                                batch_size_sents=opt.batch_size_sents,
                                                multiplier=opt.batch_size_multiplier,
                                                upsampling=opt.upsampling)
            dicts['tgt_pad'] = train_data.tgt_pad

            valid_path = opt.data + '.valid'
            if opt.data_format in ['scp', 'scpmem']:
                valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)
                if 'valid_past' in audio_data:
                    past_valid_src = SCPIndexDataset(audio_data['valid_past'],
                                                     concat=opt.concat, shared_object=valid_src)
                else:
                    past_valid_src = None
            elif opt.data_format in ['wav']:
                valid_src = WavDataset(audio_data['valid'], cache_size=opt.data_cache_size)
                past_valid_src = None
            else:
                valid_src = MMapIndexedDataset(valid_path + '.src')
                past_valid_src = None
            valid_tgt = MMapIndexedDataset(valid_path + '.tgt')

            if os.path.exists(valid_path + '.src_lang.bin'):
                assert 'langs' in dicts
                valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')
                valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')
            else:
                valid_src_langs = list()
                valid_tgt_langs = list()
                # Allocation one for the bilingual case
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if os.path.exists(valid_path + '.src_atb.bin'):
                assert 'atbs' in dicts
                valid_src_atbs = MMapIndexedDataset(valid_path + '.src_atb')
                valid_tgt_atbs = MMapIndexedDataset(valid_path + '.tgt_atb')
            else:
                valid_src_atbs = list()
                valid_tgt_atbs = list()
                valid_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
                valid_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))

            # check the length files if they exist
            if os.path.exists(valid_path + '.src_sizes.npy'):
                valid_src_sizes = np.load(valid_path + '.src_sizes.npy')
                valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')
            else:
                valid_src_sizes, valid_tgt_sizes = None, None

            if os.path.exists(valid_path + '.past_src_sizes.npy'):
                past_valid_src_sizes = np.load(valid_path + '.past_src_sizes.npy')
            else:
                past_valid_src_sizes = None

            if not opt.streaming:
                valid_data = onmt.Dataset(valid_src, valid_tgt,
                                          valid_src_sizes, valid_tgt_sizes,
                                          valid_src_langs, valid_tgt_langs,
                                          valid_src_atbs, valid_tgt_atbs,
                                          batch_size_words=opt.batch_size_words,
                                          batch_size_frames=opt.batch_size_frames,
                                          multiplier=opt.batch_size_multiplier,
                                          data_type=data_type, sorting=True,
                                          input_size=opt.input_size,
                                          batch_size_sents=opt.batch_size_sents,
                                          cleaning=True, verbose=True, debug=True,
                                          past_src_data=past_valid_src,
                                          past_src_data_sizes=past_valid_src_sizes,
                                          max_src_len=opt.max_src_length,
                                          max_tgt_len=opt.max_tgt_length,
                                          min_src_len=1, min_tgt_len=3,
                                          constants=onmt.constants)
            else:
                # for validation data, we have to go through sentences (very slow but to ensure correctness)
                valid_data = onmt.StreamDataset(valid_src, valid_tgt,
                                                valid_src_langs, valid_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=data_type, sorting=True,
                                                batch_size_sents=opt.batch_size_sents)

            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            lprint("Done after %s" % elapse)
        else:
            raise NotImplementedError

        lprint(' * number of sentences in training data: %d' % train_data.size())
        lprint(' * number of sentences in validation data: %d' % valid_data.size())

    # Multi-data set handling: one sub-directory per dataset
    else:
        lprint("[INFO] Reading multiple dataset ...")
        dicts = torch.load(opt.data + ".dict.pt")
        lprint("Languages: ", dicts['langs'])

        if 'atbs' not in dicts or len(dicts['atbs']) == 0:  # backward compatible
            dicts['atbs'] = {'nothingness': 0}
        lprint("Atributes: ", dicts['atbs'])

        onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
        root_dir = os.path.dirname(opt.data)

        lprint("Loading training data ...")
        train_dirs, valid_dirs = dict(), dict()

        # scan the data directory to find the training data
        for dir_ in os.listdir(root_dir):
            if os.path.isdir(os.path.join(root_dir, dir_)):
                if str(dir_).startswith("train"):
                    idx = int(dir_.split(".")[1])
                    train_dirs[idx] = dir_
                if dir_.startswith("valid"):
                    idx = int(dir_.split(".")[1])
                    valid_dirs[idx] = dir_

        train_sets, valid_sets = list(), list()

        c = 0
        for (idx_, dir_) in sorted(train_dirs.items()):
            c += 1
            data_dir = os.path.join(root_dir, dir_)
            lprint("[INFO] Loading training data %i from %s" % (idx_, dir_))

            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError
            elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
                from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
                from onmt.data.scp_dataset import SCPIndexDataset

                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                elif opt.data_format in ['wav']:
                    # NOTE(review): this loads "data.scp_path.pt" while the
                    # single-dataset wav branch loads ".wav_path.pt" — verify
                    # against what preprocess.py writes for multi-dataset wav.
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = WavDataset(audio_data, cache_size=opt.data_cache_size)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))

                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))

                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))

                if os.path.exists(os.path.join(data_dir, 'data.src_atb.bin')):
                    src_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_atb'))
                    tgt_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_atb'))
                else:
                    src_atbs_data = list()
                    tgt_atbs_data = list()
                    src_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
                    tgt_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))

                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUG FIX: previously `src_sizes, sizes = None, None`,
                    # which left tgt_sizes unbound -> NameError below.
                    src_sizes, tgt_sizes = None, None

                if opt.encoder_type in ['audio', 'wav2vec2_scp']:
                    data_type = 'audio'
                elif opt.encoder_type == 'wav2vec2':
                    data_type = 'wav'
                else:
                    data_type = 'text'

                if not opt.streaming:
                    constants = dill.dumps(onmt.constants)
                    train_data = onmt.Dataset(src_data,
                                              tgt_data,
                                              src_sizes, tgt_sizes,
                                              src_lang_data, tgt_lang_data,
                                              src_atbs_data, tgt_atbs_data,
                                              batch_size_words=opt.batch_size_words,
                                              batch_size_frames=opt.batch_size_frames,
                                              data_type=data_type, sorting=True,
                                              batch_size_sents=opt.batch_size_sents,
                                              multiplier=opt.batch_size_multiplier,
                                              upsampling=opt.upsampling,
                                              augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                              cleaning=True, verbose=True,
                                              max_src_len=opt.max_src_length,
                                              max_tgt_len=opt.max_tgt_length,
                                              input_size=opt.input_size,
                                              constants=constants)
                    if c == 1:
                        dicts['tgt_pad'] = train_data.get_tgt_pad()
                    del src_sizes, tgt_sizes, src_data, tgt_data, src_lang_data, tgt_lang_data

                    train_sets.append(train_data)
                else:
                    lprint("Multi-dataset not implemented for Streaming tasks.")
                    raise NotImplementedError

        for (idx_, dir_) in sorted(valid_dirs.items()):
            data_dir = os.path.join(root_dir, dir_)
            lprint("[INFO] Loading validation data %i from %s" % (idx_, dir_))

            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError
            elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                elif opt.data_format in ['wav']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = WavDataset(audio_data, cache_size=opt.data_cache_size)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))

                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))

                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))

                # load data attributes
                if os.path.exists(os.path.join(data_dir, 'data.src_atb.bin')):
                    src_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_atb'))
                    tgt_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_atb'))
                else:
                    src_atbs_data = list()
                    tgt_atbs_data = list()
                    src_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
                    tgt_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))

                # load data size
                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUG FIX: same unbound-tgt_sizes defect as the train loop.
                    src_sizes, tgt_sizes = None, None

                if opt.encoder_type in ['audio', 'wav2vec2_scp']:
                    data_type = 'audio'
                elif opt.encoder_type == 'wav2vec2':
                    data_type = 'wav'
                else:
                    data_type = 'text'

                if not opt.streaming:
                    constants = dill.dumps(onmt.constants)
                    valid_data = onmt.Dataset(src_data, tgt_data,
                                              src_sizes, tgt_sizes,
                                              src_lang_data, tgt_lang_data,
                                              src_atbs_data, tgt_atbs_data,
                                              batch_size_words=opt.batch_size_words,
                                              batch_size_frames=opt.batch_size_frames,
                                              multiplier=opt.batch_size_multiplier,
                                              data_type=data_type, sorting=True,
                                              batch_size_sents=opt.batch_size_sents,
                                              min_src_len=1, min_tgt_len=3,
                                              input_size=opt.input_size,
                                              cleaning=True, verbose=True,
                                              constants=constants)

                    valid_sets.append(valid_data)
                else:
                    raise NotImplementedError

        train_data = train_sets
        valid_data = valid_sets

    if opt.load_from and not opt.reset_optim:
        lprint("Loading checkpoint: ", opt.load_from)
        checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
        lprint("* Loading dictionaries from the checkpoint")
        # model/optim states are restored by the trainer itself; drop them here
        del checkpoint['model']
        del checkpoint['optim']
        if opt.override_dict_from_checkpoint:
            dicts = checkpoint['dicts']
    else:
        dicts['tgt'].patch(opt.patch_vocab_multiplier)
        checkpoint = None

    if "src" in dicts:
        lprint(' * vocabulary size. source = %d; target = %d' %
               (dicts['src'].size(), dicts['tgt'].size()))
    else:
        lprint(' * vocabulary size. target = %d' %
               (dicts['tgt'].size()))

    # Rendezvous address for torch.distributed workers
    os.environ['MASTER_ADDR'] = opt.master_addr  # default 'localhost'
    os.environ['MASTER_PORT'] = opt.master_port  # default '8888'

    # Constants are serialized so each worker process can restore them
    constants = dill.dumps(onmt.constants)

    if opt.gem_training:
        run_gem_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants)
    else:
        run_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants)
if __name__ == "__main__":
    # Silence the warning torch emits when wrapping read-only numpy arrays.
    warnings.filterwarnings("ignore", message="The given NumPy array is not writeable ")
    # Share tensors between processes through the filesystem instead of file descriptors.
    torch.multiprocessing.set_sharing_strategy('file_system')
    parser = argparse.ArgumentParser(description='train_distributed.py')
    onmt.markdown.add_md_help_argument(parser)
    # Please look at the options file to see the options regarding models and data
    parser = make_parser(parser)
    opt = parser.parse_args()
    # An ugly hack to have weight norm on / off
    onmt.constants.weight_norm = opt.weight_norm
    onmt.constants.checkpointing = opt.checkpointing
    onmt.constants.max_position_length = opt.max_position_length
    # Use static dropout if checkpointing > 0
    if opt.checkpointing > 0:
        onmt.constants.static = True
    if torch.cuda.is_available() and not opt.gpus:
        print("WARNING: You have a CUDA device, should run with -gpus 0")
    # Single GPU runs main() directly as rank 0; otherwise spawn one
    # process per GPU, each receiving (rank, opt).
    if len(opt.gpus) == 1:
        main(0, opt)
    else:
        torch.multiprocessing.spawn(main, args=(opt, ),
                                    nprocs=len(opt.gpus))
| 33,419 | 44.345997 | 117 | py |
NMTGMinor | NMTGMinor-master/train_language_model.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
from onmt.train_utils.trainer import XETrainer
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.model_factory import build_language_model, optimize_model
from onmt.data.lm_dataset import LanguageModelDataset
from collections import defaultdict
# Command-line setup and global option side effects (runs at import time).
# NOTE(review): the description says 'train.py' although this file is
# train_language_model.py — presumably copy-pasted; confirm before changing.
parser = argparse.ArgumentParser(description='train.py')
onmt.markdown.add_md_help_argument(parser)
from options import make_parser
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
print(opt)
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
    onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
    print("WARNING: You have a CUDA device, should run with -gpus 0")
torch.manual_seed(opt.seed)
def main():
    """Load the raw LM dataset, build the language model, and run XE training.

    Reads module-level ``opt``; only the 'raw' data format is supported.
    """
    start = time.time()
    print("Loading data from '%s'" % opt.data)
    if opt.data_format == 'raw':
        dataset = torch.load(opt.data)
        elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
        print("Done after %s" % elapse)
        dicts = dataset['dicts']
        # For backward compatibility: missing keys read as None.
        train_dict = defaultdict(lambda: None, dataset['train'])
        valid_dict = defaultdict(lambda: None, dataset['valid'])
        if train_dict['src_lang'] is not None:
            assert 'langs' in dicts
            train_src_langs = train_dict['src_lang']
            train_tgt_langs = train_dict['tgt_lang']
        else:
            # allocate new languages
            dicts['langs'] = {'src': 0, 'tgt': 1}
            train_src_langs = list()
            train_tgt_langs = list()
            # Allocation one for the bilingual case
            train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
            train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
        # The LM only consumes the target side of the parallel data.
        train_data = LanguageModelDataset(
            dataset['train']['tgt'], train_tgt_langs,
            batch_size_sents=opt.batch_size_sents,
            seq_length=opt.lm_seq_length)
        if valid_dict['src_lang'] is not None:
            assert 'langs' in dicts
            valid_src_langs = valid_dict['src_lang']
            valid_tgt_langs = valid_dict['tgt_lang']
        else:
            # allocate new languages (dicts['langs'] was set above if absent)
            valid_src_langs = list()
            valid_tgt_langs = list()
            # Allocation one for the bilingual case
            valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
            valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
        valid_data = LanguageModelDataset(
            dataset['valid']['tgt'], valid_tgt_langs,
            batch_size_sents=opt.batch_size_sents,
            seq_length=opt.lm_seq_length)
        if opt.load_from:
            checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
            print("* Loading dictionaries from the checkpoint")
            dicts = checkpoint['dicts']
        else:
            dicts['tgt'].patch(opt.patch_vocab_multiplier)
            checkpoint = None
        if "src" in dicts:
            print(' * vocabulary size. source = %d; target = %d' %
                  (dicts['src'].size(), dicts['tgt'].size()))
        else:
            print(' * vocabulary size. target = %d' %
                  (dicts['tgt'].size()))
        print(' * number of training sentences. %d' %
              train_data.size())
        print(' * maximum batch size (words per batch). %d' % (opt.batch_size_sents * opt.lm_seq_length))
    else:
        raise NotImplementedError
    print('Building model...')
    model = build_language_model(opt, dicts)
    optimize_model(model)
    """ Building the loss function """
    loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(), label_smoothing=opt.label_smoothing)
    n_params = sum([p.nelement() for p in model.parameters()])
    print('* number of parameters: %d' % n_params)
    if len(opt.gpus) > 1 or opt.virtual_gpu > 1:
        raise NotImplementedError("Multi-GPU training is not supported ATM.")
    else:
        trainer = XETrainer(model, loss_function, train_data, valid_data, dicts, opt)
    trainer.run(checkpoint=checkpoint)
# Script entry point: build datasets and model, then run training.
if __name__ == "__main__":
    main()
| 4,807 | 32.158621 | 105 | py |
NMTGMinor | NMTGMinor-master/preprocess.py | #!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import numpy as np
import warnings
import os
from os.path import dirname, abspath
import gc
warnings.filterwarnings("ignore", category=UserWarning)
# Command-line option definitions for preprocess.py (runs at import time).
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
# --- dataset layout / general behaviour ---
parser.add_argument('-multi_dataset', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-multi_mirror', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-resume', action='store_true',
                    help="If the dataset is created, ignored and create the next one")
parser.add_argument('-config', help="Read options from this file")
# --- input type / format options ---
parser.add_argument('-src_type', default="text",
                    help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
                    help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
                    help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
                    help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
                    help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
                    help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
                    help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
                    help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
                    help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-external_tokenizer', default="",
                    help="External tokenizer from Huggingface. Currently supports barts.")
# --- train / valid file paths ---
parser.add_argument('-train_src', required=True,
                    help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
                    help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
                    help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
                    help="Path to the validation target data")
# --- language / attribute tags ---
parser.add_argument('-train_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-train_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-train_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-valid_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-valid_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-save_data', required=True,
                    help="Output file for the prepared data")
# --- vocabulary options ---
parser.add_argument('-src_vocab_size', type=int, default=9999999,
                    help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
                    help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
                    help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
                    help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
                    help="Path to an existing target vocabulary")
# --- sequence length limits ---
parser.add_argument('-src_seq_length', type=int, default=10000,
                    help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
                    help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
                    help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
                    help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS Token Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC BOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS Token Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT BOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD Token. Default is <blank>.')
# --- misc flags ---
parser.add_argument('-shuffle', type=int, default=1,
                    help="Shuffle data")
parser.add_argument('-asr', action='store_true',
                    help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
                    help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
                    help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
                    help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
                    help="Random seed")
# NOTE(review): the help text for -load_bpe_voc and -sort_by_target looks
# copy-pasted from -lower ('lowercase data'); confirm intended descriptions.
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-load_bpe_voc', action='store_true', help='lowercase data')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='lowercase data')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
                    help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
                    help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
                    help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
                    help="Print out information during preprocessing")
opt = parser.parse_args()
# Seed torch for reproducible shuffling downstream.
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
    """Build a pruned vocabulary of at most `size` entries from text files.

    `name` selects which side's special tokens seed the dictionary
    ('source' or 'target'); any other value aborts the program.
    """
    if name == "source":
        specials = [opt.src_pad_token, opt.src_unk_token,
                    opt.src_bos_token, opt.src_eos_token]
    elif name == "target":
        specials = [opt.tgt_pad_token, opt.tgt_unk_token,
                    opt.tgt_bos_token, opt.tgt_eos_token]
    else:
        print("Warning: check the name")
        exit(-1)
    vocab = onmt.Dict(specials, lower=opt.lower)
    # accumulate token counts from every input file into the same dictionary
    for filename in filenames:
        print("Generating vocabulary from file %s ... " % filename)
        onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
    full_size = vocab.size()
    vocab = vocab.prune(size)
    print('Created dictionary of size %d (pruned from %d)' %
          (vocab.size(), full_size))
    return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
    """Return the vocabulary for `name` ('source' or 'target').

    Loaded from `vocab_file` when one is given, otherwise generated from
    `data_files` and pruned to `vocab_size`.
    """
    if vocab_file is None:
        # no existing dictionary: build one from the data files
        print('Building ' + name + ' vocabulary...')
        vocab = make_vocab(name, data_files, vocab_size, tokenizer, num_workers=num_workers)
        print()
        return vocab
    # If given, load existing word dictionary.
    print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
    if opt.load_bpe_voc:
        # seed the dictionary with the special tokens before loading
        if name == "target":
            vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
                               opt.tgt_bos_token, opt.tgt_eos_token],
                              lower=opt.lower)
        elif name == "source":
            vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
                               opt.src_bos_token, opt.src_eos_token],
                              lower=opt.lower)
        else:
            print("Warning: name should be source or target")
            exit(-1)
    else:
        vocab = onmt.Dict()
    vocab.loadFile(vocab_file)
    print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
    print()
    return vocab
def save_vocabulary(name, vocab, file):
    """Write `vocab` to `file`, announcing the destination on stdout."""
    print('Saving %s vocabulary to \'%s\'...' % (name, file))
    vocab.writeFile(file)
def _save_indexed_sets(path, data, sets, missing_msg):
    """Write each tensor list named in `sets` as a memory-mapped .bin/.idx
    pair under `path`, then dump the src/tgt size arrays as .npy files.

    `missing_msg` is the %-template printed when a size array is absent.
    Shared by the 'scp'-family and 'mmap'-family branches of save_dataset,
    which previously duplicated this logic.
    """
    from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
    # dtype choice is loop-invariant, so decide it once
    dtype = np.int64 if opt.data_type == 'int64' else np.int32
    for set_ in sets:
        if set_ not in data or data[set_] is None:
            continue
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in data[set_]:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
        del indexed_data
    for set_ in ['src_sizes', 'tgt_sizes']:
        if data[set_] is not None:
            np_array = np.asarray(data[set_])
            # format the set name *before* joining: the old scp branch did
            # os.path.join(path, "data.%s.npy") % set_, which breaks if the
            # path itself contains a '%'
            np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
        else:
            print(missing_msg % set_)


def save_dataset(path, data, format, dicts, src_type):
    """Persist one binarized dataset to `path` in the requested `format`.

    Each dataset is comprised of the following components:
      src: tensors for the source vectors, or the scp_path (in ASR case)
      tgt: tensors for the target vectors
      src_lang / tgt_lang: tensors for the language ids
    Formats:
      'raw'/'bin'        — everything pickled into a single data.pt
      'scp'/'scpmem'/'wav'— target side memory-indexed; source stored as scp path
      'mmap'/'mmem'      — both sides memory-indexed (not compatible with ASR)
    `dicts` is accepted for interface compatibility but unused here.
    """
    if format in ['raw', 'bin']:
        print('Saving data to ' + os.path.join(path, 'data.pt') + '...')
        # use the src_type parameter (the original read the global opt.src_type
        # and left the parameter unused; all callers pass opt.src_type anyway)
        save_data = {'type': src_type,
                     'data': data}
        torch.save(save_data, os.path.join(path, 'data.pt'))
        print("Done")
    # for ASR only
    elif format in ['scp', 'scpmem', 'wav']:
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        assert opt.asr, "ASR data format is required for this memory indexed format"
        _save_indexed_sets(path, data,
                           ['tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb'],
                           missing_msg="Training %s not found ")
        # Finally save the audio path
        torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
        if 'prev_src' in data and data['prev_src'] is not None:
            torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
        print("Done")
    # note: this branch previously tested opt.format instead of the parameter;
    # callers pass opt.format, so behavior is unchanged
    elif format in ['mmap', 'mmem']:
        print('Saving data to memory indexed data files')
        if opt.asr:
            print("ASR data format isn't compatible with memory indexed format")
            raise AssertionError
        _save_indexed_sets(path, data,
                           ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb'],
                           missing_msg="Set %s not found ")
def make_lm_data(tgt_file, tgt_dicts, max_tgt_length=1000, input_type='word', data_type='int32'):
    """Binarize a language-model corpus into one long concatenated tensor.

    Each non-empty line of `tgt_file` is tokenized (by word or character),
    converted to indices with `tgt_dicts` (EOS appended per line), and all
    pieces are concatenated along the last dimension after a leading EOS
    tensor.

    `max_tgt_length` is accepted for interface compatibility but not
    enforced here.
    """
    count = 0
    print('Processing %s ...' % (tgt_file))
    # NOTE(review): fill_() is given opt.tgt_eos_token, which is the token
    # *string* ("</s>") rather than a numeric index -- a LongTensor cannot
    # be filled with a str, so confirm the intended value here.
    eos = torch.LongTensor(1).fill_(opt.tgt_eos_token)
    tensors = [eos]
    # 'with' guarantees the file is closed even if conversion raises
    # (the original used readline() in a while-loop and a manual close())
    with open(tgt_file) as tgtf:
        for tline in tgtf:
            tline = tline.strip()
            # source and/or target are empty
            if tline == "":
                print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
                continue
            if input_type == 'word':
                tgt_words = tline.split()
            elif input_type == 'char':
                tgt_words = split_line_by_char(tline)
            else:
                # the original silently fell through, leaving tgt_words
                # undefined (NameError) or stale from the previous line
                raise ValueError("unknown input_type: %s" % input_type)
            tensor = tgt_dicts.convertToIdx(tgt_words,
                                            opt.tgt_unk_token,
                                            None,
                                            opt.tgt_eos_token,
                                            type=data_type)
            tensors.append(tensor)
            count = count + 1
            if count % opt.report_every == 0:
                print('... %d sentences prepared' % count)
    # concatenate all tensors into one
    tensor = torch.cat(tensors, dim=-1)
    return tensor
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
                          add_bos=True, data_type='int64', num_workers=1, verbose=False,
                          external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[],
                          early_save=False, savedir="", mirror=False, mirror_savedir=""):
    """Binarize a parallel (source/target) text corpus.

    Returns (src, tgt, src_sizes, tgt_sizes). When `early_save` is set the
    binarized tensors are written straight to `savedir` as memory-mapped
    files (optionally mirrored/symlinked into `mirror_savedir` with src and
    tgt swapped) and all four return values are None.

    Note: the mutable default `lang_list=[]` is safe here because the list
    is only read (or rebound), never mutated in place.
    """
    # lang_list may arrive as a {lang: id} dict; normalize to a sorted list
    if type(lang_list) is dict:
        lang_list = sorted(list(lang_list.keys()))
    print("[INFO] Binarizing file %s ..." % src_file)
    binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
                                            bos_word=None, eos_word=None,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=src_lang, lang_list=lang_list, target=False
                                            )
    if early_save:
        os.makedirs(savedir, exist_ok=True)
        if mirror:
            os.makedirs(mirror_savedir, exist_ok=True)
        src_len = len(binarized_src['data'])
        print("Saving source data to %s .... with %d entries" % (savedir, src_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "src"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_src['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "src"))
        # free the tensors immediately to keep peak memory down
        del binarized_src['data']
        gc.collect()
        np_array = np.asarray(binarized_src['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "src_sizes"), np_array)
        del binarized_src
        del indexed_data
        del np_array
        gc.collect()
        if mirror:
            # typo "mirrrored" fixed in the message below
            print("Saving mirrored target data to %s .... with %d entries" % (mirror_savedir, src_len))
            # symlink the source files as the mirror dataset's *target* side
            source = os.path.join(savedir, "data.%s.bin" % "src")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "src")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "src_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "tgt_sizes")
            os.symlink(os.path.abspath(source), target)
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    print("[INFO] Binarizing file %s ..." % tgt_file)
    binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                            bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=tgt_lang, lang_list=lang_list, target=True
                                            )
    if early_save:
        tgt_len = len(binarized_tgt['data'])
        assert tgt_len == src_len, "Number of samples doesn't match between source and target!!!"
        print("Saving target data to %s .... with %d samples" % (savedir, tgt_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "tgt"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_tgt['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "tgt"))
        del binarized_tgt['data']
        del indexed_data
        gc.collect()
        np_array = np.asarray(binarized_tgt['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "tgt_sizes"), np_array)
        del binarized_tgt
        del np_array
        gc.collect()
        if mirror:
            print("Saving mirrored source data to %s .... with %d entries" % (mirror_savedir, src_len))
            # symlink the target files as the mirror dataset's *source* side
            source = os.path.join(savedir, "data.%s.bin" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "tgt_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "src_sizes")
            os.symlink(os.path.abspath(source), target)
        src, tgt, src_sizes, tgt_sizes = None, None, None, None
    else:
        src = binarized_src['data']
        src_sizes = binarized_src['sizes']
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
    # currently we don't ignore anything :D
    ignored = 0
    # with early_save the tensors were already freed and src is None, so
    # report src_len instead (the old code called len(src) and crashed here)
    n_prepared = src_len if early_save else len(src)
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (n_prepared, ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
                  max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
                  input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
                  asr_format="scp", output_format="raw",
                  external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[]):
    """Binarize an ASR corpus: speech features on the source side and,
    when `tgt_file` is given, text on the target side.

    Returns (src, tgt, src_sizes, tgt_sizes); tgt and tgt_sizes are None
    when `tgt_file` is None. Several parameters (max_*_length, input_type,
    reshape, ...) are accepted for interface compatibility and forwarded or
    unused here.
    """
    ignored = 0
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    if tgt_file is not None:
        print("[INFO] Binarizing file %s ..." % tgt_file)
        binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                                bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                                data_type=data_type,
                                                num_workers=num_workers, verbose=verbose,
                                                external_tokenizer=external_tokenizer,
                                                lang=tgt_lang, lang_list=lang_list, target=True)
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
    else:
        tgt = None
        tgt_sizes = None
    print('[INFO] Processing %s ...' % src_file)
    # speech binarizer has to be 1 thread at the moment
    binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
                                                  output_format=output_format, concat=concat,
                                                  stride=stride, fp16=fp16, prev_context=prev_context,
                                                  num_workers=num_workers, verbose=verbose)
    src = binarized_src['data']
    src_sizes = binarized_src['sizes']
    # Guard on tgt_file *first*: the original evaluated len(tgt_sizes) before
    # checking tgt_file, so it crashed on None; it also applied '%' to a
    # single value for a two-%d format string (TypeError when triggered).
    if tgt_file is not None and len(src_sizes) != len(tgt_sizes):
        print("Warning: data size mismatched. Src: %d . Tgt: %d" % (len(src_sizes), len(tgt_sizes)))
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes
def main():
dicts = {}
tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
# We can load the dictionary from another project to ensure consistency
if opt.load_dict is not None and len(opt.load_dict) > 0:
print("[INFO] Loading dictionary from ... %s" % opt.load_dict)
dicts = torch.load(opt.load_dict)
# construct set of languages from the training languages
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
langs = (src_langs + tgt_langs)
langs = sorted(list(set(langs)))
if len (opt.train_src_atbs) > 0:
src_atbs = opt.train_src_atbs.split("|")
tgt_atbs = opt.train_tgt_atbs.split("|")
atbs = (src_atbs + tgt_atbs)
atbs = sorted(list(set(atbs)))
else:
atbs = []
if not opt.load_dict:
dicts['langs'] = dict()
for lang in langs:
idx = len(dicts['langs'])
dicts['langs'][lang] = idx
dicts['atbs'] = dict()
for atb in atbs:
idx = len(dicts['atbs'])
dicts['atbs'][atb] = idx
else:
if 'langs' not in dicts:
dicts['langs'] = dict()
else:
print(dicts['langs'])
print("Adding languages to existing dictionary ...")
for lang in langs:
idx = len(dicts['langs'])
if lang not in dicts['langs']:
dicts['langs'][lang] = idx
if 'atbs' not in dicts:
dicts['atbs'] = dict()
else:
print("Adding attributes to existing dictionary ...")
for atb in atbs:
idx = len(dicts['atbs'])
if atb not in dicts['atbs']:
dicts['atbs'][atb] = idx
print("Languages: ", dicts['langs'])
print("Attributes: ", dicts['atbs'])
start = time.time()
src_train_files = opt.train_src.split("|")
tgt_train_files = opt.train_tgt.split("|")
# for ASR and LM we only need to build vocab for the 'target' language
if opt.asr or opt.lm:
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elif opt.join_vocab:
dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = dicts['src']
else:
dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Vocabulary generated after %s" % elapse)
if opt.lm:
print('Preparing training language model ...')
train = dict()
train['tgt'] = make_lm_data(opt.train_tgt,
dicts['tgt'])
train['src'] = None
valid = dict()
valid['tgt'] = make_lm_data(opt.valid_tgt,
dicts['tgt'])
valid['src'] = None
train['src_sizes'] = None
train['tgt_sizes'] = None
valid['src_sizes'] = None
valid['tgt_sizes'] = None
elif opt.asr:
print('Preparing training acoustic model ...')
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
src_atbs = opt.train_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.train_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(src_atbs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
assert len(tgt_input_files) == len(tgt_atbs)
past_src_files = opt.past_train_src.split("|")
idx = 0
n_input_files = len(src_input_files)
# Training data ###################################################################
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_atb'], train['tgt_atb'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
data = dict()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "train.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
print("Checking existing path %s ..." % dataset_path)
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
src_atb_data, tgt_atb_data = None, None
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
# by default its 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
add_bos=not opt.no_bos,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
if opt.multi_dataset:
data['prev_src'] = prev_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
# Finalizing Training data ###################################################################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving training set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "train.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
train['src_atb'] += src_atb_data
train['tgt_atb'] += tgt_atb_data
# Validation data ###################################################################
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
src_atbs = opt.valid_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.valid_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
idx = 0
n_input_files = len(src_input_files)
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
valid['src_atb'], valid['tgt_atb'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "valid.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=max(1024, opt.src_seq_length),
max_tgt_length=max(1024, opt.tgt_seq_length),
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
# by default its 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# validation past file
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
# Finalizing Validation data ... #########################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
valid['src_atb'] += src_atb_data
valid['tgt_atb'] += tgt_atb_data
else: # MACHINE TRANSLATION DATA
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
past_src_files = opt.past_train_src.split("|")
n_input_files = len(src_input_files)
idx = 0
data = dict()
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
start = time.time()
print('Binarizing data to train translation models...')
for i, (src_file, tgt_file, src_lang, tgt_lang) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
dataset_idx = idx if not opt.multi_mirror else 2 * idx
data_name = "train.%i.%s-%s" % (dataset_idx , src_lang, tgt_lang)
mirrored_data_name = "train.%i.%s-%s" % (dataset_idx + 1 , tgt_lang, src_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
if opt.multi_dataset and opt.resume:
print("Checking existing path %s ..." % dataset_path)
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
else:
os.makedirs(dataset_path, exist_ok=True)
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'],
early_save=opt.multi_dataset,
savedir=dataset_path,
mirror=opt.multi_mirror,
mirror_savedir=mirrored_dataset_path)
#TODO: check
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
assert src_data is not None
n_samples = len(src_data)
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
if opt.multi_dataset:
data['prev_src'] = prev_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
if opt.multi_mirror:
mdata = dict()
mdata['src'] = tgt_data
mdata['tgt'] = src_data
mdata['tgt_sizes'] = src_sizes
mdata['src_sizes'] = tgt_sizes
mdata['tgt_lang'] = src_lang_data
mdata['src_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx + 1, tgt_lang, src_lang))
# take basedir from opt.save_data
path = mirrored_dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, mdata, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
idx = 0
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs):
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs']
)
n_samples = len(src_data)
#TODO: this has to be changed
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# validation past file
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Binarization finished after %s" % elapse)
if opt.src_vocab is None and opt.asr == False and opt.lm == False:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
if opt.multi_dataset:
# SAVE DATA
print("Saving dictionary to %s" % (opt.save_data + '.dict.pt'))
torch.save(dicts, opt.save_data + '.dict.pt')
if opt.src_vocab is None and opt.asr == False and opt.lm == False:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
print("Finished.")
else:
if opt.format in ['raw', 'bin']:
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'type': opt.src_type,
'train': train,
'valid': valid}
torch.save(save_data, opt.save_data + '.train.pt')
print("Done")
elif opt.format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'src_lang', 'tgt_lang']:
if train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
# Finally save the audio path
save_data = {'train': train['src'],
'valid': valid['src']}
# remember to take into account the past information
if 'past_src' in train and len(train['past_src']) > 0:
save_data['train_past'] = train['past_src']
save_data['valid_past'] = valid['past_src']
if opt.format in ['wav']:
torch.save(save_data, opt.save_data + '.wav_path.pt')
else:
torch.save(save_data, opt.save_data + '.scp_path.pt')
print("Done")
elif opt.format in ['mmap', 'mmem']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
# save dicts in this format
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
if set_ not in train or train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if set_ not in train or train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
else:
raise NotImplementedError
# Script entry point: run the preprocessing pipeline when invoked directly.
if __name__ == "__main__":
    main()
def safe_readline(f):
    """Read one line from text stream *f*, tolerating a start offset that
    falls inside a multi-byte encoded character.

    If decoding fails, back up one byte at a time until the read starts on
    a character boundary, then return the (possibly partial) line found
    there.
    """
    offset = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # Step back one byte and retry from the new position.
            offset -= 1
            f.seek(offset)
| 63,944 | 43.498956 | 119 | py |
NMTGMinor | NMTGMinor-master/flask_online.py | #!/usr/bin/env python
from onmt.online_translator import RecognizerParameter, ASROnlineTranslator
from flask import Flask, request
import torch
import numpy as np
import math
import sys
import json
import threading
import queue
import uuid
import traceback
import subprocess
# Command-line usage: flask_online.py <host> <port> [config-file]
host = sys.argv[1]  # e.g. 192.168.0.72
port = sys.argv[2]  # e.g. 5051
# BUG FIX: argv is [script, host, port, (config)], so a custom config file is
# only present when more than 3 arguments are given.  The previous test
# `len(sys.argv) <= 2` sent the host+port-only case into the else branch,
# where sys.argv[3] raised an IndexError.
if len(sys.argv) <= 3:
    filename = "model.conf"
else:
    filename = sys.argv[3]
# Read the config once and locate the "model <path>" entry.
with open(filename, "r") as conf_file:
    conf_data = conf_file.read().split("\n")
model = None
for d in conf_data:
    d = d.split()
    if len(d) == 2 and d[0] == "model":
        model = d[1]
        break
# Append an "ls -l" listing of the model file; served back by /asr/version.
# NOTE(review): if the config has no "model" line, model is still None here
# and the concatenation below raises TypeError — confirm configs define it.
conf_data.append("model_ls " + str(subprocess.run(("ls -l " + model).split(), capture_output=True).stdout))
conf_data = "\n".join(conf_data)
app = Flask(__name__)
def create_unique_list(my_list):
    """Return the distinct elements of *my_list* (order not preserved)."""
    return list(set(my_list))
def initialize_model():
    """Build the online ASR translator from the module-level config file.

    Returns the translator plus the maximum number of queued requests that
    may be decoded together in one batch.
    """
    translator = ASROnlineTranslator(filename)
    print("ASR initialized")
    return translator, 16
def use_model(reqs):
    """Decode a batch of queued requests and publish a result for each.

    Each request's payload is (audio_tensor, prefix, input_language,
    output_language).  If every request shares the same prefix and language
    pair they are decoded together via translate_batch; otherwise each one
    is translated individually.
    """
    if len(reqs) == 1:
        req = reqs[0]
        audio_tensor, prefix, input_language, output_language = req.get_data()
        model.set_language(input_language, output_language)
        hypo = model.translate(audio_tensor, [prefix])
        result = {"hypo": hypo}
        req.publish(result)
    else:
        audio_tensors = list()
        prefixes = list()
        input_languages = list()
        output_languages = list()
        batch_runnable = False
        for req in reqs:
            audio_tensor, prefix, input_language, output_language = req.get_data()
            # NOTE(review): set_language is called once per request here and
            # again before decoding below, so these calls look redundant —
            # confirm whether set_language has other side effects.
            model.set_language(input_language, output_language)
            audio_tensors.append(audio_tensor)
            prefixes.append(prefix)
            input_languages.append(input_language)
            output_languages.append(output_language)
        # One shared prefix + language pair means the set can be batched.
        unique_prefix_list = create_unique_list(prefixes)
        unique_input_languages = create_unique_list(input_languages)
        unique_output_languages = create_unique_list(output_languages)
        if len(unique_prefix_list) == 1 and len(unique_input_languages) == 1 and len(unique_output_languages) == 1:
            batch_runnable = True
        if batch_runnable:
            model.set_language(input_languages[0], output_languages[0])
            hypos = model.translate_batch(audio_tensors, prefixes)
            for req, hypo in zip(reqs, hypos):
                result = {"hypo": hypo}
                req.publish(result)
        else:
            # Mixed prefixes/languages: fall back to one-by-one decoding.
            for req, audio_tensor, prefix, input_language, output_language \
                    in zip(reqs, audio_tensors, prefixes, input_languages, output_languages):
                model.set_language(input_language, output_language)
                hypo = model.translate(audio_tensor, [prefix])
                result = {"hypo": hypo}
                req.publish(result)
def run_decoding():
    """Worker loop: pull requests off the priority queue and decode them.

    Blocks on the queue for the first item, then greedily drains up to
    max_batch_size already-queued items into one batch.  Runs forever on a
    daemon thread.
    """
    while True:
        reqs = [queue_in.get()]
        # Greedily grow the batch with whatever is already waiting.
        while not queue_in.empty() and len(reqs) < max_batch_size:
            req = queue_in.get()
            reqs.append(req)
            # A high-priority item closes the batch so it is served at once.
            if req.priority >= 1:
                break
        print("Batch size:",len(reqs),"Queue size:",queue_in.qsize())
        try:
            use_model(reqs)
        except Exception as e:
            print("An error occured during model inference")
            traceback.print_exc()
            # Unblock every waiting request thread with an error result.
            for req in reqs:
                req.publish({"hypo":"", "status":400})
class Priority:
    """Prioritised work item for the decoding queue.

    Ties in priority are broken by arrival order (FIFO) via a class-wide
    monotonically increasing index.
    """
    # NOTE(review): next_index is incremented without a lock although Flask
    # handles requests on multiple threads — confirm whether strict FIFO
    # tie-breaking matters here.
    next_index = 0
    def __init__(self, priority, id, condition, data):
        self.index = Priority.next_index
        Priority.next_index += 1
        self.priority = priority    # larger value = served earlier
        self.id = id                # key used to look up the result
        self.condition = condition  # notified when the result is ready
        self.data = data            # payload consumed by use_model()
    def __lt__(self, other):
        # PriorityQueue pops the smallest item: negate priority so higher
        # priorities come first, then fall back to arrival order.
        return (-self.priority, self.index) < (-other.priority, other.index)
    def get_data(self):
        return self.data
    def publish(self, result):
        """Store *result* for the waiting request thread and wake it up."""
        dict_out[self.id] = result
        try:
            with self.condition:
                self.condition.notify()
        except Exception:
            # Fixed: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt.
            print("ERROR: Count not publish result")
def pcm_s16le_to_tensor(pcm_s16le):
    """Convert raw little-endian signed 16-bit PCM bytes into a float
    tensor of shape (frames, 1) with values scaled into [-1, 1)."""
    samples = np.frombuffer(pcm_s16le, dtype=np.int16)
    scaled = torch.from_numpy(samples).float() / 32768.0
    # One mono channel, matching torchaudio.load's frames x channels layout.
    return scaled.unsqueeze(1)
# corresponds to an asr_server "http://$host:$port/asr/infer/en,en" in StreamASR.py
# use None when no input- or output language should be specified
@app.route("/asr/infer/<input_language>,<output_language>", methods=["POST"])
def inference(input_language, output_language):
    """Handle one ASR request: enqueue the audio and block until decoded.

    Multipart file fields: "pcm_s16le" (required raw audio), "prefix" and
    "priority" (optional).  Returns a JSON body containing at least the
    key "hypo", plus an HTTP status code.
    """
    pcm_s16le: bytes = request.files.get("pcm_s16le").read()
    prefix = request.files.get("prefix")  # can be None
    if prefix is not None:
        prefix: str = prefix.read().decode("utf-8")
    # calculate features corresponding to a torchaudio.load(filepath) call
    audio_tensor = pcm_s16le_to_tensor(pcm_s16le)
    priority = request.files.get("priority")  # can be None
    try:
        priority = int(priority.read())  # used together with priority queue
    except (AttributeError, ValueError):
        # Missing field (None has no .read()) or non-numeric content.
        priority = 0
    condition = threading.Condition()
    with condition:
        id = str(uuid.uuid4())
        data = (audio_tensor, prefix, input_language, output_language)
        queue_in.put(Priority(priority, id, condition, data))
        condition.wait()
    result = dict_out.pop(id)
    status = 200
    # BUG FIX: the error path publishes {"status": 400}; the old code tested
    # `200 in result` (`status in result`) instead of the "status" key, so
    # failures were reported as HTTP 200.
    if "status" in result:
        status = result.pop("status")
    # result has to contain a key "hypo" with a string as value (other optional keys are possible)
    return json.dumps(result), status
# called during automatic evaluation of the pipeline to store worker information
@app.route("/asr/version", methods=["POST"])
def version():
    """Serve the config file contents (plus the model_ls line) as version info."""
    # return dict or string (as first argument)
    return conf_data, 200
# Module-level startup: build the model, the request queue and the result map.
model, max_batch_size = initialize_model()
queue_in = queue.PriorityQueue()
dict_out = {}
# A single daemon thread drains the queue and runs the model.
decoding = threading.Thread(target=run_decoding)
decoding.daemon = True
decoding.start()
app.run(host=host, port=port)
| 6,118 | 29.595 | 115 | py |
NMTGMinor | NMTGMinor-master/flask_mt.py | #!/usr/bin/env python
# from onmt.online_translator import RecognizerParameter, ASROnlineTranslator
from onmt.online_translator import TranslatorParameter, OnlineTranslator
from flask import Flask, request
import torch
import numpy as np
import math
import sys
import json
import threading
import queue
import uuid
import traceback
import subprocess
host = sys.argv[1] # 192.168.0.72
port = sys.argv[2] # 5051
#if len(sys.argv)<=2:
# The config filename is currently hard-coded; the argv[3] override is
# commented out above/below.
filename = "model.conf"
print(host, port)
#else:
#    filename = sys.argv[3]
# Read the config, find the "model <path>" entry, then append an "ls -l"
# listing of that model file so the /models endpoint can report it.
conf_data = open(filename,"r").read().split("\n")
model = None
for d in conf_data:
    d = d.split()
    if len(d)==2 and d[0]=="model":
        model = d[1]
        break
conf_data.append("model_ls "+str(subprocess.run(("ls -l "+model).split(), capture_output=True).stdout))
conf_data = "\n".join(conf_data)
app = Flask(__name__)
def create_unique_list(my_list):
    """
    Deduplicate *my_list*.

    Used to decide whether all queued requests share the same prefix and
    language pair (and can therefore be decoded as one batch).

    Args:
        my_list: list of hashable items.

    Returns:
        A list of the distinct items (order not preserved).
    """
    return list(set(my_list))
def initialize_model():
    """
    Build the online MT translator from the module-level config file.

    Returns:
        The translator and the maximum batch size for grouped decoding.
    """
    translator = OnlineTranslator(filename)
    print("MT Model initialized")
    return translator, 16
def use_model(reqs):
    """Translate a batch of queued requests and publish a result for each.

    Each request's payload is (input_text, prefix, input_language,
    output_language).  If all requests share one prefix and language pair
    they are decoded together via translate_batch, otherwise one by one.
    """
    if len(reqs) == 1:
        req = reqs[0]
        input_text, prefix, input_language, output_language = req.get_data()
        model.set_language(input_language, output_language)
        hypo = model.translate(input_text, [prefix])
        result = {"hypo": hypo}
        req.publish(result)
    else:
        input_texts = list()
        prefixes = list()
        input_languages = list()
        output_languages = list()
        batch_runnable = False
        for req in reqs:
            input_text, prefix, input_language, output_language = req.get_data()
            # NOTE(review): this per-request set_language is repeated before
            # decoding below — looks redundant; confirm side effects.
            model.set_language(input_language, output_language)
            input_texts.append(input_text)
            prefixes.append(prefix)
            input_languages.append(input_language)
            output_languages.append(output_language)
        # One shared prefix + language pair means the set can be batched.
        unique_prefix_list = create_unique_list(prefixes)
        unique_input_languages = create_unique_list(input_languages)
        unique_output_languages = create_unique_list(output_languages)
        if len(unique_prefix_list) == 1 and len(unique_input_languages) == 1 and len(unique_output_languages) == 1:
            batch_runnable = True
        if batch_runnable:
            model.set_language(input_languages[0], output_languages[0])
            hypos = model.translate_batch(input_texts, prefixes)
            for req, hypo in zip(reqs, hypos):
                result = {"hypo": hypo}
                req.publish(result)
        else:
            # Mixed prefixes/languages: decode requests individually.
            for req, input_text, prefix, input_language, output_language \
                    in zip(reqs, input_texts, prefixes, input_languages, output_languages):
                model.set_language(input_language, output_language)
                hypo = model.translate(input_text, [prefix])
                result = {"hypo": hypo}
                req.publish(result)
def run_decoding():
    """Worker loop: pull requests off the priority queue and decode them.

    Blocks on the queue for the first item, then greedily drains up to
    max_batch_size already-queued items into one batch.  Runs forever on a
    daemon thread.
    """
    while True:
        reqs = [queue_in.get()]
        # Greedily grow the batch with whatever is already waiting.
        while not queue_in.empty() and len(reqs) < max_batch_size:
            req = queue_in.get()
            reqs.append(req)
            # A high-priority item closes the batch so it is served at once.
            if req.priority >= 1:
                break
        print("Batch size:",len(reqs),"Queue size:",queue_in.qsize())
        try:
            use_model(reqs)
        except Exception as e:
            print("An error occured during model inference")
            traceback.print_exc()
            # Unblock every waiting request thread with an error result.
            for req in reqs:
                req.publish({"hypo":"", "status":400})
class Priority:
    """Prioritised work item for the decoding queue.

    Ties in priority are broken by arrival order (FIFO) via a class-wide
    monotonically increasing index.
    """
    # NOTE(review): next_index is incremented without a lock although Flask
    # handles requests on multiple threads — confirm whether strict FIFO
    # tie-breaking matters here.
    next_index = 0
    def __init__(self, priority, id, condition, data):
        self.index = Priority.next_index
        Priority.next_index += 1
        self.priority = priority    # larger value = served earlier
        self.id = id                # key used to look up the result
        self.condition = condition  # notified when the result is ready
        self.data = data            # payload consumed by use_model()
    def __lt__(self, other):
        # PriorityQueue pops the smallest item: negate priority so higher
        # priorities come first, then fall back to arrival order.
        return (-self.priority, self.index) < (-other.priority, other.index)
    def get_data(self):
        return self.data
    def publish(self, result):
        """Store *result* for the waiting request thread and wake it up."""
        dict_out[self.id] = result
        try:
            with self.condition:
                self.condition.notify()
        except Exception:
            # Fixed: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt.
            print("ERROR: Count not publish result")
# corresponds to an asr_server "http://$host:$port/asr/infer/en,en" in StreamASR.py
# use None when no input- or output language should be specified
# @app.route("/asr/infer/<input_language>,<output_language>", methods=["POST"])
@app.route("/predictions/<input_language>,<output_language>", methods=["POST"])
def inference(input_language, output_language):
    """Handle one MT request: enqueue it and block until the result arrives.

    Form fields: "text" (required), "prefix" and "priority" (optional).
    Returns a JSON body containing at least the key "hypo", plus an HTTP
    status code.
    """
    # note: in ASR/SLT it should be "request.files"
    # while in MT it's "request.data"
    input_text = request.form['text']
    try:
        prefix = request.form['prefix']
    except KeyError:
        prefix = None
    print("RECEIVED INPUT TEXT:", input_text)
    # BUG FIX: form values are str, not file-like, so the old
    # int(priority.read()) always raised AttributeError and silently forced
    # priority to 0.
    try:
        priority = int(request.form["priority"])  # used together with priority queue
    except (KeyError, ValueError):
        priority = 0
    condition = threading.Condition()
    with condition:
        id = str(uuid.uuid4())
        # the same with SLT
        data = (input_text, prefix, input_language, output_language)
        queue_in.put(Priority(priority, id, condition, data))
        condition.wait()
    result = dict_out.pop(id)
    status = 200
    # BUG FIX: the error path publishes {"status": 400}; the old code tested
    # `200 in result` (`status in result`) instead of the "status" key, so
    # failures were reported as HTTP 200.
    if "status" in result:
        status = result.pop("status")
    # result has to contain a key "hypo" with a string as value (other optional keys are possible)
    return json.dumps(result), status
# called during automatic evaluation of the pipeline to store worker information
@app.route("/models/<input_language>,<output_language>", methods=["GET"])
def version(input_language, output_language):
    """Serve the config file contents (plus the model_ls line) as worker info."""
    # print(input_language, output_language)
    # return dict or string (as first argument)
    return conf_data, 200
# Module-level startup: build the model, the request queue and the result map.
model, max_batch_size = initialize_model()
queue_in = queue.PriorityQueue()
dict_out = {}
# A single daemon thread drains the queue and runs the model.
decoding = threading.Thread(target=run_decoding)
decoding.daemon = True
decoding.start()
app.run(host=host, port=port)
| 6,523 | 27.867257 | 115 | py |
NMTGMinor | NMTGMinor-master/train_classify.py | #!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import time, datetime
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
from onmt.data.wav_dataset import WavDataset
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from options import make_parser
from collections import defaultdict
from onmt.constants import add_tokenidx
import os
import numpy as np
# Build the option parser; the shared option definitions live in options.make_parser.
# NOTE(review): the description says 'train_distributed.py' although this file
# is train_classify.py — presumably copied over; confirm.
parser = argparse.ArgumentParser(description='train_distributed.py')
onmt.markdown.add_md_help_argument(parser)
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
    onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
    print("WARNING: You have a CUDA device, should run with -gpus 0")
# Seed the RNG for reproducibility.
torch.manual_seed(opt.seed)
def numpy_to_torch(tensor_list):
    """Convert every numpy array in *tensor_list* to a torch tensor.

    Non-array entries are passed through unchanged; a new list is returned.
    """
    return [torch.from_numpy(item) if isinstance(item, np.ndarray) else item
            for item in tensor_list]
def run_process(gpu, train_data, valid_data, dicts, opt, checkpoint):
    """Entry point for one (possibly spawned) training process on `gpu`:
    builds a ClassifierTrainer and runs it, optionally from a checkpoint."""
    # from onmt.train_utils.mp_trainer import Trainer
    from onmt.train_utils.classify_trainer import ClassifierTrainer
    trainer = ClassifierTrainer(gpu, train_data, valid_data, dicts, opt)
    trainer.run(checkpoint=checkpoint)
def main():
    """Build train/valid datasets from opt and launch training.

    Modes:
      * single dataset (default): load from opt.data in 'bin'/'raw'
        (one pickled .train.pt file) or 'scp'/'scpmem'/'mmem'/'wav'
        (memory-mapped / audio index files).
      * multi dataset (opt.multi_dataset): scan the directory containing
        opt.data for train.N / valid.N sub-directories and build one
        onmt.Dataset per sub-directory.

    Finally spawns len(opt.gpus) processes running run_process (or calls it
    directly for a single GPU).
    """
    if not opt.multi_dataset:
        if opt.data_format in ['bin', 'raw']:
            start = time.time()
            if opt.data.endswith(".train.pt"):
                print("Loading data from '%s'" % opt.data)
                dataset = torch.load(opt.data)
            else:
                print("Loading data from %s" % opt.data + ".train.pt")
                dataset = torch.load(opt.data + ".train.pt")
            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            print("Done after %s" % elapse)
            dicts = dataset['dicts']
            onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
            # For backward compatibility: missing keys default to None
            train_dict = defaultdict(lambda: None, dataset['train'])
            valid_dict = defaultdict(lambda: None, dataset['valid'])
            if train_dict['src_lang'] is not None:
                assert 'langs' in dicts
                train_src_langs = train_dict['src_lang']
                train_tgt_langs = train_dict['tgt_lang']
            else:
                # allocate new languages
                dicts['langs'] = {'src': 0, 'tgt': 1}
                train_src_langs = list()
                train_tgt_langs = list()
                # Allocate one language id tensor for the bilingual case
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
            train_data = onmt.Dataset(numpy_to_torch(train_dict['src']), numpy_to_torch(train_dict['tgt']),
                                      train_dict['src_sizes'], train_dict['tgt_sizes'],
                                      train_src_langs, train_tgt_langs,
                                      batch_size_words=opt.batch_size_words,
                                      data_type=dataset.get("type", "text"), sorting=True,
                                      batch_size_sents=opt.batch_size_sents,
                                      multiplier=opt.batch_size_multiplier,
                                      augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                      upsampling=opt.upsampling,
                                      num_split=1)
            if valid_dict['src_lang'] is not None:
                assert 'langs' in dicts
                valid_src_langs = valid_dict['src_lang']
                valid_tgt_langs = valid_dict['tgt_lang']
            else:
                # allocate new languages
                valid_src_langs = list()
                valid_tgt_langs = list()
                # Allocate one language id tensor for the bilingual case
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
            valid_data = onmt.Dataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
                                      valid_dict['src_sizes'], valid_dict['tgt_sizes'],
                                      valid_src_langs, valid_tgt_langs,
                                      batch_size_words=opt.batch_size_words,
                                      data_type=dataset.get("type", "text"), sorting=True,
                                      batch_size_sents=opt.batch_size_sents,
                                      multiplier=opt.batch_size_multiplier,
                                      cleaning=True,
                                      upsampling=opt.upsampling)
            print(' * number of training sentences. %d' % len(dataset['train']['src']))
            print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)
        # Loading asr data structures
        elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
            print("Loading memory mapped data files ....")
            start = time.time()
            from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
            from onmt.data.scp_dataset import SCPIndexDataset
            dicts = torch.load(opt.data + ".dict.pt")
            # onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
            if opt.data_format in ['scp', 'scpmem']:
                audio_data = torch.load(opt.data + ".scp_path.pt")
            elif opt.data_format in ['wav']:
                audio_data = torch.load(opt.data + ".wav_path.pt")
            # allocate languages if not
            if 'langs' not in dicts:
                dicts['langs'] = {'src': 0, 'tgt': 1}
            else:
                print(dicts['langs'])
            train_path = opt.data + '.train'
            if opt.data_format in ['scp', 'scpmem']:
                train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)
                if 'train_past' in audio_data:
                    past_train_src = SCPIndexDataset(audio_data['train_past'],
                                                     concat=opt.concat, shared_object=train_src)
                else:
                    past_train_src = None
            elif opt.data_format in ['wav']:
                train_src = WavDataset(audio_data['train'])
                past_train_src = None
            else:
                train_src = MMapIndexedDataset(train_path + '.src')
                past_train_src = None
            train_tgt = MMapIndexedDataset(train_path + '.tgt')
            # check the lang files if they exist (in the case of multi-lingual models)
            if os.path.exists(train_path + '.src_lang.bin'):
                assert 'langs' in dicts
                train_src_langs = MMapIndexedDataset(train_path + '.src_lang')
                train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')
            else:
                train_src_langs = list()
                train_tgt_langs = list()
                # Allocate a Tensor(1) for the bilingual case
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
            # check the length files if they exist
            if os.path.exists(train_path + '.src_sizes.npy'):
                train_src_sizes = np.load(train_path + '.src_sizes.npy')
                train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')
            else:
                train_src_sizes, train_tgt_sizes = None, None
            # check the length files if they exist
            if os.path.exists(train_path + '.past_src_sizes.npy'):
                past_train_src_sizes = np.load(train_path + '.past_src_sizes.npy')
            else:
                past_train_src_sizes = None
            if opt.data_format in ['scp', 'scpmem']:
                data_type = 'audio'
            elif opt.data_format in ['wav']:
                data_type = 'wav'
            else:
                data_type = 'text'
            train_data = onmt.Dataset(train_src,
                                      train_tgt,
                                      train_src_sizes, train_tgt_sizes,
                                      train_src_langs, train_tgt_langs,
                                      batch_size_words=opt.batch_size_words,
                                      data_type=data_type, sorting=True,
                                      batch_size_sents=opt.batch_size_sents,
                                      multiplier=opt.batch_size_multiplier,
                                      augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                      cleaning=True, verbose=True,
                                      input_size=opt.input_size,
                                      past_src_data=past_train_src,
                                      min_src_len=0, min_tgt_len=0,
                                      past_src_data_sizes=past_train_src_sizes,
                                      constants=onmt.constants)
            valid_path = opt.data + '.valid'
            if opt.data_format in ['scp', 'scpmem']:
                valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)
                if 'valid_past' in audio_data:
                    past_valid_src = SCPIndexDataset(audio_data['valid_past'],
                                                     concat=opt.concat, shared_object=valid_src)
                else:
                    past_valid_src = None
            elif opt.data_format in ['wav']:
                valid_src = WavDataset(audio_data['valid'])
                past_valid_src = None
            else:
                valid_src = MMapIndexedDataset(valid_path + '.src')
                past_valid_src = None
            valid_tgt = MMapIndexedDataset(valid_path + '.tgt')
            if os.path.exists(valid_path + '.src_lang.bin'):
                assert 'langs' in dicts
                valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')
                valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')
            else:
                valid_src_langs = list()
                valid_tgt_langs = list()
                # Allocate a Tensor(1) for the bilingual case
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
            # check the length files if they exist
            if os.path.exists(valid_path + '.src_sizes.npy'):
                valid_src_sizes = np.load(valid_path + '.src_sizes.npy')
                valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')
            else:
                valid_src_sizes, valid_tgt_sizes = None, None
            # check the length files if they exist
            if os.path.exists(valid_path + '.past_src_sizes.npy'):
                past_valid_src_sizes = np.load(valid_path + '.past_src_sizes.npy')
            else:
                past_valid_src_sizes = None
            # we can use x2 batch size for validation
            valid_data = onmt.Dataset(valid_src, valid_tgt,
                                      valid_src_sizes, valid_tgt_sizes,
                                      valid_src_langs, valid_tgt_langs,
                                      batch_size_words=opt.batch_size_words * 2,
                                      multiplier=opt.batch_size_multiplier,
                                      data_type=data_type, sorting=True,
                                      input_size=opt.input_size,
                                      batch_size_sents=opt.batch_size_sents,
                                      cleaning=True, verbose=True, debug=True,
                                      past_src_data=past_valid_src,
                                      past_src_data_sizes=past_valid_src_sizes,
                                      min_src_len=0, min_tgt_len=0,
                                      constants=onmt.constants)
            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            print("Done after %s" % elapse)
        else:
            raise NotImplementedError
        print(' * number of sentences in training data: %d' % train_data.size())
        print(' * number of sentences in validation data: %d' % valid_data.size())
    else:
        print("[INFO] Reading multiple dataset ...")
        # raise NotImplementedError
        dicts = torch.load(opt.data + ".dict.pt")
        # onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
        root_dir = os.path.dirname(opt.data)
        print("Loading training data ...")
        train_dirs, valid_dirs = dict(), dict()
        # scan the data directory to find the training data
        for dir_ in os.listdir(root_dir):
            if os.path.isdir(os.path.join(root_dir, dir_)):
                if str(dir_).startswith("train"):
                    idx = int(dir_.split(".")[1])
                    train_dirs[idx] = dir_
                if dir_.startswith("valid"):
                    idx = int(dir_.split(".")[1])
                    valid_dirs[idx] = dir_
        train_sets, valid_sets = list(), list()
        for (idx_, dir_) in sorted(train_dirs.items()):
            data_dir = os.path.join(root_dir, dir_)
            print("[INFO] Loading training data %i from %s" % (idx_, dir_))
            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError
            elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
                from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
                from onmt.data.scp_dataset import SCPIndexDataset
                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                elif opt.data_format in ['wav']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = WavDataset(audio_data)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUG FIX: was `src_sizes, sizes = None, None`, which left
                    # tgt_sizes unbound (NameError) or stale from a previous
                    # loop iteration before being passed to onmt.Dataset.
                    src_sizes, tgt_sizes = None, None
                if opt.data_format in ['scp', 'scpmem']:
                    data_type = 'audio'
                elif opt.data_format in ['wav']:
                    data_type = 'wav'
                else:
                    data_type = 'text'
                train_data = onmt.Dataset(src_data,
                                          tgt_data,
                                          src_sizes, tgt_sizes,
                                          src_lang_data, tgt_lang_data,
                                          batch_size_words=opt.batch_size_words,
                                          data_type=data_type, sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          multiplier=opt.batch_size_multiplier,
                                          src_align_right=opt.src_align_right,
                                          upsampling=opt.upsampling,
                                          augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                          cleaning=True, verbose=True,
                                          input_size=opt.input_size,
                                          constants=onmt.constants)
                train_sets.append(train_data)
        for (idx_, dir_) in sorted(valid_dirs.items()):
            data_dir = os.path.join(root_dir, dir_)
            print("[INFO] Loading validation data %i from %s" % (idx_, dir_))
            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError
            elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                elif opt.data_format in ['wav']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = WavDataset(audio_data)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUG FIX: same `sizes` -> `tgt_sizes` fix as in the
                    # training loop above.
                    src_sizes, tgt_sizes = None, None
                if opt.encoder_type == 'audio':
                    data_type = 'audio'
                else:
                    data_type = 'text'
                valid_data = onmt.Dataset(src_data, tgt_data,
                                          src_sizes, tgt_sizes,
                                          src_lang_data, tgt_lang_data,
                                          batch_size_words=opt.batch_size_words,
                                          multiplier=opt.batch_size_multiplier,
                                          data_type=data_type, sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          src_align_right=opt.src_align_right,
                                          min_src_len=1, min_tgt_len=3,
                                          input_size=opt.input_size,
                                          cleaning=True, verbose=True, constants=onmt.constants)
                valid_sets.append(valid_data)
        train_data = train_sets
        valid_data = valid_sets
    if opt.load_from:
        checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
        print("* Loading dictionaries from the checkpoint")
        # Drop the model weights and optimizer state; keep the dictionaries.
        del checkpoint['model']
        del checkpoint['optim']
        dicts = checkpoint['dicts']
    else:
        dicts['tgt'].patch(opt.patch_vocab_multiplier)
        checkpoint = None
    if "src" in dicts:
        print(' * vocabulary size. source = %d; target = %d' %
              (dicts['src'].size(), dicts['tgt'].size()))
    else:
        print(' * vocabulary size. target = %d' %
              (dicts['tgt'].size()))
    os.environ['MASTER_ADDR'] = opt.master_addr  # default 'localhost'
    os.environ['MASTER_PORT'] = opt.master_port  # default '8888'
    # spawn N processes for N gpus
    # each process has a different trainer
    if len(opt.gpus) > 1:
        torch.multiprocessing.spawn(run_process, nprocs=len(opt.gpus),
                                    args=(train_data, valid_data, dicts, opt, checkpoint))
    else:
        run_process(0, train_data, valid_data, dicts, opt, checkpoint)
if __name__ == "__main__":
main() | 20,484 | 44.220751 | 107 | py |
NMTGMinor | NMTGMinor-master/tools/grad_check.py | import torch.nn as nn
import onmt
import torch
from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
class TestEncoder(nn.Module):
    """Minimal module wrapping a stack of reversible encoder layers so the
    forward/backward path goes through ReversibleEncoderFunction (used by
    the gradcheck script below)."""
    def __init__(self, layers):
        super().__init__()
        self.layers = layers
    def forward(self, input):
        # Third argument is passed as None here — presumably an optional
        # mask/positional input; confirm against ReversibleEncoderFunction.
        return ReversibleEncoderFunction.apply(input, self.layers, None)
class TestDecoder(nn.Module):
    """Minimal module wrapping reversible decoder layers (which also take an
    encoder context) so gradcheck exercises ReversibleDecoderFunction."""
    def __init__(self, layers):
        super().__init__()
        self.layers = layers
    def forward(self, input, context):
        # Remaining arguments are disabled (None/False) for the gradcheck —
        # presumably masks / incremental-decoding state; confirm against
        # ReversibleDecoderFunction.
        return ReversibleDecoderFunction.apply(input, context, self.layers,
                                               None, None, False, None)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='reversible transformer')
    parser.add_argument('-model_size', type=int, default=16,
                        help='Size of embedding / transformer hidden')
    # NOTE(review): the help string for -gpu looks copy-pasted from a seed
    # option; the value is actually used as the CUDA device id below.
    parser.add_argument('-gpu', default=0, type=int,
                        help="Seed for deterministic runs.")
    parser.add_argument('-test_decoder', action='store_true',
                        help='Test decoder')
    opt = parser.parse_args()
    torch.cuda.set_device(opt.gpu)
    onmt.constants.weight_norm = False
    onmt.constants.checkpointing = False
    onmt.constants.max_position_length = 4096
    # Fixed hyper-parameters for the gradient check (kept tiny on purpose;
    # all dropout disabled so gradcheck sees a deterministic function).
    opt.layers = 2
    opt.variational_dropout = False
    opt.dropout = 0.0
    opt.attn_dropout = 0.0
    opt.n_heads = 1
    opt.inner_size = 16
    bsz = 4
    seq_len = 16
    # Input uses model_size*2 features (the decoder's context below uses
    # model_size) — double precision is required for gradcheck tolerance.
    input_states = torch.randn(*(seq_len, bsz, opt.model_size*2)).double().cuda()
    if not opt.test_decoder:
        layers = nn.ModuleList([ReversibleTransformerEncoderLayer(opt) for _ in range(opt.layers)])
        # layers.cuda()
        net = TestEncoder(layers)
        net = net.double().cuda()
        print(net)
        print("start gradchecking ...")
        input_states.requires_grad = True
        torch.autograd.gradcheck(net, input_states)
        print("gradchecking completed.")
    else:
        print("Testing decoder ...")
        opt.ignore_source = False
        layers = nn.ModuleList([ReversibleTransformerDecoderLayer(opt) for _ in range(opt.layers)])
        net = TestDecoder(layers)
        net = net.double().cuda()
        src_seq_len = 8
        context = torch.randn(*(src_seq_len, bsz, opt.model_size)).double().cuda()
        print("start gradchecking for input and context...")
        # input_states.requires_grad = True
        # NOTE(review): input_states.requires_grad stays False here, so
        # gradcheck only verifies gradients w.r.t. context — confirm intended.
        context.requires_grad = True
        torch.autograd.gradcheck(net, (input_states, context))
        print("gradchecking completed.")
        # context.requires_grad = True
        # input.requires
        # print("start gradchecking for context...")
        # input_states.requires_grad = True
        # torch.autograd.gradcheck(net, (input_states, context))
        # print("gradchecking completed.")
| 2,992 | 28.93 | 111 | py |
NMTGMinor | NMTGMinor-master/tools/perplexity_score.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
def reportScore(name, score_total, words_total):
    """Print the average log-score and corpus perplexity for a scored set.

    ``score_total`` is a sum of token log-probabilities; ``words_total`` the
    number of scored tokens. A tiny epsilon guards against division by zero.
    """
    average = score_total / (words_total + 1e-9)
    perplexity = math.exp(-average)
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (name, average, name, perplexity))
def addone(f):
    """Yield every item of ``f`` followed by a single ``None`` sentinel.

    The trailing ``None`` lets consumers detect end-of-input and flush a
    partially filled batch.
    """
    yield from f
    yield None
def lenPenalty(s, l, alpha):
    """GNMT-style length penalty: divide score ``s`` by ``l ** alpha``."""
    denominator = math.pow(l, alpha)
    return s / denominator
def getSentenceFromTokens(tokens, input_type):
    """Join a token list back into a sentence.

    'word' tokens are space-separated; 'char' tokens are concatenated.
    Raises NotImplementedError for any other ``input_type``.
    """
    separators = {'word': ' ', 'char': ''}
    if input_type not in separators:
        raise NotImplementedError
    return separators[input_type].join(tokens)
def main():
    """Score source/target pairs with a PerplexityScorer and write per-sentence
    gold scores to ``-output``.

    Reads either text (one sentence per line) or audio features (h5/scp),
    accumulates batches of ``-batch_size``, scores each batch with the loaded
    model, and finally dumps the concatenated score tensor via np.savetxt.

    Fixes vs. the original:
      * ``json`` was used in the ``-dump_beam`` branch without ever being
        imported (NameError) — imported locally where needed.
      * ``outF.close()`` closed ``sys.stdout`` when ``-output stdout`` was
        given, so the subsequent ``print`` raised — now guarded.
    """
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1

    all_scores = torch.empty(0)

    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    # Always pick n_best
    opt.n_best = opt.beam_size

    if opt.output == "stdout":
        outF = sys.stdout
    else:
        outF = open(opt.output, 'w')

    gold_score_total, gold_words_total = 0, 0

    src_batch, tgt_batch = [], []
    cur_batch_sizes = []
    count = 0

    tgtF = open(opt.tgt) if opt.tgt else None

    in_file = None
    if opt.src == "stdin":
        in_file = sys.stdin
        opt.batch_size = 1
    elif opt.encoder_type == "audio" and opt.asr_format == "h5":
        in_file = h5.File(opt.src, 'r')
    elif opt.encoder_type == "audio" and opt.asr_format == "scp":
        # kaldiio is only needed for scp input, so import lazily
        import kaldiio
        from kaldiio import ReadHelper
        audio_data = iter(ReadHelper('scp:' + opt.src))
    else:
        in_file = open(opt.src)

    from onmt.inference.perplexity_scorer import PerplexityScorer
    translator = PerplexityScorer(opt)

    # Audio processing for the source batch
    if opt.encoder_type == "audio":
        s_prev_context = []
        t_prev_context = []

        i = 0
        while True:
            # Pull the next utterance's feature matrix from h5/scp.
            if opt.asr_format == "h5":
                if i == len(in_file):
                    break
                line = np.array(in_file[str(i)])
                i += 1
            elif opt.asr_format == "scp":
                try:
                    _, line = next(audio_data)
                except StopIteration:
                    break

            if opt.stride != 1:
                line = line[0::opt.stride]
            line = torch.from_numpy(line)
            if opt.concat != 1:
                # zero-pad so the time dimension is divisible by `concat`,
                # then fold `concat` consecutive frames into one wide frame
                add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
                z = torch.FloatTensor(add, line.size()[1]).zero_()
                line = torch.cat((line, z), 0)
                line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))

            if opt.previous_context > 0:
                s_prev_context.append(line)
                # prepend up to `previous_context` earlier utterances,
                # separated by a zero frame
                for i in range(1, opt.previous_context + 1):
                    if i < len(s_prev_context):
                        line = torch.cat((torch.cat((s_prev_context[-i - 1], torch.zeros(1, line.size()[1]))), line))
                if len(s_prev_context) > opt.previous_context:
                    s_prev_context = s_prev_context[-1 * opt.previous_context:]

            src_batch += [line]

            if tgtF:
                tline = tgtF.readline().strip()
                if opt.previous_context > 0:
                    t_prev_context.append(tline)
                    for i in range(1, opt.previous_context + 1):
                        if i < len(s_prev_context):
                            tline = t_prev_context[-i - 1] + " # " + tline
                    if len(t_prev_context) > opt.previous_context:
                        t_prev_context = t_prev_context[-1 * opt.previous_context:]

                if opt.input_type == 'word':
                    tgt_tokens = tline.split() if tgtF else None
                elif opt.input_type == 'char':
                    tgt_tokens = list(tline.strip()) if tgtF else None
                else:
                    raise NotImplementedError("Input type unknown")
                tgt_batch += [tgt_tokens]

            if len(src_batch) < opt.batch_size:
                continue

            print("Batch size:", len(src_batch), len(tgt_batch))
            gold_score, num_gold_words, all_gold_scores = translator.translate(src_batch, tgt_batch, type='asr')
            count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
                                                                      src_batch, tgt_batch,
                                                                      gold_score, num_gold_words,
                                                                      all_gold_scores, all_scores,
                                                                      opt.input_type)
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch = [], []

        # catch the last batch
        if len(src_batch) != 0:
            print("Batch size:", len(src_batch), len(tgt_batch))
            gold_score, num_gold_words, all_gold_scores = translator.translate(
                src_batch,
                tgt_batch, type='asr')
            count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
                                                                      src_batch, tgt_batch,
                                                                      gold_score, num_gold_words,
                                                                      all_gold_scores, all_scores, opt.input_type)
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch = [], []

    # Text processing
    else:
        for line in addone(in_file):
            if line is not None:
                if opt.input_type == 'word':
                    src_tokens = line.split()
                elif opt.input_type == 'char':
                    src_tokens = list(line.strip())
                else:
                    raise NotImplementedError("Input type unknown")
                src_batch += [src_tokens]
                if tgtF:
                    if opt.input_type == 'word':
                        tgt_tokens = tgtF.readline().split() if tgtF else None
                    elif opt.input_type == 'char':
                        tgt_tokens = list(tgtF.readline().strip()) if tgtF else None
                    else:
                        raise NotImplementedError("Input type unknown")
                    tgt_batch += [tgt_tokens]

                if len(src_batch) < opt.batch_size:
                    continue
            else:
                # at the end of file, check last batch
                if len(src_batch) == 0:
                    break

            # score the accumulated batch with the model
            gold_score, num_gold_words, all_gold_scores = translator.translate(src_batch, tgt_batch)

            # convert output tensor to words / accumulate totals
            count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
                                                                      src_batch, tgt_batch,
                                                                      gold_score, num_gold_words,
                                                                      all_gold_scores, all_scores,
                                                                      opt.input_type)
            gold_score_total += gold_score
            gold_words_total += goldWords
            src_batch, tgt_batch = [], []
            cur_batch_sizes = []

    if opt.verbose:
        if tgtF: reportScore('GOLD', gold_score_total, gold_words_total)

    if tgtF:
        tgtF.close()

    if opt.dump_beam:
        import json  # not imported at module level; local import fixes a NameError
        json.dump(translator.beam_accum, open(opt.dump_beam, 'w'))

    # Don't close the real stdout, or the print below would fail.
    if outF is not sys.stdout:
        outF.close()

    print(all_scores.size())
    all_scores_numpy = all_scores.numpy()
    # NOTE(review): with `-output stdout` this writes a file literally named
    # "stdout" — presumably unintended; confirm desired behaviour.
    np.savetxt(opt.output, all_scores_numpy, delimiter="\n")
def translateBatch(opt, tgtF, count, outF, translator, src_batch, tgt_batch, gold_score,
                   num_gold_words, all_gold_scores, all_scores, input_type):
    """Collect per-sentence gold scores for one scored batch.

    Returns the updated running sentence ``count``, the batch's summed gold
    score, the batch's gold word count, and ``all_scores`` with this batch's
    per-sentence scores appended.

    Note: ``outF`` and ``all_gold_scores`` are accepted but never used here;
    nothing is written to ``outF`` by this function.
    """
    gold_score_total = 0
    gold_words_total = 0
    if tgtF is not None:
        # gold_score is a per-sentence tensor/sequence of log-scores
        gold_score_total = sum(gold_score).item()
        gold_words_total = num_gold_words
    batch_size = len(src_batch)
    # one scalar score slot per sentence in the batch
    scores = torch.Tensor(batch_size).zero_()
    for b in range(len(src_batch)):
        count += 1
        if opt.normalize:
            # length-normalize by the number of target tokens
            gold_score_ = gold_score[b] / len(tgt_batch[b])
        else:
            gold_score_ = gold_score[b]
        if opt.verbose:
            if opt.encoder_type == "text":
                src_sent = " ".join(src_batch[b])
                print('SRC %d: %s' % (count, src_sent))
            if tgtF is not None:
                tgt_sent = getSentenceFromTokens(tgt_batch[b], input_type)
                if translator.tgt_dict.lower:
                    tgt_sent = tgt_sent.lower()
                print('GOLD %d: %s ' % (count, tgt_sent))
                print("GOLD SCORE: %.4f" % gold_score_)
            print()
            print('')
        else:
            # lightweight progress indicator when not verbose
            if count % 100000 == 0:
                print("Finished %d sentences ... " % count)
        scores[b].fill_(gold_score_)
    # append this batch's scores to the running tensor of all scores
    all_scores = torch.cat([all_scores, scores], dim=0)
    return count, gold_score_total, gold_words_total, all_scores
if __name__ == "__main__":
main()
| 14,590 | 39.30663 | 117 | py |
NMTGMinor | NMTGMinor-master/tools/test_amp.py | import torch
from apex import amp
from apex.normalization.fused_layer_norm import FusedLayerNorm
torch.cuda.set_device(1)
class NeuralNet(torch.nn.Module):
    """Small LayerNorm/Linear stack used to smoke-test apex AMP casting.

    The print calls deliberately trace tensor dtypes through the forward pass
    so the fp16/fp32 casts inserted by amp can be observed.
    """

    def __init__(self, d_in, d_out):
        super().__init__()
        self.d_in = d_in
        self.d_out = d_out
        self.norm = torch.nn.LayerNorm(d_in)
        self.norm2 = FusedLayerNorm(d_out)
        self.linear = torch.nn.Linear(d_in, d_out)
        self.linear2 = torch.nn.Linear(d_out, d_out)

    def forward(self, input):
        h = self.norm(input)
        print(h.type())
        h = self.linear(h)
        print(h.type())
        h = torch.relu(h)
        print(h.type())
        h = self.norm2(h)
        h = self.linear2(h)
        print(h.type())
        # NOTE(review): log_softmax without an explicit dim relies on the
        # deprecated implicit-dim behaviour — kept to preserve behaviour.
        h = torch.nn.functional.log_softmax(h)
        print("end")
        return h
# Smoke-test apex AMP (opt_level O1 mixed precision) on the toy network above.
model = NeuralNet(500, 1000)
model = model.cuda()
loss_function = torch.nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# Wrap model + optimizer so amp patches in the fp16/fp32 casts.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
for i in range(1000):
    x = torch.rand(128, 500).cuda()
    # model emits log-probabilities (log_softmax); cast back to fp32 for NLLLoss
    o = model(x).float()
    y = torch.randint(low=0, high=999, size=(128, )).cuda()
    loss = loss_function(o, y)
    # scale the loss so fp16 gradients do not underflow; amp unscales on step
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()
    optimizer.zero_grad()
| 1,464 | 28.3 | 67 | py |
NMTGMinor | NMTGMinor-master/tools/get_best.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
parser = argparse.ArgumentParser(description='rescore.py')
onmt.markdown.add_md_help_argument(parser)
#
parser.add_argument('-input', required=True,
help='Path to the nbest file')
parser.add_argument('-n_best', type=int, default=1,
help="""n_best value from decoding and rescoring""")
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-coeff', default=[], nargs='+', type=float,
help="Use CUDA on the listed devices.")
def addone(f):
    """Yield every item of ``f`` and then a ``None`` sentinel so the caller
    can flush any partially accumulated group at end of input."""
    yield from f
    yield None
def main():
    """For every group of ``-n_best`` hypotheses, write the one with the
    highest weighted combination of its feature scores to ``-output``.

    Input lines look like ``sentence ||| score_1 score_2 ...``; score ``i`` is
    weighted by ``-coeff[i]``.
    """
    opt = parser.parse_args()

    reader = open(opt.input)
    out_writer = open(opt.output, 'w')

    count = 0
    group_sents, group_scores = [], []

    for line in addone(reader):
        if line is None:
            break
        count += 1
        fields = line.strip().split(" ||| ")
        sentence = fields[0]
        feature_scores = fields[1].strip().split()
        group_sents.append(sentence)
        combined = 0
        print(count)
        # weight each feature score by the matching -coeff entry
        for idx, value in enumerate(feature_scores):
            combined += opt.coeff[idx] * float(value)
        group_scores.append(combined)

        if count % opt.n_best == 0:
            # max keeps the first-seen hypothesis on ties, exactly like a
            # stable descending sort followed by taking element 0
            best_sent = max(zip(group_sents, group_scores), key=lambda pair: pair[1])[0]
            out_writer.write(best_sent + "\n")
            group_sents = []
            group_scores = []

    out_writer.close()
if __name__ == "__main__":
main() | 2,055 | 24.382716 | 75 | py |
NMTGMinor | NMTGMinor-master/tools/average_checkpoints.py | from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
from onmt.model_factory import build_model
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def main():
    """Average the parameters of several checkpoints into one model.

    ``-models`` is a ``|``-separated list of checkpoint paths. The first
    checkpoint supplies the architecture/dicts; the remaining ones are folded
    into it either arithmetically (``-method mean``) or geometrically
    (``-method gmean``). The result is saved to ``-output``.
    """
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1

    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    # opt.model should be a string of models, split by |
    models = opt.models.split("|")
    # print(models)
    n_models = len(models)

    print("Loading main model from %s ..." % models[0])
    # map_location keeps tensors on CPU during load
    checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)

    # drop optimizer state to save memory
    if 'optim' in checkpoint:
        del checkpoint['optim']

    main_checkpoint = checkpoint  # NOTE(review): kept but never used below

    model_opt = checkpoint['opt']
    dicts = checkpoint['dicts']

    main_model = build_model(model_opt, checkpoint['dicts'])

    main_model.load_state_dict(checkpoint['model'])

    if opt.cuda:
        main_model = main_model.cuda()

    for i in range(1, len(models)):

        model = models[i]
        print("Loading model from %s ..." % models[i])
        checkpoint = torch.load(model, map_location=lambda storage, loc: storage)

        # NOTE(review): model_opt is overwritten by each loaded checkpoint,
        # so the *last* model's opt is what gets saved below — confirm all
        # checkpoints share the same architecture.
        model_opt = checkpoint['opt']

        # delete optim information to save GPU memory
        if 'optim' in checkpoint:
            del checkpoint['optim']

        current_model = build_model(model_opt, checkpoint['dicts'])

        current_model.load_state_dict(checkpoint['model'])

        if opt.cuda:
            current_model = current_model.cuda()

        if opt.method == 'mean':
            # Sum the parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.add_(param.data)
        elif opt.method == 'gmean':
            # Take the geometric mean of parameter values
            for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
                main_param.data.mul_(param.data)
        else:
            raise NotImplementedError

    # Normalizing
    if opt.method == 'mean':
        for main_param in main_model.parameters():
            main_param.data.div_(n_models)
    elif opt.method == 'gmean':
        for main_param in main_model.parameters():
            main_param.data.pow_(1./n_models)

    # Saving
    model_state_dict = main_model.state_dict()

    # epoch/iteration set to -1 to mark this as a merged, not trained, model
    save_checkpoint = {
        'model': model_state_dict,
        'dicts': dicts,
        'opt': model_opt,
        'epoch': -1,
        'iteration' : -1,
        'batchOrder' : None,
        'optim': None
    }

    print("Saving averaged model to %s" % opt.output)

    torch.save(save_checkpoint, opt.output)
if __name__ == "__main__":
main()
| 3,446 | 26.798387 | 96 | py |
NMTGMinor | NMTGMinor-master/tools/grad_check_reversible.py | import torch.nn as nn
import onmt
import torch
# from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
# ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
from onmt.reversible_models.relative_transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
class TestEncoder(nn.Module):
    """Thin wrapper exposing ReversibleEncoderFunction as an nn.Module so it
    can be passed to torch.autograd.gradcheck."""

    def __init__(self, layers):
        super().__init__()
        # ModuleList of reversible encoder layers, applied inside the Function
        self.layers = layers

    def forward(self, input, pos):
        # final None argument is the attention mask (no masking during gradcheck)
        return ReversibleEncoderFunction.apply(input, pos, self.layers, None)
class TestDecoder(nn.Module):
    """Thin wrapper exposing ReversibleDecoderFunction as an nn.Module so it
    can be passed to torch.autograd.gradcheck."""

    def __init__(self, layers):
        super().__init__()
        # ModuleList of reversible decoder layers, applied inside the Function
        self.layers = layers

    def forward(self, input, context, pos):
        # trailing arguments are masks/flags left disabled for gradcheck:
        # (src_mask=None, tgt_mask=None, incremental=False, incremental_cache=None)
        return ReversibleDecoderFunction.apply(input, pos, context, self.layers,
                                               None, None, False, None)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=16,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
parser.add_argument('-test_decoder', action='store_true',
help='Test decoder')
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
onmt.constants.weight_norm = False
onmt.constants.checkpointing = False
onmt.constants.max_position_length = 4096
onmt.constants.double_precision = True
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 1
opt.inner_size = 16
bsz = 4
seq_len = 16
input_states = torch.randn(*(seq_len, bsz, opt.model_size*2)).double().cuda()
pos = torch.randn(*(seq_len, 1, opt.model_size)).double().cuda()
pos.requires_grad=False
if not opt.test_decoder:
layers = nn.ModuleList([ReversibleTransformerEncoderLayer(opt) for _ in range(opt.layers)])
# layers.cuda()
net = TestEncoder(layers)
net = net.double().cuda()
print(net)
print("start gradchecking ...")
input_states.requires_grad = True
torch.autograd.gradcheck(net, (input_states, pos))
print("gradchecking completed.")
else:
print("Testing decoder ...")
opt.ignore_source = False
layers = nn.ModuleList([ReversibleTransformerDecoderLayer(opt) for x in range(opt.layers)])
net = TestDecoder(layers)
net = net.double().cuda()
src_seq_len = 8
context = torch.randn(*(src_seq_len, bsz, opt.model_size)).double().cuda()
print("start gradchecking for input and context...")
input_states.requires_grad = True
context.requires_grad = True
torch.autograd.gradcheck(net, (input_states, context, pos))
print("gradchecking completed.")
# context.requires_grad = True
# input.requires
# print("start gradchecking for context...")
# input_states.requires_grad = True
# torch.autograd.gradcheck(net, (input_states, context))
# print("gradchecking completed.")
| 3,353 | 30.641509 | 120 | py |
NMTGMinor | NMTGMinor-master/test/test_cmatmul.py | import torch
from time import time
# Compare complex matmul implemented as four real GEMMs ("pseudo") against
# torch's native cfloat mm — first for correctness, then for speed in fp32
# and under autocast (fp16).
B = 16384
N_in = 1024
N_out = 4096
num_iters = 200

x = torch.randn(B, N_in, dtype=torch.cfloat, requires_grad=True)
# real/imag shadow tensors, copied from x so both paths use identical values
r = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
i = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
print(r.type())
r.data.copy_(x.real.data)
i.data.copy_(x.imag.data)
x = x.cuda()
r = r.cuda()
i = i.cuda()

x_2 = torch.randn(N_in, N_out, dtype=torch.cfloat, requires_grad=True)
r_2 = torch.randn(N_in, N_out, dtype=torch.float, requires_grad=True)
i_2 = torch.randn(N_in, N_out, dtype=torch.float, requires_grad=True)
r_2.data.copy_(x_2.real.data)
i_2.data.copy_(x_2.imag.data)
x_2 = x_2.cuda()
x_2 = x_2.cuda()
r_2 = r_2.cuda()
i_2 = i_2.cuda()

a = torch.mm(x, x_2)

with torch.no_grad():
    # correctness: (r + i·j)(r2 + i2·j) = (r·r2 − i·i2) + (r·i2 + i·r2)·j,
    # so both printed differences should be (near) zero
    a = torch.mm(x, x_2)
    a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
    a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
    print(a.real - a_r)
    print(a.imag - a_i)

# --- timing: four-real-GEMM version, fp32 ---
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
    a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
    a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
    (a_r.sum() + a_i.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPseudo CMATMUL fp32 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")

# --- timing: native complex mm, fp32 ---
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
    a = torch.mm(x, x_2)
    (a.real.sum() + a.imag.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch CMATMUL fp32 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")

# --- timing: four-real-GEMM version under autocast (fp16 tensor cores) ---
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
with torch.cuda.amp.autocast(enabled=True):
    for _ in range(num_iters):
        a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
        a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
        (a_r.sum() + a_i.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPseudo CMATMUL fp16 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")

# --- timing: native complex mm under autocast ---
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
with torch.cuda.amp.autocast(enabled=True):
    for _ in range(num_iters):
        a = torch.mm(x, x_2)
        (a.real.sum() + a.imag.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch CMATMUL fp16 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
| 2,405 | 20.872727 | 91 | py |
NMTGMinor | NMTGMinor-master/test/test_factorize_linear.py | import torch
import torch.nn.functional as F
from time import time
# Sanity checks for a rank-1 factorized linear layer:
# F.linear(x, W * (s^T r), b)  ==  ((x * r) @ W^T) * s + b
N_in = 1024
N_out = 4096
B = 16384
num_iters = 512

x = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
W = torch.randn(N_out, N_in, dtype=torch.float, requires_grad=True)
b = torch.randn(N_out, dtype=torch.float, requires_grad=True)
x = x.cuda()
W = W.cuda()
b = b.cuda()

# baseline: F.linear vs the explicit mm formulation should match (printed
# difference should be ~0)
y = F.linear(x, W, b)
y.sum().backward()

y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
y2.sum().backward()

print(y - y2)

# rank-1 scaling vectors: r scales input features, s scales output features
r = torch.randn(1, N_in, dtype=torch.float, requires_grad=True)
s = torch.randn(1, N_out, dtype=torch.float, requires_grad=True)
r = r.cuda()
s = s.cuda()

# left side: materialize the scaled weight W ∘ (s^T r), then apply
y1 = F.linear(x, torch.mul(W, torch.mm(s.t(), r)), b)
# y2 = torch.mul(torch.mm(torch.mul(x, r)), s) + b.unsqueeze(0)
# right side: scale activations instead — algebraically identical, cheaper
y2 = torch.mm(x * r, W.transpose(0, 1)) * s + b.unsqueeze(0)

print("Checking ")
# compare normalized sums (a cheap proxy for elementwise equality)
print(y1.sum() / (B * N_out), y2.sum() / (B * N_out))
# print(torch.allclose(y1, y2, rtol=1e-05, atol=1e-08))

# per-language factorization tables (one (r, s) pair per language)
rank = 1
n_languages = 1024

r_table = torch.Tensor(n_languages, rank, N_in)
s_table = torch.Tensor(n_languages, rank, N_out)

# indices: [T x B x n_languages]
# r_output: T x B x rank x N_in
# s_output: T x B x rank x N_out
# apply the above equation. torch.mm(x * r, W.transpose(0, 1)) * s + b.unsqueeze(0)
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
#
# for _ in range(num_iters):
# y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
# y2.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
#
# print(F"\nPseudo CMATMUL fp32 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
# for _ in range(num_iters):
# y = F.linear(x, W, b)
# y.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPytorch CMATMUL fp32 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
# with torch.cuda.amp.autocast(enabled=True):
# for _ in range(num_iters):
# y = F.linear(x, W, b)
# y.sum().backward()
#
#
# torch.cuda.synchronize()
# stop_time = time()
#
# print(F"\nPytorch CMATMUL fp16 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
#
# with torch.cuda.amp.autocast(enabled=True):
# for _ in range(num_iters):
# y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
# y2.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPseudo CMATMUL fp16 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
| 3,043 | 24.579832 | 93 | py |
NMTGMinor | NMTGMinor-master/test/test_self_attention_blaslt.py | import torch
import unittest
from modules.self_multihead_attn import SelfMultiheadAttn
from time import time
class SelfMultiheadAttnTest(unittest.TestCase):
    """Compare the 'fast' (C++/cuBLASLt) SelfMultiheadAttn implementation
    against the 'default' pure-PyTorch one, in fp16 on GPU."""

    def setUp(self, seed=1234):
        """Build two identical attention layers (same seed → same weights),
        one per implementation, plus fp16 CUDA inputs for each."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

        self.seq_length = 512
        self.sequences = 8
        self.hidden_dim = 1024
        self.heads = 16
        self.dropout_prob = 0.0

        self.ref_layer = SelfMultiheadAttn(self.hidden_dim,
                                           self.heads,
                                           dropout=self.dropout_prob,
                                           bias=True,
                                           mask_additive=True,
                                           impl='default')
        self.ref_layer.cuda().half()
        self.ref_layer.reset_parameters()

        self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
                                      dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)

        # Reset seed so parameters are identical
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

        self.tst_layer = SelfMultiheadAttn(self.hidden_dim,
                                           self.heads,
                                           dropout=self.dropout_prob,
                                           bias=True,
                                           mask_additive=True,
                                           impl='fast')
        self.tst_layer.cuda().half()
        self.tst_layer.reset_parameters()

        self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
                                      dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)

    def test_self_multihead_attn_additive_mask(self):
        """Outputs and input gradients of both implementations must agree
        under a random additive key-padding mask."""
        grads = torch.randn_like(self.tst_inputs)
        # additive mask: ~half the key positions get -10000 before the softmax
        mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
        # print(mask)
        for i in range(20):
            grads = torch.randn_like(self.tst_inputs)
            mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()

            ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
                                                    self.ref_inputs,
                                                    self.ref_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)

            tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
                                                    self.tst_inputs,
                                                    self.tst_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)

            # NOTE(review): backward is invoked on the *inputs* (leaf tensors),
            # not on ref_outputs/tst_outputs. That sets inputs.grad = grads on
            # both sides directly, so the .grad comparison below is vacuous —
            # presumably this should be ref_outputs.backward(grads) /
            # tst_outputs.backward(grads). Confirm against the apex original.
            self.ref_inputs.backward(grads)
            self.tst_inputs.backward(grads)

            self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-3, rtol=1e-3))
            self.assertTrue(not torch.any(torch.isnan(self.tst_inputs.grad)))
            self.assertTrue(torch.allclose(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3))
            self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3))

    def test_speed(self):
        """Time 100 forward(+input-backward) iterations of each implementation."""
        grads = torch.randn_like(self.tst_inputs)
        mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()

        torch.cuda.profiler.start()
        torch.cuda.synchronize()
        start_time = time()
        num_iters = 100
        for i in range(num_iters):
            ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
                                                    self.ref_inputs,
                                                    self.ref_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            # NOTE(review): same inputs-vs-outputs backward issue as above.
            self.ref_inputs.backward(grads)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"\nPytorch Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")

        torch.cuda.profiler.start()
        torch.cuda.synchronize()
        start_time = time()
        num_iters = 100
        for i in range(num_iters):
            tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
                                                    self.tst_inputs,
                                                    self.tst_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            self.tst_inputs.backward(grads)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"\nC++ Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()
| 5,583 | 43.672 | 109 | py |
NMTGMinor | NMTGMinor-master/test/test_rotation.py | import torch
import torch
from torch import nn, einsum
from einops import rearrange, repeat
class SinusoidalEmbeddings(nn.Module):
    """Sinusoidal position encodings (rotary-style layout).

    Produces a [len x dim] table whose first half along the last dimension
    holds sin terms and second half cos terms, at frequencies
    1 / 10000^(2k / dim).
    """

    def __init__(self, dim):
        super().__init__()
        # one frequency per channel pair: 1 / 10000^(0/dim), 1 / 10000^(2/dim), ...
        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, x):
        """Return the position table; only ``x.shape[0]`` (treated as the
        position/time dimension) and ``x.device`` are read from ``x``."""
        length = x.shape[0]
        positions = torch.arange(length, device=x.device).type_as(self.inv_freq)
        # outer product positions × frequencies (same as einsum('i , j -> i j'))
        angles = positions.unsqueeze(-1) * self.inv_freq.unsqueeze(0)
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
def rotate_every_two(x):
    """Rotate adjacent channel pairs along the last dim: (x0, x1) -> (-x1, x0).

    Equivalent to multiplying each pair by the imaginary unit; used by the
    rotary position embedding.
    """
    evens = x[..., 0::2]
    odds = x[..., 1::2]
    rotated = torch.stack((-odds, evens), dim=-1)
    # re-interleave the pairs back into the original layout
    return rotated.flatten(start_dim=-2)
def rotate_backward(dx):
dx = rearrange(dx, '... (d j) -> ... d j', j=2)
dx2, dx1 = dx.unbind(dim=-1)
dx = torch.stack((dx1, -dx2), dim=-1)
dx = rearrange(dx, '... d j -> ... (d j)')
return dx
# more like encodings than embeddings, because the position values are not learnable weights
def apply_rotary_emb(q, sinu_pos):
    """Apply rotary position encodings to ``q``.

    :param q: tensor whose dim 0 indexes positions (the sin/cos tables are
        broadcast over dim 1 via unsqueeze(1)); last dim is the feature dim.
        Assumes dim 0 is time — the caller below uses both (BH, Q, D) and
        (Q, BH, D) layouts, so confirm which is intended.
    :param sinu_pos: [n x dim] table from SinusoidalEmbeddings — first half
        sin terms, second half cos terms.
    :return: (rotated q, sin, cos); sin/cos are returned so the caller can
        verify the backward pass against a manual gradient.
    """
    # splits the last dimension of the sinu_pos in half and grab sin and cos terms
    sinu_pos = rearrange(sinu_pos, 'n (j d) -> n j d', j=2)
    sin, cos = sinu_pos.unbind(dim=-2)
    # duplicate each frequency so sin/cos line up with the adjacent channel
    # pairs used by rotate_every_two: [a, b] -> [a, a, b, b]
    sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j=2), (sin, cos))
    # q' = (q * cos) + (rotate_every_two(q) * sin)
    # dl_dq = dl_dq' * (cos + sin * rotate'(q))
    print(q.size(), cos.size(), sin.size())
    # unsqueeze(1) broadcasts the [n x d] tables over the batch dimension
    q = q * cos.unsqueeze(1) + rotate_every_two(q) * sin.unsqueeze(1)
    # q = rotate_every_two(q) # * sin
    # y = g(x) * a
    # dy/dx = dy/dg * dg/dx = a *
    # q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
    return q, sin, cos
# ---- numeric checks of rotate_every_two / apply_rotary_emb gradients ----
BH = 1024 * 8
B = 1024
H = BH // B
Q = 75
K = 56
D = 64

pos_encoder = SinusoidalEmbeddings(D)
pos_encoder.cuda()

# create input
x = torch.randn((BH, Q, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=True)

# create the pos emb
pos_emb = pos_encoder(x)

# analytic gradient of sum(rotate_every_two(x)) w.r.t. x: (+1, -1) per pair
rotate_grad = torch.Tensor([1, -1] * int(D / 2)).to(x.device)
rotate_grad = rotate_grad.unsqueeze(0).unsqueeze(1).repeat(BH, Q, 1)
#
r_x = rotate_every_two(x)
#
loss = r_x.sum() * 1
#
loss.backward()
#
# should print a (near) zero tensor if autograd matches the analytic gradient
print(x.grad - rotate_grad)

x.grad = None

# second check: full rotary embedding, time-first layout this time
x = torch.randn((Q, BH, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=True)
grad_rx = torch.randn((Q, BH, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=False)

pos_emb = pos_encoder(x)
rotary_emb_x, sin, cos = apply_rotary_emb(x, pos_emb)

rotary_emb_x.backward(grad_rx)

print(x.grad)

rotate_grad = rotate_backward(x.new_ones(x.shape))
# grad_x = (cos + rotate_grad * sin) * grad_rx
# manual backward of q' = q*cos + rotate(q)*sin; difference below should be ~0
grad_x = cos.unsqueeze(1) * grad_rx + rotate_backward(sin.unsqueeze(1) * grad_rx)

print(x.grad - grad_x)
| 3,312 | 25.293651 | 104 | py |
NMTGMinor | NMTGMinor-master/test/test_fmha.py | ###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import sys
import torch
import numpy as np
import unittest
import math
import fmhalib_sm86 as mha
from time import time
from random import randint
from torch.cuda.amp import custom_fwd, custom_bwd
# CONDITION to use fast mha:
# length <= 512 and sm=80
class IndexCopy(torch.autograd.Function):
    """Scatter a packed (padding-free) batch back into a zero-padded buffer.

    ``forward(input, non_pad_indices, total_batch_size)`` returns a tensor
    whose first dimension is ``total_batch_size``; the rows listed in
    ``non_pad_indices`` are copied from ``input`` and all remaining rows are
    zero.  ``backward`` gathers the matching rows of the incoming gradient.
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, input, non_pad_indices, total_batch_size):
        padded_shape = [total_batch_size] + list(input.size())[1:]
        padded = input.new_zeros(*padded_shape)
        padded.index_copy_(0, non_pad_indices, input)
        # Only the index tensor is needed to route the gradient back.
        ctx.save_for_backward(non_pad_indices)
        return padded

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grads):
        (non_pad_indices,) = ctx.saved_tensors
        # No gradients for the index tensor or total_batch_size.
        return output_grads.index_select(0, non_pad_indices), None, None
def py_mha(qkv, amask, b, s, h, d, high_precision=True):
    """Pure-PyTorch reference multi-head self-attention.

    Args:
        qkv: packed projections, viewable as (b, s, h, 3, d).
        amask: additive mask source broadcastable against the (b, h, s, s)
            score matrix; positions with value 1 receive a -10000 penalty.
        b, s, h, d: batch, sequence length, heads, head dimension.
        high_precision: when True the score matmul/softmax run in fp32 before
            being cast back to qkv's dtype.

    Returns:
        The attention context of shape (b, s, h, d), contiguous, with
        ``retain_grad`` enabled so callers can read ``.grad`` after backward
        (qkv must therefore require grad).
    """
    qkv = qkv.view(b, s, h, 3, d)
    q = qkv[:, :, :, 0, :].permute(0, 2, 1, 3)
    k = qkv[:, :, :, 1, :].permute(0, 2, 1, 3)
    v = qkv[:, :, :, 2, :].permute(0, 2, 1, 3)
    if high_precision:
        # Accumulate the attention scores in fp32 (the fused kernel does the
        # same internally); the softmax output is cast back before the second
        # matmul, so `v` stays in the original dtype in both branches.
        q = q.float()
        k = k.float()
    p = torch.matmul(q, k.permute(0, 1, 3, 2))
    p_masked = p / math.sqrt(d) + (amask) * -10000.0
    attn = torch.softmax(p_masked, -1).to(qkv.dtype)
    ctx = torch.matmul(attn, v)
    ctx = ctx.permute(0, 2, 1, 3).contiguous()
    ctx.retain_grad()
    return ctx
class TestFMHA(unittest.TestCase):
    # Compares the fused fmhalib kernels (mha.fwd/bwd and their *_nl variants)
    # against the pure-PyTorch reference `py_mha`, then prints rough timings.
    # Requires a CUDA device and the compiled fmhalib_sm86 extension.

    def run_uneven_test(self, s, b):
        """Correctness + timing check with a random length per sequence.

        s: upper bound on the sequence length (a value in [s-127, s] is
        drawn); b: batch size.
        """
        s = randint(s-127, s)
        print(f'Test uneven s={s} b={b}')
        torch.manual_seed(12341234)
        torch.cuda.manual_seed(12341234)
        dtype = torch.float16
        device = torch.device('cuda')
        h = 16  # attention heads
        d = 64  # head dimension
        # amask starts all-ones (1 = padded); real positions are zeroed below.
        amask = torch.ones(b, s, dtype=dtype, device=device)
        slens = []
        prev_size = -1
        for b_ in range(b):
            if prev_size == -1:
                curr_size = randint(1, s)
                slens.append(curr_size)
                prev_size = curr_size
            else:
                # no sort?
                curr_size = randint(1, s)
                slens.append(curr_size)
                prev_size = curr_size
            amask[b_, :prev_size].fill_(0)  # the first prev_size elements have no mask
        max_s = max(slens)
        # Flat indices (into b*s) of the non-padded token positions.
        non_pad_indices = torch.nonzero(amask.view(-1).ne(1)).squeeze(1)
        a = torch.tensor(np.array([0] + slens), dtype=torch.int32)
        amask = amask.unsqueeze(1).unsqueeze(1)
        seqlens = torch.tensor(slens, dtype=torch.int32, device=device)
        # Cumulative sequence offsets — the varlen layout fmhalib expects.
        cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=device)
        total = cu_seqlens[-1].item()
        # input for python mha?
        # should be identical layout with the current code
        qkv = torch.randn((b, s, h, 3, d), device=device, dtype=dtype)

        def run_fmha_forward(qkv_, non_pad_indices_, cu_seqlens_, max_s_):
            # Pack to [total_tokens, 3, h, d] and run the fused forward; the
            # *_nl kernel variant is selected for very small batches.
            qkv_vs = qkv_.permute(0, 1, 3, 2, 4).contiguous().view(b * s, 3, h, d)
            qkv_vs = qkv_vs.index_select(0, non_pad_indices_)
            if b < 4:
                ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens_, 0.0, max_s_, True, None)
            else:
                ctx, S_ = mha.fwd(qkv_vs, cu_seqlens_, 0.0, max_s_, True, None)
            ctx.requires_grad = True
            # NOTE(review): this uses the enclosing-scope `non_pad_indices`,
            # not the `non_pad_indices_` parameter; every call site passes the
            # same object, so behaviour is unchanged — but confirm if reused.
            ctx_out = IndexCopy.apply(ctx, non_pad_indices, b * s)
            ctx_out = ctx_out.view(b, s, h, d)
            return qkv_vs, ctx, ctx_out, S_

        def run_mha_backward(grad, ctx_out_, ctx_, qkv_vs_, non_pad_indices_, cu_seqlens_, max_s_, S__):
            # NOTE(review): backprops through the closed-over `ctx_out`/`ctx`
            # rather than the `ctx_out_`/`ctx_` parameters; callers always
            # pass those same outer objects, so the result is identical here.
            ctx_out.backward(grad, inputs=[ctx_])
            if b < 4:
                dqkv2, _, _ = mha.bwd_nl(ctx.grad, qkv_vs_, S__, cu_seqlens_, 0.0, max_s_)
            else:
                dqkv2, _ = mha.bwd(ctx.grad, qkv_vs_, S__, cu_seqlens_, 0.0, max_s_)
            dqkv2 = dqkv2.permute(0, 2, 1, 3)  # [b*s, 3, h, d]
            return dqkv2
        qkv_vs, ctx, ctx_out, S_ = run_fmha_forward(qkv, non_pad_indices, cu_seqlens, max_s)
        qkv.requires_grad = True
        ctx_ref = py_mha(qkv, amask, b, s, h, d)
        # Zero the padded rows of the reference so both outputs agree there.
        mask = amask.squeeze(1).squeeze(1).bool().unsqueeze(-1).unsqueeze(-1)
        ctx_ref.masked_fill_(mask, 0)
        self.assertTrue(torch.allclose(ctx_ref.float(), ctx_out.float(), atol=1e-2))
        print("output ok.")
        # Drive both backwards with the gradient of a simple MSE-style loss.
        labels = torch.randn_like(ctx_ref)
        diff = ctx_ref - labels
        l = (diff * diff).sum() / b
        l.backward(inputs=[ctx_ref, qkv])
        dw = ctx_ref.grad  # .permute(0, 2, 1, 3)
        dw2 = dw.clone().detach().contiguous()
        dqkv2 = run_mha_backward(dw2, ctx_out, ctx, qkv_vs, non_pad_indices, cu_seqlens, max_s, S_)
        qkv_grad = qkv.grad.view(b * s, h, 3, d)
        qkv_grad = qkv_grad.index_select(0, non_pad_indices)
        if not torch.allclose(qkv_grad.float(), dqkv2.float(), atol=1e-3):
            # Print the residual before the looser assert below fires.
            print(qkv_grad.float() - dqkv2.float())
        self.assertTrue(torch.allclose(qkv_grad.float(), dqkv2.float(), atol=1e-2))
        print("grad ok.")
        # --- timing: fused kernels ---
        num_iters = 20
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            qkv_vs, ctx, ctx_out, S_ = run_fmha_forward(qkv, non_pad_indices, cu_seqlens, max_s)
            dw2 = torch.randn_like(ctx_out)
            dqkv2 = run_mha_backward(dw2, ctx_out, ctx, qkv_vs, non_pad_indices, cu_seqlens, max_s, S_)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"Fused MHA MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.stop()
        # --- timing: pure-PyTorch reference ---
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            ctx_ref = py_mha(qkv, amask, b, s, h, d, high_precision=False)
            labels = torch.randn_like(ctx_ref)
            ctx_ref.backward(labels)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"Python MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.stop()
    def run_test(self, s, b):
        """Correctness + timing check with a uniform sequence length.

        s: sequence length (used as-is); b: batch size.  Requires CUDA and
        the fmhalib extension.
        """
        #s = randint(s - 127, s)
        s = s  # no-op: kept to mirror the resampling line in run_uneven_test
        print(f'Test s={s} b={b}')
        torch.manual_seed(1234)
        torch.cuda.manual_seed(1234)
        dtype = torch.float16
        device = torch.device('cuda')
        h = 16  # attention heads
        d = 64  # head dimension
        slens = [s] * b
        a = torch.tensor(np.array([0] + slens), dtype=torch.int32)
        # All-zero additive mask: every position is a real token.
        amask = torch.zeros(b, h, s, s, dtype=dtype, device=device)
        seqlens = torch.tensor(slens, dtype=torch.int32, device=device)
        cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=device)
        total = cu_seqlens[-1].item()
        # input for python mha?
        qkv = torch.randn((b, s, h, 3, d), device=device, dtype=dtype)
        # input for fmha
        qkv_vs = qkv.permute(0, 1, 3, 2, 4).contiguous().view(b * s, 3, h, d)
        qkv.requires_grad = True
        if b < 4:
            ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens, 0.0, s, True, None)
        else:
            ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, None)
        ctx = ctx.view(b, s, h, d)
        ctx_ref = py_mha(qkv, amask, b, s, h, d)
        print(ctx_ref.float() -ctx.float() )
        self.assertTrue(torch.allclose(ctx_ref.float(), ctx.float(), atol=1e-2))
        # Backward: compare the fused gradient against autograd's.
        labels = torch.randn_like(ctx_ref)
        diff = ctx_ref - labels
        l = (diff * diff).sum() / b
        l.backward()
        dw = ctx_ref.grad.permute(0, 2, 1, 3)
        # NOTE(review): the second permute undoes the first, so dw2 is just a
        # contiguous detached copy of ctx_ref.grad — confirm intent.
        dw2 = dw.permute(0, 2, 1, 3).clone().detach().contiguous()
        if b < 4:
            dqkv2, _, _ = mha.bwd_nl(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
        else:
            dqkv2, _ = mha.bwd(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
        dqkv2 = dqkv2.permute(0, 2, 1, 3).view(b, s, h, 3, d)
        # print(qkv.grad.float() - dqkv2.float())
        self.assertTrue(torch.allclose(qkv.grad.float(), dqkv2.float(), atol=1e-2))
        # --- timing: fused kernels ---
        num_iters = 20
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            if b < 4:
                ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens, 0.0, s, True, None)
            else:
                ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, None)
            dw2 = torch.randn_like(ctx)
            if b < 4:
                dqkv2, _, _ = mha.bwd_nl(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
            else:
                dqkv2, _ = mha.bwd(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"Fused MHA MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.stop()
        # --- timing: pure-PyTorch reference ---
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            ctx_ref = py_mha(qkv, amask, b, s, h, d, high_precision=False)
            labels = torch.randn_like(ctx_ref)
            ctx_ref.backward(labels)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"Python MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.stop()
# def test_128(self):
# self.run_test(128, 55)
# self.run_test(128, 47)
# self.run_test(128, 90)
#
# self.run_uneven_test(128, 55)
# self.run_uneven_test(128, 47)
# self.run_uneven_test(128, 90)
# self.run_test(128, 3)
# self.run_uneven_test(128, 3)
#
# def test_256(self): # 129 - 256?
# #
# self.run_test(256, 32)
# self.run_test(256, 16)
# self.run_test(224, 16)
# self.run_test(224, 3)
# #
# self.run_uneven_test(256, 32)
# self.run_uneven_test(256, 16)
# self.run_uneven_test(224, 16)
# self.run_uneven_test(224, 3)
#
# def test_384(self):
# self.run_test(384, 32)
# self.run_test(384, 16)
# self.run_test(384, 8)
# #
# self.run_uneven_test(384, 32)
# self.run_uneven_test(384, 16)
# self.run_uneven_test(384, 8)
# self.run_test(384, 3)
#
# def test_512(self):
# self.run_test(512, 1)
# self.run_test(512, 2)
# self.run_test(512, 3)
# self.run_uneven_test(512, 32)
# self.run_uneven_test(512, 2)
# self.run_uneven_test(512, 3)
#
# def test_768(self):
# self.run_test(768, 1)
# self.run_test(768, 2)
# self.run_test(768, 3)
# self.run_test(768, 32)
# self.run_test(768, 64)
# self.run_uneven_test(768, 32)
# self.run_uneven_test(768, 64)
# self.run_uneven_test(768, 1)
# self.run_uneven_test(768, 2)
# self.run_uneven_test(768, 3)
def test_896(self):
l = 512
self.run_test(l, 1)
self.run_test(l, 2)
self.run_test(l, 3)
self.run_test(l, 32)
self.run_test(l, 64)
self.run_uneven_test(l, 32)
self.run_uneven_test(l, 64)
self.run_uneven_test(l, 1)
self.run_uneven_test(l, 2)
self.run_uneven_test(l, 3)
# def test_896(self):
# l = 896
# self.run_test(l, 1)
# self.run_test(l, 2)
# self.run_test(l, 3)
# self.run_test(l, 32)
# self.run_test(l, 64)
# self.run_uneven_test(l, 32)
# self.run_uneven_test(l, 64)
# self.run_uneven_test(l, 1)
# self.run_uneven_test(l, 2)
# self.run_uneven_test(l, 3)
#
# def test_768(self):
# self.run_test(768, 4)
# self.run_test(768, 2)
# self.run_test(768, 32)
# self.run_uneven_test(768, 32)
# self.run_uneven_test(768, 3)
# def test_896(self):
# self.run_test(896, 112)
# self.run_test(896, 32)
# self.run_test(896, 2)
# self.run_uneven_test(896, 32)
# self.run_uneven_test(896, 16)
# self.run_uneven_test(896, 3)
# def test_1024(self):
# self.run_test(1024, 4)
# self.run_uneven_test(1024, 32)
# self.run_test(640, 2)
# self.run_test(512, 3)
# self.run_uneven_test(1024, 32)
# self.run_uneven_test(1024, 2)
# self.run_uneven_test(1024, 3)
#
# Run the FMHA test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 14,170 | 32.343529 | 104 | py |
NMTGMinor | NMTGMinor-master/test/test_flattened_weight.py | import torch
import torch.nn.functional as F
from time import time
class ParameterRef(object):
    """Callable stand-in for a parameter stored inside a flat weight buffer.

    Calling the instance returns ``weight_buf[offset:offset + length]``
    reshaped to ``size`` — a zero-copy view, so writes to the buffer are
    visible through previously returned tensors.
    """

    def __init__(self, weight_buf, offset, length, size):
        self.weight_buf = weight_buf
        self.offset = offset
        self.length = length
        self.size = size

    def __call__(self):
        start = self.offset
        stop = start + self.length
        flat_slice = self.weight_buf[start:stop]
        return flat_slice.view(*self.size)
def find_weight(m, _weight_list):
    """Recursively collect every 2-D ``nn.Parameter`` reachable from ``m``.

    Matching parameters are appended to ``_weight_list`` (mutated in place),
    which is also returned for convenience.  Only 2-D (matrix) parameters are
    collected; biases and other 1-D parameters are skipped.
    """
    for attr_str in dir(m):
        target_attr = getattr(m, attr_str)
        # isinstance instead of an exact type() check so Parameter subclasses
        # are picked up as well.
        if isinstance(target_attr, torch.nn.Parameter) and target_attr.ndim == 2:
            _weight_list.append(target_attr)
    for child in m.children():
        find_weight(child, _weight_list)
    return _weight_list
def flatten_weight(m, _weight_buf, _offset):
    """Move every ``nn.Parameter`` on ``m`` (recursively) into ``_weight_buf``.

    Each parameter's data is copied into
    ``_weight_buf[_offset:_offset + numel]``, the module attribute is replaced
    by a ``ParameterRef`` viewing that slice, and the original parameter is
    dropped from ``m._parameters``.  Returns the offset just past the last
    parameter written, so recursive calls can continue packing.

    NOTE(review): this flattens parameters of any rank, while ``find_weight``
    (used to size the buffer) only counts 2-D ones — verify the buffer is
    large enough when modules carry biases/1-D parameters.
    """
    for attr_str in dir(m):
        target_attr = getattr(m, attr_str)
        if isinstance(target_attr, torch.nn.Parameter):
            weight = target_attr
            size = weight.size()
            numel = weight.numel()
            # BUG FIX: copy at the running _offset.  The original indexed with
            # the module-level global `offset` (always 0 here), which made
            # every parameter overwrite the start of the buffer while the
            # ParameterRef offsets pointed elsewhere.
            _weight_buf.data[_offset:_offset + numel].copy_(weight.data.view(-1))
            # print(_weight_buf[_offset:_offset+numel].view_as(weight))
            setattr(m, attr_str, None)
            del m._parameters[attr_str]
            setattr(m, attr_str, ParameterRef(_weight_buf, _offset, numel, size))
            _offset = _offset + numel
            del weight
    for n, ch in m.named_children():
        # BUG FIX: recurse with flatten_weight — the original called
        # find_weight(ch, _weight_buf, _offset), which takes two arguments
        # and flattens nothing (it only went unnoticed because MLP has no
        # child modules).
        _offset = flatten_weight(ch, _weight_buf, _offset)
    return _offset
class MLP(torch.nn.Module):
    """Three-layer bias-free MLP whose weights may later be replaced by
    ``ParameterRef`` callables (see ``flatten_weight``).

    ``forward`` first tries the plain-parameter path; if the weights have
    been swapped for callables, ``F.linear`` raises TypeError and the
    flattened path (which calls each weight to materialise its view) is
    taken instead.
    """

    def __init__(self, input_size, hidden_size, output_size, n_hiddens=2):
        super(MLP, self).__init__()
        self.weight_buf = None
        self.input_weight = torch.nn.Parameter(torch.randn(hidden_size, input_size))
        self.hidden_weight = torch.nn.Parameter(torch.randn(hidden_size, hidden_size))
        self.output_weight = torch.nn.Parameter(torch.randn(output_size, hidden_size))

    def set_buffer(self, _weight_buf):
        # Keep a handle on the flat buffer so it lives as long as the module.
        self.weight_buf = _weight_buf

    def forward(self, x):
        try:
            # Plain nn.Parameter path.
            h = torch.relu(F.linear(x, self.input_weight, None))
            h = torch.relu(F.linear(h, self.hidden_weight, None))
            h = F.linear(h, self.output_weight, None)
        except TypeError:
            # Flattened path: the weights are ParameterRef callables.
            h = torch.relu(F.linear(x, self.input_weight(), None))
            h = torch.relu(F.linear(h, self.hidden_weight(), None))
            h = F.linear(h, self.output_weight(), None)
        return h
# --- Benchmark 1: stock nn.Parameter weights (requires CUDA) ---
mlp = MLP(1024, 4096, 1024).cuda()
x = torch.rand(128, 1024).cuda()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for i in range(32):
    y = mlp(x)
    y.sum().backward()
    mlp.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch default MLP time {(stop_time - start_time) * 1000. / 32:.4f} ms")
# --- Benchmark 2: the same module after flattening every 2-D weight into a
# single contiguous buffer and rerouting attribute access through
# ParameterRef views. ---
weight_list = list()
find_weight(mlp, weight_list)
numels = sum([w.numel() for w in weight_list])
# NOTE(review): .cuda() on a Parameter returns a plain (non-leaf) tensor
# copy, not a Parameter — confirm gradients flow into weight_buf as intended.
weight_buf = torch.nn.Parameter(torch.zeros(numels)).cuda()
offset = 0
with torch.no_grad():
    offset = flatten_weight(mlp, weight_buf, offset)
    print(offset)
mlp.set_buffer(weight_buf)
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for i in range(32):
    y = mlp(x)
    y.sum().backward()
    mlp.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch flattened MLP time {(stop_time - start_time) * 1000. / 32:.4f} ms") | 3,501 | 25.330827 | 86 | py |
NMTGMinor | NMTGMinor-master/test/test_multi_linear.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
# Sanity check (requires CUDA): a single fused in-projection — one
# [3*input_dim, input_dim] matmul whose rows are interleaved per head as
# (q, k, v) — must reproduce the outputs of three separate q/k/v nn.Linear
# projections.
len_q = 20
input_dim = 128
heads = 8
head_dim = input_dim // heads
output_dim = input_dim
k_proj = nn.Linear(input_dim, input_dim, bias=True)
v_proj = nn.Linear(input_dim, input_dim, bias=True)
q_proj = nn.Linear(input_dim, input_dim, bias=True)
# weight = Parameter(torch.Tensor(3 * input_dim, input_dim))
weight_t = torch.Tensor(3 * input_dim, input_dim)
bias_t = torch.Tensor(3 * input_dim)
# weight_t = weight_t.reshape(head_dim, 3, heads, input_dim)
w_q = q_proj.weight.clone()
w_k = k_proj.weight.clone()
w_v = v_proj.weight.clone()
print(torch.allclose(w_q, q_proj.weight))
weights = [w_q, w_k, w_v]
# with torch.no_grad():
#     weight_t[:, 0, :, :].reshape(input_dim, input_dim).copy_(q_proj.weight)
#     weight_t[:, 1, :, :].reshape(input_dim, input_dim).copy_(k_proj.weight)
#     weight_t[:, 2, :, :].reshape(input_dim, input_dim).copy_(v_proj.weight)
weight_ = torch.cat(weights, dim=0).contiguous()
b_q = q_proj.bias.clone()
b_k = k_proj.bias.clone()
b_v = v_proj.bias.clone()
biases = [b_q, b_k, b_v]
bias_ = torch.cat(biases, dim=0).contiguous()
# Re-interleave from [q-block, k-block, v-block] row ordering into per-head
# (q, k, v) ordering, so the single matmul's output can be viewed directly
# as (len_q, bsz * heads, 3, head_dim).
weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1).reshape(-1, input_dim)
bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
# weight_t = weight_t.reshape(3 * input_dim, input_dim)
weight_t.copy_(weight_)
bias_t.copy_(bias_)
weight = Parameter(weight_t)
bias = Parameter(bias_t)
bsz = 16
input = torch.randn(len_q, bsz, input_dim)
q_proj = q_proj.cuda()
k_proj = k_proj.cuda()
v_proj = v_proj.cuda()
weight = weight.cuda()
bias = bias.cuda()
input = input.cuda()
# Reference: three separate projections.
q = q_proj(input).view(len_q, bsz * heads, head_dim)
k = k_proj(input).view(len_q, bsz * heads, head_dim)
v = v_proj(input).view(len_q, bsz * heads, head_dim)
# Fused: one projection, sliced per head.
all = F.linear(input, weight, bias)
# all = all.view(len_q, bsz, 3, heads, head_dim)
#
# q_ = all[:, :, 0, :,:].reshape(len_q, bsz * heads, head_dim)
# k_ = all[:, :, 1, :,:].reshape(len_q, bsz * heads, head_dim)
# v_ = all[:, :, 2, :,:].reshape(len_q, bsz * heads, head_dim)
# all = all.view(len_q, bsz, 3, heads, head_dim).transpose(2, 3).contiguous()
all = all.view(len_q, bsz * heads, 3, head_dim)
q_ = all[:, :, 0, :]  # .view(len_q, bsz * heads, head_dim)
k_ = all[:, :, 1, :]  # .view(len_q, bsz * heads, head_dim)
v_ = all[:, :, 2, :]  # .view(len_q, bsz * heads, head_dim)
# print(q - q_)
print("begin testing ...")
print(torch.allclose(q, q_))
print(torch.allclose(k, k_))
print(torch.allclose(v, v_))
# q_ = q.view(bsz * heads, head_dim)
# k_ = k.view(bsz * heads, head_dim)
# matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
#                               device=queries.device)
# matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
#                                 keys.transpose(0, 1).transpose(1, 2),
#                                 out=matmul1_results, beta=0.0, alpha=scale_t[0])
o = torch.bmm  # NOTE(review): unfinished leftover — binds the function, never called
| 3,110 | 32.095745 | 133 | py |
NMTGMinor | NMTGMinor-master/test/test_self_attention.py | import torch
import unittest
from modules.self_multihead_attn import SelfMultiheadAttn
from time import time
class SelfMultiheadAttnTest(unittest.TestCase):
    # Checks the 'fast' (fused CUDA) SelfMultiheadAttn implementation against
    # the 'default' pure-PyTorch one: both layers are built from the same RNG
    # seed so their parameters match, and both see identically-seeded fp16
    # inputs on CUDA with an additive key-padding mask.

    def setUp(self, seed=1234):
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.seq_length = 512
        self.sequences = 8
        self.hidden_dim = 1024
        self.heads = 16
        self.dropout_prob = 0.0  # dropout off so outputs are comparable
        self.ref_layer = SelfMultiheadAttn(self.hidden_dim,
                                           self.heads,
                                           dropout=self.dropout_prob,
                                           bias=True,
                                           mask_additive=True,
                                           impl='default')
        self.ref_layer.cuda().half()
        self.ref_layer.reset_parameters()
        self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
                                      dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
        # Reset seed so parameters are identical
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.tst_layer = SelfMultiheadAttn(self.hidden_dim,
                                           self.heads,
                                           dropout=self.dropout_prob,
                                           bias=True,
                                           mask_additive=True,
                                           impl='fast')
        self.tst_layer.cuda().half()
        self.tst_layer.reset_parameters()
        self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
                                      dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)

    def test_self_multihead_attn_additive_mask(self):
        """Forward and backward agreement under random additive pad masks."""
        grads = torch.randn_like(self.tst_inputs)
        mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
        # print(mask)
        for i in range(20):
            # Fresh gradient and mask each round; gradients accumulate into
            # .grad identically on both sides, so the comparison stays valid.
            grads = torch.randn_like(self.tst_inputs)
            mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
            ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
                                                    self.ref_inputs,
                                                    self.ref_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
                                                    self.tst_inputs,
                                                    self.tst_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            self.ref_inputs.backward(grads)
            self.tst_inputs.backward(grads)
            # Inputs were drawn from the same seed, so they must match.
            self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-3, rtol=1e-3))
            self.assertTrue(not torch.any(torch.isnan(self.tst_inputs.grad)))
            self.assertTrue(torch.allclose(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3))
            self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3))

    def test_speed(self):
        """Rough forward+backward timing for both implementations."""
        grads = torch.randn_like(self.tst_inputs)
        mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
        torch.cuda.profiler.start()
        torch.cuda.synchronize()
        start_time = time()
        num_iters = 100
        for i in range(num_iters):
            ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
                                                    self.ref_inputs,
                                                    self.ref_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            self.ref_inputs.backward(grads)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"\nPytorch Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.start()
        torch.cuda.synchronize()
        start_time = time()
        num_iters = 100
        for i in range(num_iters):
            tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
                                                    self.tst_inputs,
                                                    self.tst_inputs,
                                                    key_padding_mask=mask,
                                                    need_weights=False,
                                                    attn_mask=None,
                                                    is_training=True)
            self.tst_inputs.backward(grads)
        torch.cuda.synchronize()
        stop_time = time()
        print(F"\nC++ Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# Run the self-attention comparison suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 5,583 | 43.672 | 109 | py |
NMTGMinor | NMTGMinor-master/test/test_softmax.py | import torch
import mask_softmax_dropout_cuda
import copy
# Compare the fused mask_softmax_dropout CUDA kernel (forward plus both
# backward variants) against torch.softmax on fp16 inputs of shape
# (batch*heads, Q, K), with dropout disabled (p=0.0).  Requires CUDA.
BH = 1024 * 8
B = 1024
H = BH // B  # heads per batch element (8, passed to the kernel below)
Q = 75
K = 56
# NOTE(review): the *100 makes x a non-leaf tensor, so x.grad stays None;
# the autograd reference gradient is read from x_ref instead.
x = torch.randn((BH, Q, K) , dtype=torch.float16, device=torch.device("cuda"), requires_grad=True) * 100
x_ref = x.clone().detach().requires_grad_(True)
grado = torch.randn((BH, Q, K), dtype=torch.float16, device=torch.device("cuda"), requires_grad=True)
dropout_mask, softmax_results = mask_softmax_dropout_cuda.forward(True, 8, x, 0.0)
pytorch_output = torch.nn.functional.softmax(x_ref, dim=-1, dtype=torch.float32).type_as(x)
dif = softmax_results - pytorch_output
print(dif)
print(dif.double().sum().div_(x.numel()))  # mean signed error
result = torch.allclose(softmax_results, pytorch_output, atol=1e-3, rtol=1e-3)
print(result)
print("Checking gradients ...")
grado2 = copy.deepcopy(grado)
grado3 = copy.deepcopy(grado)  # NOTE(review): unused
pytorch_output.backward(grado)
gradx_ref = x_ref.grad
# Fused backward, and the recompute variant that re-derives the softmax
# from the raw input x instead of reusing softmax_results.
gradx = mask_softmax_dropout_cuda.backward(8, grado, softmax_results, dropout_mask, 0.0)
gradx2 = mask_softmax_dropout_cuda.backward_recompute(8, grado2, softmax_results, x, dropout_mask, 0.0)
dif = gradx - gradx_ref
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx, gradx_ref, atol=1e-3, rtol=1e-3)
print(result)
dif = gradx2 - gradx_ref
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx2, gradx_ref, atol=1e-3, rtol=1e-3)
print(result)
dif = gradx2 - gradx
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx2, gradx, atol=1e-3, rtol=1e-3)
print(result)
| 1,510 | 24.610169 | 104 | py |
NMTGMinor | NMTGMinor-master/test/modules/fast_self_multihead_attn_func.py | import torch
# import fast_self_multihead_attn
# import fast_self_multihead_attn_bias
# import fast_self_multihead_attn_bias_additive_mask
import self_multihead_attn_cuda as fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function):
    """autograd wrapper around the fused self-attention CUDA extension
    (biased, additive-mask variant).

    All computation happens inside ``self_multihead_attn_cuda``; this class
    only marshals arguments, stashes the intermediates the backward kernel
    needs, and maps gradients back onto the tensor arguments.
    """
    @staticmethod
    def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases,
                output_biases, pad_mask, mask_additive, dropout_prob):
        """Returns the attention output; non-tensor flags are wrapped in
        tensors so they can ride along in save_for_backward."""
        use_biases_t = torch.tensor([input_biases is not None])
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        null_tensor = torch.tensor([])
        use_mask = (pad_mask is not None)
        mask_additive_t = torch.tensor([mask_additive])
        # The extension returns every intermediate required by backward.
        input_lin_results, \
        bmm1_results, \
        dropout_results, \
        dropout_mask, \
        matmul2_results, \
        outputs = \
            fast_self_multihead_attn_bias_additive_mask.forward( \
                use_mask, \
                use_time_mask, \
                is_training, \
                heads, \
                inputs, \
                input_weights, \
                output_weights, \
                input_biases, \
                output_biases, \
                pad_mask if use_mask else null_tensor, \
                dropout_prob)
        # Order here must match the unpacking order in backward() below.
        ctx.save_for_backward(use_biases_t, \
                              heads_t, \
                              matmul2_results, \
                              dropout_results, \
                              null_tensor, \
                              bmm1_results, \
                              pad_mask, \
                              mask_additive_t, \
                              input_lin_results, \
                              inputs, \
                              input_weights, \
                              output_weights, \
                              dropout_mask, \
                              dropout_prob_t)
        return outputs.detach()
    @staticmethod
    def backward(ctx, output_grads):
        """Delegates to the fused backward kernel; returns one gradient slot
        per forward() argument (None for the non-tensor flags)."""
        use_biases_t, \
        heads_t, \
        matmul2_results, \
        dropout_results, \
        softmax_results, \
        bmm1_results, \
        pad_mask, \
        mask_additive_t, \
        input_lin_results, \
        inputs, \
        input_weights, \
        output_weights, \
        dropout_mask, \
        dropout_prob_t = ctx.saved_tensors
        input_grads, \
        input_weight_grads, \
        output_weight_grads, \
        input_bias_grads, \
        output_bias_grads = \
            fast_self_multihead_attn_bias_additive_mask.backward( \
                heads_t[0], \
                output_grads, \
                matmul2_results, \
                dropout_results, \
                bmm1_results, \
                pad_mask, \
                input_lin_results, \
                inputs, \
                input_weights, \
                output_weights, \
                dropout_mask, \
                dropout_prob_t[0])
        # Slots: use_time_mask, is_training, heads | inputs, in_w, out_w,
        # in_b, out_b | pad_mask, mask_additive, dropout_prob.
        return None, None, None, \
               input_grads, input_weight_grads, output_weight_grads, input_bias_grads, output_bias_grads, \
               None, None, None
fast_self_attn_func = FastSelfAttnFunc.apply | 3,260 | 32.96875 | 108 | py |
NMTGMinor | NMTGMinor-master/test/modules/self_multihead_attn.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
# from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
# from apex.normalization.fused_layer_norm import FusedLayerNorm
# from onmt.modules.layer_norm import LayerNorm
# Disable the TorchScript profiling executor/mode when available, so the
# scripted helper below is compiled eagerly instead of being profiled first.
if hasattr(torch._C, '_jit_set_profiling_executor'):
    torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode'):
    torch._C._jit_set_profiling_mode(False)
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
    # type: (Tensor, Tensor, float, bool) -> Tensor
    """Fused dropout(x) + residual, TorchScript-compiled.

    Args:
        x: tensor to apply dropout to.
        residual: tensor added to the (possibly dropped-out) x.
        prob: dropout probability.
        is_training: when False, dropout is a no-op.
    """
    # BUG FIX: honour is_training instead of hard-coding training=True,
    # otherwise dropout keeps firing at evaluation time.
    out = F.dropout(x, p=prob, training=is_training)
    out = residual + out
    return out
class SelfMultiheadAttn(nn.Module):
    """Multi-headed self-attention ("Attention Is All You Need").

    Wraps two interchangeable implementations selected by ``impl``:
    'default' (pure PyTorch, ``self_attn_func``) and 'fast' (fused CUDA
    extension, ``fast_self_attn_func``).  Q/K/V projections share one packed
    [3 * embed_dim, embed_dim] weight; the key-padding mask may be additive
    (float penalties) or boolean, subject to the constraint asserted below.
    """

    def __init__(self, embed_dim, num_heads, dropout=0., bias=False, impl='fast',
                 mask_additive=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = bias
        self.impl = impl
        self.scaling = self.head_dim ** -0.5  # 1/sqrt(head_dim), used by the default impl
        self.mask_additive = mask_additive
        if mask_additive:
            assert impl == 'default' or (
                    impl == 'fast' and bias), "additive mask not supported for fast mode without bias"
        # Packed q/k/v in-projection and the output projection.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
        self.reset_parameters()
        # NOTE(review): an unknown `impl` leaves self.attn_func unset and
        # only fails later inside forward() — confirm that is acceptable.
        if impl == 'fast':
            self.attn_func = fast_self_attn_func
        elif impl == 'default':
            self.attn_func = self_attn_func

    def reset_parameters(self):
        """Xavier-initialise the projections; zero the biases."""
        nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
        nn.init.xavier_uniform_(self.out_proj_weight)
        nn.init.constant_(self.in_proj_bias, 0.)
        nn.init.constant_(self.out_proj_bias, 0.)

    def forward(self, query, key, value, key_padding_mask=None, need_weights=False, attn_mask=None, is_training=True):
        """Input shape: Time x Batch x Channel

        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.

        Returns (outputs, None) — attention weights are never returned, the
        second element only mirrors the nn.MultiheadAttention signature.
        NOTE(review): only `query` is forwarded to the attention function, so
        `key`/`value` are assumed identical to `query` — confirm callers.
        """
        input_bias = self.in_proj_bias
        input_weights = self.in_proj_weight
        # At most one of the two mask kinds may be supplied.
        if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
            mask = key_padding_mask
        elif attn_mask is not None:
            assert self.mask_additive == False, "additive mask not supported for time mask"
            mask = attn_mask
        else:
            mask = None
        if self.impl == 'fast':
            outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, query,
                                     input_weights, self.out_proj_weight, input_bias, self.out_proj_bias, mask,
                                     self.mask_additive, self.dropout)
        else:
            # The default impl additionally needs the explicit scaling factor.
            outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, self.scaling, query,
                                     input_weights, self.out_proj_weight,
                                     input_bias, self.out_proj_bias,
                                     mask, self.mask_additive, self.dropout)
        return outputs, None
| 4,111 | 39.712871 | 118 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.