code stringlengths 17 6.64M |
|---|
class TestNorms(unittest.TestCase):
    """Tests for the user-configurable error norm used by the adaptive solvers."""

    def test_norm(self):
        # Simple dx/dt = x solve over [0, 1]; we only care that `norm` is called
        # with the expected state structure.
        def f(t, x):
            return x
        t = torch.tensor([0.0, 1.0])

        # Tensor (non-tuple) state: the norm receives the state tensor itself.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, torch.Tensor)
            self.assertEqual(state.shape, ())
            return state.pow(2).mean().sqrt()
        x0 = torch.tensor(1.0)
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)

        # Tuple state with a single element: the norm receives a 1-tuple.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, tuple)
            self.assertEqual(len(state), 1)
            (state,) = state
            self.assertEqual(state.shape, ())
            return state.pow(2).mean().sqrt()
        x0 = (torch.tensor(1.0),)
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)

        # Tuple state with two differently-shaped elements.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, tuple)
            self.assertEqual(len(state), 2)
            (state1, state2) = state
            self.assertEqual(state1.shape, ())
            self.assertEqual(state2.shape, (2, 2))
            return state1.pow(2).mean().sqrt()
        x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)

    def test_adjoint_norm(self):
        def f(t, x):
            return x
        t = torch.tensor([0.0, 1.0])
        adjoint_params = (torch.rand(7, requires_grad=True), torch.rand((), requires_grad=True))

        def make_spy_on_adjoint_norm(adjoint_norm, actual_norm):
            # Wrap `adjoint_norm` so we can (a) record that it was called and
            # (b) check it agrees with the reference `actual_norm` every time.
            is_spy_called = [False]
            def spy_on_adjoint_norm(tensor):
                nonlocal is_spy_called
                is_spy_called[0] = True
                norm_result = adjoint_norm(tensor)
                true_norm_result = actual_norm(tensor)
                self.assertIsInstance(norm_result, torch.Tensor)
                self.assertEqual(norm_result.shape, true_norm_result.shape)
                self.assertLess((norm_result - true_norm_result).abs().max(), 1e-06)
                return norm_result
            return (spy_on_adjoint_norm, is_spy_called)

        for shape in ((), (1,), (2, 2)):
            # Auto-constructed adjoint norms, tensor (non-tuple) state.
            for (use_adjoint_options, seminorm) in ((False, False), (True, False), (True, True)):
                with self.subTest(shape=shape, use_adjoint_options=use_adjoint_options, seminorm=seminorm):
                    x0 = torch.full(shape, 1.0)
                    if use_adjoint_options:
                        if seminorm:
                            # adjoint_options passed, requesting the seminorm.
                            kwargs = dict(adjoint_options=dict(norm='seminorm'))
                        else:
                            # adjoint_options passed without a norm entry.
                            kwargs = dict(adjoint_options={})
                    else:
                        # No adjoint_options at all.
                        kwargs = {}
                    xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, **kwargs)
                    # Grab the norm the library actually installed for the backward pass.
                    _adjoint_norm = xs.grad_fn.adjoint_options['norm']
                    is_called = False
                    def actual_norm(tensor_tuple):
                        # Reference implementation of the norm the adjoint pass should use.
                        nonlocal is_called
                        is_called = True
                        self.assertIsInstance(tensor_tuple, tuple)
                        (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
                        self.assertEqual(t.shape, ())
                        self.assertEqual(y.shape, shape)
                        self.assertEqual(adj_y.shape, shape)
                        self.assertEqual(adj_param1.shape, (7,))
                        self.assertEqual(adj_param2.shape, ())
                        out = max(t.abs(), y.pow(2).mean().sqrt(), adj_y.pow(2).mean().sqrt())
                        if (not seminorm):
                            # The full norm also includes the parameter cotangents.
                            out = max(out, adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
                        return out
                    (xs.grad_fn.adjoint_options['norm'], is_spy_called) = make_spy_on_adjoint_norm(_adjoint_norm, actual_norm)
                    xs.sum().backward()
                    self.assertTrue(is_called)
                    self.assertTrue(is_spy_called[0])

            # Auto-constructed adjoint norms, tuple state: a scalar plus a (2, 2)
            # tensor, flattened to shape (5,) internally.
            for (use_adjoint_options, seminorm) in ((False, False), (True, False), (True, True)):
                with self.subTest(shape=shape, use_adjoint_options=use_adjoint_options, seminorm=seminorm):
                    x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
                    if use_adjoint_options:
                        if seminorm:
                            kwargs = dict(adjoint_options=dict(norm='seminorm'))
                        else:
                            kwargs = dict(adjoint_options={})
                    else:
                        kwargs = {}
                    xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, **kwargs)
                    # With tuple state the grad_fn of interest sits a couple of nodes
                    # deep in the autograd graph.
                    adjoint_options_dict = xs[0].grad_fn.next_functions[0][0].next_functions[0][0].adjoint_options
                    _adjoint_norm = adjoint_options_dict['norm']
                    is_called = False
                    def actual_norm(tensor_tuple):
                        nonlocal is_called
                        is_called = True
                        self.assertIsInstance(tensor_tuple, tuple)
                        (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
                        self.assertEqual(t.shape, ())
                        self.assertEqual(y.shape, (5,))
                        self.assertEqual(adj_y.shape, (5,))
                        self.assertEqual(adj_param1.shape, (7,))
                        self.assertEqual(adj_param2.shape, ())
                        # First element is the scalar state, the rest the flattened (2, 2) state.
                        ya = y[0]
                        yb = y[1:]
                        adj_ya = adj_y[0]
                        adj_yb = adj_y[1:4]
                        out = max(t.abs(), ya.abs(), yb.pow(2).mean().sqrt(), adj_ya.abs(), adj_yb.pow(2).mean().sqrt())
                        if (not seminorm):
                            out = max(out, adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
                        return out
                    (spy_on_adjoint_norm, is_spy_called) = make_spy_on_adjoint_norm(_adjoint_norm, actual_norm)
                    adjoint_options_dict['norm'] = spy_on_adjoint_norm
                    xs[0].sum().backward()
                    self.assertTrue(is_called)
                    self.assertTrue(is_spy_called[0])

        # Custom user-supplied adjoint norm, tensor state.
        is_called = False
        def adjoint_norm(tensor_tuple):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(tensor_tuple, tuple)
            (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
            self.assertEqual(t.shape, ())
            self.assertEqual(y.shape, ())
            self.assertEqual(adj_y.shape, ())
            self.assertEqual(adj_param1.shape, (7,))
            self.assertEqual(adj_param2.shape, ())
            return max(t.abs(), y.pow(2).mean().sqrt(), adj_y.pow(2).mean().sqrt(), adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
        x0 = torch.tensor(1.0)
        xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, adjoint_options=dict(norm=adjoint_norm))
        xs.sum().backward()
        self.assertTrue(is_called)

        # Custom user-supplied adjoint norm, tuple state: the states and their
        # cotangents are passed unflattened.
        is_called = False
        def adjoint_norm(tensor_tuple):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(tensor_tuple, tuple)
            (t, ya, yb, adj_ya, adj_yb, adj_param1, adj_param2) = tensor_tuple
            self.assertEqual(t.shape, ())
            self.assertEqual(ya.shape, ())
            self.assertEqual(yb.shape, (2, 2))
            self.assertEqual(adj_ya.shape, ())
            self.assertEqual(adj_yb.shape, (2, 2))
            self.assertEqual(adj_param1.shape, (7,))
            self.assertEqual(adj_param2.shape, ())
            return max(t.abs(), ya.abs(), yb.pow(2).mean().sqrt(), adj_ya.abs(), adj_yb.pow(2).mean().sqrt(), adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
        x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
        xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, adjoint_options=dict(norm=adjoint_norm))
        xs[0].sum().backward()
        self.assertTrue(is_called)

    def test_large_norm(self):
        # A larger norm inflates the error estimate, forcing smaller steps, so
        # the solve using `large_norm` should never need fewer evaluations.
        def norm(tensor):
            return tensor.abs().max()
        def large_norm(tensor):
            return (10 * tensor.abs().max())
        for dtype in DTYPES:
            for device in DEVICES:
                for method in ADAPTIVE_METHODS:
                    if ((dtype == torch.float32) and (method == 'dopri8')):
                        continue
                    with self.subTest(dtype=dtype, device=device, method=method):
                        x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
                        t = torch.tensor([0.0, 1.0], device=device, dtype=torch.float64)
                        norm_f = _NeuralF(width=10, oscillate=True).to(device, dtype)
                        torchdiffeq.odeint(norm_f, x0, t, method=method, options=dict(norm=norm))
                        large_norm_f = _NeuralF(width=10, oscillate=True).to(device, dtype)
                        with torch.no_grad():
                            # Copy weights so both solves integrate the same vector field.
                            for (norm_param, large_norm_param) in zip(norm_f.parameters(), large_norm_f.parameters()):
                                large_norm_param.copy_(norm_param)
                        torchdiffeq.odeint(large_norm_f, x0, t, method=method, options=dict(norm=large_norm))
                        self.assertLessEqual(norm_f.nfe, large_norm_f.nfe)

    def test_seminorm(self):
        # The seminorm trick should never increase the number of backward-pass
        # function evaluations relative to the default adjoint norm.
        for dtype in DTYPES:
            for device in DEVICES:
                for method in ADAPTIVE_METHODS:
                    with self.subTest(dtype=dtype, device=device, method=method):
                        if (dtype == torch.float64):
                            tol = 1e-08
                        else:
                            tol = 1e-06
                        x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
                        t = torch.tensor([0.0, 1.0], device=device, dtype=torch.float64)
                        ode_f = _NeuralF(width=1024, oscillate=True).to(device, dtype)
                        out = torchdiffeq.odeint_adjoint(ode_f, x0, t, atol=tol, rtol=tol, method=method)
                        ode_f.nfe = 0
                        out.sum().backward()
                        default_nfe = ode_f.nfe
                        out = torchdiffeq.odeint_adjoint(ode_f, x0, t, atol=tol, rtol=tol, method=method, adjoint_options=dict(norm='seminorm'))
                        ode_f.nfe = 0
                        out.sum().backward()
                        seminorm_nfe = ode_f.nfe
                        self.assertLessEqual(seminorm_nfe, default_nfe)
|
def rel_error(true, estimate):
    """Return the largest elementwise relative error of `estimate` w.r.t. `true`."""
    relative = (true - estimate) / true
    return relative.abs().max()
|
class TestSolverError(unittest.TestCase):
    """Accuracy checks: each solver must match the analytic solution within eps."""

    def test_odeint(self):
        for reverse in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for method in METHODS:
                        if ((method in SCIPY_METHODS) and (dtype == torch.complex64)):
                            # Complex problems are skipped for the scipy-backed solvers.
                            continue
                        kwargs = dict()
                        # dopri8 needs tighter (float64) / looser (float32) tolerances.
                        if ((method == 'dopri8') and (dtype == torch.float64)):
                            kwargs = dict(rtol=1e-12, atol=1e-14)
                        if ((method == 'dopri8') and (dtype == torch.float32)):
                            kwargs = dict(rtol=1e-07, atol=1e-07)
                        # Fixed-step methods are only checked on the easy 'constant' problem.
                        problems = (PROBLEMS if (method in ADAPTIVE_METHODS) else ('constant',))
                        for ode in problems:
                            # Error budget varies with method / problem difficulty.
                            if (method in ['adaptive_heun', 'bosh3']):
                                eps = 0.004
                            elif (ode == 'linear'):
                                eps = 0.002
                            else:
                                eps = 0.0003
                            with self.subTest(reverse=reverse, dtype=dtype, device=device, ode=ode, method=method):
                                (f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
                                y = torchdiffeq.odeint(f, y0, t_points, method=method, **kwargs)
                                self.assertLess(rel_error(sol, y), eps)

    def test_adjoint(self):
        # Same accuracy check but solving through odeint_adjoint's forward pass.
        for reverse in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for ode in PROBLEMS:
                        if (ode == 'linear'):
                            eps = 0.002
                        else:
                            eps = 0.0001
                        with self.subTest(reverse=reverse, dtype=dtype, device=device, ode=ode):
                            (f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
                            y = torchdiffeq.odeint_adjoint(f, y0, t_points)
                            self.assertLess(rel_error(sol, y), eps)
|
class TestScipySolvers(unittest.TestCase):
    """Checks the scipy_solver bridge against the analytic solutions."""

    def test_odeint(self):
        for reverse in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for solver in ['RK45', 'RK23', 'DOP853', 'Radau', 'BDF', 'LSODA']:
                        for ode in PROBLEMS:
                            eps = 0.001
                            with self.subTest(reverse=reverse, dtype=dtype, device=device, ode=ode, solver=solver):
                                (f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
                                if (torch.is_complex(y0) and (solver in ['Radau', 'LSODA'])):
                                    # These two solvers are skipped for complex problems.
                                    continue
                                y = torchdiffeq.odeint(f, y0, t_points, method='scipy_solver', options={'solver': solver})
                                self.assertTrue((sol.shape == y.shape))
                                self.assertLess(rel_error(sol, y), eps)
|
class TestNoIntegration(unittest.TestCase):
    """A solve over a length-1 time tensor must return the initial value untouched."""

    def test_odeint(self):
        for reverse in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for method in METHODS:
                        for ode in PROBLEMS:
                            with self.subTest(reverse=reverse, dtype=dtype, device=device, ode=ode, method=method):
                                (f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
                                # Only ask for the initial time point: no stepping should occur.
                                y = torchdiffeq.odeint(f, y0, t_points[0:1], method=method)
                                self.assertLess((sol[0] - y).abs().max(), 1e-12)
|
class _JumpF:
    """Vector field with a discontinuity at t = 0.5; counts evaluations in `nfe`."""

    def __init__(self):
        # Number of function evaluations performed so far.
        self.nfe = 0

    def __call__(self, t, x):
        self.nfe += 1
        # Linear decay before the jump, quadratic growth after it.
        return -0.5 * x if t < 0.5 else x ** 2
|
class TestDiscontinuities(unittest.TestCase):
    """Tests for vector fields with jumps, via the jump_t / perturb options."""

    def test_odeint_jump_t(self):
        for adjoint in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for method in ADAPTIVE_METHODS:
                        with self.subTest(adjoint=adjoint, dtype=dtype, device=device, method=method):
                            if (method == 'dopri8'):
                                continue
                            x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype, requires_grad=True)
                            t = torch.tensor([0.0, 1.0], device=device)
                            # Baseline: the solver has to discover the jump at t=0.5 itself.
                            simple_f = _JumpF()
                            odeint = (partial(torchdiffeq.odeint_adjoint, adjoint_params=()) if adjoint else torchdiffeq.odeint)
                            simple_xs = odeint(simple_f, x0, t, atol=1e-06, method=method)
                            # Informed: telling the solver about the jump should save evaluations.
                            better_f = _JumpF()
                            options = dict(jump_t=torch.tensor([0.5], device=device))
                            with warnings.catch_warnings():
                                better_xs = odeint(better_f, x0, t, rtol=1e-06, atol=1e-06, method=method, options=options)
                            self.assertLess(better_f.nfe, simple_f.nfe)
                            if adjoint:
                                # The saving should hold on the backward pass too.
                                simple_f.nfe = 0
                                better_f.nfe = 0
                                with warnings.catch_warnings():
                                    simple_xs.sum().backward()
                                    better_xs.sum().backward()
                                self.assertLess(better_f.nfe, simple_f.nfe)

    def test_odeint_perturb(self):
        for adjoint in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for method in FIXED_METHODS:
                        for perturb in (True, False):
                            with self.subTest(adjoint=adjoint, dtype=dtype, device=device, method=method, perturb=perturb):
                                x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype, requires_grad=True)
                                t = torch.tensor([0.0, 1.0], device=device)
                                ts = []
                                def f(t, x):
                                    # Record every time point the solver evaluates at.
                                    ts.append(t.item())
                                    return (- x)
                                options = dict(step_size=0.5, perturb=perturb)
                                with warnings.catch_warnings():
                                    odeint = (partial(torchdiffeq.odeint_adjoint, adjoint_params=()) if adjoint else torchdiffeq.odeint)
                                    xs = odeint(f, x0, t, method=method, options=options)
                                if perturb:
                                    # With perturbation, evaluations are nudged off the exact grid points.
                                    self.assertNotIn(0.0, ts)
                                    self.assertNotIn(0.5, ts)
                                else:
                                    self.assertIn(0.0, ts)
                                    self.assertIn(0.5, ts)
                                if adjoint:
                                    ts.clear()
                                    with warnings.catch_warnings():
                                        xs.sum().backward()
                                    # The backward solve runs from t=1 back towards t=0.
                                    if perturb:
                                        self.assertNotIn(1.0, ts)
                                        self.assertNotIn(0.5, ts)
                                    else:
                                        self.assertIn(1.0, ts)
                                        self.assertIn(0.5, ts)
|
class TestGridConstructor(unittest.TestCase):
    def test_grid_constructor(self):
        # dx/dt = x solved by explicit Euler on a user-supplied grid of 10 steps
        # of size 0.1, so x1 should equal x0 * 1.1**10 (up to float error).
        def f(t, x):
            return x
        for adjoint in (False, True):
            with self.subTest(adjoint=adjoint):
                x0 = torch.tensor(1.0, requires_grad=True)
                t = torch.tensor([0.0, 1.0])
                first = True
                def grid_constructor(f, y0, t):
                    nonlocal first
                    self.assertEqual(t.shape, (2,))
                    if first:
                        # Forward pass: integrate from 0 to 1.
                        first = False
                        self.assertEqual(t[0], 0.0)
                        self.assertEqual(t[1], 1.0)
                        return torch.linspace(0, 1, 11)
                    else:
                        # Adjoint (backward) pass: integrate from 1 back to 0.
                        self.assertEqual(t[0], 1.0)
                        self.assertEqual(t[1], 0.0)
                        return torch.linspace(1, 0, 11)
                odeint = (torchdiffeq.odeint_adjoint if adjoint else torchdiffeq.odeint)
                kwargs = ({'adjoint_params': ()} if adjoint else {})
                xs = odeint(f, x0, t, method='euler', options=dict(grid_constructor=grid_constructor), **kwargs)
                x1 = xs[1]
                true_x1 = (x0 * (1.1 ** 10))
                self.assertLess((x1 - true_x1).abs().max(), 1e-06)
                if adjoint:
                    x1.backward()
                    # d(x1)/d(x0) for Euler on this grid is also 1.1**10.
                    true_x0_grad = (1.1 ** 10)
                    self.assertLess((x0.grad - true_x0_grad).abs().max(), 1e-06)
|
class TestMinMaxStep(unittest.TestCase):
    """min_step/max_step options should bound the number of function evaluations."""

    def test_min_max_step(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for device in DEVICES:
                for min_step in (0, 2):
                    for max_step in (float('inf'), 5):
                        for (method, options) in [('dopri5', {}), ('scipy_solver', {'solver': 'LSODA'})]:
                            options['min_step'] = min_step
                            options['max_step'] = max_step
                            (f, y0, t_points, sol) = construct_problem(device=device, ode='linear')
                            torchdiffeq.odeint(f, y0, t_points, method=method, options=options)
                            if (min_step > 0):
                                # A floor on the step size caps how many steps fit in the interval.
                                self.assertLess(f.nfe, 50)
                            else:
                                self.assertGreater(f.nfe, 100)
|
class _NeuralF(torch.nn.Module):
    """Small tanh MLP vector field (2 -> width -> 2); counts evaluations in `nfe`.

    With `oscillate=True` the output is modulated by sin(20 t), making the
    field strongly time-dependent.
    """

    def __init__(self, width, oscillate):
        super(_NeuralF, self).__init__()
        hidden = torch.nn.Linear(2, width)
        output = torch.nn.Linear(width, 2)
        self.linears = torch.nn.Sequential(hidden, torch.nn.Tanh(), output, torch.nn.Tanh())
        self.nfe = 0  # number of forward evaluations so far
        self.oscillate = oscillate

    def forward(self, t, x):
        self.nfe += 1
        dx = self.linears(x)
        if not self.oscillate:
            return dx
        # Modulate by sin(20 t).
        return dx * t.mul(20).sin()
|
class TestCallbacks(unittest.TestCase):
    def test_wrong_callback(self):
        # Attaching a callback the chosen solver cannot honour should warn.
        x0 = torch.tensor([1.0, 2.0])
        t = torch.tensor([0.0, 1.0])
        for method in FIXED_METHODS:
            # Fixed-step solvers never accept/reject steps.
            for callback_name in ('callback_accept_step', 'callback_reject_step'):
                with self.subTest(method=method):
                    f = _NeuralF(width=10, oscillate=False)
                    setattr(f, callback_name, (lambda t0, y0, dt: None))
                    with self.assertWarns(Warning):
                        torchdiffeq.odeint(f, x0, t, method=method)
        for method in SCIPY_METHODS:
            # The scipy bridge supports none of the step callbacks.
            for callback_name in ('callback_step', 'callback_accept_step', 'callback_reject_step'):
                with self.subTest(method=method):
                    f = _NeuralF(width=10, oscillate=False)
                    setattr(f, callback_name, (lambda t0, y0, dt: None))
                    with self.assertWarns(Warning):
                        torchdiffeq.odeint(f, x0, t, method=method)

    def test_steps(self):
        # Check callback invocation counts on the forward and/or adjoint pass.
        for (forward, adjoint) in ((False, True), (True, False), (True, True)):
            for method in (FIXED_METHODS + ADAPTIVE_METHODS):
                if (method == 'dopri8'):
                    continue
                with self.subTest(forward=forward, adjoint=adjoint, method=method):
                    f = _NeuralF(width=10, oscillate=False)
                    if forward:
                        forward_counter = 0
                        forward_accept_counter = 0
                        forward_reject_counter = 0
                        def callback_step(t0, y0, dt):
                            nonlocal forward_counter
                            forward_counter += 1
                        def callback_accept_step(t0, y0, dt):
                            nonlocal forward_accept_counter
                            forward_accept_counter += 1
                        def callback_reject_step(t0, y0, dt):
                            nonlocal forward_reject_counter
                            forward_reject_counter += 1
                        f.callback_step = callback_step
                        if (method in ADAPTIVE_METHODS):
                            f.callback_accept_step = callback_accept_step
                            f.callback_reject_step = callback_reject_step
                    if adjoint:
                        adjoint_counter = 0
                        adjoint_accept_counter = 0
                        adjoint_reject_counter = 0
                        def callback_step_adjoint(t0, y0, dt):
                            nonlocal adjoint_counter
                            adjoint_counter += 1
                        def callback_accept_step_adjoint(t0, y0, dt):
                            nonlocal adjoint_accept_counter
                            adjoint_accept_counter += 1
                        def callback_reject_step_adjoint(t0, y0, dt):
                            nonlocal adjoint_reject_counter
                            adjoint_reject_counter += 1
                        f.callback_step_adjoint = callback_step_adjoint
                        if (method in ADAPTIVE_METHODS):
                            f.callback_accept_step_adjoint = callback_accept_step_adjoint
                            f.callback_reject_step_adjoint = callback_reject_step_adjoint
                    x0 = torch.tensor([1.0, 2.0])
                    t = torch.tensor([0.0, 1.0])
                    if (method in FIXED_METHODS):
                        kwargs = dict(options=dict(step_size=0.1))
                    elif (method == 'implicit_adams'):
                        kwargs = dict(rtol=0.001, atol=0.0001)
                    else:
                        kwargs = {}
                    xs = torchdiffeq.odeint_adjoint(f, x0, t, method=method, **kwargs)
                    if forward:
                        if (method in FIXED_METHODS):
                            # step_size=0.1 over [0, 1] gives exactly ten steps.
                            self.assertEqual(forward_counter, 10)
                        if (method in ADAPTIVE_METHODS):
                            self.assertGreater(forward_counter, 0)
                            # Every step is either accepted or rejected.
                            self.assertEqual((forward_accept_counter + forward_reject_counter), forward_counter)
                    if adjoint:
                        xs.sum().backward()
                        if (method in FIXED_METHODS):
                            self.assertEqual(adjoint_counter, 10)
                        if (method in ADAPTIVE_METHODS):
                            self.assertGreater(adjoint_counter, 0)
                            self.assertEqual((adjoint_accept_counter + adjoint_reject_counter), adjoint_counter)
|
class ConstantODE(torch.nn.Module):
    """ODE whose exact solution is the straight line a*t + b.

    The extra fifth-power residual term vanishes on the exact solution, so the
    right-hand side reduces to the constant slope `a` there.
    """

    def __init__(self):
        super(ConstantODE, self).__init__()
        self.a = torch.nn.Parameter(torch.tensor(0.2))
        self.b = torch.nn.Parameter(torch.tensor(3.0))

    def forward(self, t, y):
        # Residual from the exact solution, raised to the 5th power.
        residual = y - self.y_exact(t)
        return self.a + residual ** 5

    def y_exact(self, t):
        """Analytic solution: a*t + b."""
        return self.a * t + self.b
|
class SineODE(torch.nn.Module):
    """Reference ODE with an oscillatory closed-form solution."""

    def forward(self, t, y):
        # dy/dt = 2y/t + t^4 sin(2t) - t^2 + 4 t^3
        two_t = 2 * t
        return 2 * y / t + t ** 4 * torch.sin(two_t) - t ** 2 + 4 * t ** 3

    def y_exact(self, t):
        """Analytic solution of the ODE above."""
        two_t = 2 * t
        return (-0.5 * t ** 4 * torch.cos(two_t)
                + 0.5 * t ** 3 * torch.sin(two_t)
                + 0.25 * t ** 2 * torch.cos(two_t)
                - t ** 3
                + 2 * t ** 4
                + (math.pi - 0.25) * t ** 2)
|
class LinearODE(torch.nn.Module):
    """Linear system dy/dt = A y with a fixed random (seeded) matrix A.

    The exact solution is computed via the matrix exponential; `nfe` counts
    right-hand-side evaluations.
    """

    def __init__(self, dim=10):
        super(LinearODE, self).__init__()
        torch.manual_seed(0)  # make A deterministic across instances
        self.dim = dim
        rand_mat = torch.randn(dim, dim) * 0.1
        # Same expression as the reference implementation (kept verbatim so the
        # float rounding, and hence A, is bit-identical).
        system_mat = 2 * rand_mat - (rand_mat + rand_mat.transpose(0, 1))
        self.A = torch.nn.Parameter(system_mat)
        self.initial_val = np.ones((dim, 1))
        self.nfe = 0

    def forward(self, t, y):
        self.nfe += 1
        column = y.reshape(self.dim, 1)
        return torch.mm(self.A, column).reshape(-1)

    def y_exact(self, t):
        """Analytic solution y(t) = expm(A t) @ y(0) for every time in `t`."""
        times = t.detach().cpu().numpy()
        A_np = self.A.detach().cpu().numpy()
        solutions = [np.matmul(scipy.linalg.expm(A_np * t_i), self.initial_val) for t_i in times]
        stacked = torch.stack([torch.tensor(s) for s in solutions])
        return stacked.reshape(len(times), self.dim).to(t)
|
def construct_problem(device, npts=10, ode='constant', reverse=False, dtype=torch.float64):
    """Instantiate one of the named reference problems from PROBLEMS.

    Returns (f, y0, t_points, sol): the ODE module, a differentiable initial
    value, `npts` evaluation times over [1, 8], and the analytic solution at
    those times.  With `reverse=True` both the time grid and the solution are
    flipped so the solve runs backwards in time.
    """
    f = PROBLEMS[ode]().to(dtype=dtype, device=device)
    t_points = torch.linspace(1, 8, npts, dtype=torch.float64, device=device, requires_grad=True)
    sol = f.y_exact(t_points).to(dtype)

    def _flip(x, dim):
        # Reverse `x` along `dim` using an explicit descending index.
        descending = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=device)
        selector = [slice(None)] * x.dim()
        selector[dim] = descending
        return x[tuple(selector)]

    if reverse:
        t_points = _flip(t_points, 0).clone().detach()
        sol = _flip(sol, 0).clone().detach()

    y0 = sol[0].detach().requires_grad_(True)
    return f, y0, t_points, sol
|
class AdaptiveHeunSolver(RKAdaptiveStepsizeODESolver):
    # Adaptive Heun: a second-order adaptive Runge-Kutta scheme; the tableau
    # and interpolation coefficients are defined elsewhere in the package.
    order = 2
    tableau = _ADAPTIVE_HEUN_TABLEAU
    mid = _AH_C_MID
|
class OdeintAdjointMethod(torch.autograd.Function):
    """Autograd Function implementing the adjoint sensitivity method.

    The forward pass runs a plain `odeint` under `no_grad`; the backward pass
    integrates an augmented ODE backwards in time to recover gradients with
    respect to y0, t and the adjoint parameters.
    """

    @staticmethod
    def forward(ctx, shapes, func, y0, t, rtol, atol, method, options, event_fn, adjoint_rtol, adjoint_atol, adjoint_method, adjoint_options, t_requires_grad, *adjoint_params):
        # Stash everything backward() will need on the ctx.
        ctx.shapes = shapes
        ctx.func = func
        ctx.adjoint_rtol = adjoint_rtol
        ctx.adjoint_atol = adjoint_atol
        ctx.adjoint_method = adjoint_method
        ctx.adjoint_options = adjoint_options
        ctx.t_requires_grad = t_requires_grad
        ctx.event_mode = (event_fn is not None)
        with torch.no_grad():
            ans = odeint(func, y0, t, rtol=rtol, atol=atol, method=method, options=options, event_fn=event_fn)
            if (event_fn is None):
                y = ans
                ctx.save_for_backward(t, y, *adjoint_params)
            else:
                # Event mode additionally returns the event time.
                (event_t, y) = ans
                ctx.save_for_backward(t, y, event_t, *adjoint_params)
        return ans

    @staticmethod
    def backward(ctx, *grad_y):
        with torch.no_grad():
            func = ctx.func
            adjoint_rtol = ctx.adjoint_rtol
            adjoint_atol = ctx.adjoint_atol
            adjoint_method = ctx.adjoint_method
            adjoint_options = ctx.adjoint_options
            t_requires_grad = ctx.t_requires_grad
            event_mode = ctx.event_mode
            if event_mode:
                (t, y, event_t, *adjoint_params) = ctx.saved_tensors
                _t = t
                # In event mode the effective integration interval is [t[0], event_t].
                t = torch.cat([t[0].reshape((- 1)), event_t.reshape((- 1))])
                grad_y = grad_y[1]
            else:
                (t, y, *adjoint_params) = ctx.saved_tensors
                grad_y = grad_y[0]
            adjoint_params = tuple(adjoint_params)

            # Augmented state: [dL/dt accumulator, y, dL/dy, *dL/dparams].
            aug_state = [torch.zeros((), dtype=y.dtype, device=y.device), y[(- 1)], grad_y[(- 1)]]
            aug_state.extend([torch.zeros_like(param) for param in adjoint_params])

            def augmented_dynamics(t, y_aug):
                # Dynamics of the augmented state; y_aug[0] (the dL/dt
                # accumulator) is ignored here.
                y = y_aug[1]
                adj_y = y_aug[2]
                with torch.enable_grad():
                    t_ = t.detach()
                    t = t_.requires_grad_(True)
                    y = y.detach().requires_grad_(True)
                    # Evaluate through the non-differentiated time unless t needs gradients.
                    func_eval = func((t if t_requires_grad else t_), y)
                    # NOTE(review): these zero-strided views are not read below;
                    # presumably they exist for an autograd-related side effect —
                    # confirm against the upstream implementation before removing.
                    _t = torch.as_strided(t, (), ())
                    _y = torch.as_strided(y, (), ())
                    _params = tuple((torch.as_strided(param, (), ()) for param in adjoint_params))
                    (vjp_t, vjp_y, *vjp_params) = torch.autograd.grad(func_eval, ((t, y) + adjoint_params), (- adj_y), allow_unused=True, retain_graph=True)
                # autograd.grad returns None for unused inputs; replace with zeros.
                vjp_t = (torch.zeros_like(t) if (vjp_t is None) else vjp_t)
                vjp_y = (torch.zeros_like(y) if (vjp_y is None) else vjp_y)
                vjp_params = [(torch.zeros_like(param) if (vjp_param is None) else vjp_param) for (param, vjp_param) in zip(adjoint_params, vjp_params)]
                return (vjp_t, func_eval, vjp_y, *vjp_params)

            # Forward any user-supplied adjoint callbacks onto the augmented dynamics.
            for (callback_name, adjoint_callback_name) in zip(_all_callback_names, _all_adjoint_callback_names):
                try:
                    callback = getattr(func, adjoint_callback_name)
                except AttributeError:
                    pass
                else:
                    setattr(augmented_dynamics, callback_name, callback)

            if t_requires_grad:
                time_vjps = torch.empty(len(t), dtype=t.dtype, device=t.device)
            else:
                time_vjps = None
            # Integrate backwards through each interval [t[i-1], t[i]] in turn.
            for i in range((len(t) - 1), 0, (- 1)):
                if t_requires_grad:
                    # dL/dt at t[i] is <f(t[i], y[i]), dL/dy[i]>.
                    func_eval = func(t[i], y[i])
                    dLd_cur_t = func_eval.reshape((- 1)).dot(grad_y[i].reshape((- 1)))
                    aug_state[0] -= dLd_cur_t
                    time_vjps[i] = dLd_cur_t
                aug_state = odeint(augmented_dynamics, tuple(aug_state), t[(i - 1):(i + 1)].flip(0), rtol=adjoint_rtol, atol=adjoint_atol, method=adjoint_method, options=adjoint_options)
                aug_state = [a[1] for a in aug_state]  # keep only the state at t[i - 1]
                aug_state[1] = y[(i - 1)]  # reset to the exactly-known forward state
                aug_state[2] += grad_y[(i - 1)]  # fold in the incoming cotangent
            if t_requires_grad:
                time_vjps[0] = aug_state[0]
            if (event_mode and t_requires_grad):
                # Only the initial time of the original grid receives a gradient.
                time_vjps = torch.cat([time_vjps[0].reshape((- 1)), torch.zeros_like(_t[1:])])
            adj_y = aug_state[2]
            adj_params = aug_state[3:]
        # One gradient slot per forward() argument; non-differentiable ones are None.
        return (None, None, adj_y, time_vjps, None, None, None, None, None, None, None, None, None, None, *adj_params)
|
def odeint_adjoint(func, y0, t, *, rtol=1e-07, atol=1e-09, method=None, options=None, event_fn=None, adjoint_rtol=None, adjoint_atol=None, adjoint_method=None, adjoint_options=None, adjoint_params=None):
    """Solve an ODE like `odeint`, but backpropagate via the adjoint method.

    Gradients are obtained by solving an augmented ODE backwards in time
    rather than by differentiating through the solver's internals.

    Extra arguments relative to `odeint`:
        adjoint_rtol / adjoint_atol / adjoint_method / adjoint_options:
            solver configuration for the backward (adjoint) solve; each
            defaults to its forward counterpart.
        adjoint_params: tensors to compute gradients with respect to.
            Defaults to `func`'s parameters, requiring `func` to be an
            nn.Module; pass `adjoint_params=()` if there are none.

    Raises:
        ValueError: if adjoint parameters cannot be inferred, or if
            `adjoint_options` cannot be inferred from `options`.
    """
    if ((adjoint_params is None) and (not isinstance(func, nn.Module))):
        raise ValueError('func must be an instance of nn.Module to specify the adjoint parameters; alternatively they can be specified explicitly via the `adjoint_params` argument. If there are no parameters then it is allowable to set `adjoint_params=()`.')

    # Defaults for the adjoint solve mirror the forward solve.
    if (adjoint_rtol is None):
        adjoint_rtol = rtol
    if (adjoint_atol is None):
        adjoint_atol = atol
    if (adjoint_method is None):
        adjoint_method = method
    if ((adjoint_method != method) and (options is not None) and (adjoint_options is None)):
        raise ValueError('If `adjoint_method != method` then we cannot infer `adjoint_options` from `options`. So as `options` has been passed then `adjoint_options` must be passed as well.')
    if (adjoint_options is None):
        # Inherit forward options, minus 'norm' (the adjoint state is structured differently).
        adjoint_options = ({k: v for (k, v) in options.items() if (k != 'norm')} if (options is not None) else {})
    else:
        # Copy so we never mutate a user-supplied dict in place.
        adjoint_options = adjoint_options.copy()

    if (adjoint_params is None):
        adjoint_params = tuple(find_parameters(func))
    else:
        adjoint_params = tuple(adjoint_params)

    # Drop parameters that don't require gradients; warn if a custom adjoint
    # norm might have expected to see them.
    oldlen_ = len(adjoint_params)
    adjoint_params = tuple((p for p in adjoint_params if p.requires_grad))
    if (len(adjoint_params) != oldlen_):
        if (('norm' in adjoint_options) and callable(adjoint_options['norm'])):
            warnings.warn('An adjoint parameter was passed without requiring gradient. For efficiency this will be excluded from the adjoint pass, and will not appear as a tensor in the adjoint norm.')

    # Normalise inputs (flattens tuple state, resolves the method, etc.).
    (shapes, func, y0, t, rtol, atol, method, options, event_fn, decreasing_time) = _check_inputs(func, y0, t, rtol, atol, method, options, event_fn, SOLVERS)

    # Install/wrap the adjoint norm in-place on adjoint_options.
    state_norm = options['norm']
    handle_adjoint_norm_(adjoint_options, shapes, state_norm)

    ans = OdeintAdjointMethod.apply(shapes, func, y0, t, rtol, atol, method, options, event_fn, adjoint_rtol, adjoint_atol, adjoint_method, adjoint_options, t.requires_grad, *adjoint_params)

    if (event_fn is None):
        solution = ans
    else:
        (event_t, solution) = ans
        event_t = event_t.to(t)
        if decreasing_time:
            # _check_inputs negates time for decreasing solves; undo that here.
            event_t = (- event_t)

    if (shapes is not None):
        # Unflatten back to the user's original tuple-of-tensors state.
        solution = _flat_to_shape(solution, (len(t),), shapes)

    if (event_fn is None):
        return solution
    else:
        return (event_t, solution)
|
def find_parameters(module):
    """Return the trainable parameters of `module` as a list.

    DataParallel replicas (marked with `_is_replica`) don't expose parameters
    through `.parameters()`, so for them we collect gradient-requiring tensors
    directly off each submodule's `__dict__` instead.
    """
    assert isinstance(module, nn.Module)
    if not getattr(module, '_is_replica', False):
        # Ordinary module: the standard parameter iterator is fine.
        return list(module.parameters())

    def find_tensor_attributes(mod):
        # Gradient-requiring tensors stored as plain attributes.
        return [(name, value) for (name, value) in mod.__dict__.items()
                if torch.is_tensor(value) and value.requires_grad]

    members = module._named_members(get_members_fn=find_tensor_attributes)
    return [tensor for (_, tensor) in members]
|
def handle_adjoint_norm_(adjoint_options, shapes, state_norm):
    """In-place modifies the adjoint options to choose or wrap the norm function.

    Args:
        adjoint_options: options dict for the adjoint solve; mutated so that
            `adjoint_options['norm']` is always a callable afterwards.
        shapes: shapes of the original tuple state, or None for tensor state.
        state_norm: the norm used for the forward state; reused for the `y`
            and `adj_y` components of the augmented adjoint state.
    """

    def default_adjoint_norm(tensor_tuple):
        # Norm over the full augmented state: time, state, state-cotangent
        # and parameter-cotangents.
        (t, y, adj_y, *adj_params) = tensor_tuple
        return max(t.abs(), state_norm(y), state_norm(adj_y), _mixed_norm(adj_params))

    if ('norm' not in adjoint_options):
        # No norm specified: use the default over the whole augmented state.
        adjoint_options['norm'] = default_adjoint_norm
    else:
        # The key is known to exist here, so read it directly.  (A previous
        # version re-read it inside an unreachable try/except KeyError.)
        adjoint_norm = adjoint_options['norm']
        if (adjoint_norm == 'seminorm'):
            # Seminorm trick: ignore the parameter-cotangents when
            # controlling the step size.
            def adjoint_seminorm(tensor_tuple):
                (t, y, adj_y, *adj_params) = tensor_tuple
                return max(t.abs(), state_norm(y), state_norm(adj_y))
            adjoint_options['norm'] = adjoint_seminorm
        elif (shapes is None):
            # Tensor state: pass the user's callable through unchanged.
            pass
        else:
            # Tuple state: unflatten y and adj_y before calling the user's
            # norm, so it sees the same structure as the forward state.
            def _adjoint_norm(tensor_tuple):
                (t, y, adj_y, *adj_params) = tensor_tuple
                y = _flat_to_shape(y, (), shapes)
                adj_y = _flat_to_shape(adj_y, (), shapes)
                return adjoint_norm((t, *y, *adj_y, *adj_params))
            adjoint_options['norm'] = _adjoint_norm
|
class Bosh3Solver(RKAdaptiveStepsizeODESolver):
    # Bogacki-Shampine: a third-order adaptive Runge-Kutta scheme.
    order = 3
    tableau = _BOGACKI_SHAMPINE_TABLEAU
    mid = _BS_C_MID
|
class Dopri5Solver(RKAdaptiveStepsizeODESolver):
    # Dormand-Prince 5(4): a fifth-order adaptive Runge-Kutta scheme.
    order = 5
    tableau = _DORMAND_PRINCE_SHAMPINE_TABLEAU
    mid = DPS_C_MID
|
class Dopri8Solver(RKAdaptiveStepsizeODESolver):
    # Dormand-Prince 8: an eighth-order adaptive Runge-Kutta scheme.
    order = 8
    tableau = _DOPRI8_TABLEAU
    mid = _C_mid
|
def find_event(interp_fn, sign0, t0, t1, event_fn, tol):
    """Locate an event time in [t0, t1] by bisection on the sign of `event_fn`.

    `sign0` is the sign of the event function at the left end of the bracket;
    `interp_fn` maps a time to the interpolated state there.  Returns the
    estimated event time and the state at that time.
    """
    with torch.no_grad():
        # Number of halvings needed to shrink the bracket below `tol`.
        num_iters = torch.ceil(torch.log((t1 - t0) / tol) / math.log(2.0))
        for _ in range(num_iters.long()):
            t_mid = (t1 + t0) / 2.0
            mid_sign = torch.sign(event_fn(t_mid, interp_fn(t_mid)))
            # Keep the half-interval across which the sign changes.
            matches_start = sign0 == mid_sign
            t0 = torch.where(matches_start, t_mid, t0)
            t1 = torch.where(matches_start, t1, t_mid)
        event_t = (t0 + t1) / 2.0
    return (event_t, interp_fn(event_t))
|
def combine_event_functions(event_fn, t0, y0):
    """Reduce a vector-valued event function to a scalar one.

    We ensure all event functions are initially positive,
    so then we can combine them by taking a min.
    """
    with torch.no_grad():
        # Signs at the initial condition; multiplying by these flips every
        # component to start positive.
        initial_signs = torch.sign(event_fn(t0, y0))

    def combined_event_fn(t, y):
        return torch.min(event_fn(t, y) * initial_signs)

    return combined_event_fn
|
class Fehlberg2(RKAdaptiveStepsizeODESolver):
    # Fehlberg's second-order adaptive Runge-Kutta scheme.
    order = 2
    tableau = _FEHLBERG2_TABLEAU
    mid = _FE_C_MID
|
def _dot_product(x, y):
    """Inner product of two equal-length iterables (elements may be tensors)."""
    total = 0
    for xi, yi in zip(x, y):
        total = total + xi * yi
    return total
|
class AdamsBashforthMoulton(FixedGridODESolver):
    """Adams-Bashforth(-Moulton) multistep method on a fixed grid.

    An explicit Adams-Bashforth predictor is used and, when `implicit=True`,
    an implicit Adams-Moulton corrector is solved by fixed-point (functional)
    iteration.  While fewer than `_MIN_ORDER - 1` history values are available
    the step falls back to RK4.
    """
    order = 4

    def __init__(self, func, y0, rtol=0.001, atol=0.0001, implicit=True, max_iters=_MAX_ITERS, max_order=_MAX_ORDER, **kwargs):
        # BUGFIX: the base class previously received `atol=rtol`; forward the
        # genuine absolute tolerance instead.
        super(AdamsBashforthMoulton, self).__init__(func, y0, rtol=rtol, atol=atol, **kwargs)
        assert (max_order <= _MAX_ORDER), 'max_order must be at most {}'.format(_MAX_ORDER)
        if (max_order < _MIN_ORDER):
            warnings.warn('max_order is below {}, so the solver reduces to `rk4`.'.format(_MIN_ORDER))
        self.rtol = torch.as_tensor(rtol, dtype=y0.dtype, device=y0.device)
        self.atol = torch.as_tensor(atol, dtype=y0.dtype, device=y0.device)
        self.implicit = implicit
        self.max_iters = max_iters
        self.max_order = int(max_order)
        # Bounded history of recent derivative evaluations, newest first.
        self.prev_f = collections.deque(maxlen=(self.max_order - 1))
        self.prev_t = None
        self.bashforth = [x.to(y0.device) for x in _BASHFORTH_DIVISOR]
        self.moulton = [x.to(y0.device) for x in _MOULTON_DIVISOR]

    def _update_history(self, t, f):
        # Record at most one evaluation per distinct time point.
        if ((self.prev_t is None) or (self.prev_t != t)):
            self.prev_f.appendleft(f)
            self.prev_t = t

    def _has_converged(self, y0, y1):
        """Checks that each element is within the error tolerance."""
        error_ratio = _compute_error_ratio(torch.abs((y0 - y1)), self.rtol, self.atol, y0, y1, _linf_norm)
        return (error_ratio < 1)

    def _step_func(self, func, t0, dt, t1, y0):
        f0 = func(t0, y0, perturb=(Perturb.NEXT if self.perturb else Perturb.NONE))
        self._update_history(t0, f0)
        order = min(len(self.prev_f), (self.max_order - 1))
        if (order < (_MIN_ORDER - 1)):
            # Not enough history for the multistep formula: take an RK4 step.
            return (rk4_alt_step_func(func, t0, dt, t1, y0, f0=self.prev_f[0], perturb=self.perturb), f0)
        else:
            # Adams-Bashforth predictor.
            bashforth_coeffs = self.bashforth[order]
            dy = _dot_product((dt * bashforth_coeffs), self.prev_f).type_as(y0)
            if self.implicit:
                # Adams-Moulton corrector, solved by functional iteration.
                moulton_coeffs = self.moulton[(order + 1)]
                delta = (dt * _dot_product(moulton_coeffs[1:], self.prev_f).type_as(y0))
                converged = False
                for _ in range(self.max_iters):
                    dy_old = dy
                    f = func(t1, (y0 + dy), perturb=(Perturb.PREV if self.perturb else Perturb.NONE))
                    dy = (((dt * moulton_coeffs[0]) * f).type_as(y0) + delta)
                    converged = self._has_converged(dy_old, dy)
                    if converged:
                        break
                if (not converged):
                    warnings.warn('Functional iteration did not converge. Solution may be incorrect.')
                    # Discard the oldest history entry; this step is unreliable.
                    self.prev_f.pop()
                self._update_history(t0, f)
            return (dy, f0)
|
class AdamsBashforth(AdamsBashforthMoulton):
    # Explicit Adams-Bashforth: the predictor-only variant (no implicit corrector).
    def __init__(self, func, y0, **kwargs):
        super(AdamsBashforth, self).__init__(func, y0, implicit=False, **kwargs)
|
class Euler(FixedGridODESolver):
    """Explicit (forward) Euler: one derivative evaluation per step."""
    order = 1

    def _step_func(self, func, t0, dt, t1, y0):
        perturb = Perturb.NEXT if self.perturb else Perturb.NONE
        f0 = func(t0, y0, perturb=perturb)
        # Step increment is simply dt * f(t0, y0).
        return dt * f0, f0
|
class Midpoint(FixedGridODESolver):
    """Explicit midpoint method: evaluate the slope at the interval's centre."""
    order = 2

    def _step_func(self, func, t0, dt, t1, y0):
        perturb = Perturb.NEXT if self.perturb else Perturb.NONE
        f0 = func(t0, y0, perturb=perturb)
        # Half Euler step to the midpoint, then use the slope there for the full step.
        half_dt = 0.5 * dt
        y_mid = y0 + f0 * half_dt
        return dt * func(t0 + half_dt, y_mid), f0
|
class RK4(FixedGridODESolver):
    """Fourth-order Runge-Kutta step, delegated to `rk4_alt_step_func`."""
    order = 4

    def _step_func(self, func, t0, dt, t1, y0):
        perturb = Perturb.NEXT if self.perturb else Perturb.NONE
        f0 = func(t0, y0, perturb=perturb)
        increment = rk4_alt_step_func(func, t0, dt, t1, y0, f0=f0, perturb=self.perturb)
        return increment, f0
|
class Heun3(FixedGridODESolver):
    """Heun's third-order fixed-grid Runge-Kutta method."""
    order = 3

    def _step_func(self, func, t0, dt, t1, y0):
        perturb = Perturb.NEXT if self.perturb else Perturb.NONE
        f0 = func(t0, y0, perturb=perturb)
        # Butcher tableau rows: [c, a1, a2, a3]; last row holds the weights b.
        tableau = [
            [0.0, 0.0, 0.0, 0.0],
            [1 / 3, 1 / 3, 0.0, 0.0],
            [2 / 3, 0.0, 2 / 3, 0.0],
            [0.0, 1 / 4, 0.0, 3 / 4],
        ]
        increment = rk3_step_func(func, t0, dt, t1, y0, butcher_tableu=tableau, f0=f0, perturb=self.perturb)
        return increment, f0
|
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
'Fit coefficients for 4th order polynomial interpolation.\n\n Args:\n y0: function value at the start of the interval.\n y1: function value at the end of the interval.\n y_mid: function value at the mid-point of the interval.\n f0: derivative value at the start of the interval.\n f1: derivative value at the end of the interval.\n dt: width of the interval.\n\n Returns:\n List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial\n `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`\n between 0 (start of interval) and 1 (end of interval).\n '
a = ((((2 * dt) * (f1 - f0)) - (8 * (y1 + y0))) + (16 * y_mid))
b = ((((dt * ((5 * f0) - (3 * f1))) + (18 * y0)) + (14 * y1)) - (32 * y_mid))
c = ((((dt * (f1 - (4 * f0))) - (11 * y0)) - (5 * y1)) + (16 * y_mid))
d = (dt * f0)
e = y0
return [e, d, c, b, a]
|
def _interp_evaluate(coefficients, t0, t1, t):
'Evaluate polynomial interpolation at the given time point.\n\n Args:\n coefficients: list of Tensor coefficients as created by `interp_fit`.\n t0: scalar float64 Tensor giving the start of the interval.\n t1: scalar float64 Tensor giving the end of the interval.\n t: scalar float64 Tensor giving the desired interpolation point.\n\n Returns:\n Polynomial interpolation of the coefficients at time `t`.\n '
assert ((t0 <= t) & (t <= t1)), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
x = ((t - t0) / (t1 - t0))
x = x.to(coefficients[0].dtype)
total = (coefficients[0] + (x * coefficients[1]))
x_power = x
for coefficient in coefficients[2:]:
x_power = (x_power * x)
total = (total + (x_power * coefficient))
return total
|
def odeint(func, y0, t, *, rtol=1e-07, atol=1e-09, method=None, options=None, event_fn=None):
    """Integrate the ODE system dy/dt = func(t, y) with y(t[0]) = y0.

    Args:
        func: callable mapping a scalar time Tensor and the state (a Tensor
            or tuple of Tensors) to the state's time derivative.
        y0: initial state; Tensor or tuple of Tensors.
        t: 1-D Tensor of time points, increasing or decreasing; t[0] is the
            initial time.
        rtol, atol: relative / absolute error tolerances.
        method: solver name, looked up in SOLVERS.
        options: solver-specific configuration (requires `method`).
        event_fn: optional event function; when given, integration stops
            where it crosses zero and only t[0] of `t` is used.

    Returns:
        The solution stacked along a new leading time dimension, or the pair
        `(event_t, solution)` when `event_fn` is supplied.

    Raises:
        ValueError: if an invalid `method` is provided.
    """
    (shapes, func, y0, t, rtol, atol, method, options, event_fn, t_is_reversed) = _check_inputs(
        func, y0, t, rtol, atol, method, options, event_fn, SOLVERS)
    solver = SOLVERS[method](func=func, y0=y0, rtol=rtol, atol=atol, **options)

    if event_fn is None:
        solution = solver.integrate(t)
    else:
        event_t, solution = solver.integrate_until_event(t[0], event_fn)
        event_t = event_t.to(t)
        if t_is_reversed:
            event_t = -event_t

    if shapes is not None:
        # The solver worked on a flattened state; restore the original shapes.
        solution = _flat_to_shape(solution, (len(t),), shapes)

    if event_fn is None:
        return solution
    return event_t, solution
|
def odeint_dense(func, y0, t0, t1, *, rtol=1e-07, atol=1e-09, method=None, options=None):
    """Integrate dy/dt = func(t, y) from t0 to t1 and return a dense-output
    callable.

    Only the 'dopri5' method is supported (asserted): the per-step quartic
    interpolation coefficients produced by the solver are recorded so the
    returned function can evaluate the solution at any time inside the
    integrated range.

    Returns:
        dense_output_fn(t_eval): interpolated solution at time t_eval.
    """
    assert torch.is_tensor(y0)
    t = torch.tensor([t0, t1]).to(t0)
    (shapes, func, y0, t, rtol, atol, method, options, _, _) = _check_inputs(func, y0, t, rtol, atol, method, options, None, SOLVERS)
    assert (method == 'dopri5')
    solver = Dopri5Solver(func=func, y0=y0, rtol=rtol, atol=atol, **options)
    # Endpoint solutions are still collected into a buffer as integration runs.
    solution = torch.empty(len(t), *solver.y0.shape, dtype=solver.y0.dtype, device=solver.y0.device)
    solution[0] = solver.y0
    t = t.to(solver.dtype)
    solver._before_integrate(t)
    t0 = solver.rk_state.t0
    times = [t0]        # accepted step boundaries
    interp_coeffs = []  # one stack of interpolation coefficients per step
    for i in range(1, len(t)):
        next_t = t[i]
        # Step the solver until its current interval covers next_t, recording
        # the interpolation coefficients of every accepted step.
        while (next_t > solver.rk_state.t1):
            solver.rk_state = solver._adaptive_step(solver.rk_state)
            t1 = solver.rk_state.t1
            if (t1 != t0):  # skip zero-length intervals
                t0 = t1
                times.append(t1)
                interp_coeffs.append(torch.stack(solver.rk_state.interp_coeff))
        solution[i] = _interp_evaluate(solver.rk_state.interp_coeff, solver.rk_state.t0, solver.rk_state.t1, next_t)
    times = torch.stack(times).reshape((- 1)).cpu()
    interp_coeffs = torch.stack(interp_coeffs)

    def dense_output_fn(t_eval):
        # Locate the step interval containing t_eval and evaluate its
        # interpolating polynomial.
        idx = torch.searchsorted(times, t_eval, side='right')
        t0 = times[(idx - 1)]
        t1 = times[idx]
        coef = [interp_coeffs[(idx - 1)][i] for i in range(interp_coeffs.shape[1])]
        return _interp_evaluate(coef, t0, t1, t_eval)
    return dense_output_fn
|
def odeint_event(func, y0, t0, *, event_fn, reverse_time=False, odeint_interface=odeint, **kwargs):
    'Automatically links up the gradient from the event time.'
    # Build a two-element time grid; with an event_fn only t[0] matters, the
    # second (detached) entry fixes the integration direction.
    if reverse_time:
        t = torch.cat([t0.reshape((- 1)), (t0.reshape((- 1)).detach() - 1.0)])
    else:
        t = torch.cat([t0.reshape((- 1)), (t0.reshape((- 1)).detach() + 1.0)])
    (event_t, solution) = odeint_interface(func, y0, t, event_fn=event_fn, **kwargs)
    # Re-run input normalisation to recover the flattened func/event_fn (and
    # the tuple-state shapes) needed by the implicit-function rerouting.
    (shapes, _func, _, t, _, _, _, _, event_fn, _) = _check_inputs(func, y0, t, 0.0, 0.0, None, None, event_fn, SOLVERS)
    if (shapes is not None):
        # Flatten the final state of each tuple component into one vector.
        state_t = torch.cat([s[(- 1)].reshape((- 1)) for s in solution])
    else:
        state_t = solution[(- 1)]
    # NOTE(review): the sign flips bracketing .apply presumably mirror a time
    # negation performed inside _check_inputs for reverse_time — confirm.
    if reverse_time:
        event_t = (- event_t)
    (event_t, state_t) = ImplicitFnGradientRerouting.apply(_func, event_fn, event_t, state_t)
    if reverse_time:
        event_t = (- event_t)
    if (shapes is not None):
        state_t = _flat_to_shape(state_t, (), shapes)
        # Splice the rerouted final state back as the last time slice.
        solution = tuple((torch.cat([s[:(- 1)], s_t[None]], dim=0) for (s, s_t) in zip(solution, state_t)))
    else:
        solution = torch.cat([solution[:(- 1)], state_t[None]], dim=0)
    return (event_t, solution)
|
class ImplicitFnGradientRerouting(torch.autograd.Function):
    """Reroutes gradients through the implicit definition of the event time.

    Forward is an identity on (event_t, state_t); backward applies the
    implicit function theorem to event_fn(event_t, state_t) = 0 so upstream
    gradients at the event are folded into the state gradient.
    """

    @staticmethod
    def forward(ctx, func, event_fn, event_t, state_t):
        ' event_t is the solution to event_fn '
        ctx.func = func
        ctx.event_fn = event_fn
        ctx.save_for_backward(event_t, state_t)
        # Detach so downstream autograd treats these as outputs of this Function.
        return (event_t.detach(), state_t.detach())

    @staticmethod
    def backward(ctx, grad_t, grad_state):
        func = ctx.func
        event_fn = ctx.event_fn
        (event_t, state_t) = ctx.saved_tensors
        # Fresh leaves for the local vjp computation.
        event_t = event_t.detach().clone().requires_grad_(True)
        state_t = state_t.detach().clone().requires_grad_(True)
        # Dynamics value at the event (dy/dt per the odeint contract).
        f_val = func(event_t, state_t)
        # NOTE(review): f_val is computed outside the enable_grad block, so no
        # graph is built through func here — confirm this is intentional.
        with torch.enable_grad():
            (c, (par_dt, dstate)) = vjp(event_fn, (event_t, state_t))
        # Total time derivative of the event function along the trajectory.
        dcdt = (par_dt + torch.sum((dstate * f_val)))
        grad_t = (grad_t + torch.sum((grad_state * f_val)))
        # Implicit function theorem: dt/dstate = -dstate / dcdt (epsilon
        # guards against a vanishing denominator).
        dstate = (dstate * ((- grad_t) / (dcdt + 1e-12)).reshape_as(c))
        grad_state = (grad_state + dstate)
        return (None, None, None, grad_state)
|
class ScipyWrapperODESolver(metaclass=abc.ABCMeta):
    """Adapter exposing SciPy's `solve_ivp` behind the torchdiffeq solver API.

    The torch state is flattened to a 1-D numpy array for SciPy, and the
    solution is reshaped and moved back to the original device and dtype.
    """

    def __init__(self, func, y0, rtol, atol, min_step=0, max_step=float('inf'), solver='LSODA', **unused_kwargs):
        # These options are accepted by other solvers but meaningless here.
        for ignored in ('norm', 'grid_points', 'eps'):
            unused_kwargs.pop(ignored, None)
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.dtype = y0.dtype
        self.device = y0.device
        self.shape = y0.shape
        # SciPy works on flat CPU arrays.
        self.y0 = y0.detach().cpu().numpy().reshape(-1)
        self.rtol = rtol
        self.atol = atol
        self.min_step = min_step
        self.max_step = max_step
        self.solver = solver
        self.func = convert_func_to_numpy(func, self.shape, self.device, self.dtype)

    def integrate(self, t):
        """Solve at the times in `t`; a single time returns just y0."""
        if t.numel() == 1:
            return torch.tensor(self.y0)[None].to(self.device, self.dtype)
        t = t.detach().cpu().numpy()
        sol = solve_ivp(
            self.func,
            t_span=[t.min(), t.max()],
            y0=self.y0,
            t_eval=t,
            method=self.solver,
            rtol=self.rtol,
            atol=self.atol,
            min_step=self.min_step,
            max_step=self.max_step,
        )
        result = torch.tensor(sol.y).T.to(self.device, self.dtype)
        return result.reshape(-1, *self.shape)

    @classmethod
    def valid_callbacks(cls):
        # The SciPy backend supports none of the torchdiffeq callbacks.
        return set()
|
def convert_func_to_numpy(func, shape, device, dtype):
    """Wrap a torch vector field `func` as a numpy-in/numpy-out callable.

    The returned function converts `(t, y)` to tensors of the given shape,
    device and dtype, evaluates `func` without tracking gradients, and
    flattens the result back to a 1-D numpy array.
    """

    def np_func(t, y):
        t_tensor = torch.tensor(t).to(device, dtype)
        y_tensor = torch.reshape(torch.tensor(y).to(device, dtype), shape)
        with torch.no_grad():
            out = func(t_tensor, y_tensor)
        return out.detach().cpu().numpy().reshape(-1)

    return np_func
|
class AdaptiveStepsizeODESolver(metaclass=abc.ABCMeta):
    """Base class for adaptive-step solvers.

    Subclasses implement `_advance(next_t)`, which steps the internal state
    forward and returns the solution at `next_t`.
    """

    def __init__(self, dtype, y0, norm, **unused_kwargs):
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs
        self.y0 = y0
        self.dtype = dtype
        self.norm = norm

    def _before_integrate(self, t):
        # Hook for subclasses; invoked once before stepping begins.
        pass

    @abc.abstractmethod
    def _advance(self, next_t):
        raise NotImplementedError

    @classmethod
    def valid_callbacks(cls):
        return set()

    def integrate(self, t):
        """Return the solution at every time in `t`; t[0] maps to y0."""
        solution = torch.empty(len(t), *self.y0.shape, dtype=self.y0.dtype, device=self.y0.device)
        solution[0] = self.y0
        t = t.to(self.dtype)
        self._before_integrate(t)
        for i, next_t in enumerate(t[1:], start=1):
            solution[i] = self._advance(next_t)
        return solution
|
class AdaptiveStepsizeEventODESolver(AdaptiveStepsizeODESolver, metaclass=abc.ABCMeta):
    """Adaptive solver that can also terminate at a zero of an event function."""

    @abc.abstractmethod
    def _advance_until_event(self, event_fn):
        raise NotImplementedError

    def integrate_until_event(self, t0, event_fn):
        """Integrate from `t0` until `event_fn` crosses zero.

        Returns `(event_time, solution)`, where solution stacks the initial
        and final states along a new leading dimension.
        """
        t0 = t0.to(self.y0.device, self.dtype)
        self._before_integrate(t0.reshape(-1))
        event_time, y1 = self._advance_until_event(event_fn)
        return event_time, torch.stack([self.y0, y1], dim=0)
|
class FixedGridODESolver(metaclass=abc.ABCMeta):
    """Base class for fixed-grid (non-adaptive) ODE solvers.

    Subclasses implement `_step_func` (one step of the scheme, returning
    `(dy, f0)`) and set `order`, the convergence order of the scheme.
    """
    # Convergence order; set by each concrete subclass.
    order: int

    def __init__(self, func, y0, step_size=None, grid_constructor=None, interp='linear', perturb=False, **unused_kwargs):
        # atol is kept (used by event root-finding); rtol/norm are accepted
        # for interface compatibility with adaptive solvers but ignored.
        self.atol = unused_kwargs.pop('atol')
        unused_kwargs.pop('rtol', None)
        unused_kwargs.pop('norm', None)
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs
        self.func = func
        self.y0 = y0
        self.dtype = y0.dtype
        self.device = y0.device
        self.step_size = step_size
        self.interp = interp
        self.perturb = perturb
        # step_size and grid_constructor are mutually exclusive ways to
        # define the integration grid; with neither, the output times are
        # used directly as the grid.
        if (step_size is None):
            if (grid_constructor is None):
                self.grid_constructor = (lambda f, y0, t: t)
            else:
                self.grid_constructor = grid_constructor
        elif (grid_constructor is None):
            self.grid_constructor = self._grid_constructor_from_step_size(step_size)
        else:
            raise ValueError('step_size and grid_constructor are mutually exclusive arguments.')

    @classmethod
    def valid_callbacks(cls):
        return {'callback_step'}

    @staticmethod
    def _grid_constructor_from_step_size(step_size):
        # Build a uniform grid covering [t[0], t[-1]]; the final point is
        # clamped to t[-1] so the grid ends exactly at the requested time.
        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[(- 1)]
            niters = torch.ceil((((end_time - start_time) / step_size) + 1)).item()
            t_infer = ((torch.arange(0, niters, dtype=t.dtype, device=t.device) * step_size) + start_time)
            t_infer[(- 1)] = t[(- 1)]
            return t_infer
        return _grid_constructor

    @abc.abstractmethod
    def _step_func(self, func, t0, dt, t1, y0):
        # One step of the scheme; returns (dy, f0) with f0 = func(t0, y0).
        pass

    def integrate(self, t):
        """Step along the constructed grid, interpolating onto the times in `t`."""
        time_grid = self.grid_constructor(self.func, self.y0, t)
        assert ((time_grid[0] == t[0]) and (time_grid[(- 1)] == t[(- 1)]))
        solution = torch.empty(len(t), *self.y0.shape, dtype=self.y0.dtype, device=self.y0.device)
        solution[0] = self.y0
        j = 1
        y0 = self.y0
        for (t0, t1) in zip(time_grid[:(- 1)], time_grid[1:]):
            dt = (t1 - t0)
            self.func.callback_step(t0, y0, dt)
            (dy, f0) = self._step_func(self.func, t0, dt, t1, y0)
            y1 = (y0 + dy)
            # Emit all requested output times that fall inside this grid step.
            while ((j < len(t)) and (t1 >= t[j])):
                if (self.interp == 'linear'):
                    solution[j] = self._linear_interp(t0, t1, y0, y1, t[j])
                elif (self.interp == 'cubic'):
                    f1 = self.func(t1, y1)
                    solution[j] = self._cubic_hermite_interp(t0, y0, f0, t1, y1, f1, t[j])
                else:
                    raise ValueError(f'Unknown interpolation method {self.interp}')
                j += 1
            y0 = y1
        return solution

    def integrate_until_event(self, t0, event_fn):
        """Step with fixed `step_size` until event_fn changes sign, then
        root-find the crossing inside the bracketing step."""
        assert (self.step_size is not None), 'Event handling for fixed step solvers currently requires `step_size` to be provided in options.'
        # NOTE(review): .abs() here presumably maps a complex state dtype to
        # its real counterpart for the time variable — confirm.
        t0 = t0.type_as(self.y0.abs())
        y0 = self.y0
        dt = self.step_size
        sign0 = torch.sign(event_fn(t0, y0))
        max_itrs = 20000
        itr = 0
        while True:
            itr += 1
            t1 = (t0 + dt)
            (dy, f0) = self._step_func(self.func, t0, dt, t1, y0)
            y1 = (y0 + dy)
            sign1 = torch.sign(event_fn(t1, y1))
            if (sign0 != sign1):
                # Sign change: locate the event inside [t0, t1] using the
                # same interpolant the solver uses for dense output.
                if (self.interp == 'linear'):
                    interp_fn = (lambda t: self._linear_interp(t0, t1, y0, y1, t))
                elif (self.interp == 'cubic'):
                    f1 = self.func(t1, y1)
                    interp_fn = (lambda t: self._cubic_hermite_interp(t0, y0, f0, t1, y1, f1, t))
                else:
                    raise ValueError(f'Unknown interpolation method {self.interp}')
                (event_time, y1) = find_event(interp_fn, sign0, t0, t1, event_fn, float(self.atol))
                break
            else:
                (t0, y0) = (t1, y1)
                if (itr >= max_itrs):
                    raise RuntimeError(f'Reached maximum number of iterations {max_itrs}.')
        solution = torch.stack([self.y0, y1], dim=0)
        return (event_time, solution)

    def _cubic_hermite_interp(self, t0, y0, f0, t1, y1, f1, t):
        # Standard cubic Hermite basis on the normalised coordinate h.
        h = ((t - t0) / (t1 - t0))
        h00 = (((1 + (2 * h)) * (1 - h)) * (1 - h))
        h10 = ((h * (1 - h)) * (1 - h))
        h01 = ((h * h) * (3 - (2 * h)))
        h11 = ((h * h) * (h - 1))
        dt = (t1 - t0)
        return ((((h00 * y0) + ((h10 * dt) * f0)) + (h01 * y1)) + ((h11 * dt) * f1))

    def _linear_interp(self, t0, t1, y0, y1, t):
        # Exact endpoints avoid 0/0 when t0 == t1.
        if (t == t0):
            return y0
        if (t == t1):
            return y1
        slope = ((t - t0) / (t1 - t0))
        return (y0 + (slope * (y1 - y0)))
|
def bohrToMeters(value, dimension=1):
    """Convert `value` from bohr**dimension to meters**dimension.

    NOTE(review): the constant keeps the precision historically used here;
    CODATA lists the Bohr radius as 5.29177210903e-11 m.
    """
    BOHR_CONSTANT = 5.2917725e-11  # Bohr radius in meters
    return value * BOHR_CONSTANT ** dimension
|
def fileExists(filename):
    """Return True if `filename` exists on disk."""
    return os.path.exists(filename)
|
class ParseError(Exception):
    """Raised when an input file cannot be parsed.

    `errorTags` carries the tags/sections that triggered the failure.
    """

    def __init__(self, message, errorTags):
        super().__init__(message)
        self.errorTags = errorTags
|
def parse_win_mp_grid(f):
    """Return the Monkhorst-Pack grid (list of ints) from a .win file.

    Scans `f` for the first line containing ``mp_grid`` and parses the
    whitespace-separated integers after the colon; returns None when no
    such line exists.
    """
    for line in f.readlines():
        if 'mp_grid' in line:
            tokens = (piece.strip() for piece in line.split(':')[1].strip().split(' '))
            return [int(tok) for tok in tokens if tok]
|
def parse_nnkp_nnkpts(f):
    """Read the ``begin nnkpts`` ... ``end nnkpts`` block of a .nnkp file.

    Returns a list of 5-int tuples (k1, k2, G1, G2, G3); lines inside the
    block with a different field count (e.g. the count header) are skipped.
    Note that `f` is closed on return because it is used as a context
    manager.
    """
    nnkpts = []
    with f as stream:
        # Skip everything up to the block opener.
        for line in stream:
            if line.strip() == 'begin nnkpts':
                break
        for line in stream:
            stripped = line.strip()
            if stripped == 'end nnkpts':
                break
            fields = stripped.split()
            values = tuple(map(int, fields))
            if len(fields) == 5:
                nnkpts.append(values)
    return nnkpts
|
def parse_pair_info_line(line):
    """Split a fixed-width pair-info line into (k1, k2, G).

    Columns 0-7 hold k1, 9-15 hold k2, and three 7-character fields starting
    at columns 17, 25 and 33 hold the G vector components.
    """
    k1 = float(line[0:8])
    k2 = float(line[9:16])
    G = tuple(float(line[start:start + 7]) for start in (17, 25, 33))
    return k1, k2, G
|
def parse_matrix_element_line(line):
    """Parse a fixed-width 'real imag' overlap line into a complex value.

    Columns 0-17 hold the real part and columns 19-35 the imaginary part.
    """
    real_part = float(line[0:18])
    imaginary_part = float(line[19:36])
    return complex(real_part, imaginary_part)
|
def parse_mmn_info_line(line):
    """Parse the .mmn header counts: (n_energy, n_pairs, n_neighbours)."""
    fields = (line[0:12], line[13:24], line[25:36])
    n_energy, n_pairs, n_neighbours = (int(s) for s in fields)
    return n_energy, n_pairs, n_neighbours
|
def determine_neighbours(D, d, P=[0, 1, 2]):
    """Compute a bidirectional graph of points adjacent in the grid of
    dimensions `D`, in the forward direction given by `d`.

    The value at each node is a pair [backward, forward] of 1-based linear
    indices (None at path end-points), so the graph is acyclic.  The
    dimension order for linear indexing is given by P (C-style [0,1,2],
    FORTRAN-style [2,1,0]).

    Also builds the list of all neighbour pairs, each a tuple
    (i, i_neighbour, G1, G2, G3) where G is the reciprocal-lattice
    translation applied when wrapping around the grid.

    Returns: (pair tuple list, neighbour graph)

    NOTE(review): the mutable default P is only ever read (via permute),
    never mutated, so the shared-default pitfall does not bite here.
    """
    product = (lambda l: functools.reduce((lambda x, y: (x * y)), l, 1))
    vector_add = (lambda v1, v2: [(x + y) for (x, y) in zip(v1, v2)])
    permute = (lambda v, P: [v[i] for i in P])
    # 0-based linear index under mixed-radix weights (1, D[0], D[0]*D[1], ...).
    linear_index = (lambda v, D: sum(((c * i) for (i, c) in zip(v, [product(D[:i]) for i in range(len(D))]))))
    def wrap_vector(v, d, D):
        # Wrap out-of-grid components back into the grid; G becomes d
        # (aliased, but never mutated afterwards) when a wrap occurred.
        G = [0, 0, 0]
        for (i, j) in enumerate(d):
            if (j != 0):
                if ((v[i] < 0) or (v[i] >= D[i])):
                    v[i] = (v[i] % D[i])
                    G = d
        return (v, G)
    nnkpts = []
    neighbour_graph = {}
    for a in range(D[0]):
        for b in range(D[1]):
            for c in range(D[2]):
                v = [a, b, c]
                (v_neighbour, G) = wrap_vector(vector_add(v, d), d, D)
                # 1-based linear indices of the point and its neighbour.
                i = (linear_index(permute(v, P), permute(D, P)) + 1)
                i_neighbour = (linear_index(permute(v_neighbour, P), permute(D, P)) + 1)
                if (i not in neighbour_graph):
                    neighbour_graph[i] = [None, None]
                if (i_neighbour not in neighbour_graph):
                    neighbour_graph[i_neighbour] = [None, None]
                # Link only in-grid (G == 0) neighbours so each path remains
                # acyclic, ending in None at the boundaries.
                if (G.count(0) == 3):
                    neighbour_graph[i][1] = i_neighbour
                    neighbour_graph[i_neighbour][0] = i
                nnkpts.append((i, i_neighbour, G[0], G[1], G[2]))
    return (nnkpts, neighbour_graph)
|
def print_usage():
    """Print command-line usage for mmn2pathphase."""
    for line in ('Usage: mmn2pathphase case [direction] [-w]',
                 ' direction x, y, or z; for <x, y, z> (default x)',
                 ' -w option is for Weyl k-path calculation'):
        print(line)
|
def main(args):
    """Compute Berry-phase sums along k-paths from wannier90 output files.

    Reads <case>.win (k-mesh), <case>.nnkp (Weyl mode) and <case>.mmn
    (overlap matrices), multiplies the overlap matrices along each k-path,
    and writes the per-path phase sums to <case>.pathphase.  Returns the
    phase-sum list, or None in Weyl (-w) mode.

    NOTE(review): args[0] is the case name and args[1] the direction, so the
    `len(args) < 2` / `len(args) > 2` guards look off by one relative to the
    usage text — confirm against how callers build `args`.
    """
    VERBOSE = False
    parse_line_list = (lambda line, delimiter, T: [T(y) for y in [x.strip() for x in line.strip().split(delimiter)] if y])
    if (len(args) < 2):
        print('Error: no case or direction provided')
        exit(1)
    # Optional flags: spin channel suffix and Weyl-path mode.
    spOption = ''
    wCalc = False
    for arg in args:
        if ('-up' in arg):
            spOption = 'up'
        elif ('-dn' in arg):
            spOption = 'dn'
        elif ('-w' in arg):
            wCalc = True
    case_name = args[0]
    direction_args = {'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]}
    if (len(args) > 2):
        if (args[1] not in direction_args):
            print('Error: unknown direction', args[1])
            exit(1)
        direction = direction_args[args[1]]
    else:
        direction = direction_args['x']
    Mmn = {}            # overlap matrices keyed by (float) first k-point index
    phase_sums = []
    # k-mesh dimensions from the .win file.
    f_win = open(((case_name + '.win') + spOption), 'r')
    kmesh = parse_win_mp_grid(f_win)
    f_win.close()
    if wCalc:
        # Weyl mode: the k-path comes straight from the .nnkp file.
        f_nnkp = open(((case_name + '.nnkp') + spOption), 'r')
        nnkpts = parse_nnkp_nnkpts(f_nnkp)
        f_nnkp.close()
    else:
        # Mesh mode: derive neighbour pairs (FORTRAN index order) ourselves.
        (nnkpts, neighbour_graph) = determine_neighbours(kmesh, direction, [2, 1, 0])
    if VERBOSE:
        print(nnkpts)
    # Read the overlap matrices, keeping only pairs on the wanted paths.
    f_mmn = open(((case_name + '.mmn') + spOption), 'r')
    f_mmn.readline()
    (n_energy, n_pairs, n_neighbours) = parse_mmn_info_line(f_mmn.readline())
    for i in range((n_pairs * n_neighbours)):
        (k1, k2, G) = parse_pair_info_line(f_mmn.readline())
        # Float/int tuple comparison is fine here: 1.0 == 1 elementwise.
        if ((k1, k2, G[0], G[1], G[2]) in nnkpts):
            Mmnk1k2 = numpy.zeros(shape=(n_energy, n_energy), dtype=complex)
            for a in range(n_energy):
                for b in range(n_energy):
                    element_value = parse_matrix_element_line(f_mmn.readline())
                    Mmnk1k2[(b, a)] = element_value
            Mmn[k1] = Mmnk1k2
        else:
            # Still consume the matrix block to stay aligned in the file.
            for a in range(n_energy):
                for b in range(n_energy):
                    parse_matrix_element_line(f_mmn.readline())
    f_mmn.close()
    if wCalc:
        # Weyl mode: multiply all matrices into one loop product and report
        # Wannier charge centres and the total Berry phase.
        # NOTE(review): assumes dict iteration starts at key 1 so L is bound
        # before the matmul branch runs — confirm with input ordering.
        for (i, Mmni) in Mmn.items():
            i = int(i)
            if (i == 1):
                L = Mmni
            else:
                L = numpy.matmul(L, Mmni)
        (leign, vect) = numpy.linalg.eig(L)
        del vect
        wcc = numpy.array([((numpy.angle(z) / (2 * numpy.pi)) % 1) for z in leign])
        idx = numpy.argsort(wcc)
        wcc = wcc[idx]
        numpy.set_printoptions(linewidth=numpy.inf)
        numpy.savetxt('wcc_i.csv', [wcc], delimiter=',', footer='', comments='', fmt='%f')
        psin = ((wcc * 2) * numpy.pi)
        psi = sum(psin)
        print('[ BerryPI ]', 'Berry phase sum (rad) =', psi)
        return
    # Mesh mode: walk each acyclic path (nodes with no forward neighbour are
    # path ends), multiply overlaps along it, and sum eigenvalue phases.
    testdat = open('test.dat', 'w')  # NOTE(review): opened but never written
    for (k, neighbours) in neighbour_graph.items():
        k_prev = neighbours[0]
        k_next = neighbours[1]
        if (k_next is None):
            kpath = []
            kpath.append(k)
            kpath.append(k_prev)
            while k_prev:
                neighbours = neighbour_graph[k_prev]
                k_prev = neighbours[0]
                kpath.append(k_prev)
            kpath = kpath[:(- 1)]  # drop the trailing None
            kpath.reverse()
            L = Mmn[kpath[0]]
            if (len(kpath) > 1):
                for ki in kpath[1:]:
                    Mmni = Mmn[ki]
                    L = numpy.matmul(L, Mmni)
            (leign, vect) = numpy.linalg.eig(L)
            del vect
            psin = numpy.array([(numpy.angle(z) % (2 * numpy.pi)) for z in leign])
            psi = sum(psin)
            phase_sums.append((kpath[0], psi))
    testdat.close()
    phase_sums.sort(key=(lambda x: x[0]))
    # Write the per-path phases, one line per starting k-point.
    f_pathphase = open(((case_name + '.pathphase') + spOption), 'w')
    f_pathphase.write(('%4d\n' % len(phase_sums)))
    f_pathphase.write((' %2d %2d %2d\n' % (direction[0], direction[1], direction[2])))
    for (k, phase_sum) in phase_sums:
        f_pathphase.write((' %6d %.12f\n' % (k, phase_sum)))
    f_pathphase.close()
    return phase_sums
|
def rmerror(corename):
    """Delete every *corename*.error file in the working directory."""
    pattern = '*' + corename + '*.error'
    print(DEFAULT_PREFIX, 'Cleaning error files:', pattern)
    for path in glob.glob(pattern):
        os.remove(path)
|
def getStringFromList(theList):
    """Join the items of `theList` into one space-separated string.

    Fixes: replaces a quadratic reduce-based concatenation with str.join,
    and returns '' for an empty list instead of raising IndexError.
    """
    return ' '.join(str(item) for item in theList)
|
class VirtualShellInstance():
    """Builds and runs a bash command line, optionally piping input via echo.

    SECURITY NOTE(review): the command runs with shell=True and the optional
    `input` option is interpolated into the command string unescaped, so
    untrusted input permits shell injection.  Flagged rather than changed,
    since callers may rely on shell features.
    """
    def __init__(self, command, *arguments, **options):
        # Flatten positional arguments to one space-separated string.
        if arguments:
            self._arguments = getStringFromList(arguments)
        else:
            self._arguments = ''
        self._command = command
        self.output = None
        if ('input' in options):
            theInput = options['input']
            # A list becomes newline-separated lines fed through `echo | cmd`.
            if (type(theInput) == type([])):
                theInput = functools.reduce((lambda i, j: ((str(i) + '\n') + str(j))), theInput)
            self._command = ((((('echo ' + '"') + str(theInput)) + '"') + ' | ') + self._command)
    def __call__(self):
        # Calling the instance is a synonym for run().
        self.run()
    def run(self):
        """Execute the command; check_call raises CalledProcessError on a
        non-zero exit status."""
        commandString = ((self._command + ' ') + self._arguments)
        print(((DEFAULT_PREFIX + 'Calling command: ') + self.getCommandString()))
        self.output = subprocess.check_call(commandString, shell=True, executable='/bin/bash')
    def progress(self):
        print(self.output)
    def getCommandString(self):
        """Return the full command line with newlines flattened to spaces."""
        commandString = ((self._command + ' ') + self._arguments)
        commandString = commandString.replace('\n', ' ')
        return commandString
|
def testerror(corename):
pattern = (('*' + corename) + '*.error')
for errfilename in glob.glob(pattern):
errfilesize = os.path.getsize(errfilename)
if (errfilesize != 0):
print('ERROR detected in', corename)
print('Please check the error file:', errfilename)
sys.exit(1)
|
def WloopIN_Z(X1, X2, S, E):
    """Build Wilson-loop rows [x1, x2, S, x1, x2, E] for loops along z.

    X1 and X2 are (n, 1) column vectors of the in-plane coordinates; S and E
    are the loop's start and end along the z direction.
    """
    Data = np.append(X1, X2, axis=1)
    nrow = Data.shape[0]
    ones_row = np.ones((1, nrow))
    Data = np.insert(Data, 2, S * ones_row, axis=1)
    Data = np.insert(Data, 3, X1.T, axis=1)
    Data = np.insert(Data, 4, X2.T, axis=1)
    Data = np.insert(Data, 5, E * ones_row, axis=1)
    return Data
|
def WloopIN_X(X1, X2, S, E):
    """Build Wilson-loop rows [S, x1, x2, E, x1, x2] for loops along x.

    X1 and X2 are (n, 1) column vectors of the in-plane coordinates; S and E
    are the loop's start and end along the x direction.
    """
    Data = np.append(X1, X2, axis=1)
    nrow = Data.shape[0]
    ones_row = np.ones((1, nrow))
    Data = np.insert(Data, 0, S * ones_row, axis=1)
    Data = np.insert(Data, 3, E * ones_row, axis=1)
    Data = np.insert(Data, 4, X1.T, axis=1)
    Data = np.insert(Data, 5, X2.T, axis=1)
    return Data
|
def WloopIN_Y(X1, X2, S, E):
    """Build Wilson-loop rows [x1, S, x2, x1, E, x2] for loops along y.

    X1 and X2 are (n, 1) column vectors of the in-plane coordinates; S and E
    are the loop's start and end along the y direction.
    """
    Data = np.append(X1, X2, axis=1)
    nrow = Data.shape[0]
    ones_row = np.ones((1, nrow))
    Data = np.insert(Data, 1, S * ones_row, axis=1)
    Data = np.insert(Data, 3, X1.T, axis=1)
    Data = np.insert(Data, 4, E * ones_row, axis=1)
    Data = np.insert(Data, 5, X2.T, axis=1)
    return Data
|
def write_date(f):
    """Write a 'File written on <date> at <time>' stamp plus a blank line."""
    stamp = datetime.now().strftime('%d%b%Y at %H:%M:%S')
    f.write('File written on ')
    f.write(stamp)
    f.write('\n\n')
|
def write_calc_only_A(f):
    """Write the wannier90 calc_only_A flag (always false)."""
    f.write('calc_only_A : F\n\n')
|
def write_real_lattice(f, real_lattice):
    """Write the 3x3 real-space lattice block in .nnkp format."""
    f.write('begin real_lattice\n')
    for row in real_lattice[:3]:
        f.write(' {0:>11.7f} {1:>11.7f} {2:>11.7f}\n'.format(*row))
    f.write('end real_lattice\n\n')
|
def write_recip_lattice(f, recip_lattice):
    """Write the 3x3 reciprocal lattice block in .nnkp format."""
    f.write('begin recip_lattice\n')
    for row in recip_lattice[:3]:
        f.write(' {0:>11.7f} {1:>11.7f} {2:>11.7f}\n'.format(*row))
    f.write('end recip_lattice\n\n')
|
def write_kpoints(f, kpoints):
    """Write the kpoints block: a count line then one fixed-width row per point."""
    f.write('begin kpoints\n')
    f.write('{0:>6d}\n'.format(len(kpoints)))
    for kp in kpoints:
        f.write(' {0:>13.8f} {1:>13.8f} {2:>13.8f}\n'.format(*kp))
    f.write('end kpoints\n\n')
|
def write_projections(f):
    """Write an empty projections block (no projections are used)."""
    f.write('begin projections\n')
    f.write('end projections\n\n')
|
def write_nnkpts(f, nnkpts, wCalc):
    """Write the nnkpts block.

    The header holds the neighbours-per-k-point count: 1 for a Weyl-path
    calculation (a single chain), otherwise 3 (one per direction).
    """
    f.write('begin nnkpts\n')
    count = 1 if wCalc else 3
    f.write('{0:4d}\n'.format(count))
    for pair in nnkpts:
        f.write(' {0:5d} {1:5d} {2:3d} {3:3d} {4:3d}\n'.format(*pair))
    f.write('end nnkpts\n\n')
|
def write_exclude_bands(f):
    """Write an exclude_bands block that excludes nothing."""
    f.write('begin exclude_bands\n{0:4d}\nend exclude_bands\n'.format(0))
|
def calculate_nnkpts(D, wCalc, wTranslDir, nkpt):
    """Calculate neighbour pairs for all k-point paths.

    Args:
        D: k-mesh dimensions (three ints).
        wCalc: True for a Weyl-path calculation (explicit single chain).
        wTranslDir: direction of the closing G vector of the Weyl loop:
            0 = none, 1 = x, 2 = y, 3 = z.
        nkpt: number of k-points in the list (Weyl path only).

    Returns:
        List of (k1, k2, G1, G2, G3) tuples, 1-based k indices.

    Raises:
        ValueError: if wCalc is True and wTranslDir is not in {0, 1, 2, 3}.

    Fixes: the mesh-based neighbour table is no longer computed and then
    thrown away when wCalc is True; the closing-G if/elif chain is a lookup.
    """
    if wCalc:
        # A Weyl path is a plain chain 1 -> 2 -> ... -> nkpt, closed back
        # onto k-point 1 with a reciprocal-lattice shift along wTranslDir.
        closing_G = {0: (0, 0, 0), 1: (1, 0, 0), 2: (0, 1, 0), 3: (0, 0, 1)}
        if wTranslDir not in closing_G:
            raise ValueError(f'Error in win2nnkp wTranslDir={wTranslDir}, while expected one of [0,1,2,3]')
        nnkpts = [(i + 1, i + 2, 0, 0, 0) for i in range(nkpt - 1)]
        nnkpts.append((nkpt, 1) + closing_G[wTranslDir])
        return nnkpts

    # Mesh mode: pair every point with its +x/+y/+z neighbour, wrapping at
    # the mesh boundary and recording the applied G translation.
    product = (lambda l: functools.reduce((lambda x, y: x * y), l, 1))
    vector_add = (lambda v1, v2: [x + y for (x, y) in zip(v1, v2)])
    permute = (lambda v, P: [v[i] for i in P])
    # 0-based linear index under mixed-radix weights (1, D[0], D[0]*D[1], ...).
    linear_index = (lambda v, D: sum(c * i for (i, c) in zip(v, [product(D[:i]) for i in range(len(D))])))

    def wrap_vector(v, d, D):
        # Wrap an out-of-mesh neighbour back into the mesh; G records the
        # reciprocal-lattice translation that was applied.
        G = [0, 0, 0]
        for i, j in enumerate(d):
            if j != 0 and (v[i] < 0 or v[i] >= D[i]):
                v[i] = v[i] % D[i]
                G = d
        return v, G

    P = [2, 1, 0]  # FORTRAN-style index order (z fastest)
    directions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    nnkpts = []
    for a in range(D[0]):
        for b in range(D[1]):
            for c in range(D[2]):
                for d in directions:
                    v = [a, b, c]
                    v_neighbour, G = wrap_vector(vector_add(v, d), d, D)
                    i = linear_index(permute(v, P), permute(D, P)) + 1
                    i_neighbour = linear_index(permute(v_neighbour, P), permute(D, P)) + 1
                    nnkpts.append((i, i_neighbour, G[0], G[1], G[2]))
    return nnkpts
|
def parse_win_kpoints(f):
    """Read the ``begin kpoints`` ... ``end kpoints`` block of a .win file.

    Returns a list of float tuples, one per k-point line.

    Fixes: the token-splitting helper is defined locally instead of relying
    on a module-level ``parse_line_list`` that this module does not define,
    and a missing block now raises ValueError instead of spinning forever
    at EOF.
    """
    parse_line_list = lambda line, delimiter, T: [T(y) for y in (x.strip() for x in line.strip().split(delimiter)) if y]
    while True:
        header = f.readline()
        if not header:
            raise ValueError('begin kpoints block not found')
        if 'begin kpoints' in header:
            break
    kpoints = []
    for line in f.readlines():
        if 'end kpoints' in line:
            break
        kpoints.append(tuple(parse_line_list(line, ' ', float)))
    return kpoints
|
def parse_win_mp_grid(f):
    """Return the Monkhorst-Pack grid (list of ints) from a .win file, or
    None if no ``mp_grid`` line exists.

    Fix: defines the token-splitting helper locally instead of relying on a
    module-level ``parse_line_list`` that this module does not define.
    """
    parse_line_list = lambda line, delimiter, T: [T(y) for y in (x.strip() for x in line.strip().split(delimiter)) if y]
    for line in f.readlines():
        if 'mp_grid' in line:
            return parse_line_list(line.split(':')[1], ' ', int)
|
def parse_win_unit_cell_cart(f):
    """Parse the unit_cell_cart block of a .win file.

    Skips one line after ``begin unit_cell_cart`` (presumably a units line,
    e.g. 'bohr' -- TODO confirm), reads three lattice vectors, scales by
    0.52917720859 (bohr -> angstrom), and returns
    (real_lattice, recip_lattice) with recip = 2*pi * inv(A)^T.

    Fix: defines the token-splitting helper locally instead of relying on a
    module-level ``parse_line_list`` that this module does not define.
    NOTE(review): 6.28318 is a truncated 2*pi, kept for bit-compatibility.
    """
    parse_line_list = lambda line, delimiter, T: [T(y) for y in (x.strip() for x in line.strip().split(delimiter)) if y]
    reciprocal = lambda a: numpy.transpose(6.28318 * numpy.linalg.inv(a))
    real_lattice = numpy.zeros(shape=(3, 3))
    while 'begin unit_cell_cart' not in f.readline():
        pass
    f.readline()  # skip the line following the block opener
    for i in range(3):
        real_lattice[i] = parse_line_list(f.readline(), ' ', float)
    real_lattice = real_lattice * 0.52917720859
    return (real_lattice, reciprocal(real_lattice))
|
def parse_win(case_name, spinLable):
    """Parse lattice, mp_grid and kpoints from ``<case_name>.win<spinLable>``.

    Returns (real_lattice, recip_lattice, dimensions, kpoints).

    Fixes: the file is opened once inside a ``with`` block (the original
    opened and closed it three times and would leak a handle on a parser
    exception) and rewound with seek(0) between the three passes, which is
    equivalent to reopening since each parser reads from the current
    position forward.
    """
    file_name = case_name + '.win' + spinLable
    with open(file_name, 'r') as f:
        real_lattice, recip_lattice = parse_win_unit_cell_cart(f)
        f.seek(0)
        dimensions = parse_win_mp_grid(f)
        f.seek(0)
        kpoints = parse_win_kpoints(f)
    return (real_lattice, recip_lattice, dimensions, kpoints)
|
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Construct an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: Untokenized text of the first sequence; the only required
                text for single-sequence tasks.
            text_b: Optional untokenized text of the second sequence
                (sequence-pair tasks only).
            label: Optional label; set for train/dev examples, absent for
                test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
|
class InputFeatures(object):
    """A single set of features of data.

    Holds the padded model inputs for one example (token ids, attention
    mask, segment ids, label ids) plus optional extras carried alongside.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None, ori_label=None, subword=None):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        self.valid_ids = valid_ids
        self.label_mask = label_mask
        self.ori_label = ori_label
        self.subword = subword
|
def readfile(filename, schema='BIO', sep=' '):
    """Read a CoNLL-style token/label file into (sentence, label) pairs.

    Expected format, one token and its tag per line with a blank line
    between sentences::

        John B-PER
        Wick I-PER
        say O

    If ``schema`` is ``'IO'``, leading ``B-`` tags are rewritten to ``I-``;
    any other value reads tags unchanged.

    Fixes: the file handle is now closed via a ``with`` block (it used to
    leak) and dead trailing list resets were removed.
    NOTE(review): ``sep`` is accepted but unused -- tokens are split on any
    whitespace; confirm against callers before wiring it through.
    """
    data = []
    sentence = []
    label = []
    with open(filename) as f:
        for line in f:
            if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == '\n':
                # Sentence boundary: flush the accumulated tokens, if any.
                if len(sentence) > 0:
                    data.append((sentence, label))
                    sentence = []
                    label = []
                continue
            splits = line.strip().split()
            sentence.append(splits[0])
            if schema == 'IO' and splits[-1].startswith('B-'):
                label.append('I-{}'.format(splits[-1][2:]))
            else:
                label.append(splits[-1])
    if len(sentence) > 0:
        # File did not end with a blank line: flush the final sentence.
        data.append((sentence, label))
    return data
|
def collect_label_list(data_path, label_type='fine', sep='\t'):
    """Collect the unique label set from a CoNLL-style file, in first-seen order.

    ``label_type='fine'`` keeps the part after the last '-' of each tag
    (e.g. 'B-PER' -> 'PER'); any other value keeps the part before the
    first '-' (e.g. 'B-PER' -> 'B').

    Fix: the file handle is now closed via a ``with`` block (it used to leak).
    """
    label_list = []
    with open(data_path) as f:
        for line in f:
            if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == '\n':
                continue
            splits = line.strip().split(sep)
            if label_type == 'fine':
                label = splits[-1].split('-')[-1]
            else:
                label = splits[-1].split('-')[0]
            if label not in label_list:
                label_list.append(label)
    return label_list
|
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_examples(self, data_path):
        """Get a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_labels(self):
        """Get the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_file(cls, input_file, schema='BIO', sep=' ', quotechar=None):
        """Read a tab separated value file."""
        return readfile(input_file, schema, sep)

    def _create_examples(self, lines, set_type):
        """Wrap (sentence, label) pairs as InputExample objects."""
        examples = []
        for i, (sentence, label) in tqdm(enumerate(lines), desc='Create {} examples'.format(set_type)):
            examples.append(InputExample(
                guid='%s-%s' % (set_type, i),
                text_a=' '.join(sentence),
                text_b=None,
                label=label,
            ))
        return examples
|
class NerGeneralProcessor(DataProcessor):
    'Processor for the general ner data set.'

    def get_examples(self, data_path, schema='IO', sep=' ', data_type='train', label_type='fine'):
        """Read the file at `data_path` and wrap its sentences as examples."""
        lines = self._read_file(data_path, schema=schema, sep=sep)
        return self._create_examples(lines, data_type)

    def get_label_map(self, dataset=''):
        """Return a mapping dict from label to label token."""
        ontonotes = {'I-CARDINAL': 'three', 'I-DATE': 'years', 'I-EVENT': 'Christmas', 'I-FAC': 'their', 'I-GPE': 'China', 'I-LANGUAGE': 'language', 'I-LAW': 'this', 'I-LOC': 'South', 'I-MONEY': 'millions', 'I-NORP': 'Arab', 'I-ORDINAL': 'second', 'I-ORG': 'Corporation', 'I-PERCENT': 'percent', 'I-PERSON': 'John', 'I-PRODUCT': 'ship', 'I-QUANTITY': 'feet', 'I-TIME': 'evening', 'I-WORK_OF_ART': 'with'}
        conll = {'I-LOC': 'Australia', 'I-PER': 'John', 'I-ORG': 'Company', 'I-MISC': 'German'}
        labels_map = {'ontonotes': ontonotes, 'conll': conll}
        return labels_map[dataset]
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert `InputExample`s into padded `InputFeatures` of length
    max_seq_length, aligning word-level labels to word-piece tokens."""
    # Label string -> index.
    label_map = {label: i for (i, label) in enumerate(label_list, 0)}
    features = []
    for (ex_index, example) in tqdm(enumerate(examples), desc='Examples2Features'):
        textlist = example.text_a.split(' ')
        labellist = example.label
        tokens = []
        labels = []
        valid = []       # 1 on the first sub-token of each word, else 0
        label_mask = []  # mirrors valid before special tokens are added
        for (i, word) in enumerate(textlist):
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            label_1 = labellist[i]
            for m in range(len(token)):
                if (m == 0):
                    # Only the first word-piece carries the word's label.
                    labels.append(label_1)
                    valid.append(1)
                    label_mask.append(1)
                else:
                    labels.append('O')
                    valid.append(0)
                    label_mask.append(0)
        # Truncate, leaving room for [CLS] and [SEP].
        if (len(tokens) >= (max_seq_length - 1)):
            tokens = tokens[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
            valid = valid[0:(max_seq_length - 2)]
            label_mask = label_mask[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        # Prepend [CLS]; -100 is the conventional ignore-index for the loss
        # (NOTE(review): confirm against the training loss configuration).
        ntokens.append('[CLS]')
        segment_ids.append(0)
        valid.insert(0, 0)
        label_mask.insert(0, 0)
        label_ids.append((- 100))
        for (i, token) in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            if (len(labels) > i):
                label_ids.append(label_map[labels[i]])
            else:
                # NOTE(review): this fallback indexes labels[i] with
                # len(labels) <= i, which would raise IndexError — confirm
                # whether the branch is reachable / intended.
                print(labels[i])
        # Append [SEP].
        ntokens.append('[SEP]')
        segment_ids.append(0)
        valid.append(0)
        label_mask.append(0)
        label_ids.append((- 100))
        input_ids = tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = ([1] * len(input_ids))
        assert (len(label_mask) == len(input_ids))
        # Zero-pad everything to max_seq_length.
        while (len(input_ids) < max_seq_length):
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append((- 100))
            # NOTE(review): padding positions are marked valid=1 while all
            # other padding is 0 — confirm this asymmetry is intentional.
            valid.append(1)
            label_mask.append(0)
        while (len(label_ids) < max_seq_length):
            label_ids.append((- 100))
            label_mask.append(0)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(label_ids) == max_seq_length)
        assert (len(valid) == max_seq_length)
        assert (len(label_mask) == max_seq_length)
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_ids, valid_ids=valid, label_mask=label_mask))
    return features
|
def convert_examples_to_features_lm(examples, label_map, max_seq_length, tokenizer, subword_map):
    """Build LM-style features where each subword's target token is either the
    subword itself (for 'O' positions) or the label word for its tag.

    label_map maps tags to label words, e.g. {'I-PER': 'person', ...}.
    NOTE(review): `subword_map` is accepted but never used here — confirm.
    """
    # Integer ids for the original tag set; 'O' is pinned to 0.
    ori_label_map = {tag: pos + 1 for pos, tag in enumerate(label_map.keys())}
    ori_label_map['O'] = 0
    features = []
    for _, example in tqdm(enumerate(examples), desc='Examples2Features'):
        words = example.text_a.split(' ')
        tags = example.label
        subword_list = []
        label_token_list = []
        subword_mask = []
        ori_label_list = []
        for tag, word in zip(tags, words):
            pieces = tokenizer.tokenize(word)
            for piece_idx, piece in enumerate(pieces):
                subword_list.append(piece)
                # Only the first piece of each word counts at decode time.
                subword_mask.append(1 if piece_idx == 0 else 0)
                ori_label_list.append(tag)
                if tag == 'O':
                    # Target is the input subword itself.
                    label_token_list.append(tokenizer.convert_tokens_to_ids(piece))
                else:
                    # Target is the label word for this tag.
                    label_token_list.append(tokenizer.convert_tokens_to_ids(label_map[tag]))
        assert len(subword_list) == len(label_token_list)
        assert len(subword_list) == len(subword_mask)
        assert len(subword_list) == len(ori_label_list)
        # Truncate, leaving room for [CLS] and [SEP].
        if len(label_token_list) >= (max_seq_length - 1):
            keep = max_seq_length - 2
            subword_list = subword_list[:keep]
            label_token_list = label_token_list[:keep]
            subword_mask = subword_mask[:keep]
            ori_label_list = ori_label_list[:keep]
        subword_list = ['[CLS]'] + subword_list + ['[SEP]']
        label_token_list = ([tokenizer.convert_tokens_to_ids('[CLS]')]
                            + label_token_list
                            + [tokenizer.convert_tokens_to_ids('[SEP]')])
        subword_mask = [0] + subword_mask + [0]
        ori_label_list = ['O'] + ori_label_list + ['O']
        input_ids = tokenizer.convert_tokens_to_ids(subword_list)
        label_ids = label_token_list
        ori_label_ids = [ori_label_map[tag] for tag in ori_label_list]
        segment_ids = [0] * len(subword_list)
        input_mask = [1] * len(subword_list)
        # Pad everything out to max_seq_length (label pad is -100, tag pad 0).
        pad = max_seq_length - len(input_ids)
        input_ids += [0] * pad
        input_mask += [0] * pad
        subword_mask += [0] * pad
        segment_ids += [0] * pad
        label_ids += [-100] * pad
        ori_label_ids += [0] * pad
        assert len(input_ids) == len(label_ids)
        assert len(input_ids) == len(input_mask)
        assert len(input_ids) == len(segment_ids)
        assert len(input_ids) == len(subword_mask)
        assert len(input_ids) == len(ori_label_ids)
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_ids, subword=subword_mask, ori_label=ori_label_ids))
    return features
|
def get_data_loader(train_examples, label_list, max_seq_length, tokenizer, batch_size, sampler):
    """Convert examples to BERT NER features and wrap them in a DataLoader.

    `sampler` is a sampler factory (e.g. RandomSampler) applied to the dataset.
    """
    feats = convert_examples_to_features(train_examples, label_list, max_seq_length, tokenizer)

    def as_long_tensor(rows):
        # All feature fields are integer lists of equal length.
        return torch.tensor(rows, dtype=torch.long)

    dataset = TensorDataset(
        as_long_tensor([f.input_ids for f in feats]),
        as_long_tensor([f.input_mask for f in feats]),
        as_long_tensor([f.segment_ids for f in feats]),
        as_long_tensor([f.label_id for f in feats]),
        as_long_tensor([f.valid_ids for f in feats]),
        as_long_tensor([f.label_mask for f in feats]),
    )
    return DataLoader(dataset, sampler=sampler(dataset), batch_size=batch_size)
|
def get_data_loader_lm(train_examples, label_map, max_seq_length, tokenizer, batch_size, sampler, subword_map):
    """Convert examples to LM-style features and wrap them in a DataLoader.

    Tensor order in the dataset: input_ids, input_mask, segment_ids,
    label_ids, subword mask, original label ids.
    """
    feats = convert_examples_to_features_lm(train_examples, label_map, max_seq_length, tokenizer, subword_map)

    def as_long_tensor(rows):
        # All feature fields are integer lists of equal length.
        return torch.tensor(rows, dtype=torch.long)

    dataset = TensorDataset(
        as_long_tensor([f.input_ids for f in feats]),
        as_long_tensor([f.input_mask for f in feats]),
        as_long_tensor([f.segment_ids for f in feats]),
        as_long_tensor([f.label_id for f in feats]),
        as_long_tensor([f.subword for f in feats]),
        as_long_tensor([f.ori_label for f in feats]),
    )
    return DataLoader(dataset, sampler=sampler(dataset), batch_size=batch_size)
|
def show_topk_frac(label_token_map, k=2, filter_ratio=0.8, lm_entity_freq=None):
    """Select up to k label words per entity label, ranked by a configurable score.

    A candidate token is kept only when it is (nearly) unambiguous for the
    label in the raw data: its frequency share under that label must exceed
    `filter_ratio`. Returns {label: {token: (score, raw_freq_dict)}}.

    NOTE(review): relies on the module-level `args` (raw_data_file,
    sort_method, ignore_single_appear); `lm_entity_freq` is currently unused;
    and if args.sort_method is none of 'timesup'/'data'/'LM', `sort_key` is
    never assigned and sorted() below raises NameError — confirm intended.
    """
    (entity_freq, data_label_token_map) = count_entity_freq(args.raw_data_file)
    label_map = {}
    for (label_name, token_frac_dict) in label_token_map.items():
        cnt = 0
        if (args.sort_method == 'timesup'):
            if (not args.ignore_single_appear):
                # Backfill tokens seen in the raw data but missing from the LM
                # fractions with a neutral weight of 1 (mutates the input dict).
                for token in data_label_token_map[label_name].keys():
                    if (token not in token_frac_dict):
                        token_frac_dict[token] = 1
                sort_key = (lambda x: (x[1] * entity_freq[x[0]].get(label_name, 1.0)))
            else:
                # Tokens never seen under this label in the data score 0.
                sort_key = (lambda x: (x[1] * entity_freq[x[0]].get(label_name, 0.0)))
        elif (args.sort_method == 'data'):
            # Rank purely by frequency in the raw data.
            token_frac_dict = data_label_token_map[label_name]
            sort_key = (lambda x: x[1])
        elif (args.sort_method == 'LM'):
            # Rank purely by the LM-derived fraction.
            sort_key = (lambda x: x[1])
        for (token, frac) in sorted(token_frac_dict.items(), key=sort_key, reverse=True):
            if (label_name not in label_map):
                label_map[label_name] = {}
            # Single-character tokens and tokens unseen in the data are skipped.
            if ((len(token) > 1) and (token in entity_freq) and entity_freq[token]):
                entity_label_ratio = (entity_freq[token].get(label_name, 0) / sum(entity_freq[token].values()))
                if (entity_label_ratio > filter_ratio):
                    label_map[label_name][token] = (frac, entity_freq[token])
                    cnt += 1
            if (cnt >= k):
                break
    return label_map
|
def filter_is_overlap(token, label_name, label_filter, entity_label_map):
    """Return True iff `token` is usable as a label word for `label_name`.

    Rejects short tokens (<= 3 chars), tokens in `label_filter`, subword
    pieces (containing '##'), and tokens already claimed by another label
    in `entity_label_map`.
    """
    # Guard clauses: cheap lexical rejections first.
    if len(token) <= 3 or token in label_filter or '##' in token:
        return False
    # Reject if any *other* label already lists this token.
    for other_label, claimed_tokens in entity_label_map.items():
        if other_label != label_name and token in claimed_tokens:
            return False
    return True
|
def collect_entity_token(data_path):
    """Collect, per label, the ordered list of distinct surface tokens.

    Reads a CoNLL-style file (one "token ... tag" per line); 'B-'/'I-'
    prefixes are stripped so tokens group under the bare entity type, and
    'O' tokens group under 'O'. Blank lines and '-DOCSTART- -X- -X- O'
    headers are skipped.

    Returns {label: [token, ...]} preserving first-seen order.
    """
    label_map = {}
    # Per-label set mirror of label_map for O(1) dedup — the original
    # `token not in list` check was O(n) per line (quadratic overall).
    seen = {}
    with open(data_path, 'r') as f:
        for row in f:
            item = row.strip()
            if (item == '') or (item == '-DOCSTART- -X- -X- O'):
                continue
            splits = item.split()
            token = splits[0]
            label = splits[-1]
            if label != 'O':
                label = label[2:]
            if label not in label_map:
                label_map[label] = []
                seen[label] = set()
            if token not in seen[label]:
                seen[label].add(token)
                label_map[label].append(token)
    return label_map
|
def count_entity_freq(data_path):
    """Count word/label co-occurrences in a CoNLL-style file.

    Returns (entity_freq, label_map) where entity_freq[word][label] and
    label_map[label][word] both hold the same counts, indexed the two ways.
    'B-'/'I-' prefixes are stripped; blank and '-DOCSTART-' lines are skipped.
    """
    entity_freq = collections.defaultdict(dict)
    label_map = collections.defaultdict(dict)
    with open(data_path, 'r') as f:
        for raw in f.readlines():
            # len < 2 means an empty line (just '\n').
            if len(raw) < 2 or '-DOCSTART-' in raw:
                continue
            parts = raw.strip().split()
            word, label = parts[0], parts[-1]
            if label != 'O':
                label = label[2:]
            word_counts = entity_freq[word]
            word_counts[label] = word_counts.get(label, 0) + 1
            label_counts = label_map[label]
            label_counts[word] = label_counts.get(word, 0) + 1
    return (entity_freq, label_map)
|
def count_entity_freq_roberta(data_path):
    """Like count_entity_freq, but words after the first in a sentence get the
    RoBERTa byte-BPE space marker 'Ġ' prepended.

    A blank or '-DOCSTART-' line starts a new sentence (next word unprefixed).
    Returns (entity_freq, label_map) indexed word->label and label->word.
    """
    entity_freq = collections.defaultdict(dict)
    label_map = collections.defaultdict(dict)
    sentence_start = True
    with open(data_path, 'r') as f:
        for raw in f.readlines():
            # len < 2 means an empty line (just '\n'); both cases reset the
            # sentence boundary.
            if len(raw) < 2 or '-DOCSTART-' in raw:
                sentence_start = True
                continue
            parts = raw.strip().split()
            word, label = parts[0], parts[-1]
            if not sentence_start:
                word = 'Ġ' + word
            if label != 'O':
                label = label[2:]
            entity_freq[word][label] = entity_freq[word].get(label, 0) + 1
            label_map[label][word] = label_map[label].get(word, 0) + 1
            sentence_start = False
    return (entity_freq, label_map)
|
def get_lm_entity_freq(label_frac):
    """Invert {label: {token: freq}} into a defaultdict {token: {label: freq}}."""
    entity_freq = collections.defaultdict(dict)
    # Flatten the nested mapping into (token, label, freq) triples, then index
    # by token first.
    triples = (
        (token, label, freq)
        for label, token_fracs in label_frac.items()
        for token, freq in token_fracs.items()
    )
    for token, label, freq in triples:
        entity_freq[token][label] = freq
    return entity_freq
|
def get_label_from_label_token(token_list, label_map, mode='IO'):
    """Map predicted label-word tokens back to tag strings.

    label_map = {'person': 'PER', 'location': 'LOC', ...}.
    In 'IO' mode every entity token becomes 'I-<TYPE>'; in 'BIO' mode a token
    whose type differs from the previous position's type becomes 'B-<TYPE>'.
    Tokens absent from label_map become 'O'.
    """
    labels = []
    previous = ''
    for token in token_list:
        if token in label_map:
            entity_type = label_map[token]
            # 'B-' only at the start of a new entity run (BIO mode).
            prefix = 'B' if (mode == 'BIO' and entity_type != previous) else 'I'
            labels.append('{}-{}'.format(prefix, entity_type))
            previous = entity_type
        else:
            labels.append('O')
            previous = 'O'
    return labels
|
def filter_item(item_list, subword_mask, input_mask):
    """Keep items at first-subword positions, stopping at the first padded one.

    Positions with subword_mask == 0 are skipped *before* the padding check,
    so a padded non-first subword does not terminate the scan.
    """
    kept = []
    for item, first_piece, valid in zip(item_list, subword_mask, input_mask):
        if first_piece == 0:
            # Continuation subword: ignore, keep scanning.
            continue
        if valid == 0:
            # First padded first-subword position: everything after is padding.
            break
        kept.append(item)
    return kept
|
def get_label_token_from_topk(pred_ids_topk, tokenizer, label_map, seq_len=None):
    """For each position, pick the highest-ranked predicted token that is a
    label word; fall back to the top-1 token when none of the top-k are.

    pred_ids_topk: (seq_len, k) tensor/array of predicted token ids, best first.
    label_map: {tag: label_word}; only its values matter here.
    seq_len: number of positions to decode (defaults to all rows).
    Returns a list of `seq_len` token strings.
    """
    # Fix: identity comparison with None, not `== None`.
    if seq_len is None:
        seq_len = pred_ids_topk.shape[0]
    # Hoist the label words into a set: O(1) membership per candidate instead
    # of scanning the dict-values view each time.
    label_words = set(label_map.values())
    pred_token = []
    for i in range(seq_len):
        top_k_tokens = tokenizer.convert_ids_to_tokens(pred_ids_topk[i][:])
        for token in top_k_tokens:
            if token in label_words:
                pred_token.append(token)
                break
        if len(pred_token) == i:
            # No label word among the top-k: fall back to the best prediction.
            pred_token.append(top_k_tokens[0])
    assert len(pred_token) == seq_len
    return pred_token
|
def get_label_from_ids(ori_label_ids, subword, input_mask, label_map):
    """Decode integer original-label ids back to tag strings, batch-wise.

    Ids follow the convention id(tag) = position-in-label_map + 1 with 0
    reserved for 'O'. Continuation subwords are skipped; the scan of a row
    stops at the first padded first-subword position.
    Returns a list (batch) of lists of tag strings.
    """
    ori_label_map = {key: idx + 1 for idx, key in enumerate(label_map.keys())}
    ids_label_map = {v: k for k, v in ori_label_map.items()}
    ids_label_map[0] = 'O'
    label_list = []
    for row_ids, row_subword, row_mask in zip(ori_label_ids, subword, input_mask):
        row_labels = []
        for value, first_piece, valid in zip(row_ids, row_subword, row_mask):
            if first_piece == 0:
                # Continuation subword: no label of its own.
                continue
            if valid == 0:
                # Padding reached: rest of the row is padding too.
                break
            row_labels.append(ids_label_map[value.item()])
        label_list.append(row_labels)
    return label_list
|
def get_label_from_logits(logits, label_ids, input_ids, subword, input_mask, tokenizer, label_map, k=1, mode='IO', print_topk=0):
    """Turn LM logits into tag sequences via label-word matching.

    For each sequence: take the top-k predicted tokens per position, pick the
    first that is a label word, filter to first-subword unpadded positions,
    and map label words back to tags. When print_topk > 0, also return the
    top-print_topk (tokens, scores) per kept position.

    Returns (pred_labels, pred_tokens, gold_tokens[, pred_tokens_topk]).
    """
    topk_ids = torch.topk(logits, k=k, dim=2).indices
    want_topk = print_topk > 0
    if want_topk:
        topk_values, topk_print_ids = torch.topk(logits, k=print_topk, dim=2)
    # label word -> bare entity type (strip the 'B-'/'I-' prefix); loop-invariant.
    reverse_label_map = {value: key[2:] for key, value in label_map.items()}
    pred_labels = []
    pred_tokens = []
    gold_tokens = []
    pred_tokens_top5 = []
    for i in range(label_ids.shape[0]):
        gold = tokenizer.convert_ids_to_tokens(input_ids[i])
        pred = get_label_token_from_topk(topk_ids[i], tokenizer, label_map)
        gold = filter_item(gold, subword_mask=subword[i], input_mask=input_mask[i])
        pred = filter_item(pred, subword_mask=subword[i], input_mask=input_mask[i])
        if want_topk:
            rows = [(tokenizer.convert_ids_to_tokens(word_ids), values)
                    for word_ids, values in zip(topk_print_ids[i], topk_values[i])]
            pred_tokens_top5.append(filter_item(rows, subword_mask=subword[i], input_mask=input_mask[i]))
        labels = get_label_from_label_token(pred, reverse_label_map, mode)
        assert len(gold) == len(pred)
        assert len(gold) == len(labels)
        gold_tokens.append(gold)
        pred_tokens.append(pred)
        pred_labels.append(labels)
    if want_topk:
        return (pred_labels, pred_tokens, gold_tokens, pred_tokens_top5)
    return (pred_labels, pred_tokens, gold_tokens)
|
def parse_args():
    """Parse command-line arguments for NER finetuning with Accelerate.

    Returns the parsed argparse.Namespace. Validates that a data source was
    given and that any data files are csv/json, and creates --output_dir if
    requested.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task (NER) with accelerate library')
    # Data source: either a hub dataset or local train/validation files.
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--text_column_name', type=str, default=None, help='The column name of text to input in the file (a csv or JSON file).')
    parser.add_argument('--label_column_name', type=str, default=None, help='The column name of label to input in the file (a csv or JSON file).')
    # Tokenization / padding behavior.
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lenght` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    # Model / tokenizer selection.
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    # Optimization hyperparameters.
    parser.add_argument('--per_device_train_batch_size', type=int, default=4, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    # Output / reproducibility / task options.
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
    parser.add_argument('--label_all_tokens', action='store_true', help='Setting labels of all special tokens to -100 and thus PyTorch will ignore them.')
    parser.add_argument('--return_entity_level_metrics', action='store_true', help='Indication whether entity level metrics are to be returner.')
    parser.add_argument('--task_name', type=str, default='ner', choices=['ner', 'pos', 'chunk'], help='The name of the task.')
    parser.add_argument('--debug', action='store_true', help='Activate debug mode and run training only with a subset of data.')
    parser.add_argument('--label_schema', type=str, default='BIO')
    parser.add_argument('--label_list', type=str, default=None, help='Path of label list.')
    args = parser.parse_args()
    # Sanity checks: need some data source, and files must be csv/json.
    if ((args.task_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a task name or a training/validation file.')
    else:
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if (args.output_dir is not None):
        os.makedirs(args.output_dir, exist_ok=True)
    return args
|
def main():
    """Train and evaluate a token-classification (NER) model with HF Accelerate.

    End-to-end script body: parses args, loads the dataset, tokenizes and
    aligns labels, trains with gradient accumulation, evaluates every epoch
    with seqeval, writes the best epoch's predictions, and saves the model.
    """
    args = parse_args()
    # Accelerator handles device placement and (optional) distributed setup.
    accelerator = Accelerator()
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state)
    # Only the local main process logs at INFO; replicas stay quiet.
    logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # --- Data loading: either a hub dataset or local csv/json files. ---
    if (args.dataset_name is not None):
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    else:
        data_files = {}
        if (args.train_file is not None):
            data_files['train'] = args.train_file
        if (args.validation_file is not None):
            data_files['validation'] = args.validation_file
        extension = args.train_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files)
    if args.debug:
        # Debug mode: shrink every split to 100 examples.
        for split in raw_datasets.keys():
            raw_datasets[split] = raw_datasets[split].select(range(100))
    if (raw_datasets['train'] is not None):
        column_names = raw_datasets['train'].column_names
        features = raw_datasets['train'].features
    else:
        column_names = raw_datasets['validation'].column_names
        features = raw_datasets['validation'].features
    # Resolve which columns hold the input tokens and the tags.
    if (args.text_column_name is not None):
        text_column_name = args.text_column_name
    elif ('tokens' in column_names):
        text_column_name = 'tokens'
    else:
        text_column_name = column_names[0]
    if (args.label_column_name is not None):
        label_column_name = args.label_column_name
    elif (f'{args.task_name}_tags' in column_names):
        label_column_name = f'{args.task_name}_tags'
    else:
        label_column_name = column_names[1]

    def get_label_list(labels):
        # NOTE(review): the `labels` argument is ignored — the label list is
        # always read from the file given by --label_list. Confirm --label_list
        # is always passed when labels are not ClassLabel-encoded.
        with open(args.label_list, 'r') as f:
            label_list = [l.strip() for l in f.readlines()]
        return label_list
    if isinstance(features[label_column_name].feature, ClassLabel):
        # Labels already integer-encoded by the dataset.
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(raw_datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    # --- Config / tokenizer / model construction. ---
    if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name, num_labels=num_labels)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    tokenizer_name_or_path = (args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
    if (not tokenizer_name_or_path):
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if (config.model_type in {'gpt2', 'roberta'}):
        # Byte-level BPE tokenizers need a prefix space for pre-split words.
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True)
    else:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, do_lower_case=False)
    if args.model_name_or_path:
        model = AutoModelForTokenClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForTokenClassification.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    padding = ('max_length' if args.pad_to_max_length else False)

    def tokenize_and_align_labels(examples):
        """Tokenize pre-split words and align per-word tags to subwords.

        Produces two parallel label fields: 'labels' with tags coerced to
        their 'I-' form (IO scheme) for training, and 'ori_labels' with the
        original tag ids. Special tokens and (unless --label_all_tokens)
        continuation subwords get -100 so the loss ignores them.
        """
        tokenized_inputs = tokenizer(examples[text_column_name], max_length=args.max_length, padding=padding, truncation=True, is_split_into_words=True)
        labels = []
        ori_labels = []
        for (i, label) in enumerate(examples[label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            ori_label_ids = []
            for word_idx in word_ids:
                if (word_idx is None):
                    # Special token ([CLS]/[SEP]/pad): ignored by the loss.
                    label_ids.append((- 100))
                    ori_label_ids.append((- 100))
                elif (word_idx != previous_word_idx):
                    # First subword of a word carries the word's tag.
                    ori_label_ids.append(label_to_id[label[word_idx]])
                    label_ids.append(label_to_id[(('I-' + label[word_idx][2:]) if (label[word_idx] != 'O') else label[word_idx])])
                else:
                    # Continuation subword: labeled only with --label_all_tokens.
                    ori_label_ids.append((label_to_id[label[word_idx]] if args.label_all_tokens else (- 100)))
                    label_ids.append((label_to_id[(('I-' + label[word_idx][2:]) if (label[word_idx] != 'O') else label[word_idx])] if args.label_all_tokens else (- 100)))
                previous_word_idx = word_idx
            labels.append(label_ids)
            ori_labels.append(ori_label_ids)
        tokenized_inputs['labels'] = labels
        tokenized_inputs['ori_labels'] = ori_labels
        return tokenized_inputs
    processed_raw_datasets = raw_datasets.map(tokenize_and_align_labels, batched=True, remove_columns=raw_datasets['train'].column_names, desc='Running tokenizer on dataset')
    train_dataset = processed_raw_datasets['train']
    eval_dataset = processed_raw_datasets['validation']
    # Log a few random samples as a sanity check.
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    if args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        # Dynamic padding; pad to a multiple of 8 for fp16 tensor cores.
        data_collator = DataCollatorForLMTokanClassification(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # Standard HF practice: no weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    device = accelerator.device
    model.to(device)
    (model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
    # Derive steps/epochs from whichever of the two the user specified.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    else:
        args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
    metric = load_metric('seqeval')

    def switch_to_BIO(labels):
        """Convert an IO tag sequence to BIO by marking entity starts with 'B-'."""
        past_label = 'O'
        labels_BIO = []
        for label in labels:
            if (label.startswith('I-') and ((past_label == 'O') or (past_label[2:] != label[2:]))):
                labels_BIO.append(('B-' + label[2:]))
            else:
                labels_BIO.append(label)
            past_label = label
        return labels_BIO

    def get_labels(predictions, references, tokens):
        """Strip ignored (-100) positions and map ids back to tags/tokens."""
        if (device.type == 'cpu'):
            y_pred = predictions.detach().clone().numpy()
            y_true = references.detach().clone().numpy()
            x_tokens = tokens.detach().clone().numpy()
        else:
            y_pred = predictions.detach().cpu().clone().numpy()
            y_true = references.detach().cpu().clone().numpy()
            x_tokens = tokens.detach().cpu().clone().tolist()
        true_predictions = [[label_list[p] for (p, l) in zip(pred, gold_label) if (l != (- 100))] for (pred, gold_label) in zip(y_pred, y_true)]
        if (args.label_schema == 'IO'):
            # seqeval expects BIO; convert IO predictions before scoring.
            true_predictions = list(map(switch_to_BIO, true_predictions))
        true_labels = [[label_list[l] for (p, l) in zip(pred, gold_label) if (l != (- 100))] for (pred, gold_label) in zip(y_pred, y_true)]
        ori_tokens = [[tokenizer.convert_ids_to_tokens(t) for (p, l, t) in zip(pred, gold_label, token) if (l != (- 100))] for (pred, gold_label, token) in zip(y_pred, y_true, x_tokens)]
        return (true_predictions, true_labels, ori_tokens)

    def compute_metrics():
        """Flush the seqeval accumulator and flatten the result dict."""
        results = metric.compute()
        if args.return_entity_level_metrics:
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    # Nested per-entity dicts become '<entity>_<metric>' keys.
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f' Num examples = {len(train_dataset)}')
    logger.info(f' Num Epochs = {args.num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {args.max_train_steps}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    # -1 is a sentinel; replaced by the best epoch's eval-metric dict.
    best_metric = (- 1)
    for epoch in range(args.num_train_epochs):
        model.train()
        for (step, batch) in enumerate(train_dataloader):
            # 'ori_labels' is only needed for evaluation; drop it before forward.
            ori_labels = batch.pop('ori_labels')
            outputs = model(**batch)
            loss = outputs.loss
            loss = (loss / args.gradient_accumulation_steps)
            accelerator.backward(loss)
            # Step the optimizer every accumulation boundary and at epoch end.
            if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            if (completed_steps >= args.max_train_steps):
                break
        # --- Per-epoch evaluation. ---
        model.eval()
        start = time.time()
        token_list = []
        y_true = []
        y_pred = []
        for (step, batch) in enumerate(eval_dataloader):
            labels = batch.pop('ori_labels')
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=(- 1))
            token_labels = batch.pop('input_ids')
            if (not args.pad_to_max_length):
                # Dynamic padding: align sequence lengths across processes
                # before gathering.
                predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=(- 100))
                labels = accelerator.pad_across_processes(labels, dim=1, pad_index=(- 100))
                token_labels = accelerator.pad_across_processes(token_labels, dim=1, pad_index=(- 100))
            predictions_gathered = accelerator.gather(predictions)
            labels_gathered = accelerator.gather(labels)
            token_labels_gathered = accelerator.gather(token_labels)
            (preds, refs, tokens) = get_labels(predictions_gathered, labels_gathered, token_labels_gathered)
            token_list.extend(tokens)
            y_true.extend(refs)
            y_pred.extend(preds)
            metric.add_batch(predictions=preds, references=refs)
        eval_metric = compute_metrics()
        print('Decoding time: {}'.format((time.time() - start)))
        # Per-entity reporting: every '<label>_f1' key implies matching
        # precision/recall/number keys (needs --return_entity_level_metrics).
        for key in eval_metric.keys():
            if (('f1' in key) and ('overall' not in key)):
                label = key[:(- 3)]
                print('{}: {}, {}: {}, {}: {}, {}: {}'.format((label + '_precision'), eval_metric[(label + '_precision')], (label + '_recall'), eval_metric[(label + '_recall')], (label + '_f1'), eval_metric[(label + '_f1')], (label + '_number'), eval_metric[(label + '_number')]))
        label = 'overall'
        print('{}: {}, {}: {}, {}: {}, {}: {}'.format((label + '_precision'), eval_metric[(label + '_precision')], (label + '_recall'), eval_metric[(label + '_recall')], (label + '_f1'), eval_metric[(label + '_f1')], (label + '_accuracy'), eval_metric[(label + '_accuracy')]))
        if ((best_metric == (- 1)) or (best_metric['overall_f1'] < eval_metric['overall_f1'])):
            # New best epoch: remember its metrics and dump its predictions.
            best_metric = eval_metric
            with open(os.path.join(args.output_dir, 'predictions.txt'), 'w') as f:
                for i in range(len(y_true)):
                    for j in range(len(y_true[i])):
                        f.write(f'''{token_list[i][j]} {y_true[i][j]} {y_pred[i][j]}
''')
                    f.write('\n')
    print('Finish training, best metric: ')
    print(best_metric)
    if (args.output_dir is not None):
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
        tokenizer.save_pretrained(args.output_dir)
|
class DataCollatorForLMTokanClassification(DataCollatorForTokenClassification):
    """Token-classification collator that also pads the 'ori_labels' field.

    Behaves like DataCollatorForTokenClassification, but pads both 'labels'
    and 'ori_labels' to the batch sequence length with label_pad_token_id and
    returns all fields as int64 tensors.
    """

    def __call__(self, features):
        label_key = ('label' if ('label' in features[0].keys()) else 'labels')
        labels = [feature[label_key] for feature in features] if label_key in features[0].keys() else None
        ori_labels = [feature['ori_labels'] for feature in features] if 'ori_labels' in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Without labels we can let the tokenizer tensorize directly.
            return_tensors=('pt' if (labels is None) else None),
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['input_ids']).shape[1]
        pad_id = self.label_pad_token_id
        if self.tokenizer.padding_side == 'right':
            batch['labels'] = [label + [pad_id] * (sequence_length - len(label)) for label in labels]
            batch['ori_labels'] = [label + [pad_id] * (sequence_length - len(label)) for label in ori_labels]
        else:
            # Left padding: the pad ids go before the labels.
            batch['labels'] = [[pad_id] * (sequence_length - len(label)) + label for label in labels]
            batch['ori_labels'] = [[pad_id] * (sequence_length - len(label)) + label for label in ori_labels]
        return {k: torch.tensor(v, dtype=torch.int64) for (k, v) in batch.items()}
|
def sample_data(data_path, output_path, k=10):
    """Sample a few-shot subset with at most k occurrences of each 'B-' label.

    Reads one dict literal per line from `data_path` (each with a 'label'
    list), shuffles, and greedily keeps sentences longer than 10 labels whose
    entities would not push any 'B-' label past k occurrences. The kept rows
    are written to `output_path` as JSON lines.

    Returns {B-label: selected count}.
    """
    import ast  # local import: used for safe literal parsing below

    # Read everything up front so the input file is closed before processing
    # (the original held it open for the whole function, including writing).
    with open(data_path, 'r') as f:
        data = f.readlines()
    random.shuffle(data)
    few_shot_data = []
    label_cnt_dict = {}
    for row in data:
        # Security: rows are dict literals; ast.literal_eval replaces the
        # original eval() so arbitrary expressions in the data file cannot
        # execute code. Literal-only data parses identically.
        item = ast.literal_eval(row)
        label = item['label']
        # Skip very short sentences.
        if len(label) <= 10:
            continue
        is_add = True
        temp_cnt_dict = {}
        for l in label:
            if l.startswith('B-'):
                if l not in temp_cnt_dict:
                    temp_cnt_dict[l] = 0
                temp_cnt_dict[l] += 1
                if l not in label_cnt_dict:
                    label_cnt_dict[l] = 0
                # Adding this sentence would exceed the per-label budget.
                if (label_cnt_dict[l] + temp_cnt_dict[l]) > k:
                    is_add = False
        # Sentences with no entities are never sampled.
        if len(temp_cnt_dict) == 0:
            is_add = False
        if is_add:
            few_shot_data.append(item)
            for key in temp_cnt_dict.keys():
                label_cnt_dict[key] += temp_cnt_dict[key]
    with open(output_path, 'w') as wf:
        for row in few_shot_data:
            json.dump(row, wf)
            wf.write('\n')
    return label_cnt_dict
|
# NOTE(review): trailing non-Python residue from a dataset-viewer page,
# preserved below as comments so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.