repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
benchmarking_graph | benchmarking_graph-main/src/models.py | from functools import partial
import jax
import jax.numpy as jnp
from jax import lax, random, vmap
from .io import loadfile, savefile
def initialize_mlp(sizes, key, affine=[False], scale=1.0):
""" Initialize the weights of all layers of a linear layer network """
keys = random.split(key, len(sizes))
# Initialize a single layer with Gaussian weights - helper function
if len(affine) != len(sizes):
affine = [affine[0]]*len(sizes)
affine[-1] = True
def initialize_layer(m, n, key, affine=True, scale=1e-2):
w_key, b_key = random.split(key)
if affine:
return scale * random.normal(w_key, (n, m)), 0 * random.normal(b_key, (n,))
else:
return scale * random.normal(w_key, (n, m)), scale * random.normal(b_key, (n,))
return [initialize_layer(m, n, k, affine=a, scale=scale) for m, n, k, a in zip(sizes[:-1], sizes[1:], keys, affine)]
def SquarePlus(x):
return lax.mul(0.5, lax.add(x, lax.sqrt(lax.add(lax.square(x), 4.))))
def ReLU(x):
    """Rectified Linear Unit: elementwise max(x, 0)."""
    return jnp.maximum(x, 0)
def layer(params, x):
    """Affine map for a single sample: w @ x + b, with params == (w, b)."""
    w, b = params
    return jnp.dot(w, x) + b
def forward_pass(params, x, activation_fn=SquarePlus):
    """Run one sample through the MLP.

    The activation is applied after every layer except the last, which is a
    plain affine transformation.
    """
    *hidden, output_layer = params
    h = x
    for p in hidden:
        h = activation_fn(layer(p, h))
    return layer(output_layer, h)
# Batched version of `forward_pass` (vmapped over the leading axis of x).
def batch_forward(params, x, activation_fn=SquarePlus):
    """Apply forward_pass to a batch of samples; params are shared (in_axes=None)."""
    single = partial(forward_pass, activation_fn=activation_fn)
    return vmap(single, in_axes=(None, 0), out_axes=0)(params, x)
def MSE(y_act, y_pred):
    """Mean squared error between predictions and targets."""
    diff = y_pred - y_act
    return jnp.mean(diff * diff)
def MME(y_act, y_pred):
    """Mean absolute error (despite the name) between predictions and targets."""
    return jnp.mean(jnp.abs(y_pred - y_act))
def batch_MSE(ys_act, ys_pred):
    """Per-sample MSE: vmap MSE over the leading batch axis of both arrays."""
    return vmap(MSE)(ys_act, ys_pred)
def loadmodel(filename):
    """Load a model saved by `savemodel`, rebuilding the list-of-(w, b) form.

    A "multimodel" metadata flag marks a dict of named sub-models.
    """
    model, metadata = loadfile(filename)
    if "multimodel" in metadata:
        return {name: _makedictmodel(sub) for name, sub in model.items()}, metadata
    return _makedictmodel(model), metadata
def _makedictmodel(model):
params = []
for ind in range(len(model)):
layer = model[f'layer_{ind}']
w, b = layer["w"], layer["b"]
params += [(w, b)]
return params
def savemodel(filename, params, metadata=None):
    """Save model params (list of (w, b), or a dict of such lists) with metadata.

    A dict of models is tagged with metadata["multimodel"] = True so that
    `loadmodel` can reconstruct the same structure.
    """
    # Fix: avoid the mutable default argument and the unidiomatic
    # `type(params) is type({})` check.
    metadata = {} if metadata is None else dict(metadata)
    if isinstance(params, dict):
        m = {k: _makemodeldict(v) for k, v in params.items()}
        metadata["multimodel"] = True
    else:
        m = _makemodeldict(params)
    savefile(filename, m, metadata=metadata)
def _makemodeldict(params):
m = {}
for ind, layer in enumerate(params):
w, b = layer
w_, b_ = jnp.array(w), jnp.array(b)
m[f'layer_{ind}'] = {'w': w_, 'b': b_}
return m
def _pprint_model(params, indent=""):
    """Print one line per layer: weight/bias shapes and fan-in --> fan-out."""
    for ind, (w, b) in enumerate(params):
        msg = f"{indent}#Layer {ind}: W ({w.shape}), b({b.shape}), {w.shape[1]} --> {w.shape[0]}"
        print(msg)
def pprint_model(params, Iindent=""):
    """Pretty-print a model, or a dict of named models (recursing with extra indent).

    Fix: use isinstance() instead of `type(params) != type({})`, which also
    handles dict subclasses correctly.
    """
    if isinstance(params, dict):
        for key, value in params.items():
            print(Iindent + ">" + key)
            pprint_model(value, Iindent=Iindent + "-")
    else:
        _pprint_model(params, indent=Iindent)
| 3,474 | 27.483607 | 120 | py |
benchmarking_graph | benchmarking_graph-main/src/io.py | """
"""
import pickle
import jax.numpy as jnp
def loadfile(filename, verbose=False):
    """Unpickle and return the (data, metadata) tuple written by `savefile`.

    Fix: the original `pickle.load(open(filename, "rb"))` leaked the file
    handle; a `with` block closes it deterministically.
    """
    if verbose:
        print(f"Loading (unknown)")
    with open(filename, "rb") as f:
        return pickle.load(f)
def savefile(filename, data, metadata=None, verbose=False):
    """Pickle (data, metadata) to `filename`.

    Fixes: close the file handle (the original `open(...)` leaked it) and
    avoid the mutable default argument for metadata.
    """
    if verbose:
        print(f"Saving (unknown)")
    metadata = {} if metadata is None else metadata
    with open(filename, "wb+") as f:
        pickle.dump((data, metadata), f)
def save_ovito(filename, traj, species=None, lattice=None, length=None):
    """Save trajectory as an ovito extended-xyz file.

    Args:
        filename (string): File path.
        traj (list of states): Trajectory; each state provides .position,
            .velocity and .force arrays of shape (N, dim).
        species: optional per-particle species column (defaults to all 1s).
        lattice: explicit lattice string, used verbatim if given.
        length: if given, builds a diagonal lattice `length * I(dim)`.
    """
    print(f"Saving ovito file: (unknown)")
    with open(filename, "w+") as ofile:
        for state in traj:
            N, dim = state.position.shape
            # Fix: compute the species column per state instead of clobbering
            # the `species` parameter (the old code reused a stale N).
            if species is None:
                spec = jnp.array([1] * N).reshape(-1, 1)
            else:
                spec = jnp.array(species).reshape(-1, 1)
            hinting = f"Properties=id:I:1:species:R:1:pos:R:{dim}:vel:R:{dim}:force:R:{dim}"
            tmp = jnp.eye(dim).flatten()
            Lattice = 'Lattice=""'  # fix: avoid NameError when neither lattice nor length given
            if length is not None:
                # Fix: the original wrote the literal text "{length}" (missing
                # f-prefix); now the diagonal entries carry the actual value.
                lattice = " ".join(
                    [(f"{length}" if i != 0 else "0") for i in tmp])
                Lattice = f'Lattice="{lattice}"'
            if lattice is not None:
                Lattice = f'Lattice="{lattice}"'
            ofile.write(f"{N}" + f"\n{Lattice} {hinting}\n")
            data = jnp.concatenate(
                [spec, state.position, state.velocity, state.force], axis=1)
            for j in range(N):
                line = "\t".join([str(item) for item in data[j, :]])
                ofile.write(f"{j+1}\t" + line + "\n")
| 1,685 | 31.423077 | 92 | py |
benchmarking_graph | benchmarking_graph-main/src/lnn1.py | from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, vmap
from numpy.core.fromnumeric import reshape
from .models import ReLU, SquarePlus, forward_pass
def MAP(input_fn):
    """Return input_fn vmapped over the leading axis of its first argument.

    :param input_fn: function to map
    :type input_fn: function
    """
    def mapped(x, *args, **kwargs):
        return vmap(lambda first: input_fn(first, *args, **kwargs), in_axes=0)(x)
    return mapped
def nonan(input_fn):
    """Decorator: replace NaN/inf in input_fn's output with finite numbers.

    :param input_fn: input function
    :type input_fn: function
    """
    def wrapped(*args, **kwargs):
        result = input_fn(*args, **kwargs)
        return jnp.nan_to_num(result)
    wrapped.__doc__ = input_fn.__doc__
    return wrapped
def describe_params(params_):
    """Return a human-readable summary of layer shapes.

    Accepts either a single [(w, b)] list or a dict of named models.
    """
    if not isinstance(params_, dict):
        return "\n".join(f"Layer {ind}\n\tW: {p[0].shape}, b: {p[1].shape}"
                         for ind, p in enumerate(params_))
    pieces = []
    for name, params in params_.items():
        body = "\n".join(f"\tLayer {ind}\n\tW: {p[0].shape}, b: {p[1].shape}"
                         for ind, p in enumerate(params))
        pieces.append(f"{name}\n" + body)
    return "".join(pieces)
def FFLNN(x, v, params):
    """Lagrangian L = T(v) - V(x), with the potential V given by a feed-forward net."""
    flat_x = x.reshape(-1,)
    potential = forward_pass(params, flat_x)[0]
    return _T(v) - potential
def LNN(x, v, params):
    """Fully learned Lagrangian: network applied to stacked (x, v).

    x: Vector
    v: Vector
    """
    flat_x = x.reshape(-1, )
    flat_v = v.reshape(-1, )
    stacked = jnp.vstack([flat_x, flat_v])
    return forward_pass(params, stacked)[0]
def _V(x, params):
    # Potential-energy placeholder; intended to be assigned/overridden at runtime
    # by the calling script (returns None as written).
    pass
def _T(v, mass=jnp.array([1.0])):
if len(mass) != len(v):
mass = mass[0]*jnp.ones((len(v)))
out = mass*jnp.square(v).sum(axis=1)
return 0.5*out.sum()
def _L(x, v, params):
    # Lagrangian placeholder; intended to be assigned/overridden at runtime by
    # the calling script (returns None as written).
    pass
def lagrangian(x, v, params):
    """Thin wrapper delegating to the module-level lagrangian implementation `_L`.

    x: Vector
    v: Vector
    """
    return _L(x, v, params)
def calM(x, v, params):
    # Mass matrix M = ∂²L/∂v² (Hessian of the lagrangian w.r.t. velocities).
    return jax.hessian(lagrangian, 1)(x, v, params)


jcalM = jit(calM)  # jitted mass matrix


def calMinv(x, v, params):
    # Pseudo-inverse of the mass matrix; pinv tolerates singular/ill-conditioned M.
    return jnp.linalg.pinv(calM(x, v, params))


jcalMinv = jit(calMinv)  # jitted inverse mass matrix
def acceleration(x, v, params):
    # Euler-Lagrange acceleration:
    #   q̈ = M⁻¹ (∂L/∂x − (∂²L/∂v∂x) v), with M = ∂²L/∂v² (via jcalMinv).
    # x, v are (n, Dim); everything is flattened to N = n*Dim for the solve.
    Dim = x.shape[1]
    N = x.shape[0]*Dim
    M_1 = jcalMinv(x, v, params).reshape(N, N)
    dx_L = jax.grad(lagrangian, 0)(x, v, params).reshape(N, 1)
    # Mixed second derivative ∂²L/∂v∂x (velocity-position coupling term).
    dxdv_L = jax.jacobian(jax.jacobian(lagrangian, 1),
                          0)(x, v, params).reshape(N, N)
    out = M_1 @ (dx_L - dxdv_L @ v.reshape(N, 1))
    return out.reshape(-1, Dim)
def accelerationTV(x, v, params):
    # Acceleration for a separable L = T(v) - V(x): the mass matrix comes from
    # the kinetic energy alone (Hessian of _T), so q̈ = M⁻¹ ∂L/∂x.
    Dim = x.shape[1]
    N = x.shape[0]
    M_1 = jnp.linalg.pinv(jax.hessian(_T, 0)(v).reshape(N*Dim, N*Dim))
    dx_L = jax.grad(lagrangian, 0)(x, v, params).reshape(-1, 1)
    out = M_1 @ (dx_L)
    return out.reshape(-1, Dim)
def accelerationFull(n, Dim, lagrangian=lagrangian, non_conservative_forces=None, external_force=None, constraints=None):
    """Build a constrained Euler-Lagrange acceleration function for n particles in Dim dims.

    Implements
        q̈ = M⁻¹(-C q̇ + Π + Υ - Aᵀ(AM⁻¹Aᵀ)⁻¹ (AM⁻¹(-C q̇ + Π + Υ + F) + Ȧ q̇) + F)
    where M = ∂²L/∂v², Π = ∂L/∂x, C = ∂²L/∂v∂x, A is the constraint Jacobian,
    Υ non-conservative forces and F external forces.

    :param lagrangian: L(x, v, params), defaults to the module-level `lagrangian`
    :return: fn(x, v, params) -> acceleration of shape (n, Dim)
    """
    def inv(x, *args, **kwargs):
        # pinv so singular mass/constraint matrices don't blow up.
        return jnp.linalg.pinv(x, *args, **kwargs)
    # Default to zero forces and a single trivially-zero constraint row.
    if non_conservative_forces == None:
        def non_conservative_forces(x, v, params): return 0
    if external_force == None:
        def external_force(x, v, params): return 0
    if constraints == None:
        def constraints(x, v, params): return jnp.zeros((1, n*Dim))
    eye = jnp.eye(n*Dim)  # kept for parity with variants that diagonalize M

    def dL_dv(R, V, params):
        return jax.grad(lagrangian, 1)(R.reshape(n, Dim),
                                       V.reshape(n, Dim), params).flatten()

    def d2L_dv2(R, V, params):
        return jax.jacobian(dL_dv, 1)(R, V, params)

    def fn(x, v, params):
        N = n*Dim
        # M⁻¹ = (∂²L/∂v²)⁻¹
        M = d2L_dv2(x.flatten(), v.flatten(), params)
        M_1 = inv(M)
        # Π = ∂L/∂x
        Π = jax.grad(lagrangian, 0)(x, v, params).reshape(
            N, 1)
        # C = ∂²L/∂v∂x
        C = jax.jacobian(jax.jacobian(lagrangian, 1),
                         0)(x, v, params).reshape(N, N)
        Υ = non_conservative_forces(x, v, params)
        F = external_force(x, v, params)
        A = constraints(x.reshape(-1), v.reshape(-1), params)
        Aᵀ = A.T
        AM_1 = A @ M_1
        v = v.reshape(N, 1)
        # Ȧ q̇ term: Jacobian of the constraints w.r.t. x contracted with v.
        Ax = jax.jacobian(constraints, 0)(x.reshape(-1), v.reshape(-1), None)
        Adot = Ax @ v.reshape(-1)
        # NOTE(review): `Adot @ v` mixes a (rows, N) product with v (N, 1);
        # shapes appear consistent only via broadcasting — confirm against the
        # constraint convention used by callers.
        xx = (AM_1 @ (-C @ v + Π + Υ + F) + Adot @ v)
        tmp = Aᵀ @ inv(AM_1 @ Aᵀ) @ xx
        out = M_1 @ (-C @ v + Π + Υ - tmp + F)
        return out.reshape(-1, Dim)
    return fn
def acceleration_GNODE(n, Dim, F_q_qdot, non_conservative_forces=None, constraints=None):
    """Build a constrained acceleration function driven by a learned force model.

    Implements q̈ = M⁻¹(F(q, q̇) - Aᵀ(AM⁻¹Aᵀ)⁻¹ (AM⁻¹ F(q, q̇) + Ȧ q̇)), where
    F_q_qdot(x, v, params) returns per-node [force | mass] columns that are
    split at Dim; the mass columns form a diagonal mass matrix.

    :return: fn(x, v, params) -> acceleration of shape (n, Dim)
    """
    def inv(x, *args, **kwargs):
        # pinv so singular matrices don't blow up.
        return jnp.linalg.pinv(x, *args, **kwargs)
    if constraints == None:
        def constraints(x, v, params): return jnp.zeros((1, n*Dim))
    eye = jnp.eye(n*Dim)  # kept for parity with other variants

    def fn(x, v, params):
        N = n*Dim
        # Split the network output into force columns and learned mass columns.
        _F_q_qdot, _mass = jnp.hsplit(F_q_qdot(x, v, params), [Dim])
        _F_q_qdot = _F_q_qdot.flatten()
        _mass = _mass.flatten()
        # NOTE(review): appending _mass to itself assumes the flattened mass
        # vector covers exactly N/2 entries — confirm F_q_qdot's output shape.
        masses = jnp.diag(jnp.append(_mass, _mass))
        M = masses
        M_1 = inv(M)
        A = constraints(x.reshape(-1), v.reshape(-1), params)
        Aᵀ = A.T
        AM_1 = A @ M_1
        v = v.reshape(N, 1)
        # Ȧ q̇ term from the constraint Jacobian.
        Ax = jax.jacobian(constraints, 0)(x.reshape(-1), v.reshape(-1), None)
        Adot = Ax @ v.reshape(-1)
        xx = (AM_1 @ _F_q_qdot + Adot @ v.reshape(-1))
        tmp = Aᵀ @ inv(AM_1 @ Aᵀ) @ xx
        out = M_1 @ (_F_q_qdot - tmp)
        return out.reshape(-1, Dim)
    return fn
def accelerationModified(x, v, params):
    # Like `acceleration`, but the inverse mass matrix is itself predicted by a
    # network (params["M_1"]) from the flattened velocities, while the
    # lagrangian terms use the potential-energy parameters params["PEF"].
    Dim = x.shape[1]
    N = x.shape[0]
    M_1 = forward_pass(params["M_1"], v.reshape(-1, ))
    M_1 = M_1.reshape(N*Dim, N*Dim)
    dx_L = jax.grad(lagrangian, 0)(x, v, params["PEF"]).reshape(-1, )
    dxdv_L = jax.jacobian(jax.jacobian(lagrangian, 1), 0)(
        x, v, params["PEF"]).reshape(N*Dim, N*Dim)
    F = (dx_L - dxdv_L @ v.reshape(-1, ))
    out = M_1 @ F
    return out.reshape(-1, Dim)
def force(x, v, params):
    """Generalized force: ∂L/∂x − (∂²L/∂v∂x) @ v."""
    pi = jax.grad(lagrangian, 0)(x, v, params)
    coupling = jax.jacobian(jax.jacobian(lagrangian, 1), 0)(x, v, params)
    return pi - coupling @ v
def prediction_fn(X, params):
    # Split the stacked phase-space input X into position/velocity halves and
    # evaluate the Euler-Lagrange acceleration.
    x, v = jnp.split(X, 2)
    return acceleration(x, v, params)


# Batched version of `prediction_fn`: shared params, vmapped over inputs.
batch_prediction = vmap(prediction_fn, in_axes=(None, 0), out_axes=0)
# PEFs
# =============================================
def useNN(norm=True):
    """Create an NN potential-energy function with an optional cutoff.

    :param norm: if True, the network sees only the norm of the input,
        defaults to True
    :type norm: bool, optional
    :return: NNP function f(x, params=None, cutoff=None) -> energy
    :rtype: function
    """
    if norm:
        def f(x, params=None, cutoff=None):
            x_ = jnp.linalg.norm(x, keepdims=True)
            # Beyond the cutoff the potential is frozen at its cutoff value.
            return jnp.where(x_ < cutoff,
                             forward_pass(params, x_, activation_fn=SquarePlus),
                             forward_pass(params, cutoff, activation_fn=SquarePlus))
        return f
    else:
        def f(x, params=None, cutoff=None):
            if cutoff is None:
                return forward_pass(params, x, activation_fn=SquarePlus)
            # Fix: jax.ops.index_update was removed from JAX; the functional
            # equivalent is the .at[...].set(...) indexed-update API.
            clamped = x.at[-1].set(cutoff)
            return jnp.where(x[-1] < cutoff,
                             forward_pass(params, x, activation_fn=SquarePlus),
                             forward_pass(params, clamped, activation_fn=SquarePlus))
        return f
def NNP(*args, **kwargs):
    """FFNN potential with cutoff (delegates to useNN() with its default norm=True).

    :param x: Inter-particle distance
    :type x: float
    :param params: NN parameters
    :param cutoff: potential cutoff, defaults to None
    :type cutoff: float, optional
    :return: energy
    :rtype: float
    """
    potential = useNN()
    return potential(*args, **kwargs)
def SPRING(x, stiffness=1.0, length=1.0):
    """Linear spring potential v = 0.5 * k * (|x| - l)^2.

    :param x: Inter-particle displacement vector
    :param stiffness: Spring stiffness constant, defaults to 1.0
    :param length: Equilibrium length, defaults to 1.0
    :return: energy
    """
    r = jnp.linalg.norm(x, keepdims=True)
    stretch = r - length
    return 0.5 * stiffness * stretch ** 2
def SPRING4(x, stiffness=1.0, length=1.0):
    """Non-linear spring potential v = 0.5 * k * (|x| - l)^4.

    :param x: Inter-particle displacement vector
    :param stiffness: Spring stiffness constant, defaults to 1.0
    :param length: Equilibrium length, defaults to 1.0
    :return: energy
    """
    r = jnp.linalg.norm(x, keepdims=True)
    stretch = r - length
    return 0.5 * stiffness * stretch ** 4
@ nonan
def GRAVITATIONAL(x, Gc=1.0):
    """Gravitational potential -Gc / |x| (NaN at the origin is zeroed by @nonan).

    :param x: Inter-particle displacement vector
    :param Gc: Gravitational constant, defaults to 1.0
    :return: energy
    """
    r = jnp.linalg.norm(x, keepdims=True)
    return -Gc / r
@ nonan
def VANDERWALLS(x, C=4.0):
    """Repulsive van der Waals term C / r^12 (singularity zeroed by @nonan).

    :param x: Interatomic displacement vector
    :param C: prefactor, defaults to 4.0
    :return: energy
    """
    r = jnp.linalg.norm(x, keepdims=True)
    return C / r ** 12
@ nonan
def x_6(x):
    """Inverse sixth power 1 / x^6 (division-by-zero NaN zeroed by @nonan)."""
    return 1.0 / x ** 6
@ nonan
def x_3(x):
    """Inverse third power 1 / x^3 (division-by-zero NaN zeroed by @nonan)."""
    return 1.0 / x ** 3
def LJ(x, sigma=1.0, epsilon=1.0):
    """Lennard-Jones (12-6) interatomic potential.

    Works on the squared distance: x_3(r^2) = 1/r^6, so
    s6 = (sigma/r)^6 and V = 4*eps*(s6^2 - s6).

    :param x: Interatomic displacement vector
    :param sigma: sigma, defaults to 1.0
    :param epsilon: epsilon, defaults to 1.0
    :return: energy
    """
    r2 = jnp.sum(jnp.square(x), keepdims=True)
    s6 = x_3(r2) * sigma ** 6
    return 4.0 * epsilon * (s6 ** 2 - s6)
# =============================================
def t1(displacement=lambda a, b: a-b):
    """Create transformation function using displacement function.

    Returns f(R) that computes the pairwise displacement matrix of R (n, Dim)
    and extracts one entry per unordered particle pair (strict lower-triangle
    indices), giving an array of shape (n*(n-1)/2, Dim).

    :param displacement: function computing the Euclidean displacement,
        defaults to lambda a, b: a - b
    :type displacement: Function, optional
    """
    def f(R):
        Dim = R.shape[1]
        # Full pairwise matrix via nested vmap; the outer vmap runs over the
        # SECOND argument, so dd[i, j] = displacement(R[j], R[i]) —
        # NOTE(review): confirm this orientation matches downstream use.
        dd = vmap(vmap(displacement, in_axes=(0, None)),
                  in_axes=(None, 0))(R, R)
        indexs = jax.numpy.tril_indices(R.shape[0], k=-1)
        # Gather one displacement per (i, j) pair below the diagonal.
        out = vmap(lambda i, j, dd: dd[i, j], in_axes=(
            0, 0, None))(indexs[0], indexs[1], dd)
        return out
    return f
def t2(q):
    """Center q about its column mean: q -> q - q.mean(axis=0).

    Fix: the original used `q -= ...`, which mutates a caller-supplied NumPy
    array in place; this version always returns a new array.

    :param q: Input array
    :type q: Array
    :return: Mean-centered array
    :rtype: Array
    """
    return q - q.mean(axis=0, keepdims=True)
def t3(q):
    """Identity transformation: return the input unchanged.

    :param q: Input array.
    :return: Same as input.
    """
    return q
# ================================
def cal_energy_parameters(params, states):
    """Compute (lagrangian, hamiltonian, kinetic energy) arrays over a trajectory.

    The hamiltonian is obtained as H = 2T - L.
    """
    kinetic = jnp.array([_T(state.velocity) for state in states])
    lagrangians = jnp.array([
        lagrangian(state.position.reshape(-1,), state.velocity.reshape(-1,), params)
        for state in states
    ])
    return lagrangians, 2 * kinetic - lagrangians, kinetic
def linear_mom_fn(states):
    """Magnitude of the total linear momentum (unit masses) for each state."""
    def magnitude(state):
        total = state.velocity.sum(axis=0)
        return jnp.sqrt(jnp.square(total).sum())
    return jnp.array([magnitude(state) for state in states])
def angular_mom_fn(states):
    """Magnitude of the total angular momentum (unit masses) for each state."""
    def magnitude(state):
        total = jnp.cross(state.position, state.velocity).sum(axis=0)
        return jnp.sqrt(jnp.square(total).sum())
    return jnp.array([magnitude(state) for state in states])
| 12,736 | 26.68913 | 149 | py |
benchmarking_graph | benchmarking_graph-main/src/fgn.py | from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, lax, random
from jax_md.nn import GraphNetEncoder
from jraph import GraphMapFeatures, GraphNetwork, GraphsTuple
from src.models import SquarePlus, forward_pass, initialize_mlp
class GraphEncodeNet():
    """Encode → N rounds of message passing → final decode, built from jraph nets.

    The edge aggregation into globals is disabled (constant zero) for both the
    propagation and final networks; globals are produced by the final net's
    GlobalFunction alone.
    """

    def __init__(self, N, embedding_fn, model_fn, final_fn):
        # N: number of message-passing rounds between encode and decode.
        self.N = N
        # Per-feature encoders (edge / node / global), each may be None.
        self._encoder = GraphMapFeatures(
            embedding_fn('EdgeEncoder'),
            embedding_fn('NodeEncoder'),
            embedding_fn('GlobalEncoder'))
        # Message-passing core, applied N times.
        self._propagation_network = GraphNetwork(
            model_fn('EdgeFunction'),
            model_fn('NodeFunction'),
            model_fn('GlobalFunction'), aggregate_edges_for_globals_fn=lambda *x: jnp.array([0.0]))
        # Read-out network producing the final graph (incl. globals).
        self._final = GraphNetwork(
            final_fn('EdgeFunction'),
            final_fn('NodeFunction'),
            final_fn('GlobalFunction'), aggregate_edges_for_globals_fn=lambda *x: jnp.array([0.0]))

    def __call__(self, graph):
        # encode -> N x propagate -> decode
        output = self._encoder(graph)
        for _ in range(self.N):
            output = self._propagation_network(output)
        output = self._final(output)
        return output
def cal(params, graph, mpass=1):
    """Run the full encode/process/decode graph network on `graph`.

    params holds per-stage MLP weights: "ee"/"ne" (edge/node encoders),
    "e"/"n" (message-passing edge/node updates), "g" (global read-out).
    Returns the processed GraphsTuple; node embeddings live under
    nodes["embed"], the scalar read-out under graph.globals.
    """
    ee_params = params["ee_params"]
    ne_params = params["ne_params"]
    e_params = params["e_params"]
    n_params = params["n_params"]
    g_params = params["g_params"]

    def node_em(nodes):
        # Concatenate all node feature fields, then embed each node.
        out = jnp.hstack([v for k, v in nodes.items()])

        def fn(out):
            return forward_pass(ne_params, out, activation_fn=SquarePlus)
        out = jax.vmap(fn)(out)
        return {"embed": out}

    def edge_em(edges):
        # Embed each edge from its scalar pairwise distance dij.
        out = edges["dij"]
        out = jax.vmap(lambda p, x: forward_pass(p, x.reshape(-1)),
                       in_axes=(None, 0))(ee_params, out)
        return {"embed": out}

    embedding = {
        "EdgeEncoder": edge_em,
        "NodeEncoder": node_em,
        "GlobalEncoder": None,
    }

    def embedding_fn(arg): return embedding[arg]

    def edge_fn(edges, sent_attributes, received_attributes, global_):
        # Edge update: MLP over [edge, sender, receiver] embeddings.
        out = jnp.hstack([edges["embed"], sent_attributes["embed"],
                          received_attributes["embed"]])
        out = jax.vmap(forward_pass, in_axes=(None, 0))(e_params, out)
        return {"embed": out}

    def node_fn(nodes, sent_attributes, received_attributes, global_):
        # Node update: MLP over [node, aggregated-sent, aggregated-received].
        out = jnp.hstack([nodes["embed"], sent_attributes["embed"],
                          received_attributes["embed"]])
        out = jax.vmap(forward_pass, in_axes=(None, 0))(n_params, out)
        return {"embed": out}

    model = {
        "EdgeFunction": edge_fn,
        "NodeFunction": node_fn,
        "GlobalFunction": None,
    }

    def model_fn(arg): return model[arg]

    # Final stage: pass node/edge features through unchanged; the global
    # read-out MLP consumes all node embeddings flattened together.
    final = {
        "EdgeFunction": lambda *x: x[0],
        "NodeFunction": lambda *x: x[0],
        "GlobalFunction": lambda node_attributes, edge_attribtutes, globals_:
        forward_pass(g_params, node_attributes["embed"].reshape(-1)),
    }

    def final_fn(arg): return final[arg]

    net = GraphEncodeNet(mpass, embedding_fn, model_fn, final_fn)
    graph = net(graph)
    return graph
def cal_energy(params, graph, **kwargs):
    """Total predicted energy: run the GN stack and sum the global outputs."""
    processed = cal(params, graph, **kwargs)
    return processed.globals.sum()
def cal_acceleration(params, graph, **kwargs):
    """Per-node acceleration decoded from the message-passed node embeddings."""
    processed = cal(params, graph, **kwargs)
    decoder = params["acc_params"]
    return jax.vmap(forward_pass, in_axes=(None, 0))(decoder,
                                                     processed.nodes["embed"])
def cal_cacceleration(params, graph, **kwargs):
    """Per-node [acceleration | mass] columns decoded from node embeddings."""
    processed = cal(params, graph, **kwargs)
    embed = processed.nodes["embed"]
    decode = jax.vmap(forward_pass, in_axes=(None, 0))
    acc = decode(params["acc_params"], embed)
    mass = decode(params["mass_params"], embed)
    return jnp.hstack([acc, mass])
def cal_delta(params, graph, **kwargs):
    """Per-node delta decoded from node embeddings via params['delta_params']."""
    processed = cal(params, graph, **kwargs)
    decoder = params["delta_params"]
    return jax.vmap(forward_pass, in_axes=(None, 0))(decoder,
                                                     processed.nodes["embed"])
def cal_deltap(params, graph, **kwargs):
    """Per-node momentum delta decoded via params['deltap_params']."""
    processed = cal(params, graph, **kwargs)
    decoder = params["deltap_params"]
    return jax.vmap(forward_pass, in_axes=(None, 0))(decoder,
                                                     processed.nodes["embed"])
def cal_deltav(params, graph, **kwargs):
    """Per-node velocity delta decoded via params['deltav_params']."""
    processed = cal(params, graph, **kwargs)
    decoder = params["deltav_params"]
    return jax.vmap(forward_pass, in_axes=(None, 0))(decoder,
                                                     processed.nodes["embed"])
def cal_lgn(params, graph, **kwargs):
    """Scalar output: sum of per-node decodings via params['lgn_params']."""
    processed = cal(params, graph, **kwargs)
    decoder = params["lgn_params"]
    per_node = jax.vmap(forward_pass, in_axes=(None, 0))(decoder,
                                                         processed.nodes["embed"])
    return per_node.sum()
| 5,146 | 31.371069 | 99 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-HGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return the names in `namespace` bound to `obj` (identity comparison)."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print `name: value` for each argument, resolving names via `namespace`.

    NOTE: the default namespace is captured at definition time (original quirk
    preserved).
    """
    for arg in args:
        label = namestr(arg, namespace)[0]
        print(f"{label}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
Zs_dot = jnp.concatenate([Vs,Fs], axis=1)
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Plain Euclidean displacement a - b (no periodic wrapping)."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=100
semilog=1
maxtraj=10
plotthings=True
redo=0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
PSYS = f"peridynamics"
TAG = f"HGN"
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    # Build (and create the directory for) the output path of `name` under
    # "{out_dir}/{system}-{tag}/{run}/", injecting the drag/training suffix
    # before the file extension. Relies on the script-level globals
    # ifdrag, trainm, PSYS, rname, randfilename, withdata and out_dir.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    # Optionally prefix the system name with the model it was trained on.
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[0]}"
    else:
        psys = PSYS
    # Insert `part` between the basename and its extension.
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]
    # Run directory: timestamped for fresh runs, "0" (optionally tagged with
    # the withdata marker) otherwise.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Shift positions by dR; velocities pass through unchanged."""
    shifted = R + dR
    return shifted, V
def OUT(f):
    # Decorator: route the `file` argument through _filename() (which builds
    # the run-directory path and creates it) before calling f; the `tag`
    # keyword selects the sub-directory and defaults to the script's TAG.
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []
import pandas as pd
for num in range(1000):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
origin_mass += [np.array(split_df[[5]]).astype('float64')]
origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)
origin_Zs_dot = jnp.concatenate([origin_Vs,origin_Fs], axis=1)
################################################
################### ML Model ###################
################################################
def H_energy_fn(params, graph):
    """Hamiltonian H = T + V from the graph-network energy decomposition."""
    _, potential, kinetic = cal_graph(params, graph, eorder=None, useT=True)
    return kinetic + potential
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
graph = jraph.GraphsTuple(**my_graph0_disc)
def dist(*args):
    """Euclidean distance: L2 norm of displacement(*args)."""
    delta = displacement(*args)
    return jnp.sqrt(jnp.square(delta).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
def acceleration_fn(params, graph):
    """Evaluate the learned graph network (single message pass) on `graph`."""
    return fgn.cal_lgn(params, graph, mpass=1)
def acc_fn(species):
    # Close over the template graph; `apply` refreshes the node positions,
    # velocities and the pairwise edge distances in place, then evaluates the
    # learned acceleration. NOTE(review): `species` is unused here — confirm
    # whether it was meant to update the node species field.
    state_graph = graph

    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        # Recompute per-edge distances for the new configuration.
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["H"])
def nndrag(v, params):
    """Learned drag: -|NN(v)| * v, guaranteed to oppose the velocity."""
    magnitude = jnp.abs(models.forward_pass(params, v.reshape(-1),
                                            activation_fn=models.SquarePlus))
    return -magnitude * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
#params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
    """ODE right-hand side for odeint: split z into (x, p) and evaluate the learned zdot."""
    position, momentum = jnp.split(z, 2)
    return zdot_model(position, momentum, params)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# constraints=None)
# def force_fn_model(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_model(R, V, params)
# else:
# return acceleration_fn_model(R, V, params)
# return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"perignode_trained_model_low.dil")[0]
def z0(x, p):
    """Stack positions and momenta into a single phase-space array."""
    return jnp.vstack([x, p])
def get_forward_sim(params=None, zdot_func=None, runs=10):
    # Build a forward simulator: integrate zdot_func from the initial state
    # z0(R, V) over runs*stride steps of size dt (script-level globals), and
    # return every stride-th integrator state.
    def fn(R, V):
        t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
        _z_out = ode.odeint(zdot_func, z0(R, V), t, params)
        return _z_out[0::stride]
    return fn
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-timestep L2 norm: flatten all but the leading axis, sqrt of sum of squares."""
    squares = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(squares.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error per timestep: |ya - yp| / (|ya| + |yp|)."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed error: actual minus predicted."""
    return ya - yp
def AbsErr(ya, yp):
    """Per-timestep L2 norm of the error between actual and predicted."""
    return norm(ya - yp)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"AbsZerr":[],
"Perr": [],
"AbsPerr": []
}
t=0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R, V = Rs[runs*ind], Vs[runs*ind]
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
t += end - start
# ll = [state for state in NVEStates(pred_traj)]
# save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
# if ind>20:
# break
sim_size = runs
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)+1e-30]
# nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]
ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
# nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])+1e-30])
nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)+1e-30])
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    # Plot the per-trajectory error curves stored under nexp[key] (one curve
    # per trajectory), then a mean +/- 2-std band computed in log-space.
    # Uses the script-level globals `semilog`, `panel`, `plt` and `_filename`.
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    for i in range(len(nexp[key])):
        if semilog:
            plt.semilogy(nexp[key][i].flatten())
        else:
            plt.plot(nexp[key][i].flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))

    # Summary figure: geometric mean with a 2-sigma band (statistics taken in
    # log-space, then mapped back with exp); the first two timesteps are
    # dropped — presumably to skip the transient, TODO confirm.
    fig, axs = panel(1, 1)
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)[2:]
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)[2:]
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")
np.savetxt(f"../peridynamics-simulation-time/hgn.txt", [t/maxtraj], delimiter = "\n")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
# yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 12,889 | 27.836689 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CFGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
#import matplotlib.pyplot as plt
from shadow.plot import *
# from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is identically *obj*."""
    matches = []
    for name, value in namespace.items():
        if value is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>".

    The name is recovered by identity lookup in *namespace* (this
    module's globals by default).  Arguments not bound to any name in
    the namespace are printed with an "<unnamed>" placeholder; the
    original unconditional `namestr(...)[0]` raised IndexError for
    those.
    """
    for arg in args:
        names = namestr(arg, namespace)
        label = names[0] if names else "<unnamed>"
        print(f"{label}: {arg}")
def wrap_main(f):
    """Wrap *f* so every call echoes its arguments to stdout and forwards
    them, plus the raw `(args, kwargs)` pair as a `config` keyword."""
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for name, value in kwargs.items():
            print(name, ":", value)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
         dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_noisy_data = 1):
    """CLI entry point: forward all hyperparameters to main() through
    wrap_main, which echoes the configuration and injects it as the
    `config` keyword argument."""
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, error_fn=error_fn, mpass=mpass,
                           dt=dt, ifdrag=ifdrag, trainm=trainm, stride=stride, lr=lr, datapoints=datapoints,
                           batch_size=batch_size, saveat=saveat, ifDataEfficiency = ifDataEfficiency, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Pendulum"
TAG = f"cfgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Resolve *name* to a path under the experiment output directory,
    creating parent directories as needed, and return the cleaned path."""
    # Run-identifier path segment: a timestamped name for fresh named runs,
    # otherwise "0" (data tag / no pretrained run) or the id of the
    # pretrained run being reused.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"{withdata}")
    if (ifDataEfficiency == 1):
        # NOTE(review): `data_points` is only bound earlier in main() under
        # the same ifDataEfficiency == 1 condition, so this guard matches.
        rstring = "0_" + str(data_points)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    # Ensure the directory exists before callers write to the file.
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free (non-periodic) space displacement: the plain difference a - b."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Advance positions by dR; velocities pass through unchanged."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator: route *f*'s first argument (a bare file name) through
    _filename so reads/writes land under the experiment output directory."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
mass_params = initialize_mlp([ne, 5, 1], key),
)
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
R, V = Rs[0], Vs[0]
species = jnp.array(species).reshape(-1, 1)
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_cacceleration(params, graph, mpass=1)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["L"])
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=constraints,
non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
params = {"L": Lparams}
print("here")
print(acceleration_fn_model(R, V, params))
print("dom")
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
#v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
# LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    # `loss__` is accepted for call-site compatibility but never read here.
    value, grads_ = gloss(params, *data)
    # opt_update sanitizes NaNs in the gradients before applying Adam.
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
# grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each sequence in *args* into equal-size batches along axis 0.

    With ``size=None`` everything goes into a single batch.  Otherwise two
    candidate batch counts (ceil(L/size) and one fewer) are compared and
    the partition covering more samples wins; trailing samples that do not
    fill a whole batch are dropped.  Returns one stacked jnp array per
    input, shaped (nbatches, size, ...).
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        # Candidate counts: ceil(total/size) and one fewer.
        many = int((total - 0.5) // size) + 1
        few = max(1, many - 1)
        size_many = int(total / many)
        size_few = int(total / few)
        # Keep whichever candidate covers more samples overall.
        if size_many * many > size_few * few:
            nbatches, size = many, size_many
        else:
            nbatches, size = few, size_few
    return [
        jnp.array([arg[b * size:(b + 1) * size] for b in range(nbatches)])
        for arg in args
    ]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs}: train={larray[-1]}, test={ltarray[-1]}")
# print_loss()
start = time.time()
train_time_arr = []
for epoch in range(epochs):
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
if epoch % 1 == 0:
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1,1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
plt.clf()
fig, axs = panel(1,1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../3-pendulum-training-time/cfgnode.txt", train_time_arr, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/cfgnode-train.txt", larray, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/cfgnode-test.txt", ltarray, delimiter = "\n")
Main()
| 16,284 | 30.560078 | 165 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-LGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
plt.rcParams["font.family"] = "Arial"
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is identically *obj*."""
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>".

    The name is recovered by identity lookup in *namespace* (this
    module's globals by default).  Arguments not bound to any name in
    the namespace are printed with an "<unnamed>" placeholder; the
    original unconditional `namestr(...)[0]` raised IndexError for
    those.
    """
    for arg in args:
        names = namestr(arg, namespace)
        label = names[0] if names else "<unnamed>"
        print(f"{label}: {arg}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"lgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/0_test/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R = model_states.position[0]
V = model_states.velocity[0]
print(f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
# return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
useT=True, useonlyedge=True)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
useT=True, useonlyedge=True)
return kin_energy(graph.nodes["velocity"]) - V
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def energy_fn(species):
# senders, receivers = [np.array(i)
# for i in get_fully_connected_senders_and_receivers(N)]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-sample Euclidean norm: flatten everything after axis 0 and
    return the L2 norm of each row (shape: (len(a),))."""
    flat_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative L2 error between trajectories ya and yp."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed pointwise error (actual minus predicted)."""
    residual = ya - yp
    return residual
def AbsErr(*args):
    """Magnitude of the pointwise error, |ya - yp|."""
    signed = Err(*args)
    return jnp.abs(signed)
def cal_energy_fn(lag=None, params=None):
    """Build a jitted function mapping a trajectory of states to the
    per-step energy table [PE, KE, L, TE] (transposed to steps x 4)."""
    @jit
    def fn(states):
        KE = vmap(kin_energy)(states.velocity)
        L = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        # Potential energy recovered from L = KE - PE.
        PE = -(L - KE)
        return jnp.array([PE, KE, L, KE+PE]).T
    return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
    """Build a jitted function evaluating *force* at every step of a
    trajectory of states (vmapped over position/velocity)."""
    @jit
    def fn(states):
        return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
    return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[0].position[ind*69]
V = dataset_states[0].velocity[ind*69]
try:
actual_traj = sim_orig2(R, V)
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"LGNN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"LGNN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Plot the error curves stored under nexp[key]: one figure with a
    curve per trajectory, and one with the log-space mean +/- 2 std band."""
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    for i in range(len(nexp[key])):
        # `semilog` comes from the enclosing main(); it selects the y scale.
        if semilog:
            plt.semilogy(nexp[key][i].flatten())
        else:
            plt.plot(nexp[key][i].flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))
    fig, axs = panel(1, 1)
    # Statistics are taken in log space, so exp(mean) is the geometric mean
    # and the band below is a multiplicative 2-sigma envelope.
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-nbody-zerr/lgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/lgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/lgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/lgnn.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
| 19,807 | 32.802048 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-data.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.nbody import ( get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def main(N1=4, N2=1, dim=3, grid=False, saveat=100, runs=10000, nconfig=1, ifdrag=0, train = False):
if N2 is None:
N2 = N1
N = N1*N2
tag = f"{N}-body-data"
seed = 42
out_dir = f"../results"
rname = False
rstring = "0" if train else "0_test"
filename_prefix = f"{out_dir}/{tag}/{rstring}/"
def _filename(name):
    """Resolve *name* inside the output prefix, creating parent
    directories, and return the cleaned (no '//') path."""
    raw_path = f"{filename_prefix}/{name}"
    # Make sure the target directory exists before callers write to it.
    os.makedirs(os.path.dirname(raw_path), exist_ok=True)
    resolved = raw_path.replace("//", "/")
    print("===", resolved, "===")
    return resolved
def displacement(a, b):
    # Free (non-periodic) space: displacement is the plain difference.
    return a - b
def shift(R, dR, V):
    # Position update only; velocity is passed through unchanged.
    return R+dR, V
def OUT(f):
    """Decorator routing *f*'s first argument (a bare file name) through
    _filename so outputs land under the experiment directory."""
    @wraps(f)
    def func(file, *args, **kwargs):
        return f(_filename(file), *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# init_confs = [chain(N)[:2]
# for i in range(nconfig)]
init_confs = get_init_conf(train)
senders, receivers = get_fully_connected_senders_and_receivers(N)
# if grid:
# senders, receivers = get_connections(N1, N2)
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
R, V = init_confs[0]
print("Saving init configs...")
savefile(f"initial-configs_{ifdrag}.pkl",
init_confs, metadata={"N1": N1, "N2": N2})
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
dt = 1.0e-3
stride = 100
lr = 0.001
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
    """Total gravitational potential energy of configuration *x*.

    `dr` holds pairwise Euclidean distances over the (senders, receivers)
    edge list; lnn.GRAVITATIONAL is evaluated with Gc=1 per edge.  The
    final /2 presumably compensates for each pair appearing in both
    directions in the fully-connected edge list — confirm against
    get_fully_connected_senders_and_receivers.
    """
    dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
    return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# print(R)
# print(senders)
# print(receivers)
# print("here")
# print(pot_energy_orig(R))
# sys.exit()
# def constraints(x, v, params):
#     return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
#     F = 0*R
#     F = jax.ops.index_update(F, (1, 1), -1.0)
#     return F.reshape(-1, 1)
# Optional linear drag as a non-conservative force.
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: -0.1*v")
    def drag(x, v, params):
        return -0.1*v.reshape(-1, 1)
# Euler-Lagrange acceleration derived from the exact Lagrangian.
acceleration_fn_orig = lnn.accelerationFull(N, dim,
                       lagrangian=Lactual,
                       non_conservative_forces=drag,
                       constraints=None,
                       external_force=None)
def force_fn_orig(R, V, params, mass=None):
    """Acceleration (mass is None) or force (acceleration * mass)."""
    if mass is None:
        return acceleration_fn_orig(R, V, params)
    else:
        return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
@jit
def forward_sim(R, V):
    # Integrate one trajectory of `runs` saved frames, `stride` steps apart.
    return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
@jit
def v_forward_sim(init_conf):
    # Vectorised variant over a batch of (R, V) initial conditions.
    return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
################################################
############### DATA GENERATION ################
################################################
print("Data generation ...")
ind = 0
dataset_states = []
# Simulate each initial configuration and checkpoint every `saveat` runs.
for R, V in init_confs:
    ind += 1
    print(f"{ind}/{len(init_confs)}", end='\r')
    model_states = forward_sim(R, V)
    dataset_states += [model_states]
    if ind % saveat == 0:
        print(f"{ind} / {len(init_confs)}")
        print("Saving datafile...")
        savefile(f"model_states_{ifdrag}.pkl", dataset_states)
# Final save in case len(init_confs) is not a multiple of saveat.
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
def cal_energy(states):
    """Per-frame [PE, KE, L, TE] table (shape (T, 4)) for a trajectory."""
    KE = vmap(kin_energy)(states.velocity)
    PE = vmap(pot_energy_orig)(states.position)
    L = vmap(Lactual, in_axes=(0, 0, None))(
        states.position, states.velocity, None)
    return jnp.array([PE, KE, L, KE+PE]).T
print("plotting energy...")
ind = 0
# Sanity plots + OVITO exports for (at most) the first 10 trajectories.
for states in dataset_states:
    ind += 1
    Es = cal_energy(states)
    fig, axs = plt.subplots(1, 1, figsize=(20, 5))
    plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
    plt.legend(bbox_to_anchor=(1, 1))
    plt.ylabel("Energy")
    plt.xlabel("Time step")
    title = f"{N}-nbody random state {ind}"
    plt.title(title)
    plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
    save_ovito(f"dataset_{ind}.data", [state for state in NVEStates(states)], lattice="")
    if ind >= 10:
        break
fire.Fire(main)
| 6,696 | 28.244541 | 100 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-HGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound (by identity) to *obj*."""
    found = []
    for name in namespace:
        if namespace[name] is obj:
            found.append(name)
    return found
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", looking the name up in *namespace*.

    Note: the default namespace is the module globals captured at definition
    time (standard default-argument semantics).
    """
    for value in args:
        name = namestr(value, namespace)[0]
        print(f"{name}: {value}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# ---------------------------------------------------------------------------
# Load the raw peridynamics dump (251 snapshots at steps 0..5000) from
# whitespace-separated data files into per-snapshot arrays.  Column layout
# per particle row: 0-2 acceleration, 3 damage, 4 id, 5 mass, 6-8 position,
# 9 type, 10-12 velocity, 13 volume.
# NOTE(review): `id` and `type` shadow Python builtins; they are unused after
# this loop and should be renamed.
# ---------------------------------------------------------------------------
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    # Each data row is one whitespace-separated string; split into columns.
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
# Stack snapshots: Rs/Vs/Fs end up with shape (T, N, 3).
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
print(Rs.shape)
print(Fs.shape)
# Phase-space rate of change z_dot = (q_dot, q_ddot), concatenated on axis 1.
Zs_dot = jnp.concatenate([Vs,Fs], axis=1)
print(Zs_dot.shape)
#sys.exit()
# Reference configuration; the /1.1 presumably undoes a stretch factor used
# when generating the data -- TODO confirm against the generator script.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
return a - b
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Neighbourhood graph of the reference configuration (cutoff radius 3.0).
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
# --- script-level hyper-parameters (the fire CLI wrapper is commented out) ---
epochs=10000
seed=42
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
#        dt, stride, lr, ifdrag, batch_size,
#        namespace=locals())
# Output-path components: run id, system name, model tag, results root.
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"HGNN"
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and mkdir -p) an output path ../results/peridynamics-<tag>/<run>/<name>."""
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
#     return a - b
def shift(R, dR, V):
    """Euclidean shift: advance positions by dR, pass velocities through."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator: resolve the first argument (a bare file name) through
    _filename() before handing it to *f*, forwarding everything else."""
    @wraps(f)
    def routed(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return routed
# File-I/O helpers routed through _filename.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
# Shuffle the snapshots, then take a 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allZs_dot = Zs_dot[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Zs_dot = allZs_dot[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Zst_dot = allZs_dot[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
# GNN feature sizes: edge/node input dims and embedding widths.
dim = 3
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 8
Nei = 8
Nei_ = 5 ##Nei for mass
hidden = 8
nhidden = 2
def get_layers(in_, out_):
    # Standard layer layout: input, `nhidden` hidden layers of `hidden` units, output.
    return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
    # Convenience wrapper around initialize_mlp with the standard layout.
    return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# Per-component MLP parameters for the graph Hamiltonian network.
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)
fb_params = mlp(Ef, Eei, key)
fv_params = mlp(Nei+Eei, Nei, key)
fe_params = mlp(Nei, Eei, key)
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key)
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
Hparams = dict(fb=fb_params,
               fv=fv_params,
               fe=fe_params,
               ff1=ff1_params,
               ff2=ff2_params,
               ff3=ff3_params,
               fne=fne_params,
               fneke=fneke_params,
               ke=ke_params,
               mass=mass_params)
#params = {"Fqqdot": Fparams}
def H_energy_fn(params, graph):
    """Learned Hamiltonian H = T + V evaluated on a jraph graph."""
    g, V, T = cal_graph(params, graph, eorder=None,
                        useT=True)
    return T + V
# def graph_force_fn(params, graph):
#     _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
#                                    useT=True)
#     return _GForce
R, V = Rs[0], Vs[0]
# Convert the make_graph dict into jraph.GraphsTuple kwargs: drop fields
# jraph does not accept, add globals, and remove self-loop edges.
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
def energy_fn(species):
    """Return an (R, V, params) -> total-energy callable bound to the
    module-level graph topology.

    The graph's node dict is mutated in place with the current state before
    the learned Hamiltonian is evaluated.
    """
    state_graph = graph
    def apply(R, V, params):
        state_graph.nodes.update(position=R, velocity=V)
        return H_energy_fn(params, state_graph)
    return apply
apply_fn = energy_fn(species)
# NOTE(review): apply_fn takes three arguments but in_axes has only two
# entries; v_apply_fn is never called, so this latent mismatch is inert.
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
    # Learned Hamiltonian with the "H" parameter sub-tree selected.
    return apply_fn(x, v, params["H"])
params = {"H": Hparams}
def nndrag(v, params):
    # Learned, always-dissipative drag: -|MLP(v)| * v for a scalar velocity.
    return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: -0.1*v")
    def drag(x, v, params):
        # Apply the learned drag element-wise over the flattened velocities.
        return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# Hamiltonian dynamics: z_dot(q, q_dot) from the learned H (and drag).
zdot_model, lamda_force_model = get_zdot_lambda(
    N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# #                                        constraints=None)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
# Smoke test: evaluate the model once on the first snapshot.
print(zdot_model(R,V, params))
# sys.exit()
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Zs_dot):
    """MSE between predicted and target phase-space derivatives z_dot."""
    predicted = v_zdot_model(Rs, Vs, params)
    return MSE(predicted, Zs_dot)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
def gloss(*args):
    """Evaluate loss_fn together with its gradient w.r.t. params (arg 0)."""
    loss_and_grad = value_and_grad(loss_fn)
    return loss_and_grad(*args)
def update(i, opt_state, params, loss__, *data):
    """Run one optimizer step on a batch.

    Returns the new optimizer state, the parameters extracted from it, and
    the batch loss value.  *loss__* is accepted for interface compatibility
    but unused.
    """
    loss_value, grads = gloss(params, *data)
    next_state = opt_update(i, grads, opt_state)
    return next_state, get_params(next_state), loss_value
@jit
def step(i, ps, *args):
    """Jit-compiled shim: unpack the (opt_state, params, _) tuple into update()."""
    return update(i, *ps, *args)
# Adam optimizer; the raw opt_update_ is wrapped below with gradient sanitisation.
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def opt_update(i, grads_, opt_state):
    """Adam step with gradient sanitisation: NaNs -> 0, then clip to +/-1000."""
    def _clean(g):
        return jnp.clip(jnp.nan_to_num(g), a_min=-1000.0, a_max=1000.0)
    sanitized = jax.tree_map(_clean, grads_)
    return opt_update_(i, sanitized, opt_state)
def batching(*args, size=None):
    """Split each sequence in *args* into equally sized batches.

    With ``size=None`` everything goes into a single batch.  Otherwise the
    batch count is chosen so batches are as close to *size* as possible
    while staying equal-length; any trailing remainder elements that do not
    fill a whole batch are dropped.  Returns one stacked jnp array of shape
    (nbatches, batch_size, ...) per input sequence.
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        cand_hi = int((total - 0.5) // size) + 1
        cand_lo = max(1, cand_hi - 1)
        size_hi = int(total/cand_hi)
        size_lo = int(total/cand_lo)
        # Keep whichever candidate covers more of the data.
        if size_hi*cand_hi > size_lo*cand_lo:
            nbatches, size = cand_hi, size_hi
        else:
            nbatches, size = cand_lo, size_lo
    return [jnp.array([arg[i*size:(i+1)*size] for i in range(nbatches)])
            for arg in args]
# Pre-batch the training arrays once; batches are reused every epoch.
bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                     size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []   # per-epoch training losses
ltarray = []  # per-epoch test losses
last_loss = 1000
for epoch in range(epochs):
    l = 0.0
    # Mini-batch passes over the pre-batched training data.
    for data in zip(bRs, bVs, bZs_dot):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)
        l += l_
    # One extra full-batch update per epoch; its loss is what gets logged.
    opt_state, params, l_ = step(
        optimizer_step, (opt_state, params, 0), Rs, Vs, Zs_dot)
    larray += [l_]
    ltarray += [loss_fn(params, Rst, Vst ,Zst_dot)]
    if epoch % 10 == 0:
        print(
            f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
    # Periodic checkpointing: model, loss curves, best-so-far model, plots.
    if epoch % 10 == 0:
        metadata = {
            "savedat": epoch,
            # "mpass": mpass,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
                     params, metadata=metadata)
        fig, axs = panel(1, 1)
        plt.semilogy(larray, label="Training")
        plt.semilogy(ltarray, label="Test")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
        if (ifDataEfficiency == 0):
            np.savetxt("../peridynamics-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
            np.savetxt("../peridynamics-training-loss/hgnn-train.txt", larray, delimiter = "\n")
            np.savetxt("../peridynamics-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
    # Cumulative wall-clock time since training started, recorded every epoch.
    now = time.time()
    train_time_arr.append((now - start))
# Final loss curves and model checkpoints after training completes.
# NOTE(review): `metadata` below is only bound inside the epoch-loop save
# branch; if epochs == 0 this raises NameError -- TODO confirm intended.
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
         params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
         (larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
    last_loss = larray[-1]
    savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
             params, metadata=metadata)
if (ifDataEfficiency == 0):
    np.savetxt("../peridynamics-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
    np.savetxt("../peridynamics-training-loss/hgnn-train.txt", larray, delimiter = "\n")
    np.savetxt("../peridynamics-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,231 | 26.798319 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-data-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init, get_init_spring)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
from pyexpat import model
from statistics import mode
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
#create a new state for storing data
class Datastate:
    """Training-sample view of a simulated trajectory.

    Keeps every frame except the last one, together with the per-step
    changes in position and velocity (finite differences between
    consecutive frames of *model_states*).
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        # Frame marker used downstream; always starts at 0.
        self.index = 0
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
def main(N1=5, N2=1, dim=2, grid=False, saveat=100, runs=101, nconfig=100, ifdrag=0):
    """Generate the N-spring training dataset.

    Simulates `nconfig` chain configurations of N = N1*N2 particles with the
    exact spring Lagrangian, wraps each trajectory in a Datastate (states +
    per-step deltas), saves everything under ../results/<N>-Spring-data/,
    and writes energy sanity plots / OVITO files for the first 10 runs.

    Args:
        N1, N2: grid dimensions; N2=None means N2=N1.  N = N1*N2.
        dim: spatial dimension of the system.
        grid: unused here (chain topology is always used).
        saveat: checkpoint the dataset every `saveat` trajectories.
        runs: number of saved frames per trajectory.
        nconfig: number of initial configurations to simulate.
        ifdrag: 0 for no drag, 1 for linear drag -0.1*v.
    """
    if N2 is None:
        N2 = N1
    N = N1*N2
    tag = f"{N}-Spring-data"
    seed = 42
    out_dir = f"../results"
    rname = False
    rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "1" #+ str(nconfig * (runs - 1))
    filename_prefix = f"{out_dir}/{tag}/{rstring}/"
    def _filename(name):
        # Resolve `name` under the run directory, creating directories as needed.
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
    def displacement(a, b):
        # Free-space displacement (no periodic wrapping).
        return a - b
    def shift(R, dR, V):
        # Move positions by dR; velocities pass through unchanged.
        return R+dR, V
    def OUT(f):
        # Route f's first argument (a bare file name) through _filename.
        @wraps(f)
        def func(file, *args, **kwargs):
            return f(_filename(file), *args, **kwargs)
        return func
    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    # Random chain initial conditions plus the (fixed) chain edge list.
    init_confs = [chain(N)[:2]
                  for i in range(nconfig)]
    _, _, senders, receivers = chain(N)
    # if grid:
    #     senders, receivers = get_connections(N1, N2)
    # else:
    #     # senders, receivers = get_fully_connected_senders_and_receivers(N)
    #     print("Creating Chain")
    R, V = init_confs[0]
    print("Saving init configs...")
    savefile(f"initial-configs_{ifdrag}.pkl",
             init_confs, metadata={"N1": N1, "N2": N2})
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)
    dt = 1.0e-3
    stride = 100
    lr = 0.001
    ################################################
    ################## SYSTEM ######################
    ################################################
    # parameters = [[dict(length=1.0)]]
    # pot_energy_orig = map_parameters(lnn.SPRING, displacement, species, parameters)
    def pot_energy_orig(x):
        # NOTE(review): `dr` here is the *squared* distance (no sqrt), unlike
        # sibling scripts that take jnp.sqrt first -- TODO confirm intended.
        dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
        return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
    kin_energy = partial(lnn._T, mass=masses)
    def Lactual(x, v, params):
        # Exact Lagrangian L = T - V; params unused.
        return kin_energy(v) - pot_energy_orig(x)
    # def constraints(x, v, params):
    #     return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
    def external_force(x, v, params):
        # Unused below (external_force=None); relies on the legacy
        # jax.ops.index_update API (removed in modern JAX).
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")
        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)
    # Euler-Lagrange acceleration from the exact Lagrangian.
    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                             lagrangian=Lactual,
                                             non_conservative_forces=drag,
                                             constraints=None,
                                             external_force=None)
    def force_fn_orig(R, V, params, mass=None):
        # Acceleration when mass is None, otherwise force = a * m.
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
    @jit
    def forward_sim(R, V):
        # Integrate one trajectory: `runs` frames saved every `stride` steps.
        return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
    @jit
    def v_forward_sim(init_conf):
        # Vectorised simulation over a batch of (R, V) pairs (unused below).
        return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
    ################################################
    ############### DATA GENERATION ################
    ################################################
    print("Data generation ...")
    ind = 0
    dataset_states = []
    for R, V in init_confs:
        ind += 1
        print(f"{ind}/{len(init_confs)}", end='\r')
        model_states = forward_sim(R, V)
        dataset_states += [Datastate(model_states)]
        if ind % saveat == 0:
            print(f"{ind} / {len(init_confs)}")
            print("Saving datafile...")
            savefile(f"model_states_{ifdrag}.pkl", dataset_states)
    # Final save (in case nconfig is not a multiple of saveat).
    print("Saving datafile...")
    savefile(f"model_states_{ifdrag}.pkl", dataset_states)
    def cal_energy(states):
        # Per-frame [PE, KE, L, TE] table, shape (T, 4).
        KE = vmap(kin_energy)(states.velocity)
        PE = vmap(pot_energy_orig)(states.position)
        L = vmap(Lactual, in_axes=(0, 0, None))(
            states.position, states.velocity, None)
        return jnp.array([PE, KE, L, KE+PE]).T
    print("plotting energy...")
    ind = 0
    # Sanity plots + OVITO export for (at most) the first 10 trajectories.
    for states in dataset_states:
        ind += 1
        Es = cal_energy(states)
        fig, axs = plt.subplots(1, 1, figsize=(20, 5))
        plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        plt.legend(bbox_to_anchor=(1, 1))
        plt.ylabel("Energy")
        plt.xlabel("Time step")
        title = f"{N}-Spring random state {ind}"
        plt.title(title)
        plt.savefig(
            _filename(title.replace(" ", "_")+".png"), dpi=300)
        save_ovito(f"dataset_{ind}.data", [
            state for state in NVEStates(states)], lattice="")
        if ind >= 10:
            break
# CLI entry point: expose main()'s keyword arguments via python-fire.
fire.Fire(main)
| 7,185 | 29.320675 | 105 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-FGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """All keys of *namespace* whose value is the very object *obj*."""
    return [key for key, val in namespace.items() if val is obj]
def pprint(*args, namespace=globals()):
    """Print "name: value" for each argument, resolving names via *namespace*.

    The default namespace is the module globals captured at definition time.
    """
    for item in args:
        label = namestr(item, namespace)
        print(f"{label[0]}: {item}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if useN is None:
useN = N
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"fgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
senders = jnp.array(senders)
receivers = jnp.array(receivers)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_acceleration(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
if ind > maxtraj+skip:
break
_ind = ind*runs
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R = dataset_states[ind].position[0]
V = dataset_states[ind].velocity[0]
try:
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"FGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"FGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
print("skipped")
if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Plot the error series stored under ``nexp[key]``: first one curve per
    trajectory, then the geometric mean across trajectories with a band of
    exp(mean +/- 2*std) where mean/std are computed in log-space."""
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    # One curve per trajectory (semilog y-axis when `semilog` is set).
    for i in range(len(nexp[key])):
        if semilog:
            plt.semilogy(nexp[key][i].flatten())
        else:
            plt.plot(nexp[key][i].flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))
    fig, axs = panel(1, 1)
    # Statistics taken in log-space: exp(mean(log x)) is the geometric mean.
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    # 2-sigma (multiplicative) uncertainty band around the geometric mean.
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/fgnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/fgnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/fgnode.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/fgnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 5)
# main(N = 50)
| 20,256 | 32.538079 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-GNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to the exact object *obj*
    (identity comparison, not equality)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<variable name>: <value>" by looking the
    object up in *namespace*.

    NOTE(review): the default ``namespace=globals()`` is evaluated once at
    import time, as is standard for Python default arguments; callers pass
    ``namespace=locals()`` explicitly where needed.
    """
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
N=5
epochs=10000
seed=42
rname=True
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
# mpass=1
lr=0.001
withdata=None
datapoints=None
batch_size=100
ifDataEfficiency = 0
if_noisy_data = 0
# def main(N=3, epochs=100, seed=42, rname=True, saveat=1,
# dt=1.0e-5, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname,
dt, stride, lr, ifdrag, batch_size,
namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Spring"
TAG = f"gnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and create the directory for) an output path for this run.

    Data files always live under ``../results``; everything else goes under
    ``out_dir``.  The run id ``rstring`` is "0", or "0_<data_points>" when
    running the data-efficiency study.
    """
    # NOTE(review): the original computed rstring from randfilename/withdata
    # and then unconditionally overwrote it with "0"; the dead expression
    # (which also used the non-idiomatic `withdata == None`) has been removed
    # without changing behavior.
    rstring = "0"
    if ifDataEfficiency == 1:
        rstring = "0_" + str(data_points)
    if tag == "data":
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Normalize the accidental double slash introduced by the prefix join.
    filename = file.replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Displacement from *b* to *a* (free space, no periodic wrapping)."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Advance positions by *dR*; velocities pass through unchanged."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator: route the wrapped function's first (filename) argument
    through ``_filename`` so paths are prefixed per-run and their
    directories are created before *f* is called."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, 1, N, dim)
Vs = Vs.reshape(-1, 1, N, dim)
Fs = Fs.reshape(-1, 1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
Ef = dim # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
Nei_ = 5 ##Nei for mass
hidden = 5
nhidden = 2
def get_layers(in_, out_):
    """Layer-size list: input width, `nhidden` hidden layers of width
    `hidden`, then the output width."""
    sizes = [in_]
    sizes.extend([hidden] * nhidden)
    sizes.append(out_)
    return sizes
def mlp(in_, out_, key, **kwargs):
    # Initialize MLP parameters with the standard hidden-layer layout
    # produced by get_layers (kwargs forwarded to initialize_mlp).
    return initialize_mlp(get_layers(in_, out_), key, **kwargs)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
fb_params = mlp(Ef, Eei, key) #
fv_params = mlp(Nei+Eei, Nei, key) #
fe_params = mlp(Nei, Eei, key) #
ff1_params = mlp(Eei, dim, key)
ff2_params = mlp(Nei, dim, key) #
ff3_params = mlp(Nei+dim+dim, dim, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
Fparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params,
mass=mass_params)
params = {"Fqqdot": Fparams}
def graph_force_fn(params, graph):
    # Node-wise force predicted by the graph network on `graph`.
    # eorder=None: edges are used as given, no reordering/symmetrization.
    _GForce = a_cdgnode_cal_force_q_qdot(params, graph, eorder=None,
                                        useT=True)
    return _GForce
R, V = Rs[0][0], Vs[0][0]
def _force_fn(species):
    """Build a closure evaluating the GNN force on a fixed graph topology
    (senders/receivers captured from the enclosing scope); only node
    positions/velocities are updated per call."""
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def apply(R, V, params):
        # Reuse the template graph; only the node features change per call.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = _force_fn(species)
# v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# Acceleration model: GNN force field evaluated with the "Fqqdot" parameters.
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# x=R
# v=V
# print(F_q_qdot(x, v, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = F_q_qdot
# acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
# constraints=None,
# non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
# x=Rs[0]
# v=Vs[0]
# F_q_qdot(x[0], v[0], params)
# acceleration_fn_model(x[0], v[0], params)
# hhhh = v_v_acceleration_fn_model(Rs, Vs, params)
# print(hhhh)
# print(hhhh.shape)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    """MSE between model-predicted accelerations and the targets Fs."""
    pred = v_v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)
def gloss(*args):
    # Loss value together with its gradient w.r.t. the parameters.
    return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    # `loss__` is accepted for signature compatibility with `step` but unused.
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
    # Unpack (opt_state, params, loss) and run one jitted optimizer update.
    return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients before applying: replace NaNs with finite numbers
    # and clip each element to [-1000, 1000] to avoid blow-ups.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each array in *args* into equal-length batches stacked along a
    new leading axis.

    With ``size=None`` everything becomes one batch.  Otherwise the batch
    count is chosen so batches are equal-length and close to the requested
    size; trailing elements that would leave a ragged last batch are dropped
    so the stacked result stays rectangular.
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        # Two candidate splits: one batch more vs. one batch fewer.
        n_more = int((total - 0.5) // size) + 1
        n_less = max(1, n_more - 1)
        per_more = int(total / n_more)
        per_less = int(total / n_less)
        # Keep whichever split covers more of the data.
        if per_more * n_more > per_less * n_less:
            size, nbatches = per_more, n_more
        else:
            size, nbatches = per_less, n_less
    return [
        jnp.array([a[i * size:(i + 1) * size] for i in range(nbatches)])
        for a in args
    ]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count+=1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 100 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../5-spring-training-time/cgnode.txt", train_time_arr, delimiter = "\n")
np.savetxt("../5-spring-training-loss/cgnode-train.txt", larray, delimiter = "\n")
np.savetxt("../5-spring-training-loss/cgnode-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,629 | 26.816327 | 134 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-HGNN.py | ################################################
################## IMPORT ######################
################################################
from posixpath import split
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import ode
# from shadow.plot import *
# from shadow.plot import panel
import matplotlib.pyplot as plt
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N = 5, epochs = 10000, seed = 42, rname = False, saveat = 100, dt = 1.0e-3, stride = 100, ifdrag = 0, trainm = 1, grid = False, mpass = 1, lr = 0.001, withdata = None, datapoints = None, batch_size = 100, ifDataEfficiency = 0, if_noisy_data = 1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname, dt, lr, ifdrag, batch_size, namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Spring"
TAG = f"hgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
    def _filename(name, tag=TAG):
        """Build an output path for this run and create its directory.

        Data files always resolve under ``../results``; everything else
        goes under ``out_dir``.  Run id is "2" for the data tag, "0"
        otherwise, or "2_<data_points>" for the data-efficiency study.
        """
        # rstring = randfilename if (rname and (tag != "data")) else (
        #     "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
        rstring = "2" if (tag == "data") else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        # Normalize the accidental double slash from the prefix join.
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
z_out, zdot_out = model_states
print(f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
N2, dim = z_out.shape[-2:]
N = N2//2
array = jnp.array([jnp.array(i) for i in dataset_states])
Zs = array[:, 0, :, :, :]
Zs_dot = array[:, 1, :, :, :]
Zs = Zs.reshape(-1, N2, dim)
Zs_dot = Zs_dot.reshape(-1, N2, dim)
if (if_noisy_data == 1):
Zs = np.array(Zs)
Zs_dot = np.array(Zs_dot)
np.random.seed(100)
for i in range(len(Zs)):
Zs[i] += np.random.normal(0,1,1)
Zs_dot[i] += np.random.normal(0,1,1)
Zs = jnp.array(Zs)
Zs_dot = jnp.array(Zs_dot)
mask = np.random.choice(len(Zs), len(Zs), replace=False)
allZs = Zs[mask]
allZs_dot = Zs_dot[mask]
Ntr = int(0.75*len(Zs))
Nts = len(Zs) - Ntr
Zs = allZs[:Ntr]
Zs_dot = allZs_dot[:Ntr]
Zst = allZs[Ntr:]
Zst_dot = allZs_dot[Ntr:]
################################################
################## SYSTEM ######################
################################################
# def phi(x):
# X = jnp.vstack([x[:1, :]*0, x])
# return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
# constraints = get_constraints(N, dim, phi)
################################################
################### ML Model ###################
################################################
if grid:
print("It's a grid?")
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
print("It's a random?")
# senders, receivers = get_fully_connected_senders_and_receivers(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
hidden = 5
nhidden = 2
def get_layers(in_, out_):
return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# # fne_params = mlp(Oh, Nei, key)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)
fb_params = mlp(Ef, Eei, key)
fv_params = mlp(Nei+Eei, Nei, key)
fe_params = mlp(Nei, Eei, key)
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key)
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
Hparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params)
    def H_energy_fn(params, graph):
        # Hamiltonian from the graph network: kinetic (T) + potential (V).
        # The first return value (updated graph) is unused here.
        g, V, T = cal_graph(params, graph, eorder=eorder,
                            useT=True)
        return T + V
R, V = jnp.split(Zs[0], 2, axis=0)
species = jnp.zeros(N, dtype=int)
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
H_energy_fn(Hparams, state_graph)
def energy_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return H_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
return apply_fn(x, v, params["H"])
params = {"H": Hparams}
    def nndrag(v, params):
        # Learned drag force: an MLP gives the magnitude, and the leading
        # minus with abs() guarantees it always opposes the velocity.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Zs_dot):
pred = v_zdot_model(Rs, Vs, params)
return MSE(pred, Zs_dot)
def gloss(*args):
return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
    def batching(*args, size=None):
        """Split each array in *args* into equal-length batches stacked
        along a new leading axis.  The batch count is picked so batches are
        near the requested size; elements that would leave a ragged last
        batch are dropped so the stacked result stays rectangular.
        (Same helper as in the sibling training scripts.)"""
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            # Keep whichever split covers more of the data.
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs
Rs, Vs = jnp.split(Zs, 2, axis=1)
Rst, Vst = jnp.split(Zst, 2, axis=1)
bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
start = time.time()
train_time_arr = []
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bZs_dot):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
l = l/len(bRs)
if epoch % 1 == 0:
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Zs_dot)
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = plt.subplots(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata={"savedat": epoch})
if ifDataEfficiency ==0:
np.savetxt("../5-spring-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../5-spring-training-loss/hgnn-train.txt", larray, delimiter = "\n")
np.savetxt("../5-spring-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
#fire.Fire(main)
main()
| 12,742 | 26.823144 | 255 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-data-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
from pyexpat import model
from statistics import mode
import sys
import os
from datetime import datetime
from functools import partial, wraps
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
#from shadow.plot import *
#from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from psystems.npendulum import PEF, get_init, hconstraints
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import fire
#import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
class Datastate:
    """Training sample built from one simulated trajectory.

    Keeps every state except the last, plus the one-step differences in
    position and velocity (the learning targets for a discrete-update
    model).  ``index`` is initialized to 0.
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        self.index = 0
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
def main(N=3, dim=2, saveat=100, nconfig=100, ifdrag=0, runs=101):
tag = f"{N}-Pendulum-data"
seed = 42
out_dir = f"../results"
rname = False
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "1"
filename_prefix = f"{out_dir}/{tag}/{rstring}/"
def _filename(name):
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, **kwargs):
return f(_filename(file), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
init_confs = [get_init(N, dim=dim) for i in range(nconfig)]
print("Saving init configs...")
savefile(f"initial-configs_{ifdrag}.pkl", init_confs)
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
dt = 1.0e-5
stride = 1000
lr = 0.001
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
    def Lactual(x, v, params):
        # Lagrangian of the true system: L = T(v) - V(x).
        return kin_energy(v) - pot_energy_orig(x)
    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. the
        # (flattened) positions; `v` and `params` are unused.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
    def force_fn_orig(R, V, params, mass=None):
        # Ground-truth dynamics: returns acceleration, or force
        # (acceleration * mass) when per-particle masses are given.
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
    @jit
    def forward_sim(R, V):
        # Integrate the ground-truth dynamics for `runs` saved frames,
        # sampling every `stride` internal steps of size `dt`.
        return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
@jit
def v_forward_sim(init_conf):
return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
################################################
############### DATA GENERATION ################
################################################
print("Data generation ...")
ind = 0
dataset_states = []
for R, V in init_confs:
ind += 1
print(f"{ind}/{len(init_confs)}", end='\r')
model_states = forward_sim(R, V)
dataset_states += [Datastate(model_states)]
if ind % saveat == 0:
print(f"{ind} / {len(init_confs)}")
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
    def cal_energy(states):
        # Per-frame energies; columns are [PE, KE, Lagrangian, total energy].
        KE = vmap(kin_energy)(states.velocity)
        PE = vmap(pot_energy_orig)(states.position)
        L = vmap(Lactual, in_axes=(0, 0, None))(states.position, states.velocity, None)
        return jnp.array([PE, KE, L, KE+PE]).T
print("plotting energy...")
ind = 0
for states in dataset_states:
ind += 1
Es = cal_energy(states)
fig, axs = plt.subplots(1, 1, figsize=(20, 5))
plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
plt.legend(bbox_to_anchor=(1, 1))
plt.ylabel("Energy")
plt.xlabel("Time step")
title = f"{N}-Pendulum random state {ind} {ifdrag}"
plt.title(title)
plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
if ind >= 10:
break
fire.Fire(main)
| 6,168 | 28.516746 | 97 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-HGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
# from shadow.plot import *
# from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to the exact object *obj*
    (identity comparison, not equality)."""
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<variable name>: <value>".

    NOTE(review): the default ``namespace=globals()`` is evaluated once at
    import time (standard Python default-argument behavior); callers pass
    ``namespace=locals()`` explicitly where needed.
    """
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=3, dim=2, dt=1.0e-5,stride=1000, useN=3, ifdrag=0, seed=100, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search = 0, nhidden = 2, if_mpass_search = 0, mpass = 1, if_lr_search = 0, lr = 0.001, if_act_search = 0, if_noisy_data=1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, ifdrag, namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"hgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_hidden_search == 1):
out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
out_dir = f"../mlp_nhidden_search"
elif (if_mpass_search == 1):
out_dir = f"../mpass_search"
elif (if_lr_search == 1):
out_dir = f"../lr_search"
elif (if_act_search == 1):
out_dir = f"../act_search"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
    """Build (and create the directory for) an output path that encodes the
    system, tag, drag flag, train mode and the active ablation setting."""
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    if trained is not None:
        # Evaluate a model trained on a different system size.
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS
    name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
    rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
    # Ablation runs use a deterministic "2_<value>" run id instead of a timestamp.
    if (ifDataEfficiency == 1):
        rstring = "2_" + str(data_points)
    elif (if_hidden_search == 1):
        rstring = "2_" + str(hidden)
    elif (if_nhidden_search == 1):
        rstring = "2_" + str(nhidden)
    elif (if_mpass_search == 1):
        rstring = "2_" + str(mpass)
    elif (if_lr_search == 1):
        rstring = "2_" + str(lr)
    elif (if_act_search == 1):
        rstring = "2_" + str("softplus")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def OUT(f):
    """Decorator: resolve the first (file-name) argument of *f* through _filename."""
    @wraps(f)
    def wrapped(file, *args, tag=TAG, trained=None, **kwargs):
        resolved = _filename(file, tag=tag, trained=trained)
        return f(resolved, *args, **kwargs)
    return wrapped
def _fileexist(f):
    """Report whether *f* already exists; always False when a redo is forced."""
    return (not redo) and os.path.isfile(f)
# File-system helpers wrapped so every call resolves its path via _filename.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
#     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
# Random initial configuration of the pendulum chain.
R, V = get_init(N, dim=dim, angles=(-90, 90))
V = V
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, p, params):
    """Ground-truth Hamiltonian: kinetic energy plus the analytic pendulum potential."""
    ke = kin_energy(p)
    pe = pot_energy_orig(x)
    return ke + pe
def phi(x):
    """Holonomic rod constraints: squared distance between consecutive bobs
    (with the fixed origin prepended) minus the unit rod length."""
    anchored = jnp.concatenate([jnp.zeros_like(x[:1, :]), x], axis=0)
    gaps = anchored[1:, :] - anchored[:-1, :]
    return jnp.sum(gaps * gaps, axis=1) - 1.0
constraints = get_constraints(N, dim, phi)
def external_force(x, v, params):
    """Constant downward unit force on bob 1, flattened to a column vector.

    Currently unused: the simulators below are built with external_force=None.
    Note: the original called jax.ops.index_update, which has been removed from
    JAX; the functional .at[].set() API is the supported equivalent.
    """
    F = (0 * R).at[1, 1].set(-1.0)
    return F.reshape(-1, 1)
# Select the dissipation model: none, or linear drag -0.1*p per component.
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, p, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: -0.1*p")
    def drag(x, p, params):
        # return -0.1 * (p*p).sum()
        return (-0.1*p).reshape(-1, 1)
# Ground-truth constrained dynamics.  NOTE(review): drag and external_force
# defined above are deliberately not passed here (both None) — confirm intended.
zdot, lamda_force = get_zdot_lambda(
    N, dim, hamiltonian=Hactual, drag=None, constraints=constraints, external_force=None)
def zdot_func(z, t, params):
    # ODE right-hand side over the stacked state z = [x; p].
    x, p = jnp.split(z, 2)
    return zdot(x, p, params)
def z0(x, p):
    """Phase-space packing: rows of positions x followed by rows of momenta p."""
    stacked = jnp.vstack((x, p))
    return stacked
def get_forward_sim(params=None, zdot_func=None, runs=10):
    """Return a simulator closure that integrates zdot_func and keeps one
    frame every `stride` integrator steps, for `runs` saved frames."""
    def simulate(R, V):
        n_steps = runs * stride
        times = jnp.linspace(0.0, n_steps * dt, n_steps)
        trajectory = ode.odeint(zdot_func, z0(R, V), times, params)
        return trajectory[0::stride]
    return simulate
# Long ground-truth simulator (kept for reference; per-trajectory runs use sim_orig2 below).
sim_orig = get_forward_sim(
    params=None, zdot_func=zdot_func, runs=maxtraj*runs)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
def H_energy_fn(params, graph):
    """Graph-network Hamiltonian: learned potential plus learned kinetic term."""
    g, g_PE, g_KE = cal_graph(params, graph, eorder=eorder,
                              useT=True)
    return g_PE + g_KE
# Template graph for the pendulum chain topology.
state_graph = jraph.GraphsTuple(nodes={
    "position": R,
    "velocity": V,
    "type": species,
},
    edges={},
    senders=senders,
    receivers=receivers,
    n_node=jnp.array([N]),
    n_edge=jnp.array([senders.shape[0]]),
    globals={})
def energy_fn(species):
    """Build an apply(R, V, params) closure over a fixed pendulum graph.

    NOTE(review): apply mutates the captured graph's node dict in place to
    refresh position/velocity before each evaluation.
    """
    senders, receivers = [np.array(i)
                          for i in pendulum_connections(R.shape[0])]
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return H_energy_fn(params, state_graph)
    return apply
apply_fn = energy_fn(species)
# Batched evaluation of the learned Hamiltonian (shared params, batch axis on states).
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
    """Learned Hamiltonian evaluated with the GNN weights stored under key "H"."""
    gnn_params = params["H"]
    return apply_fn(x, v, gnn_params)
# Learned dynamics: no explicit constraints (the GNN absorbs them).
zdot_model, lamda_force_model = get_zdot_lambda(
    N, dim, hamiltonian=Hmodel, drag=None, constraints=None)
def zdot_model_func(z, t, params):
    # RHS of the learned dynamics over the stacked state z = [x; p].
    x, p = jnp.split(z, 2)
    return zdot_model(x, p, params)
# Load the trained model (possibly trained on a different system size useN).
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
    params=params, zdot_func=zdot_model_func, runs=runs)
# z_model_out = sim_model(R, V)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame L2 norm: flatten everything but the leading axis, then sqrt of row sums of squares."""
    flat_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error between trajectories, frame by frame."""
    diff = norm(ya - yp)
    scale = norm(ya) + norm(yp)
    return diff / scale
def Err(ya, yp):
    """Signed difference, actual minus predicted."""
    residual = ya - yp
    return residual
def AbsErr(*args):
    """Elementwise absolute error |ya - yp|."""
    ya, yp = args
    return jnp.abs(ya - yp)
def caH_energy_fn(lag=None, params=None):
    """Return fn(states) -> per-frame [PE, KE, H, KE+PE] from Hamiltonian `lag`."""
    def fn(states):
        KE = vmap(kin_energy)(states.velocity)
        H = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        # Potential energy is recovered as H - KE.
        PE = (H - KE)
        # return jnp.array([H]).T
        return jnp.array([PE, KE, H, KE+PE]).T
    return fn
# Energy evaluators for the true and learned Hamiltonians.
Es_fn = caH_energy_fn(lag=Hactual, params=None)
Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
# Es_pred_fn(pred_traj)
def net_force_fn(force=None, params=None):
    """Return fn(states) extracting the momentum-derivative half of `force`
    evaluated over every frame of a trajectory."""
    batched_force = vmap(force, in_axes=(0, 0, None))
    def fn(states):
        zdot_out = batched_force(states.position, states.velocity, params)
        halves = jnp.split(zdot_out, 2, axis=1)
        return halves[1]
    return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
# Accumulators for per-trajectory errors and energies.
nexp = {
    "z_pred": [],
    "z_actual": [],
    "Zerr": [],
    "Herr": [],
    "E": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
# Total wall-clock time spent in model rollouts (averaged over maxtraj at the end).
t=0.0
# Main measurement loop: fresh random initial condition per trajectory,
# roll out ground truth and model, dump OVITO files, accumulate errors.
for ind in range(maxtraj):
    print(f"Simulating trajectory {ind}/{maxtraj}")
    # R = full_traj[_ind].position
    # V = full_traj[_ind].velocity
    # start_ = _ind+1
    # stop_ = start_+runs
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    # R = dataset_states[ind].position[0]
    # V = dataset_states[ind].velocity[0]
    z_actual_out = sim_orig2(R, V)  # full_traj[start_:stop_]
    x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
    zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
        x_act_out, p_act_out, None)
    _, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
    my_state = States()
    my_state.position = x_act_out
    my_state.velocity = p_act_out
    my_state.force = force_act_out
    my_state.mass = jnp.ones(x_act_out.shape[0])
    actual_traj = my_state
    # Time only the learned-model rollout.
    start = time.time()
    z_pred_out = sim_model(R, V)
    x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
    zdot_pred_out = jax.vmap(zdot_model, in_axes=(
        0, 0, None))(x_pred_out, p_pred_out, params)
    _, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
    my_state_pred = States()
    my_state_pred.position = x_pred_out
    my_state_pred.velocity = p_pred_out
    my_state_pred.force = force_pred_out
    my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
    pred_traj = my_state_pred
    end = time.time()
    t+= end - start
    # def get_hinge(x):
    #     return jnp.append(x, jnp.zeros([1, 2]), axis=0)
    # h_actual_traj = actual_traj
    # h_actual_traj.position = jax.vmap(
    #     get_hinge, in_axes=0)(actual_traj.position)
    # h_actual_traj.velocity = jax.vmap(
    #     get_hinge, in_axes=0)(actual_traj.velocity)
    # h_actual_traj.force = jax.vmap(get_hinge, in_axes=0)(actual_traj.force)
    if saveovito:
        # if ind < 1:
        save_ovito(f"pred_{ind}.data", [
            state for state in NVEStates(pred_traj)], lattice="")
        save_ovito(f"actual_{ind}.data", [
            state for state in NVEStates(actual_traj)], lattice="")
        # else:
        #     pass
    trajectories += [(actual_traj, pred_traj)]
    savefile("trajectories.pkl", trajectories)
    if plotthings:
        if ind < 1:
            # NOTE(review): `key` here shadows the PRNG key created above.
            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")
                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                # Align predicted energies with the true initial energy.
                Es_pred = Es_pred - Es_pred[0] + Es[0]
                fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
                            lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")
                plt.xlabel("Time step")
                plt.ylabel("Energy")
                title = f"(HGNN) {N}-Pendulum Exp {ind}"
                plt.title(title)
                plt.savefig(
                    _filename(title.replace(" ", "-")+f"_{key}.png"))
                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)
                # NOTE(review): hshift/vs are not matplotlib subplots kwargs —
                # this call will raise if this branch runs; confirm plotting backend.
                fig, axs = plt.subplots(1+R.shape[0], 1, figsize=(20,
                                                                  R.shape[0]*5), hshift=0.1, vs=0.35)
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Averaged over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                            r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(
                            axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                        r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(
                            net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Pendulum Exp {ind}")
                plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
            Es = Es_fn(actual_traj)
            Eshat = Es_fn(pred_traj)
            H = Es[:, -1]
            Hhat = Eshat[:, -1]
            fig, axs = plt.subplots(1, 2, figsize=(20, 5))
            axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
            axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
            plt.legend(bbox_to_anchor=(1, 1), loc=2)
            axs[0].set_facecolor("w")
            plt.xlabel("Time step")
            plt.xlabel("Time step")
            plt.ylabel("Energy")
            plt.ylabel("Energy")
            title = f"HGNN {N}-Pendulum Exp {ind} Hmodel"
            axs[1].set_title(title)
            title = f"HGNN {N}-Pendulum Exp {ind} Hactual"
            axs[0].set_title(title)
            plt.savefig(_filename(title.replace(" ", "-")+f".png"))
    else:
        pass
    # Per-trajectory error metrics on energy (H) and positions (Z);
    # +1e-30 keeps later log-space statistics finite.
    Es = Es_fn(actual_traj)
    Eshat = Es_fn(pred_traj)
    H = Es[:, -1]
    Hhat = Eshat[:, -1]
    nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
    nexp["E"] += [Es, Eshat]
    nexp["z_pred"] += [pred_traj.position]
    nexp["z_actual"] += [actual_traj.position]
    nexp["Zerr"] += [RelErr(actual_traj.position,
                            pred_traj.position)+1e-30]
    savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
    """Plot every trajectory's error curve for `key`, then the geometric mean
    across trajectories with a 2-sigma multiplicative band."""
    print(f"Plotting err for {key}")
    fig, axs = plt.subplots(1, 1)
    filepart = f"{key}"
    for i in range(len(nexp[key])):
        y = nexp[key][i].flatten()
        if key2 is None:
            x = range(len(y))
        else:
            x = nexp[key2][i].flatten()
            filepart = f"{filepart}_{key2}"
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.savefig(_filename(f"RelError_{filepart}.png"))
    fig, axs = plt.subplots(1, 1)
    # Statistics in log space => geometric mean and multiplicative spread.
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
           yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
           yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
# Geometric-mean error curves across trajectories, exported for cross-model comparison.
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
    np.savetxt(f"../{N}-pendulum-zerr/hgnn.txt", gmean_zerr, delimiter = "\n")
    np.savetxt(f"../{N}-pendulum-herr/hgnn.txt", gmean_herr, delimiter = "\n")
    np.savetxt(f"../{N}-pendulum-simulation-time/hgnn.txt", [t/maxtraj], delimiter = "\n")
# Run the post-processing for the 4- and 5-link pendulum systems.
for _n in (4, 5):
    main(N=_n)
| 18,563 | 32.814208 | 354 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-HGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return all names in `namespace` whose value is this exact object."""
    return [n for n, v in namespace.items() if v is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>" using namestr to recover its name."""
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Per-frame fields parsed from the peridynamics dump files.
# NOTE(review): `id` and `type` shadow Python builtins; kept as-is for file consistency.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    # Rows are whitespace-separated columns packed into one CSV field.
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Stacked phase-space derivative targets [V; F].
Zs_dot = jnp.concatenate([Vs,Fs], axis=1)
# Reference configuration; NOTE(review): the 1.1 divisor looks like undoing a
# stretch applied when the data was generated — confirm against the generator.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement vector from b to a (free space, no periodic wrap)."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Neighborhood graph over the reference configuration (3.0 cutoff radius).
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
# Script configuration (was a CLI `main` signature; kept as module globals).
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=100
semilog=1
maxtraj=10
plotthings=True
redo=0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
#        dt, stride, lr, ifdrag, batch_size,
#        namespace=locals())
PSYS = f"peridynamics"
TAG = f"HGNN"
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    """Build (and create the directory for) an output path encoding system
    name, tag, drag flag and train mode."""
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[0]}"
    else:
        psys = PSYS
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]
    # Timestamped run id only for non-data tags when rname is set; otherwise "0".
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Apply displacement dR to positions R; velocities pass through unchanged."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator routing the first (file-name) argument of *f* through _filename."""
    @wraps(f)
    def wrapped(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return wrapped
# File-system helpers wrapped so every call resolves its path via _filename.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
# Reference ("ground truth") trajectory exported by the MCGNODE test run.
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []
import pandas as pd
for num in range(1000):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
    split_df = df.iloc[1:,0].str.split(expand=True)
    origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    origin_mass += [np.array(split_df[[5]]).astype('float64')]
    origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
    origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)
origin_Zs_dot = jnp.concatenate([origin_Vs,origin_Fs], axis=1)
################################################
################### ML Model ###################
################################################
def H_energy_fn(params, graph):
    """Learned Hamiltonian from the graph network: kinetic plus potential."""
    _, pot, kin = cal_graph(params, graph, eorder=None,
                            useT=True)
    return kin + pot
R, V = Rs[0], Vs[0]
# Strip training-only fields and self-loops from the saved discrete graph,
# then rebuild it as an immutable jraph tuple.
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
def energy_fn(species):
    """Close over the fixed graph; apply(R, V, params) refreshes node
    positions/velocities in place before evaluating the learned Hamiltonian."""
    state_graph = graph
    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return H_energy_fn(params, state_graph)
    return apply
apply_fn = energy_fn(species)
# Batched Hamiltonian evaluation (shared params, leading batch axis on states).
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
    """Learned Hamiltonian evaluated with the GNN weights stored under key "H"."""
    gnn_params = params["H"]
    return apply_fn(x, v, gnn_params)
def nndrag(v, params):
    """Learned drag: a non-negative MLP magnitude, applied opposite to v."""
    magnitude = jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus))
    return -magnitude * v
# Select dissipation: none, or the per-component learned drag above.
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: -0.1*v")
    def drag(x, v, params):
        return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
#params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# Learned dynamics with optional drag; no explicit constraints.
zdot_model, lamda_force_model = get_zdot_lambda(
    N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
    # RHS of the learned dynamics over the stacked state z = [x; p].
    x, p = jnp.split(z, 2)
    return zdot_model(x, p, params)
# Batched RHS (used for per-frame force extraction below).
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
#                                            constraints=None)
# def force_fn_model(R, V, params, mass=None):
#     if mass is None:
#         return acceleration_fn_model(R, V, params)
#     else:
#         return acceleration_fn_model(R, V, params)
#         return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
# Trained weights for the peridynamics HGNN model.
params = loadfile(f"perignode_trained_model_low.dil")[0]
def z0(x, p):
    """Phase-space packing: rows of positions x followed by rows of momenta p."""
    stacked = jnp.vstack((x, p))
    return stacked
def get_forward_sim(params=None, zdot_func=None, runs=10):
    """Return a simulator closure integrating zdot_func and keeping one frame
    every `stride` integrator steps, for `runs` saved frames."""
    def simulate(R, V):
        n_steps = runs * stride
        times = jnp.linspace(0.0, n_steps * dt, n_steps)
        trajectory = ode.odeint(zdot_func, z0(R, V), times, params)
        return trajectory[0::stride]
    return simulate
sim_model = get_forward_sim(
    params=params, zdot_func=zdot_model_func, runs=runs)
# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame L2 norm: flatten everything but the leading axis, then sqrt of row sums of squares."""
    flat_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error between trajectories, frame by frame."""
    diff = norm(ya - yp)
    scale = norm(ya) + norm(yp)
    return diff / scale
def Err(ya, yp):
    """Signed difference, actual minus predicted."""
    residual = ya - yp
    return residual
def AbsErr(ya, yp):
    """Per-frame L2 magnitude of the trajectory difference."""
    residual = ya - yp
    return norm(residual)
# Accumulators for position (Z) and momentum (P) errors, relative and absolute.
nexp = {
    "z_pred": [],
    "z_actual": [],
    "Zerr": [],
    "AbsZerr":[],
    "Perr": [],
    "AbsPerr": []
}
# Total wall-clock time of model rollouts (averaged over maxtraj at the end).
t=0.0
# Roll out the model from each stored window's first frame and compare
# against the corresponding reference slice.
for ind in range(maxtraj):
    print(f"Simulating trajectory {ind}/{maxtraj} ...")
    R, V = Rs[runs*ind], Vs[runs*ind]
    # Time only the learned-model rollout.
    start = time.time()
    z_pred_out = sim_model(R, V)
    x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
    zdot_pred_out = jax.vmap(zdot_model, in_axes=(
        0, 0, None))(x_pred_out, p_pred_out, params)
    _, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
    my_state_pred = States()
    my_state_pred.position = x_pred_out
    my_state_pred.velocity = p_pred_out
    my_state_pred.force = force_pred_out
    my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
    pred_traj = my_state_pred
    end = time.time()
    t += end - start
    # ll = [state for state in NVEStates(pred_traj)]
    # save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
    # if ind>20:
    #     break
    sim_size = runs
    # +1e-30 keeps later log-space statistics finite.
    nexp["z_pred"] += [pred_traj.position]
    nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
    nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)+1e-30]
    # nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
    nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]
    # Squared total momentum per frame, actual vs predicted.
    ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
    pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
    # nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
    # NOTE(review): the [6:] slice skips the first frames — confirm transient length.
    nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])+1e-30])
    nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)+1e-30])
    savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Plot each trajectory's error curve for `key`, then the log-space mean
    across trajectories with a 2-sigma multiplicative band."""
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    for i in range(len(nexp[key])):
        if semilog:
            plt.semilogy(nexp[key][i].flatten())
        else:
            plt.plot(nexp[key][i].flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))
    fig, axs = panel(1, 1)
    # First two frames dropped ([2:]) to skip start-up transients.
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)[2:]
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)[2:]
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
           yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
           yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")
# Mean per-trajectory rollout time, exported for cross-model comparison.
np.savetxt(f"../peridynamics-simulation-time/hgnn.txt", [t/maxtraj], delimiter = "\n")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
#            yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 12,346 | 27.983568 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-FGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in `namespace` bound to exactly this object (identity match)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving the name via namestr."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, if_noisy_data=0):
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"fgnn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
senders = jnp.array(senders)
receivers = jnp.array(receivers)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_delta(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
def change_R_V(N, dim):
def fn(Rs, Vs, params):
return Lmodel(Rs, Vs, params)
return fn
change_R_V_ = change_R_V(N, dim)
# Load the best (lowest-loss) trained parameters.
params = loadfile(f"trained_model_low.dil", trained=useN)[0]

def get_forward_sim_full_graph_network(params = None, run = runs):
    # JIT-compiled forward simulator: integrates (R, V) for `run` steps of
    # size `dt`, sampling every `stride` steps.
    @jit
    def fn(R, V):
        return predition2(R, V, params, change_R_V_, dt, masses, stride=stride, runs=run)
    return fn

sim_model = get_forward_sim_full_graph_network(params=params, run=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error ||ya-yp|| / (||ya|| + ||yp||), per frame."""
    num = norm(ya - yp)
    den = norm(ya) + norm(yp)
    return num / den
def Err(ya, yp):
    """Signed residual between actual `ya` and predicted `yp`."""
    residual = ya - yp
    return residual
def AbsErr(*args):
    """Elementwise absolute error; forwards its arguments to `Err`."""
    signed = Err(*args)
    return jnp.abs(signed)
def cal_energy_fn(lag=None, params=None):
    # Build a jitted evaluator returning per-frame [PE, KE, L, TE] for a
    # trajectory, where L is the Lagrangian and TE = KE + PE.
    @jit
    def fn(states):
        KE = vmap(kin_energy)(states.velocity)
        L = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        # PE recovered from L = KE - PE.
        PE = -(L - KE)
        return jnp.array([PE, KE, L, KE+PE]).T
    return fn

# Ground-truth energies from the analytical Lagrangian.
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
    # Jitted per-frame net-force evaluator over a whole trajectory.
    @jit
    def fn(states):
        return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
    return fn

net_force_orig_fn = net_force_fn(force=force_fn_orig)
# net_force_model_fn = net_force_fn(
# force=force_fn_model, params=params)
# Per-trajectory predictions and error metrics accumulated over all runs.
nexp = {
    "z_pred": [],
    "z_actual": [],
    "Zerr": [],
    "Herr": [],
    "E": [],
    "Perr": [],
}

trajectories = []

# Ground-truth forward simulator built from the analytical force function.
sim_orig2 = get_forward_sim(
    params=None, force_fn=force_fn_orig, runs=runs)

skip = 0  # number of trajectories skipped because the try-block failed
t = 0     # wall-clock seconds accumulated over learned-model rollouts only
for ind in range(maxtraj):
    # `skip` counts failed trajectories; the guard allows a few extra
    # indices beyond maxtraj to compensate for skips.
    if ind > maxtraj+skip:
        break
    _ind = ind*runs
    print(f"Simulating trajectory {ind}/{maxtraj} ...")

    # Initial state: first frame of the ind-th dataset trajectory.
    R = dataset_states[ind].position[0]
    V = dataset_states[ind].velocity[0]

    try:
        # Ground-truth rollout from the analytical force function.
        actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
        # Learned-model rollout, wall-clock timed.
        start = time.time()
        pred_traj = sim_model(R, V)
        end = time.time()
        t += end - start

        if saveovito:
            save_ovito(f"pred_{ind}.data", [
                state for state in NVEStates(pred_traj)], lattice="")
            save_ovito(f"actual_{ind}.data", [
                state for state in NVEStates(actual_traj)], lattice="")

        trajectories += [(actual_traj, pred_traj)]
        savefile("trajectories.pkl", trajectories)

        if plotthings:
            # Deliberate control-flow abort: energies cannot be computed
            # for FGN, so requesting plots jumps to the except handler.
            # Everything below the raise (in this branch) is dead code,
            # kept for reference.
            raise Warning("Cannot calculate energy in FGN")

            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")
                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                # Align predicted energy to the first actual frame.
                Es_pred = Es_pred - Es_pred[0] + Es[0]

                fig, axs = panel(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
                            lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")

                xlabel("Time step", ax=axs[0])
                xlabel("Time step", ax=axs[1])
                ylabel("Energy", ax=axs[0])
                ylabel("Energy", ax=axs[1])

                title = f"FGN {N}-Spring Exp {ind}"
                plt.title(title)
                plt.savefig(_filename(title.replace(
                    " ", "-")+f"_{key}_traj.png"))

                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)

                # One panel for the average plus one per particle.
                fig, axs = panel(1+R.shape[0], 1, figsize=(20,
                                 R.shape[0]*5), hshift=0.1, vs=0.35)
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Averaged over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(
                            axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(
                            net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Spring Exp {ind}")
                plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))

        # Energies of both rollouts under the *actual* Lagrangian.
        Es = Es_fn(actual_traj)
        H = Es[:, -1]
        L = Es[:, 2]

        Eshat = Es_fn(pred_traj)
        KEhat = Eshat[:, 1]
        Lhat = Eshat[:, 2]

        # The predicted Lagrangian is calibrated against the true one at
        # frame 5; the Hamiltonian follows from H = 2*KE - L.
        k = L[5]/Lhat[5]
        print(f"scalling factor: {k}")
        Lhat = Lhat*k
        Hhat = 2*KEhat - Lhat

        nexp["Herr"] += [RelErr(H, Hhat)]
        # NOTE(review): this appends Es and Eshat as two separate list
        # items (not a pair) — confirm downstream consumers expect that.
        nexp["E"] += [Es, Eshat]

        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)]
        nexp["Perr"] += [RelErr(actual_traj.velocity,
                                pred_traj.velocity)]

        fig, axs = panel(1, 2, figsize=(20, 5))
        axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
        plt.legend(bbox_to_anchor=(1, 1), loc=2)
        axs[0].set_facecolor("w")

        xlabel("Time step", ax=axs[0])
        xlabel("Time step", ax=axs[1])
        ylabel("Energy", ax=axs[0])
        ylabel("Energy", ax=axs[1])

        title = f"FGN {N}-Spring Exp {ind} pred traj"
        axs[1].set_title(title)
        title = f"FGN {N}-Spring Exp {ind} actual traj"
        axs[0].set_title(title)

        plt.savefig(
            _filename(f"FGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Narrowed to Exception (still catches the
        # deliberate Warning raised above, since Warning subclasses
        # Exception).
        print("skipped")
        # if skip < 20:
        skip += 1

savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Plot each trajectory's error curve for `key`, then the geometric mean
    with a 2-sigma log-space band; both figures are written to disk."""
    print(f"Plotting err for {key}")

    # Per-trajectory curves.
    fig, axs = panel(1, 1)
    for curve in nexp[key]:
        if semilog:
            plt.semilogy(curve.flatten())
        else:
            plt.plot(curve.flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))

    # Geometric mean +/- two standard deviations, computed in log space.
    fig, axs = panel(1, 1)
    logs = jnp.log(jnp.array(nexp[key]))
    mean_ = logs.mean(axis=0)
    std_ = logs.std(axis=0)
    y = jnp.exp(mean_)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
# Error plots for position (Zerr), energy (Herr) and momentum (Perr).
make_plots(nexp, "Zerr",
           yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
           yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
           yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")

# Geometric means across trajectories, exported for cross-model comparison.
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )

np.savetxt(f"../{N}-spring-zerr/fgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/fgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/fgnn.txt", gmean_perr, delimiter = "\n")

# Average wall-clock rollout time per trajectory.
np.savetxt(f"../{N}-spring-simulation-time/fgnn.txt", [t/maxtraj], delimiter = "\n")

print(skip)

main(N = 5)
# main(N = 50)
| 19,912 | 32.023217 | 225 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-HGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn1, lnn
from src.graph import *
# from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return all keys in `namespace` bound to exactly `obj` (identity match)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print `name: value` for each argument, resolving the name by identity
    lookup in `namespace` (default namespace is captured at definition time)."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def wrap_main(f):
    """Decorator: echo the call's arguments, then invoke `f` with an extra
    `config=(args, kwargs)` keyword that records exactly what was passed."""
    def fn(*args, **kwargs):
        captured = (args, kwargs)
        print("Configs: ")
        print("Args: ")
        for positional in args:
            print(positional)
        print("KwArgs: ")
        for name, value in kwargs.items():
            print(name, ":", value)
        return f(*args, **kwargs, config=captured)
    return fn
# N=3
# epochs=10000
# seed=42
# rname=True
# saveat=100
# error_fn="L2error"
# dt=1.0e-5
# ifdrag=0
# stride=1000
# trainm=1
# grid=False
# mpass=1
# lr=0.001
# withdata=None
# datapoints=None
# batch_size=100
# config=None
def Main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
         dt=1.0e-5, ifdrag=0, stride=1000, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, ifDataEfficiency=0, if_noisy_data = 1):
    """CLI entry point: forwards every option to `main` through `wrap_main`."""
    options = dict(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat,
                   error_fn=error_fn, dt=dt, ifdrag=ifdrag, stride=stride,
                   trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                   withdata=withdata, datapoints=datapoints,
                   batch_size=batch_size, ifDataEfficiency=ifDataEfficiency,
                   if_noisy_data=if_noisy_data)
    return wrap_main(main)(**options)
def main(N=5, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
         dt=1.0e-5, ifdrag=0, stride=1000, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=100, config=None, ifDataEfficiency = 0, if_noisy_data=1):
    """Train a Hamiltonian graph network (HGN) on the N-pendulum dataset.

    `config` carries the raw (args, kwargs) captured by `wrap_main`;
    `ifDataEfficiency` / `if_noisy_data` select experiment variants that
    change where the results are written.
    """
    if (ifDataEfficiency == 1):
        # Data-efficiency study: training-set size comes from the CLI.
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)

    print("Configs: ")
    pprint(N, epochs, seed, rname,
           dt, stride, lr, ifdrag, batch_size,
           namespace=locals())

    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"

    PSYS = f"{N}-Pendulum"
    TAG = f"hgn"

    # Pick the output root for this experiment variant.  FIX: the second
    # test is now `elif`, so a data-efficiency run with clean data no longer
    # falls through and overwrites out_dir (matches the if/elif/else chain
    # used by the sibling post-processing scripts).
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Resolve (and create) the artifact path: "data" artifacts always
        # live under ../results with run-id 2; everything else under out_dir.
        rstring = "2" if (tag == "data") else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)

        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"

        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
def displacement(a, b):
    """Free-space displacement vector from `b` to `a` (no periodic wrapping)."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Integrator shift: move positions by `dR`; velocities pass through unchanged."""
    return (R + dR, V)
def OUT(f):
    # Decorator: route `f`'s first argument (a file name) through _filename
    # so the I/O helpers below read/write inside the experiment directory.
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
# File-system helpers bound to this experiment's output directory.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)

loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)

savefile(f"config_{ifdrag}_{trainm}.pkl", config)

################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)

try:
    dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
    raise Exception("Generate dataset first. Use *-data.py file.")

if datapoints is not None:
    dataset_states = dataset_states[:datapoints]

model_states = dataset_states[0]
z_out, zdot_out = model_states

print(f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")

# Each state stacks positions and momenta: N2 = 2*N rows of dimension `dim`.
N2, dim = z_out.shape[-2:]
N = N2//2

species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))

# Flatten (trajectory, frame) into a single sample axis.
array = jnp.array([jnp.array(i) for i in dataset_states])
Zs = array[:, 0, :, :, :]
Zs_dot = array[:, 1, :, :, :]

Zs = Zs.reshape(-1, N2, dim)
Zs_dot = Zs_dot.reshape(-1, N2, dim)

if (if_noisy_data == 1):
    # Noisy-data variant: perturb each frame by one Gaussian scalar draw,
    # broadcast over the whole state (fixed seed for reproducibility).
    Zs = np.array(Zs)
    Zs_dot = np.array(Zs_dot)

    np.random.seed(100)
    for i in range(len(Zs)):
        Zs[i] += np.random.normal(0,1,1)
        Zs_dot[i] += np.random.normal(0,1,1)

    Zs = jnp.array(Zs)
    Zs_dot = jnp.array(Zs_dot)

# Shuffle, then 75/25 train/test split.
mask = np.random.choice(len(Zs), len(Zs), replace=False)
allZs = Zs[mask]
allZs_dot = Zs_dot[mask]

Ntr = int(0.75*len(Zs))
Nts = len(Zs) - Ntr

Zs = allZs[:Ntr]
Zs_dot = allZs_dot[:Ntr]

Zst = allZs[Ntr:]
Zst_dot = allZs_dot[Ntr:]

################################################
################## SYSTEM ######################
################################################

def phi(x):
    # Rigid-rod constraints: with the pivot prepended at the origin, each
    # consecutive squared bob distance must equal 1.
    X = jnp.vstack([x[:1, :]*0, x])
    return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0

constraints = get_constraints(N, dim, phi)
################################################
################### ML Model ###################
################################################
# Pendulum chain topology and canonical edge ordering.
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)

# MLP widths for the graph network's edge/node embedders and updaters.
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8

Hparams = dict(
    ee_params=initialize_mlp([edgesize, ee], key),
    ne_params=initialize_mlp([nodesize, ne], key),
    e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
    n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
    g_params=initialize_mlp([ne, *hidden_dim, 1], key),
    # acc_params=initialize_mlp([ne, *hidden_dim, 2*dim], key),
    l_params=initialize_mlp([ne, *hidden_dim, 1], key),
)

# Template state used to size the graph below.
Z = Zs[0]
R, V = jnp.split(Zs[0], 2, axis=0)

species = jnp.array(species).reshape(-1, 1)

def dist(*args):
    # Scalar Euclidean length of a displacement vector.
    disp = displacement(*args)
    return jnp.sqrt(jnp.square(disp).sum())

dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
def acceleration_fn(params, graph):
    """Graph-network readout used as the model output here (via fgn1.cal_l,
    single message pass); despite the name, no time derivative is taken."""
    return fgn1.cal_l(params, graph, mpass=1)
def acc_fn(species):
    # Template jraph graph captured by the closure; its node/edge dicts are
    # mutated in place on every call (GraphsTuple fields here are dicts).
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def apply(R, V, params):
        # Refresh positions, velocities and edge lengths, then evaluate
        # the learned model on the updated graph.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

# Learned Hamiltonian evaluated with the "H" parameter group.
def Hmodel(x, v, params): return apply_fn(x, v, params["H"])

params = {"H": Hparams}

def nndrag(v, params):
    # Learned drag: magnitude from an MLP, always anti-parallel to v.
    return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

if ifdrag == 0:
    print("Drag: 0.0")

    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: nn")

    def drag(x, v, params):
        return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

params["drag"] = initialize_mlp([1, 5, 5, 1], key)

# Hamiltonian equations of motion; note drag and constraints are passed as
# None here, so the learned drag above is not used by the dynamics.
zdot, lamda_force = get_zdot_lambda(N, dim, hamiltonian=Hmodel, drag=None, constraints=None)
zdot = jit(zdot)
v_acceleration_fn_model = vmap(zdot, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
LOSS=MSE

@jit
def loss_fn(params, Rs, Vs, Zs_dot):
    # Mean-squared error between predicted and target state derivatives.
    pred = v_acceleration_fn_model(Rs, Vs, params)
    zdot_pred = pred #jnp.hstack(jnp.split(pred, 2, axis=2))
    return LOSS(zdot_pred, Zs_dot)

@jit
def gloss(*args):
    # Loss value together with its gradient w.r.t. params.
    return value_and_grad(loss_fn)(*args)

opt_init, opt_update_, get_params = optimizers.adam(lr)

@ jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients (NaN -> 0, clip to +/-1000) before the Adam step.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(
        partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)

@jit
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value

@ jit
def step(i, ps, *args):
    # One optimizer step from the packed state `ps = (opt_state, params, _)`.
    return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each array in `args` into equal-size batches along axis 0.

    When `size` is given, chooses between ceil/floor batch counts so that
    the number of retained samples is maximized; trailing remainder samples
    may be dropped.  Returns one stacked array (nbatches, size, ...) per
    input; with `size=None` everything becomes a single batch.
    """
    L = len(args[0])
    if size is None:
        nbatches, size = 1, L
    else:
        n_hi = int((L - 0.5) // size) + 1
        n_lo = max(1, n_hi - 1)
        s_hi = int(L / n_hi)
        s_lo = int(L / n_lo)
        # Keep whichever (count, size) pair covers more samples; ties go
        # to the smaller batch count.
        if s_hi * n_hi > s_lo * n_lo:
            size, nbatches = s_hi, n_hi
        else:
            size, nbatches = s_lo, n_lo
    return [jnp.array([arg[i*size:(i+1)*size] for i in range(nbatches)])
            for arg in args]
# Split stacked states into positions / velocities for train and test sets.
Rs, Vs = jnp.split(Zs, 2, axis=1)
Rst, Vst = jnp.split(Zst, 2, axis=1)

bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                             size=min(len(Rs), batch_size))

# bZs_dot.shape
# bRs.shape

print(f"training ...")

opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []        # training-loss history (full training set)
ltarray = []       # test-loss history
last_loss = 1000   # best training loss so far, for checkpointing

larray += [loss_fn(params, Rs, Vs, Zs_dot)]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]

def print_loss():
    # Log current train/test loss for this epoch.
    print(
        f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")

print_loss()

start = time.time()
train_time_arr = []
for epoch in range(epochs):
    # One pass over all mini-batches.
    for data in zip(bRs, bVs, bZs_dot):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)

    # optimizer_step += 1
    # opt_state, params, l_ = step(
    #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)

    if epoch % saveat == 0:
        # Evaluate on the full train/test sets and log the losses.
        larray += [loss_fn(params, Rs, Vs, Zs_dot)]
        ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
        print_loss()

    if epoch % saveat == 0:
        metadata = {
            "savedat": epoch,
            "mpass": mpass,
            "grid": grid,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)

        # Keep a separate checkpoint of the best model seen so far.
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)

        fig, axs = panel(1, 1)
        plt.semilogy(larray[1:], label="Training")
        plt.semilogy(ltarray[1:], label="Test")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))

    # Cumulative wall-clock time since training started, one entry per epoch.
    now = time.time()
    train_time_arr.append((now - start))
# Final loss curves and model checkpoint after training completes.
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))

metadata = {
    "savedat": epoch,
    "mpass": mpass,
    "grid": grid,
    "ifdrag": ifdrag,
    "trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
         params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
         (larray, ltarray), metadata=metadata)

if (ifDataEfficiency == 0):
    # Export timing/loss curves for the cross-model comparison tables.
    np.savetxt("../3-pendulum-training-time/hgn.txt", train_time_arr, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/hgn-train.txt", larray, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/hgn-test.txt", ltarray, delimiter = "\n")
Main()
| 14,684 | 29.030675 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-FGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """All keys in `namespace` whose value is identically `obj`."""
    return [key for key, val in namespace.items() if val is obj]
def pprint(*args, namespace=globals()):
    """Echo each argument as `name: value`, with the name found by identity
    search in `namespace` (default bound at definition time)."""
    for item in args:
        print(f"{namestr(item, namespace)[0]}: {item}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Per-frame columns parsed from the peridynamics dump files (one list
# entry per sampled timestep, 251 frames uniformly spanning steps 0..5000).
acceleration = []
damage = []
particle_id = []    # renamed from `id`: stop shadowing the builtin
mass = []
position = []
particle_type = []  # renamed from `type`: stop shadowing the builtin
velocity = []
volume = []

import pandas as pd

for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    # Rows are whitespace-separated records; split into typed columns.
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    particle_id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    particle_type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
# Stack the parsed frames into (frames, particles, 3) arrays.
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)

# Reference (undeformed) configuration: the first frame scaled back by
# 1.1 — presumably the stretch applied by the data generator; TODO confirm.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement vector pointing from `b` towards `a` (no PBC handling)."""
    return -(b - a)
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Neighborhood graph built on the reference configuration (cutoff 3.0).
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)

senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']

# --- Run configuration for the post-processing / evaluation below ---
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=100
semilog=1
maxtraj=10
plotthings=True
redo=0

# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
#        dt, stride, lr, ifdrag, batch_size,
#        namespace=locals())

PSYS = f"peridynamics"
TAG = f"FGNN"
out_dir = f"../results"

randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    # Build the artifact path, inserting the drag/train suffixes before the
    # extension; `trained` points at a model trained on a different system.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."

    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[0]}"
    else:
        psys = PSYS

    # Splice the suffix into the file name just before its extension.
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]

    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")

    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"

    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Apply displacement `dR` to positions; velocities are returned as-is."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    # Decorator: route `f`'s first argument (a file name) through _filename
    # so the I/O helpers below operate inside the experiment directory.
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
# File-system helpers bound to this experiment's output directory.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)

loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)

################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)

# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")

species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)

################################################
################## SYSTEM ######################
################################################
# peridynamics_sim

# Baseline trajectory loaded from the MCGNODE test dump — presumably the
# ground-truth reference used for error computation below; TODO confirm.
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []

import pandas as pd

for num in range(1000):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
    split_df = df.iloc[1:,0].str.split(expand=True)
    origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    origin_mass += [np.array(split_df[[5]]).astype('float64')]
    origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
    origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]

origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)

print(origin_Rs.shape)
#sys.exit()
################################################
################### ML Model ###################
################################################
def dist(*args):
    """L2 length of the displacement between the given points."""
    disp = displacement(*args)
    return jnp.sqrt(jnp.sum(disp ** 2))
# dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
R, V = Rs[0], Vs[0]

# Strip helper-only keys and wrap the stored dict as a jraph graph.
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})

graph = jraph.GraphsTuple(**my_graph0_disc)

def acceleration_fn(params, graph):
    # Learned per-node acceleration from the full graph network.
    acc = fgn.cal_delta(params, graph, mpass=1)
    return acc

def acc_fn(species):
    # Closure over the (mutable) template graph: refresh node/edge features
    # for each (R, V) before evaluating the model.
    state_graph = graph

    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply

apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

# Model acceleration evaluated with the "L" parameter group.
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# constraints=None)
def force_fn_model(R, V, params, mass=None):
    """Model acceleration for state (R, V).

    `mass` is accepted for interface compatibility but has no effect: both
    branches of the original mass dispatch returned the bare model output
    (the mass-scaled variant was left commented out).
    """
    return acceleration_fn_model(R, V, params)
# Best checkpoint of the trained graph-network model.
params = loadfile(f"perignode_trained_model_low.dil")[0]

def get_forward_sim(params=None, force_fn=None, runs=10):
    # JIT-compiled rollout of `runs` steps with fixed dt/stride.
    @jit
    def fn(R, V):
        return predition2(R, V, params, force_fn, dt, masses, stride=stride, runs=runs)
    return fn

sim_model = get_forward_sim(params=params, force_fn=force_fn_model, runs=runs)

# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
    """Relative error normalized by the sum of both norms (per frame)."""
    scale = norm(ya) + norm(yp)
    return norm(ya - yp) / scale
def Err(ya, yp):
    """Signed error: actual minus predicted."""
    delta = ya - yp
    return delta
def AbsErr(ya, yp):
    """Per-frame L2 magnitude of the error between `ya` and `yp`."""
    diff = ya - yp
    return norm(diff)
# Accumulated metrics across all evaluated trajectories.
nexp = {
    "z_pred": [],
    "z_actual": [],
    "Zerr": [],
    "AbsZerr":[],
    "Perr": [],
    "AbsPerr": []
}

import time
t=0.0  # wall-clock seconds accumulated over model rollouts
for ind in range(maxtraj):
    print(f"Simulating trajectory {ind}/{maxtraj} ...")

    # Roll out from the stored frame at each trajectory boundary.
    R, V = Rs[runs*ind], Vs[runs*ind]
    start = time.time()
    pred_traj = sim_model(R, V)
    end = time.time()
    t+=end-start

    # ll = [state for state in NVEStates(pred_traj)]
    # save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
    # if ind>20:
    #     break

    sim_size = runs

    nexp["z_pred"] += [pred_traj.position]
    nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
    nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
    # nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
    nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]

    # Squared total momentum per frame (masses are uniform ones here).
    ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
    pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
    # nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
    # The first 6 frames are excluded from the velocity error — TODO confirm why.
    nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])])
    nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)])

savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Save two figures for metric `key`: every trajectory's curve, and the
    log-space geometric mean with a 2-sigma band."""
    print(f"Plotting err for {key}")

    fig, axs = panel(1, 1)
    for series in nexp[key]:
        if semilog:
            plt.semilogy(series.flatten())
        else:
            plt.plot(series.flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))

    fig, axs = panel(1, 1)
    log_vals = jnp.log(jnp.array(nexp[key]))
    mean_ = log_vals.mean(axis=0)
    std_ = log_vals.std(axis=0)
    y = jnp.exp(mean_)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
# Position and momentum error plots, then the mean rollout time export.
make_plots(nexp, "Zerr",
           yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
           yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")

np.savetxt(f"../peridynamics-simulation-time/fgnn.txt", [t/maxtraj], delimiter = "\n")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
# yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 10,978 | 27.740838 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-LGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn, fgn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
plt.rcParams["font.family"] = "Arial"
def namestr(obj, namespace):
    """List of names in `namespace` referring to the very object `obj`."""
    return list(filter(lambda name: namespace[name] is obj, namespace))
def pprint(*args, namespace=globals()):
    """Echo `name: value` for each argument (identity lookup in `namespace`)."""
    for obj in args:
        name = namestr(obj, namespace)[0]
        print(f"{name}: {obj}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
    # Post-processing / evaluation of a trained LGN on the N-spring system.
    if (ifDataEfficiency == 1):
        # Data-efficiency study: dataset size is read from the CLI.
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)

    if useN is None:
        useN = N

    print("Configs: ")
    pprint(dt, stride, ifdrag,
           namespace=locals())

    PSYS = f"{N}-Spring"
    TAG = f"lgn"

    # Output root depends on the experiment variant being evaluated.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    # Build (and create) the artifact path; the drag/train suffix is
    # spliced in before the extension.  `trained` selects a model that was
    # trained on a different system size.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."

    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS

    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]

    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")

    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)

    # Raw data always lives under ../results; everything else under out_dir.
    if (tag == "data"):
        filename_prefix = f"../results/{psys}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"

    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Vector from `b` to `a`; free space, no periodic image convention."""
    delta = a - b
    return delta
def shift(R, dR, V):
    """Translate positions by `dR`, carrying velocities along unchanged."""
    shifted = R + dR
    return shifted, V
def OUT(f):
    # Decorator: route `f`'s first argument (a file name) through _filename
    # so the I/O helpers below operate inside the experiment directory.
    @wraps(f)
    def func(file, *args, tag=TAG, trained=None, **kwargs):
        return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
    return func
def _fileexist(f):
    """Return True iff *f* exists on disk and the enclosing `redo` flag is off.

    When `redo` is truthy, every cached artefact is reported as missing so it
    gets regenerated. Collapsed from an if/else that returned booleans into a
    single boolean expression (same behavior).
    """
    return (not redo) and os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
senders = jnp.array(senders)
receivers = jnp.array(receivers)
R = model_states.position[0]
V = model_states.velocity[0]
print(f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# Ground-truth potential energy: SPRING pair potential summed over every
# (sender, receiver) edge of the chain/grid graph.
# NOTE(review): dr is the SQUARED pair distance (no sqrt) and is fed to
# lnn.SPRING directly — confirm SPRING expects r**2 rather than r.
def pot_energy_orig(x):
dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(lnn._T, mass=masses)
# Analytical Lagrangian L = T - V used to generate reference trajectories.
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# Constant external force: set element (1, 1) of a zero force field to -1.
# NOTE(review): jax.ops.index_update was removed from newer JAX releases;
# on modern JAX this would be `F.at[1, 1].set(-1.0)` — confirm the pinned
# JAX version still ships jax.ops. (Currently inert anyway: main() passes
# external_force=None to accelerationFull below.)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
# Ground-truth dissipation, selected by the `ifdrag` flag: either no drag or
# linear drag -0.1*v.
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
# Wrap the Euler-Lagrange acceleration as a force function: with a mass
# vector the acceleration is scaled per particle, otherwise returned as-is.
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# Factory returning a jitted forward simulator: integrates (R, V) with
# `predition` using the supplied force function. `stride` and `runs` are
# forwarded as-is — see src.md.predition for their exact semantics.
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
################################################
################### ML Model ###################
################################################
# Euclidean length of the displacement between two points (used for the
# per-edge distance feature dij).
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
# Graph-network readout whose value is used as the model Lagrangian via
# Lmodel below (see fgn.cal_lgn for the message-passing details).
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
# Build an apply(R, V, params) closure over a template graph whose node
# positions/velocities and edge distances are refreshed on every call.
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
# Mutate the captured template's node/edge dicts in place; dij is
# recomputed from the new positions.
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# Model Lagrangian evaluated with the "L" sub-tree of the parameter pytree.
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
# Learned drag: -|MLP(v)| * v, so the force always opposes the velocity
# (dissipative by construction).
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame Euclidean norm: flatten all but the leading axis, 2-norm each row."""
    flat = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat.sum(axis=1))


def RelErr(ya, yp):
    """Symmetric relative error between trajectories *ya* and *yp*."""
    numerator = norm(ya - yp)
    denominator = norm(ya) + norm(yp)
    return numerator / denominator


def Err(ya, yp):
    """Signed error *ya* - *yp*."""
    return ya - yp


def AbsErr(*args):
    """Element-wise absolute error |ya - yp|."""
    return jnp.abs(Err(*args))
# Build a jitted per-trajectory energy evaluator. Output columns per frame
# are [PE, KE, L, KE+PE], with PE recovered from the Lagrangian as -(L - KE).
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
# Build a jitted evaluator mapping a whole trajectory of states to the
# per-frame forces produced by `force`.
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[ind].position[0]
V = dataset_states[ind].velocity[0]
actual_traj = sim_orig2(R, V)
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"LGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(_filename(f"LGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
savefile(f"error_parameter.pkl", nexp)
# Plot every trajectory's error curve for nexp[key], then a summary figure
# with the geometric mean and a multiplicative +/- 2-sigma band (statistics
# are taken in log space and exponentiated back).
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
# Log-space mean/std across trajectories -> geometric mean and band.
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/lgn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/lgn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/lgn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/lgn.txt", [t/maxtraj], delimiter = "\n")
main(N = 5)
| 18,363 | 31.676157 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-HGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
# Return all names in `namespace` bound to the object `obj` (identity match).
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
# Debug helper: print each argument as "<name>: <value>", resolving the name
# via namestr. The namespace default is captured at definition time.
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
print(Rs.shape)
print(Fs.shape)
Zs_dot = jnp.concatenate([Vs,Fs], axis=1)
print(Zs_dot.shape)
#sys.exit()
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Free-space displacement of *a* relative to *b* (no periodic wrapping)."""
    delta = a - b
    return delta
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
epochs=10000
seed=42
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"HGN"
out_dir = f"../results"
# Build (and mkdir -p the directory of) an output path under
# {out_dir}/peridynamics-<tag>/<run-id>/. The run id is a timestamp when
# rname is set, otherwise "0" (optionally suffixed with `withdata`).
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
# def displacement(a, b):
# return a - b
# Position update helper: advance R by dR; velocities pass through unchanged.
def shift(R, dR, V):
return R+dR, V
# Decorator: reroute a helper's file-path argument through _filename so the
# save/load helpers below operate inside the per-run output directory.
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allZs_dot = Zs_dot[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Zs_dot = allZs_dot[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Zst_dot = allZs_dot[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
dim = 3
# Ef = 1 # eij dim
# Nf = dim
# Oh = 1
# Eei = 8
# Nei = 8
# Nei_ = 5 ##Nei for mass
# hidden = 8
# nhidden = 2
# def get_layers(in_, out_):
# return [in_] + [hidden]*nhidden + [out_]
# def mlp(in_, out_, key, **kwargs):
# return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# fneke_params = initialize_mlp([Oh, Nei], key)
# fne_params = initialize_mlp([Oh, Nei], key)
# fb_params = mlp(Ef, Eei, key)
# fv_params = mlp(Nei+Eei, Nei, key)
# fe_params = mlp(Nei, Eei, key)
# ff1_params = mlp(Eei, 1, key)
# ff2_params = mlp(Nei, 1, key)
# ff3_params = mlp(dim+Nei, 1, key)
# ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
# mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
# Hparams = dict(fb=fb_params,
# fv=fv_params,
# fe=fe_params,
# ff1=ff1_params,
# ff2=ff2_params,
# ff3=ff3_params,
# fne=fne_params,
# fneke=fneke_params,
# ke=ke_params,
# mass=mass_params)
# #params = {"Fqqdot": Fparams}
# def H_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=None,
# useT=True)
# return T + V
# def graph_force_fn(params, graph):
# _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
# useT=True)
# return _GForce
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
graph = jraph.GraphsTuple(**my_graph0_disc)
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Hparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = graph
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["H"])
params = {"H": Hparams}
# def energy_fn(species):
# state_graph = graph
# def apply(R, V, params):
# state_graph.nodes.update(position=R)
# state_graph.nodes.update(velocity=V)
# return H_energy_fn(params, state_graph)
# return apply
# apply_fn = energy_fn(species)
# v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# def Hmodel(x, v, params):
# return apply_fn(x, v, params["H"])
# params = {"H": Hparams}
def nndrag(v, params):
    """Learned drag force: -|MLP(v)| * v, always opposing the velocity."""
    return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

# Dissipation selected by the `ifdrag` flag: none, or the learned nndrag net.
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    # Fixed stale message: this branch applies the learned nndrag network,
    # not -0.1*v (the sibling LGNN script prints "Drag: nn" for the same
    # branch).
    print("Drag: nn")
    def drag(x, v, params):
        # One scalar drag per velocity component, via vmap over v.
        return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# # constraints=None)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
print(zdot_model(R,V, params))
# sys.exit()
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Zs_dot):
# MSE between predicted and target phase-space derivatives z_dot.
pred = v_zdot_model(Rs, Vs, params)
return MSE(pred, Zs_dot)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
# Loss value together with its gradient w.r.t. params.
def gloss(*args):
return value_and_grad(loss_fn)(*args)
# NOTE(review): the loss__ parameter is accepted but never used.
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
# Sanitized optimizer step: NaN gradients are zeroed and all gradients are
# clipped to [-1000, 1000] before the Adam update.
@jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each sequence in *args* into equal-sized stacked minibatches.

    Chooses between ceil(L/size) batches and one fewer, picking whichever
    covers more elements with equal-length slices; trailing elements that do
    not fill a slice are dropped. With size=None everything becomes a single
    batch. Returns one jnp.array per input, shaped (nbatches, batch, ...).
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        n_hi = int((total - 0.5) // size) + 1
        n_lo = max(1, n_hi - 1)
        s_hi = int(total / n_hi)
        s_lo = int(total / n_lo)
        if s_hi * n_hi > s_lo * n_lo:
            size, nbatches = s_hi, n_hi
        else:
            size, nbatches = s_lo, n_lo
    return [
        jnp.array([arg[i * size:(i + 1) * size] for i in range(nbatches)])
        for arg in args
    ]
bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bZs_dot):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), Rs, Vs, Zs_dot)
larray += [l_]
ltarray += [loss_fn(params, Rst, Vst ,Zst_dot)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 10 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/hgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/hgn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/hgn-test.txt", ltarray, delimiter = "\n")
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/hgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/hgn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/hgn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 14,681 | 27.019084 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-LGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
return a - b
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=10
semilog=1
maxtraj=10
plotthings=True
redo=0
mpass=1
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
PSYS = f"peridynamics"
TAG = f"LGNN"
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[0]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []
import pandas as pd
for num in range(1000):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
origin_mass += [np.array(split_df[[5]]).astype('float64')]
origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)
################################################
################### ML Model ###################
################################################
# Graph readout predicting generalized forces from (q, q_dot) — see
# src.graph.a_gnode_cal_force_q_qdot for the message-passing details.
def graph_force_fn(params, graph):
_GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
useT=True)
return _GForce
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
# Model Lagrangian from the graph network. With trainm the kinetic term T is
# learned alongside V; otherwise T is the analytic 0.5*m*v^2 from lnn._T.
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=None,
useT=True, useonlyedge=False)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=None,
useT=True, useonlyedge=False)
return kin_energy(graph.nodes["velocity"]) - V
# Close over the template graph and refresh its node positions/velocities on
# every call. NOTE(review): the `species` argument is never used here.
def energy_fn(species):
state_graph = graph
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
# Model Lagrangian evaluated with the "L" sub-tree of the parameter pytree.
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# Build the model acceleration from the learned Lagrangian (no constraints).
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# constraints=None)
def force_fn_model(R, V, params, mass=None):
# NOTE(review): both branches currently return the same value -- the
# mass scaling is commented out below, so `mass` is effectively ignored.
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)
# return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"perignode_trained_model_low.dil")[0]
def get_forward_sim(params=None, force_fn=None, runs=10):
# Returns a jitted closure that integrates `runs` steps with `predition`.
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_model = get_forward_sim(params=params, force_fn=force_fn_model, runs=runs)
# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame L2 norm: flatten all but the leading axis of `a` and
    return sqrt of the summed squares, one scalar per leading entry."""
    squared = jnp.square(a)
    flat = squared.reshape(len(squared), -1)
    return jnp.sqrt(flat.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error: ||ya - yp|| scaled by ||ya|| + ||yp||."""
    numerator = norm(ya - yp)
    denominator = norm(ya) + norm(yp)
    return numerator / denominator
def Err(ya, yp):
    """Signed pointwise error between actual (`ya`) and predicted (`yp`)."""
    difference = ya - yp
    return difference
def AbsErr(ya, yp):
    """Unsigned per-frame error magnitude, via the shared `norm` helper."""
    diff = ya - yp
    return norm(diff)
# Accumulate per-trajectory error metrics; saved incrementally below.
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"AbsZerr":[],
"Perr": [],
"AbsPerr": []
}
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# Each trajectory starts from a stored ground-truth state `runs` apart.
R, V = Rs[runs*ind], Vs[runs*ind]
pred_traj = sim_model(R, V)
# ll = [state for state in NVEStates(pred_traj)]
# save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
# if ind>20:
# break
sim_size = runs
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
# nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]
# Squared total momentum per frame (velocities summed over particles).
ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
# nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
# NOTE(review): Perr skips the first 6 frames -- presumably a warm-up
# transient; confirm against how the metric is reported downstream.
nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])])
nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)])
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
"""Plot every per-trajectory curve for `key`, then the geometric mean
with a 2-sigma log-space band. Uses `semilog`, `panel`, `_filename`
from the enclosing scope."""
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
# Statistics are taken in log space, then mapped back with exp, so the
# plotted curve is the geometric mean and the band is multiplicative.
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
# Emit the position- and momentum-error plots; other metrics disabled.
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
# yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 11,922 | 27.93932 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-HGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
#from sympy import fu
import time
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn1, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.hamiltonian import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in `namespace` bound to exactly `obj` (identity test)."""
    matches = []
    for candidate in namespace:
        if namespace[candidate] is obj:
            matches.append(candidate)
    return matches
def pprint(*args, namespace=globals()):
    """Print each value as "<name>: <value>", resolving the name by identity.

    NOTE: the default ``namespace=globals()`` is captured once at definition
    time; callers normally pass ``namespace=locals()`` explicitly.
    """
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
def main(N=3, dt=1.0e-5, useN=3, withdata=None, datapoints=100, mpass=1, grid=False, stride=1000, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
"""Post-process a trained HGN model on the N-pendulum: simulate
trajectories, compare to ground truth, and save error metrics/plots."""
if useN is None:
useN = N
if (ifDataEfficiency == 1):
# Data-efficiency sweep: data-point count comes from the CLI.
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"hgn"
# Output root depends on which experiment mode is active.
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
"""Build (and mkdir) the experiment-specific output path for `name`."""
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
# Load artifacts trained on a (possibly different) system size.
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
    """Free-space displacement from `b` to `a` (no periodic wrapping)."""
    delta = a - b
    return delta
def shift(R, dR, V):
    """Advance positions by `dR`; velocities pass through unchanged."""
    return (R + dR, V)
def OUT(f):
"""Decorator: route a file-taking function through `_filename` so all
I/O lands in the experiment output directory."""
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
# When `redo` is set, pretend nothing exists so everything recomputes.
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dim=2
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# senders = jnp.array(senders)
# receivers = jnp.array(receivers)
# z_out, zdot_out = model_states
# R,V = jnp.split(z_out[0], 2, axis=0)
# print(
# f"Total number of training data points: {len(dataset_states)}x{z_out.shape[0]}")
# N2, dim = z_out.shape[-2:]
# N=int(N2/2)
# species = jnp.zeros(N, dtype=int)
# masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
pot_energy_orig = PEF
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, v, params):
# Ground-truth Hamiltonian: kinetic + pendulum potential energy.
return kin_energy(v) + pot_energy_orig(x)
def phi(x):
# Holonomic constraint: each link (including the anchor at the origin,
# prepended as a zero row) must have unit squared length.
X = jnp.vstack([x[:1, :]*0, x])
return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
constraints = get_constraints(N, dim, phi)
def external_force(x, v, params):
# NOTE(review): defined but not wired into get_zdot_lambda below.
# Also, jax.ops.index_update was removed from newer JAX releases; the
# modern equivalent is F.at[1, 1].set(-1.0).
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
# Ground-truth phase-space dynamics z' = f(z) with constraint forces.
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=constraints)
def zdot_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot(x, p, params)
def z0(x, p):
# Stack positions over momenta into one phase-space column.
return jnp.vstack([x, p])
# t = jnp.linspace(0.0, runs*dt, runs)
# ode.odeint(zdot_func, z0(R, V), t)
def get_forward_sim(params=None, zdot_func=None, runs=10):
"""Return a simulator integrating runs*stride fine steps, subsampled
back to `runs` frames."""
def fn(R, V):
t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
_z_out = ode.odeint(zdot_func, z0(R, V), t, params)
return _z_out[0::stride]
return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=runs)
# z_out = sim_orig(R, V)
# print(z_out)
# def simGT():
# print("Simulating ground truth ...")
# _traj = sim_orig(R, V)
# metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
# savefile("gt_trajectories.pkl",
# _traj, metadata=metadata)
# return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
def dist(*args):
# Euclidean length of the displacement between two points.
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
def acceleration_fn(params, graph):
# fgn1.cal_l evaluates the learned Hamiltonian/graph network output.
acc = fgn1.cal_l(params, graph, mpass=1)
return acc
def acc_fn(species):
"""Build a closure that refreshes node/edge features in-place on the
template graph and evaluates the learned model."""
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
# Pairwise distances must be recomputed for the new positions.
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["H"])
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
def nndrag(v, params):
# Learned drag, constrained to oppose velocity via -|MLP(v)| * v.
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# NOTE(review): this overwrites any trained drag weights with a freshly
# initialized MLP; drag=None below means it is unused anyway -- confirm.
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force = get_zdot_lambda(N, dim, hamiltonian=Hmodel, drag=None, constraints=None)
zdot_model = jit(zdot_model)
def zdot_model_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot_model(x, p, params)
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
print(sim_model(R,V).shape)
print(sim_orig(R,V).shape)
################################################
############## forward simulation ##############
################################################
def norm(a):
# Per-frame L2 norm: flatten all but the leading axis.
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
# Symmetric relative error, bounded in [0, 1].
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def caH_energy_fn(lag=None, params=None):
"""Return a function mapping a trajectory to per-frame [PE, KE, H, TE]
columns, where PE is recovered as H - KE."""
def fn(states):
KE = vmap(kin_energy)(states.velocity)
H = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = (H - KE)
# return jnp.array([H]).T
return jnp.array([PE, KE, H, KE+PE]).T
return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
# Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
# Es_pred_fn(pred_traj)
def net_force_fn(force=None, params=None):
"""Return a function extracting the momentum-derivative half of zdot
(i.e. the net force) along a trajectory."""
def fn(states):
zdot_out = vmap(force, in_axes=(0, 0, None))(
states.position, states.velocity, params)
_, force_out = jnp.split(zdot_out, 2, axis=1)
return force_out
return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
# Per-trajectory metrics; saved incrementally every 10 trajectories.
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
# NOTE(review): `skip` is never incremented because the except handler
# below is commented out; the final print always reports 0.
skip=0
t=0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# z_out, _ = dataset_states[ind]
# xout, pout = jnp.split(z_out, 2, axis=1)
# R = xout[0]
# V = pout[0]
# try:
# Ground-truth rollout, then wrap positions/momenta/forces in a States
# container so downstream code can treat both trajectories uniformly.
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
# Model rollout; only this part is timed (wall clock).
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
nexp["simulation_time"] += [end-start]
t += end -start
if saveovito:
# Only the first trajectory is dumped for OVITO visualization.
if ind<1:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<1:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 1, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
# # axs[0].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs[0])
# ylabel("Energy", ax=axs[0])
# title = f"(HGNN) {N}-Pendulum Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(
# " ", "-")+f"_{key}.png")) # , dpi=500)
# One panel for the system-averaged force plus one per particle.
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
# , dpi=500)
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"HGNN {N}-Pendulum Exp {ind}"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png")) # , dpi=500)
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
# First frame of each error series is copied from the second to avoid
# the degenerate 0/0 value at t=0 (identical initial states).
herrrr = RelErr(H, Hhat)
herrrr = herrrr.at[0].set(herrrr[1])
nexp["Herr"] += [herrrr]
# NOTE(review): this appends TWO entries (Es and Eshat) per trajectory.
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
zerrrr = RelErr(actual_traj.position, pred_traj.position)
zerrrr = zerrrr.at[0].set(zerrrr[1])
nexp["Zerr"] += [zerrrr]
# actual_traj.velocity[1:]
# print(actual_traj.velocity[1:], pred_traj.velocity[1:])
# perrrr = RelErr(actual_traj.velocity[1:], pred_traj.velocity[1:])
# perrrr = perrrr.at[0].set(perrrr[1])
# nexp["Perr"] += [perrrr]
# Momentum error stored as a signed difference of squared total momenta.
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [ac_mom - pr_mom]
if ind%10==0:
savefile("trajectories.pkl", trajectories)
savefile(f"error_parameter.pkl", nexp)
# except:
# print("skipped")
# if skip < 20:
# skip += 1
print(f'skipped loop: {skip}')
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
"""Plot each trajectory's `key` series (optionally against `key2`),
then the geometric mean with a 2-sigma band computed in log space."""
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
filepart = f"{key}"
for i in range(len(nexp[key])):
y = nexp[key][i].flatten()
if key2 is None:
x = range(len(y))
else:
x = nexp[key2][i].flatten()
filepart = f"{filepart}_{key2}"
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.ylabel(yl)
plt.xlabel(xl)
plt.savefig(_filename(f"RelError_{filepart}.png")) # , dpi=500)
fig, axs = panel(1, 1)
# Geometric-mean curve: stats in log space, mapped back with exp.
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png")) # , dpi=500)
# Final summary plots plus geometric-mean error curves exported as text.
make_plots(
nexp, "Zerr", yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-pendulum-zerr/hgn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/hgn.txt", gmean_herr, delimiter = "\n")
# t/maxtraj = mean wall-clock simulation time per trajectory.
np.savetxt(f"../{N}-pendulum-simulation-time/hgn.txt", [t/maxtraj], delimiter = "\n")
main()
# main(N = 4)
# main(N = 5)
| 20,724 | 32.481422 | 248 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-GNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List all keys of `namespace` whose value is `obj` itself (identity)."""
    found = [label for label, bound in namespace.items() if bound is obj]
    return found
def pprint(*args, namespace=globals()):
    """Print "<name>: <value>" for each argument, resolving names by identity.

    NOTE: the default ``namespace=globals()`` binds once at definition time;
    callers typically pass ``namespace=locals()`` explicitly.
    """
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search = 0, nhidden = 2, if_mpass_search = 0, mpass = 1, if_lr_search = 0, lr = 0.001, if_act_search = 0, if_noisy_data=1):
"""Post-process a trained GNODE model on the N-pendulum: roll out
model vs ground-truth trajectories and save error metrics/plots."""
if (ifDataEfficiency == 1):
# Data-efficiency sweep: data-point count comes from the CLI.
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"gnode"
# Output root depends on which hyperparameter/experiment sweep is active.
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_hidden_search == 1):
out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
out_dir = f"../mlp_nhidden_search"
elif (if_mpass_search == 1):
out_dir = f"../mpass_search"
elif (if_lr_search == 1):
out_dir = f"../lr_search"
elif (if_act_search == 1):
out_dir = f"../act_search"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
"""Build (and mkdir) the experiment-specific output path for `name`."""
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
# Load artifacts trained on a (possibly different) system size.
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
elif (if_hidden_search == 1):
rstring = "0_" + str(hidden)
elif (if_nhidden_search == 1):
rstring = "0_" + str(nhidden)
elif (if_mpass_search == 1):
rstring = "0_" + str(mpass)
elif (if_lr_search == 1):
rstring = "0_" + str(lr)
elif (if_act_search == 1):
rstring = "0_" + str("softplus")
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
    """Displacement vector pointing from `b` toward `a` (free space)."""
    offset = a - b
    return offset
def shift(R, dR, V):
    """Return positions advanced by `dR`, with velocities untouched."""
    updated = R + dR
    return updated, V
def OUT(f):
"""Decorator: route a file-taking function through `_filename` so all
I/O lands in the experiment output directory."""
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained),
*args, **kwargs)
return func
def _fileexist(f):
# When `redo` is set, pretend nothing exists so everything recomputes.
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
# Ground-truth Lagrangian: kinetic minus pendulum potential energy.
return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
# Jacobian of the holonomic pendulum constraints w.r.t. flat positions.
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
# NOTE(review): defined but passed as external_force=None below.
# jax.ops.index_update was removed from newer JAX; modern equivalent
# is F.at[1, 1].set(-1.0).
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
# Ground-truth accelerations from the Euler-Lagrange equations.
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
# Returns a jitted closure integrating `runs` frames with `predition`.
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
"""Simulate and cache the full ground-truth trajectory (unused path)."""
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
def graph_force_fn(params, graph):
# GNODE predicts forces directly from (q, qdot) node features.
_GForce = cdgnode_cal_force_q_qdot(params, graph, eorder=eorder,
useT=True)
return _GForce
def _force_fn(species):
"""Build a closure that refreshes node features on a template graph
and evaluates the learned force network."""
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return graph_force_fn(params, state_graph)
return apply
apply_fn = _force_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=None,
non_conservative_forces=None)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
# Sanity print of the learned force at the initial state.
print(F_q_qdot(R, V, params))
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
# Per-frame L2 norm: flatten all but the leading axis.
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
# Symmetric relative error, bounded in [0, 1].
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
"""Return a jitted function mapping a trajectory to per-frame
[PE, KE, L, TE] columns, where PE is recovered as KE - L."""
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
"""Return a jitted function evaluating `force` along a trajectory."""
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
# Per-trajectory metrics; saved incrementally every 10 trajectories.
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t=0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj}")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
# Fresh random initial condition per trajectory.
R, V = get_init(N, dim=dim, angles=(-90, 90))
# R = dataset_states[ind].position[0]
# V = dataset_states[ind].velocity[0]
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
# Only the model rollout is timed (wall clock).
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+= end - start
nexp["simulation_time"] += [end-start]
if saveovito:
# Only the first 5 trajectories are dumped for OVITO visualization.
if ind<5:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<5:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 2, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
# # axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs)
# ylabel("Energy", ax=axs)
# title = f"{N}-Pendulum Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
# One panel for the system-averaged force plus one per particle.
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"{N}-Pendulum Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"{N}-Pendulum Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
# Total energy is the last column of the [PE, KE, L, TE] table.
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
# NOTE(review): this appends TWO entries (Es and Eshat) per trajectory.
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
# nexp["Perr"] += [RelErr(actual_traj.velocity,
# pred_traj.velocity)]
# Momentum error stored as a signed difference of squared total momenta.
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [ac_mom - pr_mom]
if ind%10==0:
savefile(f"error_parameter.pkl", nexp)
savefile("trajectories.pkl", trajectories)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
    """Plot per-trajectory curves of nexp[key] plus a log-space mean/2-std band.

    Saves RelError_<filepart>.png (all trajectories overlaid) and
    RelError_std_<key>.png (geometric mean with a multiplicative 2-sigma
    envelope, computed in log space).

    Args:
        nexp: dict of per-trajectory metric arrays.
        key: metric plotted on the y-axis.
        yl, xl: axis labels.
        key2: optional metric supplying the x values (defaults to time index).
    """
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    filepart = f"{key}"
    # BUG FIX: the original appended "_{key2}" once per trajectory inside the
    # loop, producing file names like "Zerr_k2_k2_k2_...". Append it once.
    if key2 is not None:
        filepart = f"{filepart}_{key2}"
    for i in range(len(nexp[key])):
        y = nexp[key][i].flatten()
        if key2 is None:
            x = range(len(y))
        else:
            x = nexp[key2][i].flatten()
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.savefig(_filename(f"RelError_{filepart}.png"))
    fig, axs = panel(1, 1)
    # Statistics are taken in log space: exp(mean(log x)) is the geometric
    # mean; the band is exp(mean +/- 2*std).
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-pendulum-zerr/gnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/gnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-simulation-time/gnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 3)
main(N = 4)
main(N = 5)
| 19,024 | 33.403255 | 355 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-FGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is the very object *obj*."""
    matches = []
    for key in namespace:
        if namespace[key] is obj:
            matches.append(key)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>" via reverse lookup in *namespace*.

    The default binds the module globals() dict at definition time; pass an
    explicit namespace (e.g. locals()) to label local variables.
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Per-field snapshot buffers, one list entry per saved timestep.
# NOTE(review): `id` and `type` shadow Python builtins; left unchanged since
# later code refers to these names.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
# Load 251 equally spaced snapshots (steps 0..5000) of the peridynamics run.
# Each .jld.data file holds whitespace-separated columns; the slices below
# imply the layout 0-2 acceleration, 3 damage, 4 id, 5 mass, 6-8 position,
# 9 type, 10-12 velocity, 13 volume -- confirm against the data generator.
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    # Skip the first row and split each remaining line on whitespace.
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
# Stack the snapshots into (timesteps, particles, 3) device arrays.
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Reference configuration scaled down by 1.1 -- presumably undoing a
# pre-stretch in the stored data; TODO confirm.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement between two positions in free (non-periodic) space."""
    delta = a - b
    return delta
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
epochs=10000
seed=42
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"FGNODE"
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and mkdir) the output path `<out_dir>/<PSYS>-<tag>/<rstring>/<name>`.

    rstring is the timestamped run id when `rname` is set (never for raw
    data); otherwise "0", suffixed with `withdata` when that is given.
    """
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    # Ensure the target directory exists before anything writes to it.
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the "//" introduced by the prefix's trailing slash.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Advance positions by dR; velocities pass through unchanged."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    """Decorator: route a filename-taking function through _filename().

    The wrapped callable receives the resolved path instead of the bare
    name; the keyword-only `tag` selects the output subdirectory.
    """
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
dim = 3
# Ef = dim # eij dim
# Nf = dim
# Oh = 1
# Eei = 8
# Nei = 8
# Nei_ = 5 ##Nei for mass
# hidden = 8
# nhidden = 2
# def get_layers(in_, out_):
# return [in_] + [hidden]*nhidden + [out_]
# def mlp(in_, out_, key, **kwargs):
# return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# fneke_params = initialize_mlp([Oh, Nei], key)
# fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
# fb_params = mlp(Ef, Eei, key) #
# fv_params = mlp(Nei+Eei, Nei, key) #
# fe_params = mlp(Nei, Eei, key) #
# ff1_params = mlp(Eei, dim, key)
# ff2_params = mlp(Nei, dim, key) #
# ff3_params = mlp(Nei, dim, key)
# ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
# mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
# Fparams = dict(fb=fb_params,
# fv=fv_params,
# fe=fe_params,
# ff1=ff1_params,
# ff2=ff2_params,
# ff3=ff3_params,
# fne=fne_params,
# fneke=fneke_params,
# ke=ke_params,
# mass=mass_params)
# params = {"Fqqdot": Fparams}
# def graph_force_fn(params, graph):
# _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
# useT=True)
# return _GForce
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
graph = jraph.GraphsTuple(**my_graph0_disc)
# def _force_fn(species):
# state_graph = graph
# def apply(R, V, params):
# state_graph.nodes.update(position=R)
# state_graph.nodes.update(velocity=V)
# return graph_force_fn(params, state_graph)
# return apply
# apply_fn = _force_fn(species)
# # v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# apply_fn(R, V, Fparams)
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# # constraints=None)
def dist(*args):
    """Euclidean length of the displacement between two points."""
    delta_sq = jnp.square(displacement(*args))
    return jnp.sqrt(delta_sq.sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
)
def acceleration_fn(params, graph):
    """Per-node acceleration from the full graph network (single message pass)."""
    acc = fgn.cal_acceleration(params, graph, mpass=1)
    return acc
def acc_fn(species):
    """Return apply(R, V, params) that evaluates the GNN on a state.

    NOTE(review): `state_graph` aliases the module-level `graph`; the
    .update() calls mutate its node/edge dicts in place on every call.
    `species` itself is unused inside the closure.
    """
    state_graph = graph
    def apply(R, V, params):
        # Refresh node features and recompute edge distances for this state.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
print(acceleration_fn_model(R, V, params))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    """Mean-squared error between predicted and reference accelerations."""
    pred = v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
def gloss(*args):
    """Loss value together with its gradient w.r.t. the parameters."""
    value_and_grad_fn = value_and_grad(loss_fn)
    return value_and_grad_fn(*args)
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters.

    Returns (new_opt_state, new_params, loss_value). `loss__` is unused.
    """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@jit
def step(i, ps, *args):
    """Jitted wrapper: unpack the (opt_state, params, loss) tuple into update()."""
    return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def opt_update(i, grads_, opt_state):
    """Apply one Adam step after sanitizing the gradients.

    NaNs are zeroed and every gradient is clipped to [-1000, 1000] --
    presumably to keep training stable; confirm the bound against the paper.
    """
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split parallel arrays into equal-size batches along the leading axis.

    Args:
        *args: sequences/arrays sharing the same leading length ``L``.
        size: requested batch size. When ``None`` everything goes into one
            batch. Otherwise the batch count is chosen from the two values
            bracketing ``L/size`` so that equal-sized batches cover as many
            samples as possible; any leftover trailing samples are silently
            dropped.

    Returns:
        A list with one ``jnp.ndarray`` of shape ``(nbatches, size, ...)``
        per input.
    """
    L = len(args[0])
    if size is not None:  # PEP 8: compare to None with `is`, not `!=`
        # Candidate batch counts just above/below L/size.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        # Keep whichever candidate covers more samples.
        if size1 * nbatches1 > size2 * nbatches2:
            size, nbatches = size1, nbatches1
        else:
            size, nbatches = size2, nbatches2
    else:
        nbatches = 1
        size = L
    # Stack the per-batch slices for every input array.
    return [jnp.array([arg[i * size:(i + 1) * size] for i in range(nbatches)])
            for arg in args]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
larray += [l_]
ltarray += [loss_fn(params, Rst, Vst ,Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 10 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/fgnode.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/fgnode-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/fgnode-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,050 | 27.066667 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-data.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init, get_init_spring)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def main(N1=5, N2=1, dim=2, grid=False, saveat=100, runs=100, nconfig=100, ifdrag=0):
if N2 is None:
N2 = N1
N = N1*N2
tag = f"{N}-Spring-data"
seed = 42
out_dir = f"../results"
rname = False
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0_10000"
filename_prefix = f"{out_dir}/{tag}/{rstring}/"
def _filename(name):
    """Resolve `name` under `filename_prefix`, creating the directory first."""
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the "//" left by the prefix's trailing slash.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free-space displacement vector from b to a (no periodic wrapping)."""
    return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, **kwargs):
return f(_filename(file), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
init_confs = [chain(N)[:2]
for i in range(nconfig)]
_, _, senders, receivers = chain(N)
# if grid:
# senders, receivers = get_connections(N1, N2)
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
R, V = init_confs[0]
print("Saving init configs...")
savefile(f"initial-configs_{ifdrag}.pkl",
init_confs, metadata={"N1": N1, "N2": N2})
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
dt = 1.0e-3
stride = 100
lr = 0.001
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
    """Total spring potential over all graph edges for configuration x.

    NOTE(review): `dr` is the *squared* edge length (no sqrt); lnn.SPRING is
    presumably written for that input -- confirm against src/lnn.py.
    """
    dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
    return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
    """Ground-truth Lagrangian L = T - V; `params` is unused."""
    return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
    """Constant external load: unit force along -y on particle 1.

    Note: unused in this script (the integrator below is built with
    external_force=None); kept for parity with the other experiments.
    """
    F = 0*R
    # jax.ops.index_update was deprecated and removed from JAX; the
    # functional .at[].set() form is the supported replacement.
    F = F.at[(1, 1)].set(-1.0)
    return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
@jit
def forward_sim(R, V):
return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
@jit
def v_forward_sim(init_conf):
return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
################################################
############### DATA GENERATION ################
################################################
print("Data generation ...")
ind = 0
dataset_states = []
for R, V in init_confs:
ind += 1
print(f"{ind}/{len(init_confs)}", end='\r')
model_states = forward_sim(R, V)
dataset_states += [model_states]
if ind % saveat == 0:
print(f"{ind} / {len(init_confs)}")
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
def cal_energy(states):
    """Per-frame [PE, KE, L, TE] for a trajectory, shape (T, 4)."""
    KE = vmap(kin_energy)(states.velocity)
    PE = vmap(pot_energy_orig)(states.position)
    L = vmap(Lactual, in_axes=(0, 0, None))(
        states.position, states.velocity, None)
    return jnp.array([PE, KE, L, KE+PE]).T
print("plotting energy...")
ind = 0
for states in dataset_states:
ind += 1
Es = cal_energy(states)
fig, axs = plt.subplots(1, 1, figsize=(20, 5))
plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
plt.legend(bbox_to_anchor=(1, 1))
plt.ylabel("Energy")
plt.xlabel("Time step")
title = f"{N}-Spring random state {ind}"
plt.title(title)
plt.savefig(
_filename(title.replace(" ", "_")+".png"), dpi=300)
save_ovito(f"dataset_{ind}.data", [
state for state in NVEStates(states)], lattice="")
if ind >= 10:
break
fire.Fire(main)
| 6,624 | 28.444444 | 97 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-LGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn, fgn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
plt.rcParams["font.family"] = "Arial"
def namestr(obj, namespace):
    """Return the names in *namespace* bound to the exact object *obj*."""
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>" via reverse lookup in *namespace*."""
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"lgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{psys}-{tag}/0/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
# senders = jnp.array(senders)
# receivers = jnp.array(receivers)
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
# return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame L2 norm: all axes but the leading one are flattened."""
    squared = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(squared.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error between trajectories ya and yp."""
    numerator = norm(ya - yp)
    denominator = norm(ya) + norm(yp)
    return numerator / denominator
def Err(ya, yp):
    """Signed difference (actual minus predicted)."""
    diff = ya - yp
    return diff
def AbsErr(*args):
    """Elementwise |ya - yp|, delegating to Err."""
    signed = Err(*args)
    return jnp.abs(signed)
def cal_energy_fn(lag=None, params=None):
    """Build a jitted fn(states) -> (T, 4) array of [PE, KE, L, TE] per frame.

    PE is recovered from the supplied Lagrangian as -(L - KE), so it is a
    potential only up to whatever constant `lag` absorbs.
    """
    @jit
    def fn(states):
        KE = vmap(kin_energy)(states.velocity)
        L = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        PE = -(L - KE)
        return jnp.array([PE, KE, L, KE+PE]).T
    return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[0].position[ind*69]
V = dataset_states[0].velocity[ind*69]
actual_traj = sim_orig2(R, V)
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"LGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(_filename(f"LGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-nbody-zerr/lgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/lgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/lgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/lgnn.txt", [t/maxtraj], delimiter = "\n")
# Script entry point: run the full evaluation pipeline for the 4-body system.
main(N = 4)
| 19,621 | 31.922819 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CHGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn1, lnn
from src.graph import *
# from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is the very object *obj*.

    Identity (``is``), not equality, is used, so distinct-but-equal objects
    do not match. Used by ``pprint`` to recover a variable's name.
    """
    matches = []
    for candidate in namespace:
        if namespace[candidate] is obj:
            matches.append(candidate)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as ``<name>: <value>``.

    The name is recovered by identity lookup in *namespace* via ``namestr``.
    NOTE: the default ``globals()`` is evaluated once at definition time,
    i.e. it is this module's global namespace; callers that want their own
    scope must pass ``namespace=locals()`` explicitly.
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def wrap_main(f):
    """Wrap *f* so the call first echoes its arguments, then forwards them.

    The wrapper prints every positional and keyword argument, then calls
    ``f(*args, **kwargs, config=(args, kwargs))`` so the callee receives a
    record of exactly how it was invoked.
    """
    def wrapped(*pos, **kw):
        snapshot = (pos, kw)
        print("Configs: ")
        print(f"Args: ")
        for item in pos:
            print(item)
        print(f"KwArgs: ")
        for key, val in kw.items():
            print(key, ":", val)
        return f(*pos, **kw, config=snapshot)
    return wrapped
# N=3
# epochs=10000
# seed=42
# rname=True
# saveat=100
# error_fn="L2error"
# dt=1.0e-5
# ifdrag=0
# stride=1000
# trainm=1
# grid=False
# mpass=1
# lr=0.001
# withdata=None
# datapoints=None
# batch_size=100
# config=None
def Main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
         dt=1.0e-5, ifdrag=0, stride=1000, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_noisy_data = 1):
    """CLI-facing entry point for CHGN training on the N-pendulum system.

    Thin wrapper: routes all arguments through ``wrap_main`` (which prints
    the configuration and injects ``config=(args, kwargs)``) into ``main``.

    Bug fix: the forwarded call previously hard-coded ``ifDataEfficiency = 0``,
    so a caller requesting the data-efficiency run was silently ignored.
    The parameter is now passed through; the default of 0 preserves the
    previous behavior for all existing callers.
    """
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat, error_fn=error_fn,
                           dt=dt, ifdrag=ifdrag, stride=stride, trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size,
                           ifDataEfficiency=ifDataEfficiency, if_noisy_data=if_noisy_data)
def main(N=5, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
         dt=1.0e-5, ifdrag=0, stride=1000, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=100, config=None, ifDataEfficiency = 0, if_noisy_data=1):
    """Train a constrained Hamiltonian graph network (CHGN) on N-pendulum data.

    Loads the pre-generated dataset (``model_states_{ifdrag}.pkl``),
    optionally adds Gaussian noise, builds a graph-network Hamiltonian whose
    constrained dynamics come from ``get_zdot_lambda`` (pendulum link-length
    constraints via ``phi``), fits predicted state derivatives to the data
    with Adam, and periodically checkpoints the model and loss curves.

    NOTE(review): defaults here differ from ``Main`` (e.g. N=5 vs 3); the
    wrapper always supplies explicit values, so these only matter for direct
    calls.
    """
    # Data-efficiency mode: dataset size comes from the command line.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(N, epochs, seed, rname,
           dt, stride, lr, ifdrag, batch_size,
           namespace=locals())
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-Pendulum"
    TAG = f"chgn"
    # Output root depends on which experiment variant is being run.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"
    def _filename(name, tag=TAG):
        # Build (and create) the run-specific output path for `name`.
        # rstring = randfilename if (rname and (tag != "data")) else (
        #     "2" if (tag == "data") or (withdata == None) else f"{withdata}")
        rstring = "2" if (tag == "data") else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
    def displacement(a, b):
        # Free-space displacement (no periodic boundary).
        return a - b
    def shift(R, dR, V):
        return R+dR, V
    def OUT(f):
        # Redirect the first (filename) argument of f through _filename.
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func
    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    savefile(f"config_{ifdrag}_{trainm}.pkl", config)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        raise Exception("Generate dataset first. Use *-data.py file.")
    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]
    model_states = dataset_states[0]
    z_out, zdot_out = model_states
    print(
        f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
    # Each state stacks positions and momenta: N2 = 2*N rows of dimension `dim`.
    N2, dim = z_out.shape[-2:]
    N = N2//2
    species = jnp.zeros((N, 1), dtype=int)
    masses = jnp.ones((N, 1))
    array = jnp.array([jnp.array(i) for i in dataset_states])
    Zs = array[:, 0, :, :, :]
    Zs_dot = array[:, 1, :, :, :]
    Zs = Zs.reshape(-1, N2, dim)
    Zs_dot = Zs_dot.reshape(-1, N2, dim)
    # Noisy-data variant: add a (fixed-seed) scalar Gaussian offset per sample.
    if (if_noisy_data == 1):
        Zs = np.array(Zs)
        Zs_dot = np.array(Zs_dot)
        np.random.seed(100)
        for i in range(len(Zs)):
            Zs[i] += np.random.normal(0,1,1)
            Zs_dot[i] += np.random.normal(0,1,1)
        Zs = jnp.array(Zs)
        Zs_dot = jnp.array(Zs_dot)
    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Zs), len(Zs), replace=False)
    allZs = Zs[mask]
    allZs_dot = Zs_dot[mask]
    Ntr = int(0.75*len(Zs))
    Nts = len(Zs) - Ntr
    Zs = allZs[:Ntr]
    Zs_dot = allZs_dot[:Ntr]
    Zst = allZs[Ntr:]
    Zst_dot = allZs_dot[Ntr:]
    ################################################
    ################## SYSTEM ######################
    ################################################
    def phi(x):
        # Holonomic constraints: each link (including the hinge at the
        # origin) has unit squared length.
        X = jnp.vstack([x[:1, :]*0, x])
        return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
    constraints = get_constraints(N, dim, phi)
    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)
    hidden_dim = [16, 16]
    edgesize = 1
    nodesize = 5
    ee = 8
    ne = 8
    # MLP parameter pytree for the graph-network Hamiltonian.
    Hparams = dict(
        ee_params=initialize_mlp([edgesize, ee], key),
        ne_params=initialize_mlp([nodesize, ne], key),
        e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
        n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
        g_params=initialize_mlp([ne, *hidden_dim, 1], key),
        # acc_params=initialize_mlp([ne, *hidden_dim, 2*dim], key),
        l_params=initialize_mlp([ne, *hidden_dim, 1], key),
    )
    Z = Zs[0]
    R, V = jnp.split(Zs[0], 2, axis=0)
    species = jnp.array(species).reshape(-1, 1)
    def dist(*args):
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())
    dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
    def acceleration_fn(params, graph):
        acc = fgn1.cal_l(params, graph, mpass=1)
        return acc
    def acc_fn(species):
        # Template graph; apply() mutates its node/edge dicts in place
        # with the current positions/velocities before evaluation.
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})
        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply
    apply_fn = acc_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
    def Hmodel(x, v, params): return apply_fn(x, v, params["H"])
    params = {"H": Hparams}
    def nndrag(v, params):
        # Learned drag: always opposes velocity (magnitude from an MLP).
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: nn")
        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
        params["drag"] = initialize_mlp([1, 5, 5, 1], key)
    # NOTE(review): drag is defined above but drag=None is passed here, so
    # the learned drag never enters the dynamics — confirm this is intended.
    zdot, lamda_force = get_zdot_lambda(N, dim, hamiltonian=Hmodel, drag=None, constraints=constraints)
    zdot = jit(zdot)
    v_acceleration_fn_model = vmap(zdot, in_axes=(0, 0, None))
    ################################################
    ################## ML Training #################
    ################################################
    #LOSS = getattr(src.models, error_fn)
    LOSS=MSE
    # pred = v_acceleration_fn_model(bRs[0], bVs[0], params)
    # pred.shape
    # zdot_pred = jnp.vstack(jnp.split(pred, 2, axis=2))
    @jit
    def loss_fn(params, Rs, Vs, Zs_dot):
        # MSE between predicted and reference state derivatives.
        pred = v_acceleration_fn_model(Rs, Vs, params)
        zdot_pred = pred #jnp.hstack(jnp.split(pred, 2, axis=2))
        return LOSS(zdot_pred, Zs_dot)
    @jit
    def gloss(*args):
        return value_and_grad(loss_fn)(*args)
    opt_init, opt_update_, get_params = optimizers.adam(lr)
    @ jit
    def opt_update(i, grads_, opt_state):
        # Sanitize gradients: NaN -> 0, then clip to [-1000, 1000].
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        grads_ = jax.tree_map(
            partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)
    @jit
    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value
    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)
    def batching(*args, size=None):
        # Split each array in `args` into near-equal batches of ~`size`.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs
    Rs, Vs = jnp.split(Zs, 2, axis=1)
    Rst, Vst = jnp.split(Zst, 2, axis=1)
    bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                                 size=min(len(Rs), batch_size))
    # bZs_dot.shape
    # bRs.shape
    print(f"training ...")
    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []
    ltarray = []
    last_loss = 1000
    larray += [loss_fn(params, Rs, Vs, Zs_dot)]
    ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
    def print_loss():
        print(
            f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
    print_loss()
    start = time.time()
    train_time_arr = []
    # Main training loop: every `saveat` epochs record losses, checkpoint
    # the model (keeping the best-so-far separately), and replot the curve.
    for epoch in range(epochs):
        for data in zip(bRs, bVs, bZs_dot):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
        # optimizer_step += 1
        # opt_state, params, l_ = step(
        #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
        if epoch % saveat == 0:
            larray += [loss_fn(params, Rs, Vs, Zs_dot)]
            ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
            print_loss()
        if epoch % saveat == 0:
            metadata = {
                "savedat": epoch,
                "mpass": mpass,
                "grid": grid,
                "ifdrag": ifdrag,
                "trainm": trainm,
            }
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata=metadata)
            if last_loss > larray[-1]:
                last_loss = larray[-1]
                savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                         params, metadata=metadata)
            fig, axs = panel(1, 1)
            plt.semilogy(larray[1:], label="Training")
            plt.semilogy(ltarray[1:], label="Test")
            plt.xlabel("Epoch")
            plt.ylabel("Loss")
            plt.legend()
            plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
        now = time.time()
        train_time_arr.append((now - start))
    # Final loss plot and checkpoint after training completes.
    fig, axs = panel(1, 1)
    plt.semilogy(larray[1:], label="Training")
    plt.semilogy(ltarray[1:], label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    metadata = {
        "savedat": epoch,
        "mpass": mpass,
        "grid": grid,
        "ifdrag": ifdrag,
        "trainm": trainm,
    }
    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata=metadata)
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata=metadata)
    if (ifDataEfficiency == 0):
        np.savetxt("../3-pendulum-training-time/chgn.txt", train_time_arr, delimiter = "\n")
        np.savetxt("../3-pendulum-training-loss/chgn-train.txt", larray, delimiter = "\n")
        np.savetxt("../3-pendulum-training-loss/chgn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(Main)
# Script entry point: run training with the default configuration.
Main()
| 14,712 | 28.843813 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CHGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
# from shadow.plot import *
# from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List the names in *namespace* bound to exactly the object *obj*.

    Matching is by identity (``is``), not equality; helper for ``pprint``.
    """
    found = []
    for key in namespace:
        if namespace[key] is obj:
            found.append(key)
    return found
def pprint(*args, namespace=globals()):
    """Print ``<name>: <value>`` for every argument.

    Names are recovered by identity lookup via ``namestr``. The default
    ``globals()`` is bound at definition time (this module's globals);
    pass ``namespace=locals()`` to resolve names in the caller's scope.
    """
    for item in args:
        name = namestr(item, namespace)[0]
        print(f"{name}: {item}")
def main(N=3, dim=2, dt=1.0e-5,stride=1000, useN=3, ifdrag=0, seed=100, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
    """Post-training evaluation of the CHGNN model on the N-pendulum system.

    Rebuilds the ground-truth constrained Hamiltonian dynamics, loads the
    trained model (``trained_model_low.dil`` from the ``useN``-pendulum run),
    simulates ``maxtraj`` trajectory pairs from random initial conditions,
    and records/plots relative errors in trajectory (Zerr) and energy (Herr),
    plus OVITO dumps and timing.
    """
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(dt, ifdrag, namespace=locals())
    PSYS = f"{N}-Pendulum"
    TAG = f"chgnn"
    # Output root depends on which experiment variant is being run.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"
    def _filename(name, tag=TAG, trained=None):
        # Insert the drag/training suffix before the extension and build
        # (and create) the run-specific output path.
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + \
            part + name.split(".")[-1]
        rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
    def OUT(f):
        # Redirect the first (filename) argument of f through _filename.
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained),
                     *args, **kwargs)
        return func
    def _fileexist(f):
        if redo:
            return False
        else:
            return os.path.isfile(f)
    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    fileexist = OUT(_fileexist)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    # dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
    # model_states = dataset_states[0]
    # R = model_states.position[0]
    # V = model_states.velocity[0]
    # print(
    #     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N, dim = model_states.position.shape[-2:]
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    V = V
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)
    ################################################
    ################## SYSTEM ######################
    ################################################
    # Ground-truth Hamiltonian: unit masses, analytic pendulum potential.
    pot_energy_orig = PEF
    kin_energy = partial(src.hamiltonian._T, mass=masses)
    def Hactual(x, p, params):
        return kin_energy(p) + pot_energy_orig(x)
    def phi(x):
        # Holonomic constraints: each link (including the hinge at the
        # origin) has unit squared length.
        X = jnp.vstack([x[:1, :]*0, x])
        return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
    constraints = get_constraints(N, dim, phi)
    def external_force(x, v, params):
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, p, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*p")
        def drag(x, p, params):
            # return -0.1 * (p*p).sum()
            return (-0.1*p).reshape(-1,1)
    # NOTE(review): drag/external_force are defined above but None is passed
    # here, so neither enters the dynamics — confirm this is intended.
    zdot, lamda_force = get_zdot_lambda(
        N, dim, hamiltonian=Hactual, drag=None, constraints=constraints, external_force=None)
    def zdot_func(z, t, params):
        x, p = jnp.split(z, 2)
        return zdot(x, p, params)
    def z0(x, p):
        return jnp.vstack([x, p])
    def get_forward_sim(params=None, zdot_func=None, runs=10):
        # ODE-integrate z=(x, p) and subsample every `stride` steps.
        def fn(R, V):
            t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
            _z_out = ode.odeint(zdot_func, z0(R, V), t, params)
            return _z_out[0::stride]
        return fn
    sim_orig = get_forward_sim(
        params=None, zdot_func=zdot_func, runs=maxtraj*runs)
    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)
    def H_energy_fn(params, graph):
        # Learned Hamiltonian = graph-predicted PE + KE.
        g, g_PE, g_KE = cal_graph(params, graph, eorder=eorder,
                                  useT=True)
        return g_PE + g_KE
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def energy_fn(species):
        # Template graph; apply() mutates its node dict in place with the
        # current positions/velocities before evaluation.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})
        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return H_energy_fn(params, state_graph)
        return apply
    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
    def Hmodel(x, v, params):
        return apply_fn(x, v, params["H"])
    zdot_model, lamda_force_model = get_zdot_lambda(
        N, dim, hamiltonian=Hmodel, drag=None, constraints=constraints)
    def zdot_model_func(z, t, params):
        x, p = jnp.split(z, 2)
        return zdot_model(x, p, params)
    # Load the best checkpoint of the model trained on the `useN` system.
    params = loadfile(f"trained_model_low.dil", trained=useN)[0]
    sim_model = get_forward_sim(
        params=params, zdot_func=zdot_model_func, runs=runs)
    # z_model_out = sim_model(R, V)
    ################################################
    ############## forward simulation ##############
    ################################################
    def norm(a):
        # Per-timestep L2 norm over all particle coordinates.
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))
    def RelErr(ya, yp):
        # Symmetric relative error in [0, 1].
        return norm(ya-yp) / (norm(ya) + norm(yp))
    def Err(ya, yp):
        return ya-yp
    def AbsErr(*args):
        return jnp.abs(Err(*args))
    def caH_energy_fn(lag=None, params=None):
        # Per-timestep [PE, KE, H, KE+PE] from the given Hamiltonian `lag`.
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            H = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = (H - KE)
            # return jnp.array([H]).T
            return jnp.array([PE, KE, H, KE+PE]).T
        return fn
    Es_fn = caH_energy_fn(lag=Hactual, params=None)
    Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
    # Es_pred_fn(pred_traj)
    def net_force_fn(force=None, params=None):
        # Extract the momentum-derivative half of zdot as the net force.
        def fn(states):
            zdot_out = vmap(force, in_axes=(0, 0, None))(
                states.position, states.velocity, params)
            _, force_out = jnp.split(zdot_out, 2, axis=1)
            return force_out
        return fn
    net_force_orig_fn = net_force_fn(force=zdot)
    net_force_model_fn = net_force_fn(force=zdot_model, params=params)
    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }
    trajectories = []
    sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
    t=0.0
    # One iteration per evaluation trajectory: simulate ground truth and
    # model from the same random start, then accumulate error metrics.
    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")
        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs
        R, V = get_init(N, dim=dim, angles=(-90, 90))
        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]
        z_actual_out = sim_orig2(R, V)  # full_traj[start_:stop_]
        x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
        zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
            x_act_out, p_act_out, None)
        _, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
        my_state = States()
        my_state.position = x_act_out
        my_state.velocity = p_act_out
        my_state.force = force_act_out
        my_state.mass = jnp.ones(x_act_out.shape[0])
        actual_traj = my_state
        # Time only the model rollout (reported as mean wall time at the end).
        start = time.time()
        z_pred_out = sim_model(R, V)
        x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
        zdot_pred_out = jax.vmap(zdot_model, in_axes=(
            0, 0, None))(x_pred_out, p_pred_out, params)
        _, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
        my_state_pred = States()
        my_state_pred.position = x_pred_out
        my_state_pred.velocity = p_pred_out
        my_state_pred.force = force_pred_out
        my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
        pred_traj = my_state_pred
        end = time.time()
        t+= end - start
        # def get_hinge(x):
        #     return jnp.append(x, jnp.zeros([1, 2]), axis=0)
        # h_actual_traj = actual_traj
        # h_actual_traj.position = jax.vmap(
        #     get_hinge, in_axes=0)(actual_traj.position)
        # h_actual_traj.velocity = jax.vmap(
        #     get_hinge, in_axes=0)(actual_traj.velocity)
        # h_actual_traj.force = jax.vmap(get_hinge, in_axes=0)(actual_traj.force)
        if saveovito:
            # if ind < 1:
            save_ovito(f"pred_{ind}.data", [
                state for state in NVEStates(pred_traj)], lattice="")
            save_ovito(f"actual_{ind}.data", [
                state for state in NVEStates(actual_traj)], lattice="")
            # else:
            #     pass
        trajectories += [(actual_traj, pred_traj)]
        savefile("trajectories.pkl", trajectories)
        # Optional per-trajectory diagnostic plots (first trajectory only).
        if plotthings:
            if ind < 1:
                for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                    print(f"plotting energy ({key})...")
                    Es = Es_fn(traj)
                    Es_pred = Es_pred_fn(traj)
                    # Shift the learned energies so both curves share the
                    # same starting value (learned H is gauge-free).
                    Es_pred = Es_pred - Es_pred[0] + Es[0]
                    fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                    axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
                                lw=6, alpha=0.5)
                    axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                    plt.legend(bbox_to_anchor=(1, 1), loc=2)
                    axs[0].set_facecolor("w")
                    plt.xlabel("Time step")
                    plt.ylabel("Energy")
                    title = f"(CHGNN) {N}-Pendulum Exp {ind}"
                    plt.title(title)
                    plt.savefig(
                        _filename(title.replace(" ", "-")+f"_{key}.png"))
                    net_force_orig = net_force_orig_fn(traj)
                    net_force_model = net_force_model_fn(traj)
                    fig, axs = plt.subplots(1+R.shape[0], 1, figsize=(20,
                                                                      R.shape[0]*5), hshift=0.1, vs=0.35)
                    for i, ax in zip(range(R.shape[0]+1), axs):
                        if i == 0:
                            ax.text(0.6, 0.8, "Averaged over all particles",
                                    transform=ax.transAxes, color="k")
                            ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                    r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                            ax.plot(net_force_model.sum(
                                axis=1), "--", color="k")
                            ax.plot([], "--", c="k", label="Predicted")
                        else:
                            ax.text(0.6, 0.8, f"For particle {i}",
                                    transform=ax.transAxes, color="k")
                            ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                            r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                            ax.plot(
                                net_force_model[:, i-1, :], "--", color="k")
                            ax.plot([], "--", c="k", label="Predicted")
                        ax.legend(loc=2, bbox_to_anchor=(1, 1),
                                  labelcolor="markerfacecolor")
                        ax.set_ylabel("Net force")
                        ax.set_xlabel("Time step")
                        ax.set_title(f"{N}-Pendulum Exp {ind}")
                    plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
                Es = Es_fn(actual_traj)
                Eshat = Es_fn(pred_traj)
                H = Es[:, -1]
                Hhat = Eshat[:, -1]
                fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")
                plt.xlabel("Time step")
                plt.xlabel("Time step")
                plt.ylabel("Energy")
                plt.ylabel("Energy")
                title = f"CHGNN {N}-Pendulum Exp {ind} Hmodel"
                axs[1].set_title(title)
                title = f"CHGNN {N}-Pendulum Exp {ind} Hactual"
                axs[0].set_title(title)
                plt.savefig(_filename(title.replace(" ", "-")+f".png"))
            else:
                pass
        # Accumulate per-trajectory metrics (1e-30 guards the later log()).
        Es = Es_fn(actual_traj)
        Eshat = Es_fn(pred_traj)
        H = Es[:, -1]
        Hhat = Eshat[:, -1]
        nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
        nexp["E"] += [Es, Eshat]
        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)+1e-30]
        savefile(f"error_parameter.pkl", nexp)
    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        # Plot per-trajectory error curves, then the geometric mean with a
        # +/- 2-sigma (in log space) band.
        print(f"Plotting err for {key}")
        fig, axs = plt.subplots(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))
        fig, axs = plt.subplots(1, 1)
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))
    make_plots(nexp, "Zerr",
               yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
    make_plots(nexp, "Herr",
               yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
    # Geometric-mean error curves exported for cross-model comparison plots.
    gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
    gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
    if (ifDataEfficiency == 0):
        np.savetxt(f"../{N}-pendulum-zerr/chgnn.txt", gmean_zerr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-herr/chgnn.txt", gmean_herr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-simulation-time/chgnn.txt", [t/maxtraj], delimiter = "\n")
# Script entry point: evaluate the trained model on the 4- and 5-link systems.
main(N = 4)
main(N = 5)
| 17,699 | 32.333333 | 203 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-FGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Collect the keys of *namespace* referring to the identical object *obj*.

    Uses ``is`` (identity), so value-equal copies are excluded.
    """
    hits = [nm for nm, val in namespace.items() if val is obj]
    return hits
def pprint(*args, namespace=globals()):
    """Echo each argument as ``<name>: <value>`` using identity lookup.

    ``namestr`` resolves the variable name; the default ``globals()`` is
    captured at definition time, so pass ``namespace=locals()`` to resolve
    names from the calling scope.
    """
    for entry in args:
        print(f"{namestr(entry, namespace)[0]}: {entry}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=50, semilog=1, maxtraj=100, plotthings=False, redo=0, if_noisy_data=0):
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"fgnn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/0/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
# senders = jnp.array(senders)
# receivers = jnp.array(receivers)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
# return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_delta(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
def change_R_V(N, dim):
def fn(Rs, Vs, params):
return Lmodel(Rs, Vs, params)
return fn
change_R_V_ = change_R_V(N, dim)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
def get_forward_sim_full_graph_network(params = None, run = runs):
@jit
def fn(R, V):
return predition2(R, V, params, change_R_V_, dt, masses, stride=stride, runs=run)
return fn
sim_model = get_forward_sim_full_graph_network(params=params, run=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
# net_force_model_fn = net_force_fn(
# force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[0].position[ind*69]
V = dataset_states[0].velocity[ind*69]
try:
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
nexp["Perr"] += [RelErr(actual_traj.velocity,
pred_traj.velocity)]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"FGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"FGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
print("skipped")
#if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
np.savetxt(f"../{N}-nbody-zerr/fgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/fgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/fgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/fgnn.txt", [t/maxtraj], delimiter = "\n")
# Script entry point: run the post-processing/evaluation for the 4-body system.
main(N = 4)
| 20,094 | 32.603679 | 224 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-LGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
import time
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is the very object *obj*.

    Identity (``is``), not equality, is used, so this recovers the variable
    name(s) an object is bound to in the given namespace dict.
    """
    matches = []
    for key in namespace:
        if namespace[key] is obj:
            matches.append(key)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as ``<variable name>: <value>``.

    The name is recovered by identity lookup in *namespace* via ``namestr``;
    if an object is bound to several names, the first match is used.
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=5, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, withdata=None, saveovito=1, trainm=1, runs=10, semilog=1, maxtraj=100, plotthings=False, redo=0, if_noisy_data=1):
    """Evaluate a trained LGN model on the N-link pendulum.

    Simulates `maxtraj` trajectories with both the ground-truth Lagrangian
    dynamics and the learned model, accumulates relative errors on positions
    (Zerr) and total energy (Herr), saves trajectories/plots, and writes
    geometric-mean error curves plus mean simulation time to text files.

    Parameters mirror the sibling benchmark scripts: `N` bodies in `dim`
    dimensions, integration step `dt` with `stride` sub-steps per saved frame,
    `runs` frames per trajectory, `ifdrag` toggling a drag term, `useN` naming
    which trained model to load, and `if_noisy_data` switching the output root.
    """
    print("Configs: ")
    pprint(dt, stride, ifdrag,
           namespace=locals())
    PSYS = f"{N}-Pendulum"
    TAG = f"lgn"
    # Output root depends on whether we evaluate against noisy data.
    if (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG, trained=None):
        """Build (and create directories for) an output path under out_dir."""
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            # Load artifacts trained on a different system size.
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
        rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"{withdata}")
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        # Free-space displacement (no periodic boundary).
        return a - b

    def shift(R, dR, V):
        # Integrator shift: move positions, pass velocities through.
        return R+dR, V

    def OUT(f):
        """Decorator: resolve the first (filename) argument through _filename."""
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained),
                     *args, **kwargs)
        return func

    def _fileexist(f):
        # With redo=1, pretend nothing exists so everything is regenerated.
        if redo:
            return False
        else:
            return os.path.isfile(f)

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    fileexist = OUT(_fileexist)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    # dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
    # model_states = dataset_states[0]
    # R = model_states.position[0]
    # V = model_states.velocity[0]
    # print(
    #     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N, dim = model_states.position.shape[-2:]
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    ################################################
    ################## SYSTEM ######################
    ################################################
    pot_energy_orig = PEF
    kin_energy = partial(lnn._T, mass=masses)

    def Lactual(x, v, params):
        # Ground-truth Lagrangian L = T - V.
        return kin_energy(v) - pot_energy_orig(x)

    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. positions.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)

    def external_force(x, v, params):
        # NOTE(review): jax.ops.index_update was removed from modern JAX
        # (use x.at[...].set(...)); this function is also unused below
        # (external_force=None is passed), so it never runs here.
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)

    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                                lagrangian=Lactual,
                                                non_conservative_forces=drag,
                                                constraints=constraints,
                                                external_force=None)

    def force_fn_orig(R, V, params, mass=None):
        # With mass=None, accelerations double as forces (unit masses).
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)

    def get_forward_sim(params=None, force_fn=None, runs=10):
        # Build a jitted forward simulator returning `runs` saved frames.
        @jit
        def fn(R, V):
            return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
        return fn

    sim_orig = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=maxtraj*runs)

    def simGT():
        """Simulate and cache one long ground-truth trajectory."""
        print("Simulating ground truth ...")
        _traj = sim_orig(R, V)
        metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
        savefile("gt_trajectories.pkl",
                 _traj, metadata=metadata)
        return _traj

    # if fileexist("gt_trajectories.pkl"):
    #     print("Loading from saved.")
    #     full_traj, metadata = loadfile("gt_trajectories.pkl")
    #     full_traj = NVEStates(full_traj)
    #     if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
    #         print("Metadata doesnot match.")
    #         full_traj = NVEStates(simGT())
    # else:
    #     full_traj = NVEStates(simGT())

    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)
    # def L_energy_fn(params, graph):
    #     g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
    #     return T - V
    # if trainm:
    #     print("kinetic energy: learnable")
    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return T - V
    # else:
    #     print("kinetic energy: 0.5mv^2")
    #     kin_energy = partial(lnn._T, mass=masses)
    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return kin_energy(graph.nodes["velocity"]) - V

    def dist(*args):
        # Euclidean edge length from a displacement vector.
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())

    R = jnp.array(R)
    V = jnp.array(V)
    species = jnp.array(species).reshape(-1, 1)
    dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def acceleration_fn(params, graph):
        # Learned per-node output from the LGN graph network.
        acc = fgn.cal_lgn(params, graph, mpass=1)
        return acc

    def acc_fn(species):
        """Close over a graph template and return apply(R, V, params)."""
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            # Refresh node/edge features in place, then evaluate the model.
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply

    apply_fn = acc_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Lmodel(x, v, params): return apply_fn(x, v, params["L"])

    def nndrag(v, params):
        # Learned drag: always opposes velocity (magnitude from an MLP).
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

    # NOTE(review): unlike the ground-truth dynamics, the model is built with
    # constraints=None — presumably the LGN learns the constrained dynamics
    # directly; confirm against the training script.
    acceleration_fn_model = accelerationFull(N, dim,
                                             lagrangian=Lmodel,
                                             constraints=None,
                                             non_conservative_forces=drag)
    # def nndrag(v, params):
    #     return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    # if ifdrag == 0:
    #     print("Drag: 0.0")
    #     def drag(x, v, params):
    #         return 0.0
    # elif ifdrag == 1:
    #     print("Drag: -0.1*v")
    #     def drag(x, v, params):
    #         return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
    # acceleration_fn_model = accelerationFull(N, dim,
    #                                          lagrangian=Lmodel,
    #                                          constraints=constraints,
    #                                          non_conservative_forces=drag)

    def force_fn_model(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_model(R, V, params)
        else:
            return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)

    params = loadfile(f"trained_model_low.dil", trained=useN)[0]
    sim_model = get_forward_sim(
        params=params, force_fn=force_fn_model, runs=runs)

    ################################################
    ############## forward simulation ##############
    ################################################
    def norm(a):
        # Per-frame L2 norm over all particle coordinates.
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))

    def RelErr(ya, yp):
        # Symmetric relative error in [0, 1].
        return norm(ya-yp) / (norm(ya) + norm(yp))

    def Err(ya, yp):
        return ya-yp

    def AbsErr(*args):
        return jnp.abs(Err(*args))

    def cal_energy_fn(lag=None, params=None):
        # Returns a jitted fn mapping trajectory states -> [PE, KE, L, TE].
        @jit
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            L = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = -(L - KE)
            return jnp.array([PE, KE, L, KE+PE]).T
        return fn

    Es_fn = cal_energy_fn(lag=Lactual, params=None)
    # Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)

    def net_force_fn(force=None, params=None):
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn

    net_force_orig_fn = net_force_fn(force=force_fn_orig)
    net_force_model_fn = net_force_fn(
        force=force_fn_model, params=params)

    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }
    trajectories = []
    sim_orig2 = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=runs)
    t = 0.0  # accumulated wall-clock time of model forward simulations
    skip = 0
    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")
        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs
        R, V = get_init(N, dim=dim, angles=(-90, 90))
        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]
        try:
            actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
            start = time.time()
            pred_traj = sim_model(R, V)
            end = time.time()
            t += end - start
            if saveovito and ind < 5:
                save_ovito(f"pred_{ind}.data", [
                    state for state in NVEStates(pred_traj)], lattice="")
                save_ovito(f"actual_{ind}.data", [
                    state for state in NVEStates(actual_traj)], lattice="")
            trajectories += [(actual_traj, pred_traj)]
            savefile("trajectories.pkl", trajectories)
            if plotthings:
                # NOTE(review): the raise makes everything below in this
                # branch unreachable; the Warning is caught by the bare
                # except at the end of this try, so the trajectory is
                # counted as "skipped" whenever plotthings is truthy.
                raise Warning("Cannot calculate energy in FGN")
                for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                    print(f"plotting energy ({key})...")
                    Es = Es_fn(traj)
                    Es_pred = Es_pred_fn(traj)
                    Es_pred = Es_pred - Es_pred[0] + Es[0]
                    fig, axs = panel(1, 2, figsize=(20, 5))
                    axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                    axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                    plt.legend(bbox_to_anchor=(1, 1), loc=2)
                    axs[0].set_facecolor("w")
                    xlabel("Time step", ax=axs)
                    ylabel("Energy", ax=axs)
                    title = f"(FGN) {N}-Pendulum Exp {ind}"
                    plt.title(title)
                    plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
                    net_force_orig = net_force_orig_fn(traj)
                    net_force_model = net_force_model_fn(traj)
                    fig, axs = panel(1+R.shape[0], 1, figsize=(20,
                                                               R.shape[0]*5), hshift=0.1, vs=0.35)
                    for i, ax in zip(range(R.shape[0]+1), axs):
                        if i == 0:
                            ax.text(0.6, 0.8, "Averaged over all particles",
                                    transform=ax.transAxes, color="k")
                            ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                            ax.plot(net_force_model.sum(axis=1), "--", color="k")
                            ax.plot([], "--", c="k", label="Predicted")
                        else:
                            ax.text(0.6, 0.8, f"For particle {i}",
                                    transform=ax.transAxes, color="k")
                            ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                            r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                            ax.plot(net_force_model[:, i-1, :], "--", color="k")
                            ax.plot([], "--", c="k", label="Predicted")
                        ax.legend(loc=2, bbox_to_anchor=(1, 1),
                                  labelcolor="markerfacecolor")
                        ax.set_ylabel("Net force")
                        ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Pendulum Exp {ind}")
                    plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
            # Compare total energy (last column) between ground truth and model.
            Es = Es_fn(actual_traj)
            Eshat = Es_fn(pred_traj)
            H = Es[:, -1]
            Hhat = Eshat[:, -1]
            nexp["Herr"] += [RelErr(H, Hhat)]
            nexp["E"] += [Es, Eshat]
            nexp["z_pred"] += [pred_traj.position]
            nexp["z_actual"] += [actual_traj.position]
            nexp["Zerr"] += [RelErr(actual_traj.position,
                                    pred_traj.position)]
            fig, axs = panel(1, 2, figsize=(20, 5))
            axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
            axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
            plt.legend(bbox_to_anchor=(1, 1), loc=2)
            axs[0].set_facecolor("w")
            xlabel("Time step", ax=axs[0])
            xlabel("Time step", ax=axs[1])
            ylabel("Energy", ax=axs[0])
            ylabel("Energy", ax=axs[1])
            title = f"LGN {N}-Pendulum Exp {ind} Lmodel"
            axs[1].set_title(title)
            title = f"LGN {N}-Pendulum Exp {ind} Lactual"
            axs[0].set_title(title)
            # plt.savefig(_filename(title.replace(" ", "-")+f".png"))
            savefile(f"error_parameter.pkl", nexp)
        except:
            # NOTE(review): bare except silently swallows every failure
            # (including NaNs in the integrator and the Warning above);
            # at minimum this should log the exception.
            print("skipped")
            #if skip < 20:
            skip += 1

    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        """Plot per-trajectory error curves and the geometric-mean ±2σ band."""
        print(f"Plotting err for {key}")
        fig, axs = panel(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))
        fig, axs = panel(1, 1)
        # Statistics are taken in log space => geometric mean and band.
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))

    make_plots(nexp, "Zerr",
               yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
    make_plots(nexp, "Herr",
               yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
    # Geometric-mean error curves across trajectories, exported for the
    # cross-model comparison tables.
    gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
    gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
    np.savetxt(f"../{N}-pendulum-zerr/lgn.txt", gmean_zerr, delimiter = "\n")
    np.savetxt(f"../{N}-pendulum-herr/lgn.txt", gmean_herr, delimiter = "\n")
    np.savetxt(f"../{N}-pendulum-simulation-time/lgn.txt", [t/maxtraj], delimiter = "\n")
# Script entry point: evaluate the trained LGN model on the 10-link pendulum.
main(N = 10)
| 18,959 | 32.321617 | 196 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-GNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List all names in *namespace* bound (by identity) to *obj*."""
    found = []
    for name in namespace:
        if namespace[name] is obj:
            found.append(name)
    return found
def pprint(*args, namespace=globals()):
    """Echo each argument as ``<name>: <value>`` using identity lookup."""
    for item in args:
        name = namestr(item, namespace)[0]
        print(f"{name}: {item}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Per-frame accumulators for the peridynamics snapshots.
# NOTE(review): `id` and `type` shadow Python builtins — rename if this
# script grows; `type` appears unused after loading.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
# Read 251 evenly-spaced snapshots (steps 0..5000) of environment 1.
# Each .data file is whitespace-delimited; column layout below is inferred
# from the slices: 0-2 accel, 3 damage, 4 id, 5 mass, 6-8 position,
# 9 type, 10-12 velocity, 13 volume — TODO confirm against the generator.
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
# Stack snapshots into (frames, particles, 3) device arrays.
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Reference (undeformed) configuration; the 1.1 factor presumably undoes a
# pre-stretch applied when the data was generated — TODO confirm.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement vector from *b* to *a* (free space, no periodic wrap)."""
    delta = a - b
    return delta
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Neighbourhood graph over the undeformed configuration (cutoff radius 3.0).
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
# ---- run configuration (flat script; the commented-out main() below shows
# the intended CLI signature) ----
epochs=10000
seed=100
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
mpass=1
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
#     print("Configs: ")
#     pprint(N, epochs, seed, rname,
#            dt, stride, lr, ifdrag, batch_size,
#            namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"GNODE"
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Resolve *name* to an output path under out_dir and ensure its directory exists.

    The run subdirectory is the timestamped `randfilename` for non-data tags
    when `rname` is set, otherwise "0" (optionally suffixed with `withdata`).
    """
    if rname and tag != "data":
        rstring = randfilename
    elif tag == "data" or withdata == None:
        rstring = "0"
    else:
        rstring = f"0_{withdata}"
    prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    raw_path = f"{prefix}/{name}"
    os.makedirs(os.path.dirname(raw_path), exist_ok=True)
    resolved = raw_path.replace("//", "/")
    print("===", resolved, "===")
    return resolved
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Integrator shift: advance positions by dR, leave velocities as-is."""
    advanced = R + dR
    return advanced, V
def OUT(f):
    """Decorator: route *f*'s first (filename) argument through _filename()."""
    @wraps(f)
    def wrapper(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return wrapper
# Wrap the IO helpers so they write into the run directory.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)

################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
# Add a singleton "trajectory" axis: (frames, 1, N, dim).
Rs = Rs.reshape(-1, 1, N, dim)
Vs = Vs.reshape(-1, 1, N, dim)
Fs = Fs.reshape(-1, 1, N, dim)
# Shuffle frames, then 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")

################################################
################## SYSTEM ######################
################################################
# peridynamics_sim

################################################
################### ML Model ###################
################################################
# Feature dimensions for the graph network MLPs.
dim = 3
Ef = dim  # eij dim
Nf = dim
Oh = 1
Eei = 8
Nei = 8
Nei_ = 5  ##Nei for mass
hidden = 8
nhidden = 2
def get_layers(in_, out_):
    """Layer sizes for an MLP: input, `nhidden` hidden layers of width `hidden`, output."""
    sizes = [in_]
    sizes.extend([hidden] * nhidden)
    sizes.append(out_)
    return sizes
def mlp(in_, out_, key, **kwargs):
    """Initialize MLP weights for the standard layer layout of this script."""
    layer_sizes = get_layers(in_, out_)
    return initialize_mlp(layer_sizes, key, **kwargs)
# Parameter pytrees for each sub-network of the graph model; the short
# names mirror the update functions in src.graph (fb: edge embed, fv: node
# update, fe: edge update, ff*: force heads, fne/fneke: node embeds,
# ke/mass: kinetic-energy and mass heads) — TODO confirm against src.graph.
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)  #
# Nei = Nei+dim+dim
fb_params = mlp(Ef, Eei, key)  #
fv_params = mlp(Nei+Eei, Nei, key)  #
fe_params = mlp(Nei, Eei, key)  #
ff1_params = mlp(Eei, dim, key)
ff2_params = mlp(Nei, dim, key)  #
ff3_params = mlp(Nei+dim+dim, dim, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True])  #
Fparams = dict(fb=fb_params,
               fv=fv_params,
               fe=fe_params,
               ff1=ff1_params,
               ff2=ff2_params,
               ff3=ff3_params,
               fne=fne_params,
               fneke=fneke_params,
               ke=ke_params,
               mass=mass_params)
#params = {"Fqqdot": Fparams}
# def graph_force_fn(params, graph):
#     _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
#                                        useT=True)
#     return _GForce
R, V = Rs[0][0], Vs[0][0]
# Adapt the make_graph() dict to jraph.GraphsTuple's schema and drop
# self-loop edges (sender == receiver).
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
# def _force_fn(species):
#     state_graph = graph
#     def apply(R, V, params):
#         state_graph.nodes.update(position=R)
#         state_graph.nodes.update(velocity=V)
#         return graph_force_fn(params, state_graph)
#     return apply
# apply_fn = _force_fn(species)
# # v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# apply_fn(R, V, Fparams)
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# #                                            constraints=None)
def graph_force_fn(params, graph):
    """Evaluate the CD-GNODE graph network to get the predicted per-node force."""
    return a_cdgnode_cal_force_q_qdot(params, graph, eorder=None, useT=True)
def _force_fn(species):
    """Close over the template graph; the returned fn writes (R, V) into it and evaluates the force."""
    state_graph = graph

    def apply(R, V, params):
        # Refresh the mutable node dict in place before each evaluation.
        state_graph.nodes.update(position=R, velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
# Bind the template graph into a (R, V, params) -> force callable.
apply_fn = _force_fn(species)
# Thin adapter selecting this model's parameter sub-tree.
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# Full trainable parameter pytree for the run.
params = {"Fqqdot": Fparams}
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = F_q_qdot
# vmap over snapshots, then over trajectories (two leading batch axes).
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
# print(R)
# #print(V)
# print(Lmodel(R, V, params))
# Smoke test: evaluate the untrained model once on a single state.
print(acceleration_fn_model(R,V, params))
# sys.exit()
# print(Lmodel(R,V,params))
# sys.exit()
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    """Mean-squared error between batched model accelerations and targets Fs."""
    return MSE(v_v_acceleration_fn_model(Rs, Vs, params), Fs)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
def gloss(*args):
    """Loss value and gradient with respect to params (the first argument)."""
    value_and_grad_fn = value_and_grad(loss_fn)
    return value_and_grad_fn(*args)
def update(i, opt_state, params, loss__, *data):
    """One optimization step: evaluate loss/grads on a batch, apply the optimizer.

    `loss__` is accepted for interface compatibility but is not used.
    Returns (new_opt_state, new_params, batch_loss).
    """
    batch_loss, batch_grads = gloss(params, *data)
    new_state = opt_update(i, batch_grads, opt_state)
    return new_state, get_params(new_state), batch_loss
@jit
def step(i, ps, *args):
    """Unpack the (opt_state, params, loss) tuple `ps` and run one update."""
    opt_state, params, prev_loss = ps
    return update(i, opt_state, params, prev_loss, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)

@jit
def opt_update(i, grads_, opt_state):
    """Sanitize gradients, then apply the underlying Adam update.

    Non-finite entries are mapped to finite values via nan_to_num and all
    gradient entries are clipped to [-1000, 1000] before the optimizer step.
    """
    sanitized = jax.tree_map(jnp.nan_to_num, grads_)
    clipper = partial(jnp.clip, a_min=-1000.0, a_max=1000.0)
    sanitized = jax.tree_map(clipper, sanitized)
    return opt_update_(i, sanitized, opt_state)
def batching(*args, size=None):
    """Split parallel arrays into equal-sized minibatches.

    Parameters
    ----------
    *args : array-likes sharing the same leading length L.
    size : int or None, keyword-only. Target batch size; when None the whole
        dataset is returned as a single batch.

    Returns
    -------
    list of jnp.ndarray, one per input, each with a leading (nbatches, size)
    pair of axes.

    Notes
    -----
    Batches must be equal-sized so they can be stacked into one array; the
    batch count is chosen to maximize the number of covered samples, and any
    trailing samples beyond size*nbatches are dropped.
    """
    L = len(args[0])
    if size is not None:  # idiomatic None check (was `size != None`)
        # Two candidate batch counts around L/size; keep the one covering
        # more samples with equal-sized batches.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L/nbatches1)
        size2 = int(L/nbatches2)
        if size1*nbatches1 > size2*nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    newargs = []
    for arg in args:
        newargs += [jnp.array([arg[i*size:(i+1)*size]
                               for i in range(nbatches)])]
    return newargs
# ---- minibatch the training set; batch size is capped by the dataset size ----
bRs, bVs, bFs = batching(Rs, Vs, Fs,
                         size=min(len(Rs), batch_size))

print(f"training ...")

start = time.time()
train_time_arr = []

opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []       # per-epoch training loss (from the full-set step below)
ltarray = []      # per-epoch test loss
last_loss = 1000  # best training loss so far; gates the "_low" checkpoint

for epoch in range(epochs):
    l = 0.0
    for data in zip(bRs, bVs, bFs):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)
        l += l_
    # NOTE(review): an extra full-dataset optimizer step is taken each epoch,
    # and its loss (not the minibatch sum `l`) is what gets logged.
    opt_state, params, l_ = step(
        optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
    larray += [l_]
    ltarray += [loss_fn(params, Rst, Vst, Fst)]

    if epoch % 10 == 0:
        print(
            f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
    # Every 10 epochs: checkpoint model + loss history and refresh the plot.
    if epoch % 10 == 0:
        metadata = {
            "savedat": epoch,
            # "mpass": mpass,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
                     params, metadata=metadata)
        fig, axs = panel(1, 1)
        plt.semilogy(larray, label="Training")
        plt.semilogy(ltarray, label="Test")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
        if (ifDataEfficiency == 0):
            np.savetxt("../peridynamics-training-time/gnode.txt", train_time_arr, delimiter = "\n")
            np.savetxt("../peridynamics-training-loss/gnode-train.txt", larray, delimiter = "\n")
            np.savetxt("../peridynamics-training-loss/gnode-test.txt", ltarray, delimiter = "\n")
    now = time.time()
    train_time_arr.append((now - start))

# ---- final loss curves and checkpoints after training completes ----
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))

params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
         params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
         (larray, ltarray), metadata=metadata)

if last_loss > larray[-1]:
    last_loss = larray[-1]
    savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
             params, metadata=metadata)

if (ifDataEfficiency == 0):
    np.savetxt("../peridynamics-training-time/gnode.txt", train_time_arr, delimiter = "\n")
    np.savetxt("../peridynamics-training-loss/gnode-train.txt", larray, delimiter = "\n")
    np.savetxt("../peridynamics-training-loss/gnode-test.txt", ltarray, delimiter = "\n")

# fire.Fire(main)
| 13,557 | 27.364017 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-LGNN-post.py | ################################################
################## IMPORT ######################
################################################
# from fcntl import F_SEAL_SEAL
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
#from shadow.plot import *
import matplotlib.pyplot as plt
#from sklearn.metrics import r2_score
# from scipy.stats import gmean
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in `namespace` whose value is this exact object (identity match)."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value', resolving the name via `namespace`.

    NOTE: the default namespace is evaluated once, at definition time.
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search = 0, nhidden = 2, if_mpass_search = 0, mpass = 1, if_lr_search = 0, lr = 0.001, if_act_search = 0, if_noisy_data=1):
    """Post-training evaluation of an LGNN model on the N-pendulum system.

    Loads a trained model, simulates `maxtraj` trajectories with both the
    ground-truth and learned dynamics, and saves trajectory files, energy
    plots, and relative-error arrays (Zerr/Herr) under the run directory.
    """
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)

    print("Configs: ")
    pprint(dt, stride, ifdrag,
           namespace=locals())

    PSYS = f"{N}-Pendulum"
    TAG = f"lgnn"
    # Output root depends on which ablation/search mode is active.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_hidden_search == 1):
        out_dir = f"../mlp_hidden_search"
    elif (if_nhidden_search == 1):
        out_dir = f"../mlp_nhidden_search"
    elif (if_mpass_search == 1):
        out_dir = f"../mpass_search"
    elif (if_lr_search == 1):
        out_dir = f"../lr_search"
    elif (if_act_search == 1):
        out_dir = f"../act_search"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG, trained=None):
        # Build an output path; a drag/trainm suffix is spliced into the
        # file name and the run subdirectory encodes the search variable.
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
        rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        elif (if_hidden_search == 1):
            rstring = "0_" + str(hidden)
        elif (if_nhidden_search == 1):
            rstring = "0_" + str(nhidden)
        elif (if_mpass_search == 1):
            rstring = "0_" + str(mpass)
        elif (if_lr_search == 1):
            rstring = "0_" + str(lr)
        elif (if_act_search == 1):
            rstring = "0_" + str("softplus")
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        # Free-space displacement (no periodic wrapping).
        return a - b

    def shift(R, dR, V):
        return R+dR, V

    def OUT(f):
        # Decorator: route f's first argument (a file name) through _filename.
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained),*args, **kwargs)
        return func

    def _fileexist(f):
        if redo:
            return False
        else:
            return os.path.isfile(f)

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    fileexist = OUT(_fileexist)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    # dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
    # model_states = dataset_states[0]
    # R = model_states.position[0]
    # V = model_states.velocity[0]
    # print(
    #     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N, dim = model_states.position.shape[-2:]
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    ################################################
    ################## SYSTEM ######################
    ################################################
    pot_energy_orig = PEF
    kin_energy = partial(lnn._T, mass=masses)

    def Lactual(x, v, params):
        # Ground-truth Lagrangian: T - V.
        return kin_energy(v) - pot_energy_orig(x)

    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. positions.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)

    def external_force(x, v, params):
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)

    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                                lagrangian=Lactual,
                                                non_conservative_forces=drag,
                                                constraints=constraints,
                                                external_force=None)

    def force_fn_orig(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)

    def get_forward_sim(params=None, force_fn=None, runs=10):
        # Jitted forward integrator bound to a specific force function.
        @jit
        def fn(R, V):
            return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
        return fn

    sim_orig = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=maxtraj*runs)

    def simGT():
        print("Simulating ground truth ...")
        _traj = sim_orig(R, V)
        metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
        savefile("gt_trajectories.pkl",
                 _traj, metadata=metadata)
        return _traj

    # if fileexist("gt_trajectories.pkl"):
    #     print("Loading from saved.")
    #     full_traj, metadata = loadfile("gt_trajectories.pkl")
    #     full_traj = NVEStates(full_traj)
    #     if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
    #         print("Metadata doesnot match.")
    #         full_traj = NVEStates(simGT())
    # else:
    #     full_traj = NVEStates(simGT())

    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)

    # def L_energy_fn(params, graph):
    #     g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
    #     return T - V
    if trainm:
        print("kinetic energy: learnable")

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, eorder=eorder,
                                useT=True)
            return T - V
    else:
        print("kinetic energy: 0.5mv^2")
        kin_energy = partial(lnn._T, mass=masses)

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, eorder=eorder,
                                useT=True)
            return kin_energy(graph.nodes["velocity"]) - V

    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def energy_fn(species):
        # Builds a closure evaluating the learned Lagrangian on a fresh graph.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return L_energy_fn(params, state_graph)
        return apply

    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Lmodel(x, v, params): return apply_fn(x, v, params["L"])

    def nndrag(v, params):
        # Learned drag: -|MLP(v)| * v, always opposing the velocity.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        # NOTE(review): label says "-0.1*v" but the model drag is the learned
        # nndrag — the print string looks stale.
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

    acceleration_fn_model = accelerationFull(N, dim,
                                             lagrangian=Lmodel,
                                             constraints=None,
                                             non_conservative_forces=drag)

    def force_fn_model(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_model(R, V, params)
        else:
            return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)

    params = loadfile(f"trained_model_low.dil", trained=useN)[0]

    sim_model = get_forward_sim(
        params=params, force_fn=force_fn_model, runs=runs)

    ################################################
    ############## forward simulation ##############
    ################################################
    def norm(a):
        # Per-timestep L2 norm over all remaining axes.
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))

    def RelErr(ya, yp):
        # Symmetric relative error in [0, 1].
        return norm(ya-yp) / (norm(ya) + norm(yp))

    def Err(ya, yp):
        return ya-yp

    def AbsErr(*args):
        return jnp.abs(Err(*args))

    def cal_energy_fn(lag=None, params=None):
        # Returns a jitted fn producing [PE, KE, L, TE] columns per state.
        @jit
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            L = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = -(L - KE)
            return jnp.array([PE, KE, L, KE+PE]).T
        return fn

    Es_fn = cal_energy_fn(lag=Lactual, params=None)
    Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)

    def net_force_fn(force=None, params=None):
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn

    net_force_orig_fn = net_force_fn(force=force_fn_orig)
    net_force_model_fn = net_force_fn(
        force=force_fn_model, params=params)

    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }

    trajectories = []

    sim_orig2 = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=runs)

    t = 0.0
    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")

        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs
        R, V = get_init(N, dim=dim, angles=(-90, 90))
        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]
        actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
        start = time.time()
        pred_traj = sim_model(R, V)
        end = time.time()
        # Accumulate wall-clock time of the learned-model simulation only.
        t += end - start

        if saveovito:
            if ind < 5:
                save_ovito(f"pred_{ind}.data", [
                    state for state in NVEStates(pred_traj)], lattice="")
                save_ovito(f"actual_{ind}.data", [
                    state for state in NVEStates(actual_traj)], lattice="")

        trajectories += [(actual_traj, pred_traj)]
        savefile("trajectories.pkl", trajectories)

        if plotthings:
            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")
                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                # Align predicted energy offset to the true initial energy.
                Es_pred = Es_pred - Es_pred[0] + Es[0]
                fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")
                plt.xlabel("Time step")
                plt.ylabel("Energy")
                title = f"(LGNN) {N}-Pendulum Exp {ind}"
                plt.title(title)
                # plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)
                plt.clf()
                fig, axs = plt.subplots(1+R.shape[0], 1, figsize=(20,
                                                                  R.shape[0]*5))
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Averaged over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                        r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Pendulum Exp {ind}")
                # plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))

        # Per-trajectory energy / position error bookkeeping.
        Es = Es_fn(actual_traj)
        Eshat = Es_fn(pred_traj)
        H = Es[:, -1]
        Hhat = Eshat[:, -1]

        nexp["Herr"] += [RelErr(H, Hhat)]
        nexp["E"] += [Es, Eshat]
        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)]

        plt.clf()
        fig, axs = plt.subplots(1, 2, figsize=(20, 5))
        axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
        plt.legend(bbox_to_anchor=(1, 1), loc=2)
        axs[0].set_facecolor("w")
        #xlabel("Time step", ax=axs[0])
        #xlabel("Time step", ax=axs[1])
        #ylabel("Energy", ax=axs[0])
        #ylabel("Energy", ax=axs[1])
        plt.xlabel("Time step")
        plt.ylabel("Energy")
        title = f"LGNN {N}-Pendulum Exp {ind} Lmodel"
        axs[1].set_title(title)
        title = f"LGNN {N}-Pendulum Exp {ind} Lactual"
        axs[0].set_title(title)
        # plt.savefig(_filename(title.replace(" ", "-")+f".png"))

        savefile(f"error_parameter.pkl", nexp)

    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        # Plot each trajectory's error curve, then a log-space mean ± 2*std band.
        print(f"Plotting err for {key}")
        plt.clf()
        fig, axs = plt.subplots(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))

        plt.clf()
        fig, axs = plt.subplots(1, 1)
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))

    make_plots(nexp, "Zerr",
               yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
    make_plots(nexp, "Herr",
               yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")

    # Geometric means over trajectories (mean in log space).
    gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
    gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )

    if (if_hidden_search == 0):
        np.savetxt(_filename("lgnn_zerr.txt"), gmean_zerr, delimiter = "\n")
        np.savetxt(_filename("lgnn_herr.txt"), gmean_herr, delimiter = "\n")
        np.savetxt(_filename("lgnn_sim_time.txt"), [t/maxtraj], delimiter = "\n")
    else:
        np.savetxt(f"../{N}-pendulum-zerr/lgnn.txt", gmean_zerr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-herr/lgnn.txt", gmean_herr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-simulation-time/lgnn.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
main(N = 5)
| 18,884 | 31.786458 | 356 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-data-HGNN.py | ################################################
################## IMPORT ######################
################################################
import sys
import fire
import os
from datetime import datetime
from functools import partial, wraps
from psystems.nsprings import chain
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
# from shadow.plot import panel
import matplotlib.pyplot as plt
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import src
from jax.config import config
from src import hamiltonian
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def ps(*args):
    """Debug helper: print the .shape attribute of every argument."""
    for arr in args:
        print(arr.shape)
# N = 3fig, axs = plt.subplots(1, 1)
# ifdrag = 0
# dt = 1e-3
# stride = 100
# runs = 1000
def main(N=5, dim=2, nconfig=100, saveat=100, ifdrag=0, dt=1e-3, stride = 100, runs=100):
    """Generate HNN training data for the N-spring chain.

    Integrates `nconfig` random initial configurations with Hamiltonian
    dynamics, saves the (state, state-derivative) pairs, and renders a few
    sample trajectory/constraint-force plots.
    """
    tag = f"{N}-Spring-data"
    seed = 42
    out_dir = f"../results"
    rname = False
    rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "2_" + str(nconfig * runs)
    filename_prefix = f"{out_dir}/{tag}/{rstring}/"

    def _filename(name):
        # Path under the run directory; ensures the directory exists.
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route f's first argument (a file name) through _filename.
        @wraps(f)
        def func(file, *args, **kwargs):
            return f(_filename(file), *args, **kwargs)
        return func

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    init_confs = [chain(N)[:2] for i in range(nconfig)]
    _, _, senders, receivers = chain(N)

    R, V = init_confs[0]

    print("Saving init configs...")
    savefile = OUT(src.io.savefile)
    savefile(f"initial-configs_{ifdrag}.pkl",
             init_confs, metadata={"N1": N, "N2": N})

    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    ################################################
    ################## SYSTEM ######################
    ################################################
    # NOTE(review): this drag is unconditionally shadowed by the
    # ifdrag-selected drag below.
    def drag(x, p, params):
        return -0.1 * (p*p).sum()

    def pot_energy_orig(x):
        # Sum of pairwise spring potentials over the chain's edges.
        dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
        return jax.vmap(partial(src.hamiltonian.SPRING, stiffness=1.0, length=1.0))(dr).sum()

    kin_energy = partial(src.hamiltonian._T, mass=masses)

    def Hactual(x, p, params): return kin_energy(p) + pot_energy_orig(x)

    def external_force(x, p, params):
        F = 0*x
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, p, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*p")

        def drag(x, p, params):
            return -0.1*p.reshape(-1, 1)

    zdot, lamda_force = get_zdot_lambda(
        N, dim, Hactual, drag=drag, constraints=None, external_force=None)

    def zdot_func(z, t):
        # ODE right-hand side in stacked z = [x; p] form for odeint.
        x, p = jnp.split(z, 2)
        return zdot(x, p, None)

    def get_z(x, p):
        return jnp.vstack([x, p])

    ################################################
    ############### DATA GENERATION ################
    ################################################
    def zz(out, ind=None):
        # Split a stacked trajectory back into (x, p); ind selects one half.
        if ind is None:
            x, p = jnp.split(out, 2, axis=1)
            return x, p
        else:
            return jnp.split(out, 2, axis=1)[ind]

    t = jnp.linspace(0.0, runs*stride*dt, runs*stride)

    print("Data generation ...")
    ind = 0
    dataset_states = []
    for x, p in init_confs:
        # Integrate densely, then subsample every `stride` steps.
        _z_out = ode.odeint(zdot_func, get_z(x, p), t)
        z_out = _z_out[0::stride]
        xout, pout = zz(z_out)
        zdot_out = jax.vmap(zdot, in_axes=(0, 0, None))(xout, pout, None)
        ind += 1
        print(f"{ind}/{len(init_confs)}", end='\r')
        # my_state = States()
        # my_state.position = xout
        # my_state.velocity = pout
        # my_state.force = zdot_out
        # my_state.mass = jnp.ones(xout.shape[0])
        # model_states = my_state
        model_states = z_out, zdot_out
        dataset_states += [model_states]
        if ind % saveat == 0:
            print(f"{ind} / {len(init_confs)}")
            print("Saving datafile...")
            savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    print("Saving datafile...")
    savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    print("plotting traj")

    # Render the first few trajectories for a visual sanity check.
    ind = 0
    for states in dataset_states:
        z_out, _ = states
        xout, pout = zz(z_out)
        # xout = states.position
        # pout = states.velocity
        ind += 1
        fig, axs = plt.subplots(1, 2, figsize=(10, 5))
        for i in range(N):
            axs[0].scatter(xout[:, i, 0], xout[:, i, 1], c=t[0::stride],
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[0].set_xlabel("X-position")
        axs[0].set_ylabel("Y-position")
        axs[0].axis("square")
        force = jax.vmap(lamda_force, in_axes=(0, 0, None))(xout, pout, None)
        for i in range(N):
            axs[1].scatter(force[:, N+i, 0], force[:, N+i, 1], c=t[0::stride],
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[1].set_xlabel(r"F$_x$ (constraints)")
        axs[1].set_ylabel(r"F$_y$ (constraints)")
        axs[1].axis("square")
        title = f"{N}-spring random state {ind} {ifdrag}"
        plt.suptitle(title, va="bottom")
        plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
        if ind > 3:
            break
fire.Fire(main)
| 6,035 | 27.880383 | 99 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-LGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Names in `namespace` bound to exactly this object (identity match)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print 'name: value' for each argument, looking names up in `namespace`.

    NOTE: the default namespace is captured once, at definition time.
    """
    for obj in args:
        print(f"{namestr(obj, namespace)[0]}: {obj}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Parse the peridynamics snapshot files (one whitespace-separated table per
# saved timestep) into per-field lists of float arrays.
# NOTE(review): `id` and `type` shadow Python builtins — kept as-is because
# later module-level code (possibly beyond this view) may reference them.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
# NOTE(review): mid-file import; conventionally this belongs at the top of
# the file with the other imports.
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    # The single CSV column holds whitespace-separated fields; split them out.
    split_df = df.iloc[1:,0].str.split(expand=True)
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
# Stack snapshots into (T, N, 3) arrays of positions/velocities/accelerations.
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Reference (undeformed) configuration, rescaled by 1.1.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement vector from b to a (free space, no periodic wrapping)."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Build the neighborhood graph on the reference configuration: particles
# within `cutoff` become edges.
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
# print(my_graph0_disc['edges'])
# sys.exit()
# ---- training-run configuration ----
epochs=10000
seed=42
rname=False          # True -> timestamped run directory
dt=1.0e-3
ifdrag=0             # 0: no drag, 1: learned drag
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
# Timestamped run id (used only when rname is enabled) plus output naming.
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"LGN"
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build an output path under out_dir/PSYS-tag/<run-id>/ and ensure it exists.

    The run-id is the timestamped `randfilename` when `rname` is set (except
    for data files), otherwise "0" or "0_<withdata>".
    """
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata is None) else f"0_{withdata}")  # `is None` per PEP 8 (was `== None`)
    filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Advance positions by dR; velocities pass through unchanged."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    """Decorator: route f's first argument (a file name) through _filename."""
    @wraps(f)
    def wrapped(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return wrapped
# File I/O helpers with automatic run-directory path resolution.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)

# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")

species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)

# Rs, Vs, Fs = States(graphs).get_array()
# Shuffle snapshots, then take a 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]

Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr

Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]

Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]

print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
dim = 3
# Ef = dim # eij dim
# Nf = dim
# Oh = 1
# Eei = 8
# Nei = 8
# Nei_ = 5 ##Nei for mass
# hidden = 8
# nhidden = 2
# def get_layers(in_, out_):
# return [in_] + [hidden]*nhidden + [out_]
# def mlp(in_, out_, key, **kwargs):
# return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# fneke_params = initialize_mlp([Oh, Nei], key)
# fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
# fb_params = mlp(Ef, Eei, key) #
# fv_params = mlp(Nei+Eei, Nei, key) #
# fe_params = mlp(Nei, Eei, key) #
# ff1_params = mlp(Eei, dim, key)
# ff2_params = mlp(Nei, dim, key) #
# ff3_params = mlp(Nei, dim, key)
# ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
# mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
# Fparams = dict(fb=fb_params,
# fv=fv_params,
# fe=fe_params,
# ff1=ff1_params,
# ff2=ff2_params,
# ff3=ff3_params,
# fne=fne_params,
# fneke=fneke_params,
# ke=ke_params,
# mass=mass_params)
# params = {"Fqqdot": Fparams}
# def graph_force_fn(params, graph):
# _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
# useT=True)
# return _GForce
# Template state and graph: drop fields jraph.GraphsTuple doesn't accept,
# then remove self-loop edges (sender == receiver) before building it.
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
graph = jraph.GraphsTuple(**my_graph0_disc)
# print(graph.edges)
# sys.exit()
# def _force_fn(species):
# state_graph = graph
# def apply(R, V, params):
# state_graph.nodes.update(position=R)
# state_graph.nodes.update(velocity=V)
# return graph_force_fn(params, state_graph)
# return apply
# apply_fn = _force_fn(species)
# # v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# apply_fn(R, V, Fparams)
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# # constraints=None)
def dist(*args):
    """Euclidean distance: L2 norm of displacement(*args)."""
    return jnp.sqrt(jnp.square(displacement(*args)).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = graph
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
print(acceleration_fn_model(R,V, params))
# sys.exit()
# def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# params = {"L": Lparams}
# print(acceleration_fn_model(R, V, params))
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    # Mean-squared error between the model's predicted accelerations and the
    # target accelerations Fs, over a whole batch of states.
    pred = v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
def gloss(*args):
return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    # loss__ is accepted but unused; `value` is the loss at the pre-update params.
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
larray += [l_]
ltarray += [loss_fn(params, Rst, Vst ,Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 10 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 14,755 | 27.376923 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-data-HGNN.py | ################################################
################## IMPORT ######################
################################################
import sys
import fire
import os
from datetime import datetime
from functools import partial, wraps
from psystems.nsprings import chain
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
# from shadow.plot import panel
import matplotlib.pyplot as plt
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import src
from jax.config import config
from src import hamiltonian
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
from psystems.nbody import (get_fully_connected_senders_and_receivers, get_fully_edge_order, get_init_conf)
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def ps(*args):
    """Debug helper: print the .shape of every argument, one per line."""
    for array in args:
        print(array.shape)
def main(N=4, dim=3, nconfig=1, saveat=100, ifdrag=0, dt=1e-3, stride = 100, runs=10000, train = False):
    """Generate N-body trajectory data by integrating Hamiltonian dynamics.

    Integrates the gravitational N-body system with odeint, subsamples every
    `stride` steps, saves the resulting (state, state-derivative) pairs to
    ../results/<N>-body-data/..., and plots a few trajectories.
    """
    tag = f"{N}-body-data"
    seed = 42
    out_dir = f"../results"
    rname = False
    rstring = "2" if train else "2_test"
    filename_prefix = f"{out_dir}/{tag}/{rstring}/"

    def _filename(name):
        # Build the output path under filename_prefix, creating directories on demand.
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route the first (filename) argument of f through _filename.
        @wraps(f)
        def func(file, *args, **kwargs):
            return f(_filename(file), *args, **kwargs)
        return func

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    # init_confs = [chain(N)[:2] for i in range(nconfig)]
    # _, _, senders, receivers = chain(N)
    init_confs = get_init_conf(train)
    senders, receivers = get_fully_connected_senders_and_receivers(N)

    R, V = init_confs[0]

    print("Saving init configs...")
    savefile = OUT(src.io.savefile)
    savefile(f"initial-configs_{ifdrag}.pkl",
             init_confs, metadata={"N1": N, "N2": N})

    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    ################################################
    ################## SYSTEM ######################
    ################################################
    def drag(x, p, params):
        # NOTE(review): this default is shadowed by the ifdrag branches below
        # before use, so it is effectively dead code.
        return -0.1 * (p*p).sum()

    def pot_energy_orig(x):
        # Pairwise gravitational potential over all fully-connected edges;
        # /2 corrects the double counting from symmetric sender/receiver pairs.
        dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
        return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2

    kin_energy = partial(src.hamiltonian._T, mass=masses)

    def Hactual(x, p, params): return kin_energy(p) + pot_energy_orig(x)

    def external_force(x, p, params):
        # Unused here (external_force=None is passed below).
        # NOTE(review): jax.ops.index_update was removed in newer JAX releases.
        F = 0*x
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, p, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*p")

        def drag(x, p, params):
            return -0.1*p.reshape(-1, 1)

    zdot, lamda_force = get_zdot_lambda(
        N, dim, Hactual, drag=drag, constraints=None, external_force=None)

    def zdot_func(z, t):
        # ODE right-hand side in stacked phase-space form z = [x; p].
        x, p = jnp.split(z, 2)
        return zdot(x, p, None)

    def get_z(x, p):
        return jnp.vstack([x, p])

    ################################################
    ############### DATA GENERATION ################
    ################################################
    def zz(out, ind=None):
        # Split stacked phase-space output back into (x, p);
        # with ind given, return just one of the halves.
        if ind is None:
            x, p = jnp.split(out, 2, axis=1)
            return x, p
        else:
            return jnp.split(out, 2, axis=1)[ind]

    t = jnp.linspace(0.0, runs*stride*dt, runs*stride)

    print("Data generation ...")
    ind = 0
    dataset_states = []
    for x, p in init_confs:
        _z_out = ode.odeint(zdot_func, get_z(x, p), t)
        z_out = _z_out[0::stride]  # subsample the fine integration grid
        xout, pout = zz(z_out)
        zdot_out = jax.vmap(zdot, in_axes=(0, 0, None))(xout, pout, None)
        ind += 1
        print(f"{ind}/{len(init_confs)}", end='\r')
        # my_state = States()
        # my_state.position = xout
        # my_state.velocity = pout
        # my_state.force = zdot_out
        # my_state.mass = jnp.ones(xout.shape[0])
        # model_states = my_state
        model_states = z_out, zdot_out
        dataset_states += [model_states]
        if ind % saveat == 0:
            # periodic checkpoint of the growing dataset
            print(f"{ind} / {len(init_confs)}")
            print("Saving datafile...")
            savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    print("Saving datafile...")
    savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    print("plotting traj")
    ind = 0
    for states in dataset_states:
        z_out, _ = states
        xout, pout = zz(z_out)
        # xout = states.position
        # pout = states.velocity
        ind += 1
        fig, axs = plt.subplots(1, 2, figsize=(10, 5))
        for i in range(N):
            axs[0].scatter(xout[:, i, 0], xout[:, i, 1], c=t[0::stride],
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[0].set_xlabel("X-position")
        axs[0].set_ylabel("Y-position")
        axs[0].axis("square")
        force = jax.vmap(lamda_force, in_axes=(0, 0, None))(xout, pout, None)
        for i in range(N):
            axs[1].scatter(force[:, N+i, 0], force[:, N+i, 1], c=t[0::stride],
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[1].set_xlabel(r"F$_x$ (constraints)")
        axs[1].set_ylabel(r"F$_y$ (constraints)")
        axs[1].axis("square")
        title = f"{N}-nbody random state {ind} {ifdrag}"
        plt.suptitle(title, va="bottom")
        plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
        if ind > 3:
            # only plot the first few trajectories
            break
fire.Fire(main)
| 6,086 | 28.838235 | 107 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CLGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly *obj* (identity match)."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each value prefixed by its variable name, resolved via *namespace*."""
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
def wrap_main(f):
    """Wrap *f* so each call echoes its arguments and forwards them to *f*,
    additionally passing config=(args, kwargs) for reproducibility logging."""
    def wrapped(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for name, value in kwargs.items():
            print(name, ":", value)
        return f(*args, **kwargs, config=config)
    return wrapped
def Main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
         dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, if_noisy_data=1):
    """CLI entry point (invoked via fire): forwards all hyperparameters to
    main() through wrap_main, which logs the call configuration."""
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, error_fn=error_fn, mpass=mpass,
                           dt=dt, ifdrag=ifdrag, trainm=trainm, stride=stride, lr=lr, datapoints=datapoints,
                           batch_size=batch_size, saveat=saveat, if_noisy_data=if_noisy_data)
def main(N=3, epochs=1, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
         dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=100, config=None, if_noisy_data=1):
    """Train a constrained Lagrangian graph network (CLGN) on N-pendulum data.

    Loads previously generated trajectory data, optionally corrupts it with
    Gaussian noise, builds a jraph-based GNN Lagrangian with holonomic pendulum
    constraints, and trains it with Adam to match observed accelerations.
    Checkpoints, loss curves and timing are written under out_dir.
    """
    # print("Configs: ")
    # pprint(N, epochs, seed, rname,
    #        dt, stride, lr, ifdrag, batch_size,
    #        namespace=locals())
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"

    PSYS = f"{N}-Pendulum"
    TAG = f"clgn"
    if (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Resolve an output path; "data"-tagged files always come from ../results/<sys>-data/0/.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"{withdata}")
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        return a - b

    def shift(R, dR, V):
        return R+dR, V

    def OUT(f):
        # Decorator: route the first (filename) argument of f through _filename.
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)

    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)

    savefile(f"config_{ifdrag}_{trainm}.pkl", config)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        # NOTE(review): bare except hides the real failure; a narrower
        # exception type would make debugging easier.
        raise Exception("Generate dataset first. Use *-data.py file.")

    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]

    model_states = dataset_states[0]

    print(
        f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")

    N, dim = model_states.position.shape[-2:]
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
    Rs = Rs.reshape(-1, N, dim)
    Vs = Vs.reshape(-1, N, dim)
    Fs = Fs.reshape(-1, N, dim)

    if (if_noisy_data == 1):
        # Corrupt each sample with a single shared Gaussian offset per array
        # (np.random.normal(0,1,1) returns one scalar broadcast over the frame).
        Rs = np.array(Rs)
        Fs = np.array(Fs)
        Vs = np.array(Vs)

        np.random.seed(100)

        for i in range(len(Rs)):
            Rs[i] += np.random.normal(0,1,1)
            Vs[i] += np.random.normal(0,1,1)
            Fs[i] += np.random.normal(0,1,1)

        Rs = jnp.array(Rs)
        Fs = jnp.array(Fs)
        Vs = jnp.array(Vs)

    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Rs), len(Rs), replace=False)
    allRs = Rs[mask]
    allVs = Vs[mask]
    allFs = Fs[mask]

    Ntr = int(0.75*len(Rs))
    Nts = len(Rs) - Ntr

    Rs = allRs[:Ntr]
    Vs = allVs[:Ntr]
    Fs = allFs[:Ntr]

    Rst = allRs[Ntr:]
    Vst = allVs[Ntr:]
    Fst = allFs[Ntr:]

    ################################################
    ################## SYSTEM ######################
    ################################################

    # pot_energy_orig = PEF
    # kin_energy = partial(lnn._T, mass=masses)

    # def Lactual(x, v, params):
    #     return kin_energy(v) - pot_energy_orig(x)

    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. flattened positions.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)

    # def external_force(x, v, params):
    #     F = 0*R
    #     F = jax.ops.index_update(F, (1, 1), -1.0)
    #     return F.reshape(-1, 1)

    # def drag(x, v, params):
    #     return -0.1*v.reshape(-1, 1)

    # acceleration_fn_orig = lnn.accelerationFull(N, dim,
    #                                             lagrangian=Lactual,
    #                                             non_conservative_forces=None,
    #                                             constraints=constraints,
    #                                             external_force=None)

    # def force_fn_orig(R, V, params, mass=None):
    #     if mass is None:
    #         return acceleration_fn_orig(R, V, params)
    #     else:
    #         return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)

    # @jit
    # def forward_sim(R, V):
    #     return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)

    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)

    hidden_dim = [16, 16]
    edgesize = 1
    nodesize = 5
    ee = 8
    ne = 8

    Lparams = dict(
        ee_params=initialize_mlp([edgesize, ee], key),
        ne_params=initialize_mlp([nodesize, ne], key),
        e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
        n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
        g_params=initialize_mlp([ne, *hidden_dim, 1], key),
        acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
        lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
    )

    # if trainm:
    #     print("kinetic energy: learnable")

    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return T - V

    # else:
    #     print("kinetic energy: 0.5mv^2")

    #     kin_energy = partial(lnn._T, mass=masses)

    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return kin_energy(graph.nodes["velocity"]) - V

    R, V = Rs[0], Vs[0]
    species = jnp.array(species).reshape(-1, 1)

    def dist(*args):
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())

    dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])

    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def acceleration_fn(params, graph):
        acc = fgn.cal_lgn(params, graph, mpass=1)
        return acc

    def acc_fn(species):
        # Build a template graph once; apply() mutates its node/edge dicts in
        # place with the current positions/velocities before each evaluation.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply

    apply_fn = jit(acc_fn(species))
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Lmodel(x, v, params): return apply_fn(x, v, params["L"])

    params = {"L": Lparams}

    def nndrag(v, params):
        # Learned drag: always dissipative (negative magnitude times velocity).
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

    params["drag"] = initialize_mlp([1, 5, 5, 1], key)

    acceleration_fn_model = accelerationFull(N, dim,
                                             lagrangian=Lmodel,
                                             constraints=constraints,
                                             non_conservative_forces=drag)
    print(acceleration_fn_model(R, V, params))
    v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))

    ################################################
    ################## ML Training #################
    ################################################
    # LOSS = getattr(src.models, error_fn)

    @jit
    def loss_fn(params, Rs, Vs, Fs):
        # MSE between predicted and observed accelerations.
        pred = v_acceleration_fn_model(Rs, Vs, params)
        return MSE(pred, Fs)

    @jit
    def gloss(*args):
        return value_and_grad(loss_fn)(*args)

    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @jit
    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def opt_update(i, grads_, opt_state):
        # Sanitize NaNs in gradients before the optimizer step.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        # grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)

    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)

    def batching(*args, size=None):
        # Split arrays into equal-sized batches; any remainder that does not
        # fill a whole batch is dropped.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L

        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs

    bRs, bVs, bFs = batching(Rs, Vs, Fs,
                             size=min(len(Rs), batch_size))

    print(f"training ...")

    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []
    ltarray = []
    last_loss = 1000

    larray += [loss_fn(params, Rs, Vs, Fs)]
    ltarray += [loss_fn(params, Rst, Vst, Fst)]

    def print_loss():
        print(
            f"Epoch: {epoch}/{epochs}: train={larray[-1]}, test={ltarray[-1]}")

    # print_loss()
    start = time.time()
    train_time_arr = []

    for epoch in range(epochs):
        for data in zip(bRs, bVs, bFs):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)

        # opt_state, params, l = step(
        #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)

        if epoch % 1 == 0:
            larray += [loss_fn(params, Rs, Vs, Fs)]
            ltarray += [loss_fn(params, Rst, Vst, Fst)]
            print_loss()
            now = time.time()
            train_time_arr.append((now - start))

        if epoch % saveat == 0:
            # periodic checkpoint + best-so-far model
            metadata = {
                "savedat": epoch,
                "mpass": mpass,
                "ifdrag": ifdrag,
                "trainm": trainm,
            }
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata=metadata)
            if last_loss > larray[-1]:
                last_loss = larray[-1]
                savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                         params, metadata=metadata)

            # if epoch % 10000 == 0:
            #     savefile(f"trained_model_{ifdrag}_{trainm}_low_{epoch}.dil",
            #              params, metadata=metadata)

            # NOTE(review): block placement reconstructed from a sibling
            # script; plt.clf() suggests this is redrawn during training.
            plt.clf()
            fig, axs = plt.subplots(1, 1)
            plt.semilogy(larray, label="Training")
            plt.semilogy(ltarray, label="Test")
            plt.xlabel("Epoch")
            plt.ylabel("Loss")
            plt.legend()
            plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))

    np.savetxt("../3-pendulum-training-time/clgn-3.txt", train_time_arr, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/clgn-3-train.txt", larray, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/clgn-3-test.txt", ltarray, delimiter = "\n")

    metadata = {
        "savedat": epoch,
        "mpass": mpass,
        "ifdrag": ifdrag,
        "trainm": trainm,
    }
    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata=metadata)
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata=metadata)
fire.Fire(Main)
| 15,534 | 30.194779 | 142 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
class Datastate:
    """Next-step-prediction view of a trajectory.

    Drops the final frame from position/velocity/force/mass and stores the
    per-step changes in position and velocity as prediction targets.
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        self.index = 0
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
def namestr(obj, namespace):
    """Return every name in *namespace* bound to *obj* itself (identity comparison)."""
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as '<name>: <value>', resolving names via *namespace*.

    NOTE(review): the default namespace is captured once at definition time.
    """
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def wrap_main(f):
    """Return a wrapper that echoes all call arguments and forwards them to *f*,
    additionally passing config=(args, kwargs)."""
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for i in args:
            print(i)
        print(f"KwArgs: ")
        for k, v in kwargs.items():
            print(k, ":", v)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=5, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=1):
    """CLI entry point (invoked via fire): forwards all hyperparameters to
    main() through wrap_main, which logs the call configuration."""
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat, error_fn=error_fn,
                           dt=dt, ifdrag=ifdrag, stride=stride, trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Spring"
TAG = f"fgnn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
# rstring = randfilename if (rname and (tag != "data")) else (
# "1" if (tag == "data") or (withdata == None) else f"{withdata}")
rstring = "1" if (tag == "data") else "0"
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{1}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# if len(dataset_states)*model_states.position.shape[0] != 10000:
# raise Exception("Invalid number of data points")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
# Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
# Rs = Rs.reshape(-1, N, dim)
# Vs = Vs.reshape(-1, N, dim)
# Fs = Fs.reshape(-1, N, dim)
Rs, Vs, Fs, Rds, Vds = States_modified().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
Rds = Rds.reshape(-1, N, dim)
Vds = Vds.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Rds = np.array(Rds)
Fs = np.array(Fs)
Vs = np.array(Vs)
Vds = np.array(Vds)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Rds[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Vds[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Rds = jnp.array(Rds)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
Vds = jnp.array(Vds)
# mask = np.random.choice(len(Rs), len(Rs), replace=False)
# allRs = Rs[mask]
# allVs = Vs[mask]
# allFs = Fs[mask]
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allRds = Rds[mask]
allVds = Vds[mask]
# Ntr = int(0.75*len(Rs))
# Nts = len(Rs) - Ntr
# Rs = allRs[:Ntr]
# Vs = allVs[:Ntr]
# Fs = allFs[:Ntr]
# Rst = allRs[Ntr:]
# Vst = allVs[Ntr:]
# Fst = allFs[Ntr:]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rds = allRds[:Ntr]
Vds = allVds[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Rdst = allRds[Ntr:]
Vdst = allVds[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
if grid:
print("It's a grid?")
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
print("It's a random?")
# senders, receivers = get_fully_connected_senders_and_receivers(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
R, V = Rs[0], Vs[0]
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
delta_params = initialize_mlp([ne, *hidden_dim, dim*2], key),
)
def acceleration_fn(params, graph):
    """Evaluate ``fgn.cal_delta`` (one message pass) on the state graph."""
    return fgn.cal_delta(params, graph, mpass=1)
def acc_fn(species):
    # Build one template graph from the module-level R, V, dij and topology;
    # the returned `apply` closure swaps in fresh states for each evaluation.
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def apply(R, V, params):
        # NOTE(review): updates the shared closed-over graph in place —
        # fine for sequential calls, not safe for concurrent use.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        # Recompute pairwise sender/receiver distances for the new positions.
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = jit(accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Rds, Vds):
    # Mean-squared error between the model's per-node predictions and the
    # target (dR, dV) pairs concatenated along the last feature axis.
    pred = v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, jnp.concatenate([Rds,Vds], axis=2))


@jit
def gloss(*args):
    # Loss value together with its gradient w.r.t. the parameters.
    return value_and_grad(loss_fn)(*args)


opt_init, opt_update_, get_params = optimizers.adam(lr)


@ jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients (NaN -> 0, clip to +/-1000) before the Adam step.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(
        partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)


@jit
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value


@ jit
def step(i, ps, *args):
    # Convenience wrapper: unpack (opt_state, params, _) and call update.
    return update(i, *ps, *args)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
bRs, bVs, bRds, bVds = batching(Rs, Vs, Rds, Vds,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Rds, Vds)]
ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
def print_loss():
    # Report the latest train/test losses (reads module-level state).
    print(
        f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")


print_loss()

# Main training loop: one optimizer step per minibatch, per epoch.
for epoch in range(epochs):
    l = 0.0
    count = 0
    for data in zip(bRs, bVs, bRds, bVds):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)
        l += l_
        count += 1
    # optimizer_step += 1
    # opt_state, params, l_ = step(
    #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
    # Average minibatch loss for this epoch.
    l = l/count
    if epoch % 1 == 0:
        larray += [l]
        ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
        print_loss()
    # Periodic checkpoint; also track the best (lowest-loss) model so far.
    if epoch % saveat == 0:
        metadata = {
            "savedat": epoch,
            "mpass": mpass,
            "grid": grid,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
    # Wall-clock time elapsed since training started.
    now = time.time()
    train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
np.savetxt(f"../5-spring-training-time/fgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/fgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/fgnn-test.txt", ltarray, delimiter = "\n")
fire.Fire(Main)
| 17,514 | 30.054965 | 162 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-GNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly *obj* (identity)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches


def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value', looking the name up in *namespace*.

    The default namespace is this module's globals, captured at definition time.
    """
    for arg in args:
        label = namestr(arg, namespace)[0]
        print(f"{label}: {arg}")
N=4
epochs=10000
seed=42
rname=True
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
# mpass=1
lr=0.001
withdata=None
datapoints=None
batch_size=100
ifDataEfficiency = 0
if_noisy_data = 0
# def main(N=3, epochs=100, seed=42, rname=True, saveat=1,
# dt=1.0e-5, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname,
dt, stride, lr, ifdrag, batch_size,
namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-body"
TAG = f"gnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
    # Build the output path <prefix>/<name> for this experiment, create the
    # directory if needed, and return the normalized path.
    # NOTE(review): this first rstring expression is immediately overwritten
    # by the unconditional `rstring = "0"` below — dead code, kept as-is.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    rstring = "0"
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    # Datasets always live under ../results; other artifacts under out_dir.
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the double slash introduced by the prefix's trailing "/".
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Pairwise displacement under free (non-periodic) boundary conditions."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Apply the position update dR; velocities pass through unchanged."""
    return (R + dR, V)
def OUT(f):
    """Decorator: resolve the first argument (a file name) through
    `_filename` before delegating to *f*."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        target = _filename(file, tag=tag)
        return f(target, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, 1, N, dim)
Vs = Vs.reshape(-1, 1, N, dim)
Fs = Fs.reshape(-1, 1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
Ef = dim # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
Nei_ = 5 ##Nei for mass
hidden = 5
nhidden = 2
def get_layers(in_, out_):
    """Layer sizes for an MLP: input width, `nhidden` hidden layers of
    width `hidden` (module-level config), then the output width."""
    sizes = [in_]
    sizes += [hidden] * nhidden
    sizes += [out_]
    return sizes


def mlp(in_, out_, key, **kwargs):
    """Initialize an MLP whose architecture follows `get_layers`."""
    layer_sizes = get_layers(in_, out_)
    return initialize_mlp(layer_sizes, key, **kwargs)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
fb_params = mlp(Ef, Eei, key) #
fv_params = mlp(Nei+Eei, Nei, key) #
fe_params = mlp(Nei, Eei, key) #
ff1_params = mlp(Eei, dim, key)
ff2_params = mlp(Nei, dim, key) #
ff3_params = mlp(Nei+dim+dim, dim, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
Fparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params,
mass=mass_params)
params = {"Fqqdot": Fparams}
def graph_force_fn(params, graph):
    """Evaluate the GNODE network on a state graph via
    `a_cdgnode_cal_force_q_qdot` (no edge ordering, useT=True)."""
    return a_cdgnode_cal_force_q_qdot(params, graph, eorder=None, useT=True)
R, V = Rs[0][0], Vs[0][0]
def _force_fn(species):
    # Build one template graph from the module-level R, V and topology;
    # the returned `apply` closure swaps in fresh states per evaluation.
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def apply(R, V, params):
        # NOTE(review): mutates the shared closed-over graph in place.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = _force_fn(species)
# v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# x=R
# v=V
# print(F_q_qdot(x, v, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = F_q_qdot
# acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
# constraints=None,
# non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
# x=Rs[0]
# v=Vs[0]
# F_q_qdot(x[0], v[0], params)
# acceleration_fn_model(x[0], v[0], params)
# hhhh = v_v_acceleration_fn_model(Rs, Vs, params)
# print(hhhh)
# print(hhhh.shape)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    # Mean-squared error between predicted and reference accelerations.
    pred = v_v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)


def gloss(*args):
    # Loss value together with its gradient w.r.t. the parameters.
    return value_and_grad(loss_fn)(*args)


def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value


@ jit
def step(i, ps, *args):
    # Convenience wrapper: unpack (opt_state, params, _) and call update.
    return update(i, *ps, *args)


opt_init, opt_update_, get_params = optimizers.adam(lr)


@ jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients (NaN -> 0, clip to +/-1000) before the Adam step.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each array in ``args`` into equal-sized minibatches.

    All inputs must share the same leading length ``L``.  When ``size`` is
    given, the batch count is chosen between ceil(L/size) and one fewer so
    that the equal-sized batches cover as many of the ``L`` samples as
    possible; any remainder that does not fill a whole batch is dropped.
    Without ``size`` a single batch containing everything is returned.

    Returns one stacked ``jnp.array`` of shape (nbatches, size, ...) per
    input array.
    """
    L = len(args[0])
    if size is not None:  # identity check, not `!= None` (PEP 8)
        # Candidate batch counts: ceil(L/size) and one fewer.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        # Keep whichever split covers more samples in total.
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    return [jnp.array([arg[i * size:(i + 1) * size]
                       for i in range(nbatches)])
            for arg in args]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count+=1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 100 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-body-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,643 | 26.788187 | 134 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
Rds = Rs[1:] - Rs[:-1]
Vds = Vs[1:] - Vs[:-1]
Rs = Rs[:-1]
Vs = Vs[:-1]
Fs = Fs[:-1]
print(Rs.shape)
print(Rds.shape)
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Pairwise displacement under free (non-periodic) boundary conditions."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
epochs=10000
seed=42
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"FGNN"
out_dir = f"../results"
def _filename(name, tag=TAG):
    # Build the output path <prefix>/<name> for this experiment, create the
    # directory if needed, and return the normalized path.
    # rstring selects the run subfolder: a timestamped name for fresh runs,
    # "0" for data/default, or "0_<withdata>" when reusing a data run.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    # NOTE(review): `file` shadows the Python 2 builtin name; harmless here.
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the double slash introduced by the prefix's trailing "/".
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Apply the position update dR; velocities pass through unchanged."""
    return (R + dR, V)
def OUT(f):
    """Decorator: resolve the first argument (a file name) through
    `_filename` before delegating to *f*."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        target = _filename(file, tag=tag)
        return f(target, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allRds = Rds[mask]
allVds = Vds[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rds = allRds[:Ntr]
Vds = allVds[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Rdst = allRds[Ntr:]
Vdst = allVds[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
dim = 3
# Ef = dim # eij dim
# Nf = dim
# Oh = 1
# Eei = 8
# Nei = 8
# Nei_ = 5 ##Nei for mass
# hidden = 8
# nhidden = 2
# def get_layers(in_, out_):
# return [in_] + [hidden]*nhidden + [out_]
# def mlp(in_, out_, key, **kwargs):
# return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# fneke_params = initialize_mlp([Oh, Nei], key)
# fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
# fb_params = mlp(Ef, Eei, key) #
# fv_params = mlp(Nei+Eei, Nei, key) #
# fe_params = mlp(Nei, Eei, key) #
# ff1_params = mlp(Eei, dim, key)
# ff2_params = mlp(Nei, dim, key) #
# ff3_params = mlp(Nei, dim, key)
# ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
# mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
# Fparams = dict(fb=fb_params,
# fv=fv_params,
# fe=fe_params,
# ff1=ff1_params,
# ff2=ff2_params,
# ff3=ff3_params,
# fne=fne_params,
# fneke=fneke_params,
# ke=ke_params,
# mass=mass_params)
# params = {"Fqqdot": Fparams}
# def graph_force_fn(params, graph):
# _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
# useT=True)
# return _GForce
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
graph = jraph.GraphsTuple(**my_graph0_disc)
# def _force_fn(species):
# state_graph = graph
# def apply(R, V, params):
# state_graph.nodes.update(position=R)
# state_graph.nodes.update(velocity=V)
# return graph_force_fn(params, state_graph)
# return apply
# apply_fn = _force_fn(species)
# # v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# apply_fn(R, V, Fparams)
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# # constraints=None)
def dist(*args):
    """Return the Euclidean norm of the displacement between the inputs."""
    delta = displacement(*args)
    return jnp.sqrt(jnp.sum(delta * delta))
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
delta_params = initialize_mlp([ne, *hidden_dim, dim*2], key),
)
def acceleration_fn(params, graph):
    """Evaluate ``fgn.cal_delta`` (one message pass) on the state graph."""
    return fgn.cal_delta(params, graph, mpass=1)
def acc_fn(species):
    # Reuse the module-level template `graph`; the returned `apply` closure
    # refreshes its node states and edge distances before each evaluation.
    state_graph = graph

    def apply(R, V, params):
        # NOTE(review): updates the shared closed-over graph in place.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        # Recompute pairwise sender/receiver distances for the new positions.
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Rds, Vds):
    # Mean-squared error between the model's per-node predictions and the
    # target (dR, dV) pairs concatenated along the last feature axis.
    pred = v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, jnp.concatenate([Rds,Vds], axis=2))


# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])


def gloss(*args):
    # Loss value together with its gradient w.r.t. the parameters.
    return value_and_grad(loss_fn)(*args)


def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value


@jit
def step(i, ps, *args):
    # Convenience wrapper: unpack (opt_state, params, _) and call update.
    return update(i, *ps, *args)


opt_init, opt_update_, get_params = optimizers.adam(lr)


@jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients (NaN -> 0, clip to +/-1000) before the Adam step.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each array in ``args`` into equal-sized minibatches.

    All inputs must share the same leading length ``L``.  When ``size`` is
    given, the batch count is chosen between ceil(L/size) and one fewer so
    that the equal-sized batches cover as many of the ``L`` samples as
    possible; any remainder that does not fill a whole batch is dropped.
    Without ``size`` a single batch containing everything is returned.

    Returns one stacked ``jnp.array`` of shape (nbatches, size, ...) per
    input array.
    """
    L = len(args[0])
    if size is not None:  # identity check, not `!= None` (PEP 8)
        # Candidate batch counts: ceil(L/size) and one fewer.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        # Keep whichever split covers more samples in total.
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    return [jnp.array([arg[i * size:(i + 1) * size]
                       for i in range(nbatches)])
            for arg in args]
bRs, bVs, bRds, bVds = batching(Rs, Vs, Rds, Vds,
size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bRds, bVds):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), Rs, Vs, Rds, Vds)
larray += [l_]
ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 10 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/fgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/fgnn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/fgnn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,407 | 27.050209 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-LGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
plt.rcParams["font.family"] = "Arial"
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly *obj* (identity)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches


def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value', looking the name up in *namespace*.

    The default namespace is this module's globals, captured at definition time.
    """
    for arg in args:
        label = namestr(arg, namespace)[0]
        print(f"{label}: {arg}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"lgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
    def pot_energy_orig(x):
        """Ground-truth spring potential summed over all graph edges.

        NOTE(review): ``dr`` here is the *squared* pair distance (no sqrt);
        it is fed directly to ``lnn.SPRING`` — confirm SPRING expects a
        squared distance, since the n-body script takes the sqrt first.
        """
        dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
        return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
    # Kinetic energy with fixed unit masses (closure over `masses`).
    kin_energy = partial(lnn._T, mass=masses)
    def Lactual(x, v, params):
        """Ground-truth Lagrangian L = T - V; ``params`` is unused."""
        return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
    # Select the ground-truth drag model used to generate the data.
    if ifdrag == 0:
        print("Drag: 0.0")
        # No dissipation.
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")
        # Linear viscous drag, returned as a flattened (N*dim, 1) column.
        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
    def get_forward_sim(params=None, force_fn=None, runs=10):
        """Return a jitted rollout ``fn(R, V)`` of length ``runs``.

        Closes over ``dt``, ``masses`` and ``stride``. ``predition`` [sic]
        comes from the ``src.md`` wildcard import — presumably a fixed-step
        integrator; TODO confirm its contract there.
        """
        @jit
        def fn(R, V):
            return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
        return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
    def simGT():
        """Simulate and cache the ground-truth trajectory from the initial (R, V).

        Saves the rollout to gt_trajectories.pkl with a metadata key that
        records the (maxtraj, runs) configuration it was generated with.
        """
        print("Simulating ground truth ...")
        _traj = sim_orig(R, V)
        metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
        savefile("gt_trajectories.pkl",
                 _traj, metadata=metadata)
        return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
    # Choose the learned Lagrangian: either both T and V come from the GNN,
    # or T is fixed to the analytic 0.5*m*v^2 and only V is learned.
    if trainm:
        print("kinetic energy: learnable")
        def L_energy_fn(params, graph):
            # cal_graph returns (graph, potential V, kinetic T).
            g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                                useT=True, useonlyedge=True)
            return T - V
    else:
        print("kinetic energy: 0.5mv^2")
        kin_energy = partial(lnn._T, mass=masses)
        def L_energy_fn(params, graph):
            # Learned T is computed but discarded; analytic KE is used instead.
            g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                                useT=True, useonlyedge=True)
            return kin_energy(graph.nodes["velocity"]) - V
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
    def energy_fn(species):
        """Build ``apply(R, V, params)`` evaluating the learned Lagrangian.

        A graph template is constructed once; ``apply`` mutates the
        closed-over template's node dict in place with the current
        positions/velocities before each evaluation.
        """
        # senders, receivers = [np.array(i)
        #                       for i in get_fully_connected_senders_and_receivers(N)]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})
        def apply(R, V, params):
            # In-place update of the shared template (not a functional copy).
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return L_energy_fn(params, state_graph)
        return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
    def nndrag(v, params):
        """Learned scalar drag: magnitude from an MLP, sign forced opposite to v."""
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    # Select the drag model for the learned system (mirrors the data-side choice).
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: nn")
        # Apply the learned drag independently to each velocity component.
        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
    def cal_energy_fn(lag=None, params=None):
        """Build a jitted energy-decomposition function for a trajectory.

        The returned ``fn(states)`` evaluates the given Lagrangian ``lag``
        frame-by-frame and returns an array of rows [PE, KE, L, TE=KE+PE].
        PE is recovered as KE - L; KE always uses the analytic kinetic
        energy (closure over ``kin_energy``).
        """
        @jit
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            L = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = -(L - KE)
            return jnp.array([PE, KE, L, KE+PE]).T
        return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
    def net_force_fn(force=None, params=None):
        """Build a jitted function mapping a trajectory to per-frame forces
        by vmapping ``force(R, V, params)`` over all states."""
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
if ind > maxtraj+skip:
break
_ind = ind*runs
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R = dataset_states[ind].position[0]
V = dataset_states[ind].velocity[0]
try:
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
# print("shape")
# print(actual_traj.velocity.shape)
# print(actual_traj.velocity.sum(axis=1).shape)
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGNN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"LGNN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"LGNN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
    def make_plots(nexp, key, yl="Err"):
        """Plot per-trajectory error curves and their geometric-mean summary.

        First figure: one curve per trajectory for ``nexp[key]``.
        Second figure: mean +/- 2 std computed in log space (i.e. the
        geometric mean with a multiplicative 2-sigma band).
        """
        print(f"Plotting err for {key}")
        fig, axs = panel(1, 1)
        for i in range(len(nexp[key])):
            if semilog:
                plt.semilogy(nexp[key][i].flatten())
            else:
                plt.plot(nexp[key][i].flatten())
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_{key}.png"))
        fig, axs = panel(1, 1)
        # Statistics are taken over trajectories (axis=0) in log space.
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/lgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/lgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/lgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/lgnn.txt", [t/maxtraj], delimiter = "\n")
main(N = 5)
| 19,812 | 32.524535 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
class Datastate:
    """Training view of a simulated trajectory.

    Keeps every frame except the last, plus the per-step change in
    position/velocity (finite differences between consecutive frames)
    used as regression targets.
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        self.index = 0
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
def namestr(obj, namespace):
    """Return every name in ``namespace`` bound to exactly ``obj`` (identity match)."""
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as ``<variable name>: <value>``.

    NOTE: ``namespace=globals()`` is evaluated at definition time; pass
    ``namespace=locals()`` when calling from inside a function. Raises
    IndexError for objects not bound to any name in the namespace.
    """
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def wrap_main(f):
    """Wrap ``f`` so its call configuration is echoed and forwarded.

    The wrapper prints all positional and keyword arguments, then calls
    ``f`` with the same arguments plus ``config=(args, kwargs)`` so the
    callee can persist the exact run configuration.
    """
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=4, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=0):
    """CLI entry point (exposed via fire.Fire): forwards all hyperparameters
    to ``main`` through ``wrap_main`` so the configuration is printed and
    passed along as ``config``."""
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat, error_fn=error_fn,
                           dt=dt, ifdrag=ifdrag, stride=stride, trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, if_noisy_data=0):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-body"
TAG = f"fgnn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
    def _filename(name, tag=TAG):
        """Build (and create the directory for) an output path.

        Datasets (tag == "data") are read from ../results/<sys>-data/1/;
        everything else is written under ``out_dir``.
        """
        # rstring = randfilename if (rname and (tag != "data")) else (
        #     "1" if (tag == "data") or (withdata == None) else f"{withdata}")
        rstring = "1" if (tag == "data") else "0"
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{1}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
    def displacement(a, b):
        """Free-space displacement vector from ``b`` to ``a``."""
        return a - b
    def shift(R, dR, V):
        """Advance positions by ``dR``; velocities pass through unchanged."""
        return R+dR, V
    def OUT(f):
        """Decorator: route ``f``'s first argument (a file name) through
        ``_filename`` so outputs land in the experiment directory."""
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# if len(dataset_states)*model_states.position.shape[0] != 10000:
# raise Exception("Invalid number of data points")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
# Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
# Rs = Rs.reshape(-1, N, dim)
# Vs = Vs.reshape(-1, N, dim)
# Fs = Fs.reshape(-1, N, dim)
Rs, Vs, Fs, Rds, Vds = States_modified().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
Rds = Rds.reshape(-1, N, dim)
Vds = Vds.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Rds = np.array(Rds)
Fs = np.array(Fs)
Vs = np.array(Vs)
Vds = np.array(Vds)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Rds[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Vds[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Rds = jnp.array(Rds)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
Vds = jnp.array(Vds)
# mask = np.random.choice(len(Rs), len(Rs), replace=False)
# allRs = Rs[mask]
# allVs = Vs[mask]
# allFs = Fs[mask]
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allRds = Rds[mask]
allVds = Vds[mask]
# Ntr = int(0.75*len(Rs))
# Nts = len(Rs) - Ntr
# Rs = allRs[:Ntr]
# Vs = allVs[:Ntr]
# Fs = allFs[:Ntr]
# Rst = allRs[Ntr:]
# Vst = allVs[Ntr:]
# Fst = allFs[Ntr:]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rds = allRds[:Ntr]
Vds = allVds[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Rdst = allRds[Ntr:]
Vdst = allVds[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# if grid:
# print("It's a grid?")
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# print("It's a random?")
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R, V = Rs[0], Vs[0]
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
delta_params = initialize_mlp([ne, *hidden_dim, dim*2], key),
)
    def acceleration_fn(params, graph):
        """Predict per-node state deltas with the full graph network.

        NOTE(review): ``mpass=1`` is hard-coded here; the outer ``mpass``
        argument is not forwarded — confirm whether that is intended.
        """
        acc = fgn.cal_delta(params, graph, mpass=1)
        return acc
    def acc_fn(species):
        """Build ``apply(R, V, params)`` that refreshes a shared graph
        template (positions, velocities, pair distances) in place and
        evaluates the delta predictor on it."""
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})
        def apply(R, V, params):
            # In-place mutation of the closed-over template graph.
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = jit(accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
    @jit
    def loss_fn(params, Rs, Vs, Rds, Vds):
        """MSE between predicted per-step deltas and the true
        (position-change, velocity-change) targets stacked on the last axis."""
        pred = v_acceleration_fn_model(Rs, Vs, params)
        return MSE(pred, jnp.concatenate([Rds,Vds], axis=2))
    @jit
    def gloss(*args):
        """Loss value and its gradient w.r.t. the first argument (params)."""
        return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
    @ jit
    def opt_update(i, grads_, opt_state):
        """Adam step with gradient sanitization: NaNs are zeroed and every
        gradient entry is clipped to [-1000, 1000] before the update."""
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        grads_ = jax.tree_map(
            partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)
    @jit
    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        # NOTE(review): ``loss__`` is accepted but never used.
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value
    @ jit
    def step(i, ps, *args):
        """Convenience wrapper: unpack the (opt_state, params, loss) tuple into ``update``."""
        return update(i, *ps, *args)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
bRs, bVs, bRds, bVds = batching(Rs, Vs, Rds, Vds,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Rds, Vds)]
ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
    def print_loss():
        """Print the latest train/test loss (reads the closed-over epoch counter and loss histories)."""
        print(
            f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bRds, bVds):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
np.savetxt(f"../{N}-body-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
fire.Fire(Main)
| 17,475 | 30.488288 | 162 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-GNODE-post.py | ################################################
################## IMPORT ######################
################################################
import imp
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
# from sympy import fu
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn1
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV,acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Names in ``namespace`` whose value is ``obj`` itself (identity match)."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as ``<variable name>: <value>`` resolved via ``namestr``."""
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"gnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    def _filename(name, tag=TAG, trained=None):
        """Build (and create the directory for) an output path for this experiment.

        The stem of ``name`` gets a drag/trainm suffix before the extension;
        ``trained`` swaps in a different particle count so a model trained on
        one system size can be evaluated on another.
        """
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            # Evaluate against a model trained on a different system size.
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
        rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
        if (ifDataEfficiency == 1):
            # Data-efficiency runs are keyed by the number of training points.
            rstring = "0_" + str(data_points)
        if (tag == "data"):
            # Datasets are always read from ../results, not out_dir.
            filename_prefix = f"../results/{PSYS}-{tag}/0_test/"
        else:
            filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
    def displacement(a, b):
        """Free-space displacement vector from ``b`` to ``a``."""
        return a - b
    def shift(R, dR, V):
        """Advance positions by ``dR``; velocities pass through unchanged."""
        return R+dR, V
    def OUT(f):
        """Decorator: route ``f``'s first argument (a file name) through
        ``_filename`` so outputs land in the experiment directory."""
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
        return func
    def _fileexist(f):
        """True when ``f`` exists on disk, unless ``redo`` forces recomputation."""
        if redo:
            return False
        else:
            return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
# return vmap(partial(lnn1.SPRING, stiffness=1.0, length=1.0))(dr).sum()
    def pot_energy_orig(x):
        """Ground-truth gravitational potential summed over all directed pairs;
        the /2 corrects for each pair appearing in both directions.

        NOTE(review): this references ``lnn.GRAVITATIONAL`` but the file's
        explicit imports only bring in ``lnn1``; unless a wildcard import
        (src.graph1/md/utils) exposes ``lnn``, this is a NameError — likely
        should be ``lnn1.GRAVITATIONAL``. Verify before running.
        """
        dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
        return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
    # Kinetic energy with fixed unit masses (closure over `masses`).
    kin_energy = partial(lnn1._T, mass=masses)
    def Lactual(x, v, params):
        """Ground-truth Lagrangian L = T - V; ``params`` is unused."""
        return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
    # Select the ground-truth drag model used to generate the data.
    if ifdrag == 0:
        print("Drag: 0.0")
        # No dissipation.
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")
        # Linear viscous drag, returned as a flattened (N*dim, 1) column.
        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn1.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# print(sim_orig(R, V))
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
# ################################################
# ################### ML Model ###################
# ################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
def graph_force_fn(params, graph):
_GForce = a_cdgnode_cal_force_q_qdot(params, graph, eorder=None,
useT=True)
return _GForce
def _force_fn(species):
# senders, receivers = [np.array(i)
# for i in get_fully_connected_senders_and_receivers(N)]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return graph_force_fn(params, state_graph)
return apply
apply_fn = _force_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# x=R
# v=V
# F_q_qdot(R, V, params)
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = F_q_qdot
# acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
# constraints=None,
# non_conservative_forces=None)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
# print(R.shape,V.shape)
# print(F_q_qdot(R, V, params))
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
# sim_model(R, V)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def normp(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def RelErrp(ya, yp):
return normp(ya-yp) / (normp(ya) + normp(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip_count = 0
t=0
for ind in range(maxtraj):
try:
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[0].position[ind*69]
V = dataset_states[0].velocity[ind*69]
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+= end-start
nexp["simulation_time"] += [end-start]
if saveovito:
if ind<5:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
if ind<5:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 2, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
# lw=6, alpha=0.5)
# # axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs[0])
# xlabel("Time step", ax=axs[1])
# ylabel("Energy", ax=axs[0])
# ylabel("Energy", ax=axs[1])
# title = f"LGNN {N}-Spring Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(
# " ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"{N}-Spring Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"{N}-Spring Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
else:
pass
# Es = Es_fn(actual_traj) #jnp.array([PE, KE, L, KE+PE]).T
# H = Es[:, -1]
# L = Es[:, 2]
# Eshat = Es_fn(pred_traj)
# KEhat = Eshat[:, 1]
# Lhat = Eshat[:, 2]
# k = L[5]/Lhat[5]
# print(f"scalling factor: {k}")
# Lhat = Lhat*k
# Hhat = 2*KEhat - Lhat
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
# nexp["Perr"] += [jnp.square(actual_traj.velocity.sum(axis=1) -
# pred_traj.velocity.sum(axis=1)).sum(axis=1)]#/(jnp.square(actual_traj.velocity.sum(axis=1)).sum(axis=1)+jnp.square(pred_traj.velocity.sum(axis=1)).sum(axis=1))]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
except:
skip_count += 1
pass
print(f'skipped loop: {skip_count}')
    def make_plots(nexp, key, yl="Err"):
        """Plot error curves for metric `key` from the experiment dict `nexp`.

        Produces two figures: one with every trajectory's error curve overlaid,
        and one with the geometric mean across trajectories plus a 2-sigma band
        computed in log space. Uses the closed-over `semilog` flag, `panel`
        figure helper and `_filename` path helper; saves PNGs as a side effect.
        """
        print(f"Plotting err for {key}")
        fig, axs = panel(1, 1)
        # One curve per trajectory.
        for i in range(len(nexp[key])):
            if semilog:
                plt.semilogy(nexp[key][i].flatten())
            else:
                plt.plot(nexp[key][i].flatten())
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_{key}.png"))
        fig, axs = panel(1, 1)
        # Geometric mean/std over trajectories: statistics taken in log space.
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
np.savetxt(f"../{N}-nbody-zerr/gnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/gnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/gnode.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/gnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
| 21,131 | 34.22 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-LGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
import time
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".."
sys.path.append(MAINPATH)
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """Return every name in `namespace` bound (by identity) to `obj`."""
    matches = []
    for candidate in namespace:
        if namespace[candidate] is obj:
            matches.append(candidate)
    return matches
def pprint(*args, namespace=globals()):
    """Print 'name: value' for each argument, resolving the variable name by
    identity lookup in `namespace` via `namestr`."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=4, epochs=10000, seed=42, rname=False, saveat=10,dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=10, ifDataEfficiency = 0):
    """Train an LGNN (Lagrangian graph neural network) on N-body trajectory data.

    Loads pre-generated states from disk, splits 75/25 into train/test,
    builds a fully-connected graph Lagrangian model, and fits it with Adam on
    the MSE between predicted and recorded accelerations. Checkpoints and
    loss curves are written under the computed output directory.

    Key parameters:
        N: number of bodies (overwritten below from the dataset shape).
        epochs, lr, batch_size, saveat: optimization schedule/checkpointing.
        ifdrag: 0 -> no drag term; 1 -> learned NN drag.
        trainm: nonzero -> learnable kinetic energy; 0 -> 0.5*m*v^2.
        ifDataEfficiency: 1 -> read dataset size from sys.argv[1] and use
            alternate output paths.
    """
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(N, epochs, seed, rname,
           dt, stride, lr, ifdrag, batch_size,
           namespace=locals())
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-body"
    TAG = f"lgnn"
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Resolve an output path under out_dir, creating directories as needed.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        return a - b

    def shift(R, dR, V):
        return R+dR, V

    def OUT(f):
        # NOTE(review): f is invoked TWICE here (once for the print, once for
        # the return), so every OUT-wrapped I/O call runs its side effects twice.
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            print(f(_filename(file, tag=tag), *args, **kwargs))
            return f(_filename(file, tag=tag), *args, **kwargs)
        #print(func)
        return func
    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        # NOTE(review): bare except masks the original failure (I/O, unpickle).
        raise Exception("Generate dataset first.")
    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]
    model_states = dataset_states[0]
    print(
        f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N and dim are re-derived from the data, overriding the N argument.
    N, dim = model_states.position.shape[-2:]
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)
    Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
    Rs = Rs.reshape(-1, N, dim)
    Vs = Vs.reshape(-1, N, dim)
    Fs = Fs.reshape(-1, N, dim)
    # Shuffle once, then 75/25 train/test split.
    mask = np.random.choice(len(Rs), len(Rs), replace=False)
    allRs = Rs[mask]
    allVs = Vs[mask]
    allFs = Fs[mask]
    Ntr = int(0.75*len(Rs))
    Nts = len(Rs) - Ntr
    Rs = allRs[:Ntr]
    Vs = allVs[:Ntr]
    Fs = allFs[:Ntr]
    Rst = allRs[Ntr:]
    Vst = allVs[Ntr:]
    Fst = allFs[Ntr:]
    ################################################
    ################## SYSTEM ######################
    ################################################
    # (Commented-out ground-truth system construction removed; see repo history.)
    ################################################
    ################### ML Model ###################
    ################################################
    # Fully-connected interaction graph over the N bodies.
    senders, receivers = get_fully_connected_senders_and_receivers(N)
    eorder = get_fully_edge_order(N)
    # Embedding / hidden sizes for the graph network MLPs.
    Ef = 1 # eij dim
    Nf = dim
    Oh = 1
    Eei = 5
    Nei = 5
    hidden = 5
    nhidden = 2

    def get_layers(in_, out_):
        return [in_] + [hidden]*nhidden + [out_]

    def mlp(in_, out_, key, **kwargs):
        return initialize_mlp(get_layers(in_, out_), key, **kwargs)
    fneke_params = initialize_mlp([Oh, Nei], key)
    fne_params = initialize_mlp([Oh, Nei], key)
    fb_params = mlp(Ef, Eei, key)
    fv_params = mlp(Nei+Eei, Nei, key)
    fe_params = mlp(Nei, Eei, key)
    ff1_params = mlp(Eei, 1, key)
    ff2_params = mlp(Nei, 1, key)
    ff3_params = mlp(dim+Nei, 1, key)
    ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
    Lparams = dict(fb=fb_params,
                   fv=fv_params,
                   fe=fe_params,
                   ff1=ff1_params,
                   ff2=ff2_params,
                   ff3=ff3_params,
                   fne=fne_params,
                   fneke=fneke_params,
                   ke=ke_params)
    if trainm:
        print("kinetic energy: learnable")

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                                useT=True, useonlyedge=True)
            return T - V
    else:
        print("kinetic energy: 0.5mv^2")
        kin_energy = partial(lnn._T, mass=masses)

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                                useT=True, useonlyedge=True)
            return kin_energy(graph.nodes["velocity"]) - V
    R, V = Rs[0], Vs[0]
    # Template graph used once to sanity-check the energy function below.
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    L_energy_fn(Lparams, state_graph)

    def energy_fn(species):
        # Closes over a template graph; apply() mutates node features in place.
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return L_energy_fn(params, state_graph)
        return apply
    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
    params = {"L": Lparams}

    def nndrag(v, params):
        # Learned drag magnitude, constrained non-positive along v.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: nn")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
    # Drag params are allocated even when ifdrag == 0 (unused in that case).
    params["drag"] = initialize_mlp([1, 5, 5, 1], key)
    acceleration_fn_model = accelerationFull(N, dim,
                                             lagrangian=Lmodel,
                                             constraints=None,
                                             non_conservative_forces=drag)
    v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
    ################################################
    ################## ML Training #################
    ################################################
    @jit
    def loss_fn(params, Rs, Vs, Fs):
        pred = v_acceleration_fn_model(Rs, Vs, params)
        return MSE(pred, Fs)

    def gloss(*args):
        return value_and_grad(loss_fn)(*args)

    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)
    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @ jit
    def opt_update(i, grads_, opt_state):
        # NaN-scrub and clip gradients to [-1000, 1000] before the Adam step.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        grads_ = jax.tree_map(
            partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)

    def batching(*args, size=None):
        # Split each array into equal batches, choosing the batch count that
        # wastes the fewest trailing samples.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs
    bRs, bVs, bFs = batching(Rs, Vs, Fs,
                             size=min(len(Rs), batch_size))
    print(f"training ...")
    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []
    ltarray = []
    last_loss = 1000
    start = time.time()
    train_time_arr = []
    for epoch in range(epochs):
        l = 0.0
        count = 0
        for data in zip(bRs, bVs, bFs):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
            l += l_
            count += 1
        l = l/count
        if epoch % 1 == 0:
            larray += [l]
            ltarray += [loss_fn(params, Rst, Vst, Fst)]
            print(
                f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
        if epoch % saveat == 0:
            metadata = {
                "savedat": epoch,
                "mpass": mpass,
                "grid": grid,
                "ifdrag": ifdrag,
                "trainm": trainm,
            }
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata=metadata)
            # Keep a separate best-so-far checkpoint.
            if last_loss > larray[-1]:
                last_loss = larray[-1]
                savefile(f"trained_model_{ifdrag}_{trainm}_low.dil",
                         params, metadata=metadata)
        now = time.time()
        train_time_arr.append((now - start))
    fig, axs = plt.subplots(1, 1)
    plt.semilogy(larray, label="Training")
    plt.semilogy(ltarray, label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    metadata = {
        "savedat": epoch,
        "mpass": mpass,
        "grid": grid,
        "ifdrag": ifdrag,
        "trainm": trainm,
    }
    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata=metadata)
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata=metadata)
    if (ifDataEfficiency == 0):
        np.savetxt(f"../{N}-body-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../{N}-body-training-loss/lgnn-train.txt", larray, delimiter = "\n")
        np.savetxt(f"../{N}-body-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")
main()
| 14,226 | 29.530043 | 204 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-HGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return the names in `namespace` whose value is `obj` (identity match)."""
    found = []
    for key in namespace:
        if namespace[key] is obj:
            found.append(key)
    return found
def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value', resolving names via `namestr`."""
    for item in args:
        print(f"{namestr(item, namespace)[0]}: {item}")
def wrap_main(f):
    """Wrap `f`: log the call's positional and keyword arguments, then invoke
    f with an extra config=(args, kwargs) keyword recording the call."""
    def wrapped(*args, **kwargs):
        call_signature = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for name, value in kwargs.items():
            print(name, ":", value)
        return f(*args, **kwargs, config=call_signature)
    return wrapped
def Main(N=5, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=1):
    """Entry point: forward every knob to `main` through `wrap_main` (which
    logs the call and injects the `config` record)."""
    forwarded = dict(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat,
                     error_fn=error_fn, dt=dt, ifdrag=ifdrag, stride=stride,
                     trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                     withdata=withdata, datapoints=datapoints,
                     batch_size=batch_size, if_noisy_data=if_noisy_data)
    return wrap_main(main)(**forwarded)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Spring"
TAG = f"hgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"2" if (tag == "data") or (withdata == None) else f"{withdata}")
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
z_out, zdot_out = model_states
print(
f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
N2, dim = z_out.shape[-2:]
N = N2//2
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
array = jnp.array([jnp.array(i) for i in dataset_states])
Zs = array[:, 0, :, :, :]
Zs_dot = array[:, 1, :, :, :]
Zs = Zs.reshape(-1, N2, dim)
Zs_dot = Zs_dot.reshape(-1, N2, dim)
if (if_noisy_data == 1):
Zs = np.array(Zs)
Zs_dot = np.array(Zs_dot)
np.random.seed(100)
for i in range(len(Zs)):
Zs[i] += np.random.normal(0,1,1)
Zs_dot[i] += np.random.normal(0,1,1)
Zs = jnp.array(Zs)
Zs_dot = jnp.array(Zs_dot)
mask = np.random.choice(len(Zs), len(Zs), replace=False)
allZs = Zs[mask]
allZs_dot = Zs_dot[mask]
Ntr = int(0.75*len(Zs))
Nts = len(Zs) - Ntr
Zs = allZs[:Ntr]
Zs_dot = allZs_dot[:Ntr]
Zst = allZs[Ntr:]
Zst_dot = allZs_dot[Ntr:]
# def phi(x):
# X = jnp.vstack([x[:1, :]*0, x])
# return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
# constraints = get_constraints(N, dim, phi)
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
if grid:
print("It's a grid?")
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
print("It's a random?")
# senders, receivers = get_fully_connected_senders_and_receivers(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
R, V = jnp.split(Zs[0], 2, axis=0)
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
#species = jnp.zeros(N, dtype=int)
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Zs_dot):
pred = v_zdot_model(Rs, Vs, params)
return MSE(pred, Zs_dot)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(
partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
Rs, Vs = jnp.split(Zs, 2, axis=1)
Rst, Vst = jnp.split(Zst, 2, axis=1)
bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Zs_dot)]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
# Closure over epoch/epochs/error_fn/larray/ltarray from the enclosing
# training scope: prints the latest train and test losses.
def print_loss():
    print(
        f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bZs_dot):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../5-spring-training-time/hgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../5-spring-training-loss/hgn-train.txt", larray, delimiter = "\n")
np.savetxt("../5-spring-training-loss/hgn-test.txt", ltarray, delimiter = "\n")
#fire.Fire(Main)
Main()
| 15,726 | 29.361004 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-FGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly the object *obj*."""
    return [name for name, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving names via *namespace*."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def wrap_main(f):
    """Decorator: log the full call signature, then forward it to *f* as `config`."""
    def wrapped(*args, **kwargs):
        captured = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=captured)
    return wrapped
def Main(N=5, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=1000, ifDataEfficiency = 0, if_noisy_data=1):
    """CLI-facing entry point: forwards every hyper-parameter to main() via wrap_main.

    Bug fix: `if_noisy_data` was previously forwarded as the hard-coded
    literal 1, silently ignoring the caller's value; it is now passed through.
    """
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat,
                           error_fn=error_fn, dt=dt, ifdrag=ifdrag, stride=stride,
                           trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size,
                           ifDataEfficiency=ifDataEfficiency, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=1):
    """Train a full-graph neural ODE (FGNODE) acceleration model on N-spring data.

    Loads pre-generated trajectories, optionally corrupts them with Gaussian
    noise, builds a jraph message-passing model predicting accelerations,
    trains it with Adam on MSE against observed accelerations, and saves the
    model, loss curves and wall-clock timing to disk.
    """
    # print("Configs: ")
    # pprint(N, epochs, seed, rname,
    #        dt, stride, lr, ifdrag, batch_size,
    #        namespace=locals())
    # Data-efficiency mode: dataset size comes from the command line.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-Spring"
    TAG = f"fgnode"
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Resolve an output path; dataset files always live under ../results.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        # Free-space displacement: plain coordinate difference.
        return a - b

    def shift(R, dR, V):
        # Position update for the integrator; velocities pass through.
        return R+dR, V

    def OUT(f):
        # Route the first (file-name) argument of f through _filename().
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    savefile(f"config_{ifdrag}_{trainm}.pkl", config)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        raise Exception("Generate dataset first. Use *-data.py file.")
    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]
    model_states = dataset_states[0]
    print(
        f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    N, dim = model_states.position.shape[-2:]
    species = jnp.zeros((N, 1), dtype=int)
    masses = jnp.ones((N, 1))
    Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
    Rs = Rs.reshape(-1, N, dim)
    Vs = Vs.reshape(-1, N, dim)
    Fs = Fs.reshape(-1, N, dim)
    # Optional Gaussian corruption of the dataset.
    # NOTE(review): np.random.normal(0, 1, 1) draws one scalar per frame and
    # broadcasts it over all particles/dims — possibly per-element noise was
    # intended; confirm.
    if (if_noisy_data == 1):
        Rs = np.array(Rs)
        Fs = np.array(Fs)
        Vs = np.array(Vs)
        np.random.seed(100)
        for i in range(len(Rs)):
            Rs[i] += np.random.normal(0,1,1)
            Vs[i] += np.random.normal(0,1,1)
            Fs[i] += np.random.normal(0,1,1)
        Rs = jnp.array(Rs)
        Fs = jnp.array(Fs)
        Vs = jnp.array(Vs)
    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Rs), len(Rs), replace=False)
    allRs = Rs[mask]
    allVs = Vs[mask]
    allFs = Fs[mask]
    Ntr = int(0.75*len(Rs))
    Nts = len(Rs) - Ntr
    Rs = allRs[:Ntr]
    Vs = allVs[:Ntr]
    Fs = allFs[:Ntr]
    Rst = allRs[Ntr:]
    Vst = allVs[Ntr:]
    Fst = allFs[Ntr:]
    ################################################
    ################## SYSTEM ######################
    ################################################
    # pot_energy_orig = PEF
    # kin_energy = partial(lnn._T, mass=masses)
    # def Lactual(x, v, params):
    #     return kin_energy(v) - pot_energy_orig(x)
    # def constraints(x, v, params):
    #     return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
    # def external_force(x, v, params):
    #     F = 0*R
    #     F = jax.ops.index_update(F, (1, 1), -1.0)
    #     return F.reshape(-1, 1)
    # def drag(x, v, params):
    #     return -0.1*v.reshape(-1, 1)
    # acceleration_fn_orig = lnn.accelerationFull(N, dim,
    #                                             lagrangian=Lactual,
    #                                             non_conservative_forces=None,
    #                                             constraints=constraints,
    #                                             external_force=None)
    # def force_fn_orig(R, V, params, mass=None):
    #     if mass is None:
    #         return acceleration_fn_orig(R, V, params)
    #     else:
    #         return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
    # @jit
    # def forward_sim(R, V):
    #     return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
    ################################################
    ################### ML Model ###################
    ################################################
    # Build the interaction graph: grid connectivity or a chain.
    if grid:
        print("It's a grid?")
        a = int(np.sqrt(N))
        senders, receivers = get_connections(a, a)
        eorder = edge_order(len(senders))
    else:
        print("It's a random?")
        # senders, receivers = get_fully_connected_senders_and_receivers(N)
        print("Creating Chain")
        _, _, senders, receivers = chain(N)
        eorder = edge_order(len(senders))
    R, V = Rs[0], Vs[0]

    def dist(*args):
        # Euclidean length of the displacement between two points.
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())

    dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    # if trainm:
    #     print("kinetic energy: learnable")
    #     def L_energy_fn(params, graph):
    #         L = fgn.cal_energy(params, graph, mpass=mpass)
    #         return L
    # else:
    #     print("kinetic energy: 0.5mv^2")
    #     kin_energy = partial(lnn._T, mass=masses)
    #     raise Warning("KE = 0.5mv2 not implemented")
    #     # def L_energy_fn(params, graph):
    #     #     g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
    #     #                         useT=True, useonlyedge=True)
    #     #     return kin_energy(graph.nodes["velocity"]) - V
    # MLP sizes for the edge/node embeddings and message-passing layers.
    hidden_dim = [16, 16]
    edgesize = 1
    nodesize = 5
    ee = 8
    ne = 8
    Lparams = dict(
        ee_params=initialize_mlp([edgesize, ee], key),
        ne_params=initialize_mlp([nodesize, ne], key),
        e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
        n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
        g_params=initialize_mlp([ne, *hidden_dim, 1], key),
        acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
    )

    def acceleration_fn(params, graph):
        # Per-node acceleration from one round of message passing.
        acc = fgn.cal_acceleration(params, graph, mpass=1)
        return acc

    def acc_fn(species):
        # Close over a mutable GraphsTuple so apply() only swaps node/edge data.
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply

    apply_fn = acc_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])

    params = {"L": Lparams}
    # Smoke-test one forward pass before training.
    print(acceleration_fn_model(R, V, params))
    # print("lag: ", Lmodel(R, V, params))
    # def nndrag(v, params):
    #     return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    # if ifdrag == 0:
    #     print("Drag: 0.0")
    #     def drag(x, v, params):
    #         return 0.0
    # elif ifdrag == 1:
    #     print("Drag: nn")
    #     def drag(x, v, params):
    #         return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
    # params["drag"] = initialize_mlp([1, 5, 5, 1], key)
    # acceleration_fn_model = jit(accelerationFull(N, dim,
    #                                              lagrangian=Lmodel,
    #                                              constraints=None,
    #                                              non_conservative_forces=drag))
    v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
    ################################################
    ################## ML Training #################
    ################################################
    #LOSS = getattr(src.models, error_fn)

    @jit
    def loss_fn(params, Rs, Vs, Fs):
        # MSE between predicted and observed accelerations.
        pred = v_acceleration_fn_model(Rs, Vs, params)
        return MSE(pred, Fs)

    @jit
    def gloss(*args):
        # Loss value and gradient w.r.t. params.
        return value_and_grad(loss_fn)(*args)

    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @ jit
    def opt_update(i, grads_, opt_state):
        # Scrub NaNs and clip gradients before the Adam update.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        grads_ = jax.tree_map(
            partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)

    @jit
    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def step(i, ps, *args):
        # Unpack (opt_state, params, loss) and run one update.
        return update(i, *ps, *args)

    def batching(*args, size=None):
        # Split each array into equal leading-axis batches; trailing samples
        # that do not fill a whole batch are dropped.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs

    bRs, bVs, bFs = batching(Rs, Vs, Fs,
                             size=min(len(Rs), batch_size))
    print(f"training ...")
    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []
    ltarray = []
    last_loss = 1000
    start = time.time()
    train_time_arr = []
    larray += [loss_fn(params, Rs, Vs, Fs)]
    ltarray += [loss_fn(params, Rst, Vst, Fst)]

    def print_loss():
        # Report the latest train/test losses.
        print(
            f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")

    print_loss()
    for epoch in range(epochs):
        l = 0.0
        count = 0
        for data in zip(bRs, bVs, bFs):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
            l += l_
            count += 1
        # optimizer_step += 1
        # opt_state, params, l_ = step(
        #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
        l = l/count
        if epoch % 1 == 0:
            larray += [l]
            ltarray += [loss_fn(params, Rst, Vst, Fst)]
            print_loss()
        if epoch % saveat == 0:
            metadata = {
                "savedat": epoch,
                "mpass": mpass,
                "grid": grid,
                "ifdrag": ifdrag,
                "trainm": trainm,
            }
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata=metadata)
            # Track the best-so-far model in a separate "low" checkpoint.
            if last_loss > larray[-1]:
                last_loss = larray[-1]
                savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                         params, metadata=metadata)
        now = time.time()
        train_time_arr.append((now - start))
    fig, axs = panel(1, 1)
    plt.semilogy(larray[1:], label="Training")
    plt.semilogy(ltarray[1:], label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    metadata = {
        "savedat": epoch,
        "mpass": mpass,
        "grid": grid,
        "ifdrag": ifdrag,
        "trainm": trainm,
    }
    params = get_params(opt_state)
    # NOTE(review): this writes the FINAL params over the best-so-far "low"
    # checkpoint; the sibling HGN script saves the final model under
    # trained_model_* instead — confirm which is intended.
    savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
             params, metadata=metadata)
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata=metadata)
    if (ifDataEfficiency == 0):
        np.savetxt("../5-spring-training-time/fgnode.txt", train_time_arr, delimiter = "\n")
        np.savetxt("../5-spring-training-loss/fgnode-train.txt", larray, delimiter = "\n")
        np.savetxt("../5-spring-training-loss/fgnode-test.txt", ltarray, delimiter = "\n")
# fire.Fire(Main)
# Script entry: run FGNODE training with default hyper-parameters on import/run.
Main()
| 16,226 | 29.84981 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CLGNN-post.py | ################################################
################## IMPORT ######################
################################################
# from fcntl import F_SEAL_SEAL
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
#from shadow.plot import *
import matplotlib.pyplot as plt
#from sklearn.metrics import r2_score
# from scipy.stats import gmean
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly the object *obj*."""
    return [name for name, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving names via *namespace*."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
    """Evaluate a trained CLGNN on the N-pendulum system.

    Rebuilds the analytic (ground-truth) constrained Lagrangian dynamics and
    the learned graph-network Lagrangian, rolls out `maxtraj` trajectories
    with both, and saves rollout/energy relative-error plots, OVITO dumps,
    and geometric-mean error curves. `useN` selects which trained model
    (by system size) to load.
    """
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(dt, stride, ifdrag,
           namespace=locals())
    PSYS = f"{N}-Pendulum"
    TAG = f"clgnn"
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG, trained=None):
        # Build an output path; `trained` swaps the system prefix so models
        # trained on a different N can be evaluated here.
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + \
            part + name.split(".")[-1]
        rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        # Free-space displacement: plain coordinate difference.
        return a - b

    def shift(R, dR, V):
        # Position update for the integrator; velocities pass through.
        return R+dR, V

    def OUT(f):
        # Route the first (file-name) argument of f through _filename().
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained),
                     *args, **kwargs)
        return func

    def _fileexist(f):
        # `redo` forces regeneration by pretending the file is absent.
        if redo:
            return False
        else:
            return os.path.isfile(f)

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    fileexist = OUT(_fileexist)
    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    # dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
    # model_states = dataset_states[0]
    # R = model_states.position[0]
    # V = model_states.velocity[0]
    # print(
    #     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N, dim = model_states.position.shape[-2:]
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)
    ################################################
    ################## SYSTEM ######################
    ################################################
    # Ground-truth constrained Lagrangian dynamics.
    pot_energy_orig = PEF
    kin_energy = partial(lnn._T, mass=masses)

    def Lactual(x, v, params):
        return kin_energy(v) - pot_energy_orig(x)

    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. positions.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)

    def external_force(x, v, params):
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)

    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                                lagrangian=Lactual,
                                                non_conservative_forces=drag,
                                                constraints=constraints,
                                                external_force=None)

    def force_fn_orig(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)

    def get_forward_sim(params=None, force_fn=None, runs=10):
        # Jitted fixed-length rollout with the given force function.
        @jit
        def fn(R, V):
            return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
        return fn

    sim_orig = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=maxtraj*runs)

    def simGT():
        # Simulate and cache a long ground-truth trajectory.
        print("Simulating ground truth ...")
        _traj = sim_orig(R, V)
        metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
        savefile("gt_trajectories.pkl",
                 _traj, metadata=metadata)
        return _traj

    # if fileexist("gt_trajectories.pkl"):
    #     print("Loading from saved.")
    #     full_traj, metadata = loadfile("gt_trajectories.pkl")
    #     full_traj = NVEStates(full_traj)
    #     if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
    #         print("Metadata doesnot match.")
    #         full_traj = NVEStates(simGT())
    # else:
    #     full_traj = NVEStates(simGT())
    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)
    # def L_energy_fn(params, graph):
    #     g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
    #     return T - V
    if trainm:
        print("kinetic energy: learnable")

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, eorder=eorder,
                                useT=True)
            return T - V
    else:
        print("kinetic energy: 0.5mv^2")
        kin_energy = partial(lnn._T, mass=masses)

        def L_energy_fn(params, graph):
            g, V, T = cal_graph(params, graph, eorder=eorder,
                                useT=True)
            return kin_energy(graph.nodes["velocity"]) - V

    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def energy_fn(species):
        # Close over a mutable GraphsTuple; apply() only swaps node data.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return L_energy_fn(params, state_graph)
        return apply

    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Lmodel(x, v, params): return apply_fn(x, v, params["L"])

    def nndrag(v, params):
        # Learned dissipative drag; magnitude forced non-positive times v.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    # Re-bind `drag` for the learned model; the analytic drag above was
    # already captured by acceleration_fn_orig.
    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        # NOTE(review): message says -0.1*v but the learned nndrag is used here.
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)

    acceleration_fn_model = accelerationFull(N, dim,
                                             lagrangian=Lmodel,
                                             constraints=constraints,
                                             non_conservative_forces=drag)

    def force_fn_model(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_model(R, V, params)
        else:
            return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)

    params = loadfile(f"trained_model_low.dil", trained=useN)[0]
    sim_model = get_forward_sim(
        params=params, force_fn=force_fn_model, runs=runs)
    ################################################
    ############## forward simulation ##############
    ################################################

    def norm(a):
        # Per-timestep L2 norm over all remaining axes.
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))

    def RelErr(ya, yp):
        # Symmetric relative error in [0, 1].
        return norm(ya-yp) / (norm(ya) + norm(yp))

    def Err(ya, yp):
        return ya-yp

    def AbsErr(*args):
        return jnp.abs(Err(*args))

    def cal_energy_fn(lag=None, params=None):
        # Energy decomposition [PE, KE, L, TE] along a trajectory.
        @jit
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            L = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = -(L - KE)
            return jnp.array([PE, KE, L, KE+PE]).T
        return fn

    Es_fn = cal_energy_fn(lag=Lactual, params=None)
    Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)

    def net_force_fn(force=None, params=None):
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn

    net_force_orig_fn = net_force_fn(force=force_fn_orig)
    net_force_model_fn = net_force_fn(
        force=force_fn_model, params=params)
    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }
    trajectories = []
    sim_orig2 = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=runs)
    t = 0.0
    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")
        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs
        R, V = get_init(N, dim=dim, angles=(-90, 90))
        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]
        actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
        # Only the learned-model rollout is timed.
        start = time.time()
        pred_traj = sim_model(R, V)
        end = time.time()
        t += end - start
        if saveovito:
            save_ovito(f"pred_{ind}.data", [
                state for state in NVEStates(pred_traj)], lattice="")
            save_ovito(f"actual_{ind}.data", [
                state for state in NVEStates(actual_traj)], lattice="")
        trajectories += [(actual_traj, pred_traj)]
        savefile("trajectories.pkl", trajectories)
        if plotthings:
            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")
                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                # Align the learned energies to the true initial energy.
                Es_pred = Es_pred - Es_pred[0] + Es[0]
                fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")
                plt.xlabel("Time step")
                plt.ylabel("Energy")
                title = f"(CLGNN) {N}-Pendulum Exp {ind}"
                plt.title(title)
                plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)
                plt.clf()
                fig, axs = plt.subplots(1+R.shape[0], 1, figsize=(20,
                                                                  R.shape[0]*5))
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Averaged over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                            r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                        r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                ax.set_title(f"{N}-Pendulum Exp {ind}")
                plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
        Es = Es_fn(actual_traj)
        Eshat = Es_fn(pred_traj)
        # Total energy (last column) drives the Hamiltonian error metric.
        H = Es[:, -1]
        Hhat = Eshat[:, -1]
        nexp["Herr"] += [RelErr(H, Hhat)]
        nexp["E"] += [Es, Eshat]
        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)]
        plt.clf()
        fig, axs = plt.subplots(1, 2, figsize=(20, 5))
        axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
        plt.legend(bbox_to_anchor=(1, 1), loc=2)
        axs[0].set_facecolor("w")
        #xlabel("Time step", ax=axs[0])
        #xlabel("Time step", ax=axs[1])
        #ylabel("Energy", ax=axs[0])
        #ylabel("Energy", ax=axs[1])
        plt.xlabel("Time step")
        plt.ylabel("Energy")
        title = f"CLGNN {N}-Pendulum Exp {ind} Lmodel"
        axs[1].set_title(title)
        title = f"CLGNN {N}-Pendulum Exp {ind} Lactual"
        axs[0].set_title(title)
        plt.savefig(_filename(title.replace(" ", "-")+f".png"))
    savefile(f"error_parameter.pkl", nexp)

    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        # Per-trajectory error curves plus a geometric-mean +/- 2 sigma band.
        print(f"Plotting err for {key}")
        plt.clf()
        fig, axs = plt.subplots(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))
        plt.clf()
        fig, axs = plt.subplots(1, 1)
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))

    make_plots(nexp, "Zerr",
               yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
    make_plots(nexp, "Herr",
               yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
    # Geometric mean of the per-trajectory error curves.
    gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
    gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
    if (ifDataEfficiency == 0):
        np.savetxt(f"../{N}-pendulum-zerr/clgnn.txt", gmean_zerr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-herr/clgnn.txt", gmean_herr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-simulation-time/clgnn.txt", [t/maxtraj], delimiter = "\n")
# main(N = 4)
# Script entry: evaluate the 5-pendulum CLGNN model.
main(N = 5)
| 17,750 | 31.392336 | 205 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-FGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly the object *obj*."""
    return [name for name, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving names via *namespace*."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=30, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if useN is None:
useN = N
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"fgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    """Build the output path for *name*, inserting drag/trainm suffixes.

    `trained` swaps the system prefix so a model trained on a different N
    can be evaluated; data files ("data" tag) always resolve under ../results.
    """
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS
    # Splice the suffix in front of the file extension.
    name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
    rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"{withdata}")
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/0/"
    else:
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free-space displacement between two coordinates: a - b."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Apply displacement dR to positions R; velocities V pass through unchanged."""
    return (R + dR, V)
def OUT(f):
    """Wrap *f* so its first argument (a file name) is resolved via _filename()."""
    @wraps(f)
    def func(file, *args, tag=TAG, trained=None, **kwargs):
        resolved = _filename(file, tag=tag, trained=trained)
        return f(resolved, *args, **kwargs)
    return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
# senders = jnp.array(senders)
# receivers = jnp.array(receivers)
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
# return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_acceleration(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R = dataset_states[0].position[ind*69]
V = dataset_states[0].velocity[ind*69]
actual_traj = sim_orig2(R, V)
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
H = Es[:, -1]
L = Es[:, 2]
Eshat = Es_fn(pred_traj)
KEhat = Eshat[:, 1]
Lhat = Eshat[:, 2]
k = L[5]/Lhat[5]
print(f"scalling factor: {k}")
Lhat = Lhat*k
Hhat = 2*KEhat - Lhat
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"FGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"FGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
# except:
# print("skipped")
# if skip < 20:
# skip += 1
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-nbody-zerr/fgnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/fgnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/fgnode.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/fgnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
| 20,013 | 32.580537 | 246 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-HGNN.py | ################################################
################## IMPORT ######################
################################################
from posixpath import split
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
from shadow.plot import *
# from shadow.plot import panel
#import matplotlib.pyplot as plt
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
# Double precision plus fail-fast NaN checking for the Hamiltonian training.
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """List all names in *namespace* whose value is identical (is) to *obj*."""
    found = []
    for key in namespace:
        if namespace[key] is obj:
            found.append(key)
    return found
def pprint(*args, namespace=globals()):
    """Pretty-print each argument prefixed by its variable name from *namespace*."""
    for item in args:
        name = namestr(item, namespace)[0]
        print(f"{name}: {item}")
def main(N = 3, epochs = 10000, seed = 42, rname = False,dt = 1.0e-5, ifdrag = 0, trainm = 1, stride=1000, lr = 0.001, withdata = None, datapoints = None, batch_size = 100, ifDataEfficiency = 0, if_lr_search = 0, if_act_search = 0, mpass = 1, if_mpass_search = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search=0, nhidden=2, if_noisy_data = 1):
    """Train a Hamiltonian Graph Neural Network (HGNN) on the N-pendulum system.

    Loads precomputed (z, z_dot) pairs, optionally corrupts them with noise,
    fits a graph-network Hamiltonian by regressing predicted z_dot against
    the data with Adam, and saves model checkpoints, loss curves, and
    timing/loss text files.

    The many `if_*_search` flags select an ablation experiment and only
    change the output directory / filename suffix, not the training logic.
    NOTE(review): `if_noisy_data` defaults to 1, so by default the noisy
    variant is trained — confirm this is intended.
    """
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)

    print("Configs: ")
    pprint(N, epochs, seed, rname, dt, lr, ifdrag, batch_size, namespace=locals())

    randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"

    PSYS = f"{N}-Pendulum"
    TAG = f"hgnn"

    # Output root chosen by the active experiment flag (first match wins).
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_lr_search == 1):
        out_dir = f"../lr_search"
    elif (if_act_search == 1):
        out_dir = f"../act_search"
    elif (if_mpass_search == 1):
        out_dir = f"../mpass_search"
    elif (if_hidden_search == 1):
        out_dir = f"../mlp_hidden_search"
    elif (if_nhidden_search == 1):
        out_dir = f"../mlp_nhidden_search"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Build an output path; "2" is the run-id prefix for this script,
        # optionally suffixed with the swept hyper-parameter value.
        # rstring = randfilename if (rname and (tag != "data")) else (
        #     "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
        rstring = "2" if (tag == "data") else "0"

        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        elif (if_lr_search == 1):
            rstring = "2_" + str(lr)
        elif (if_mpass_search == 1):
            rstring = "2_" + str(mpass)
        elif (if_act_search == 1):
            rstring = "2_softplus"
        elif (if_hidden_search == 1):
            rstring = "2_" + str(hidden)
        elif (if_nhidden_search == 1):
            rstring = "2_" + str(nhidden)

        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"

        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route the first (filename) argument of f through _filename.
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func

    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        raise Exception("Generate dataset first.")

    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]

    model_states = dataset_states[0]
    z_out, zdot_out = model_states

    print(f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")

    # z stacks positions and momenta, so the first axis is 2*N.
    N2, dim = z_out.shape[-2:]
    N = N2//2
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    array = jnp.array([jnp.array(i) for i in dataset_states])
    Zs = array[:, 0, :, :, :]
    Zs_dot = array[:, 1, :, :, :]

    Zs = Zs.reshape(-1, N2, dim)
    Zs_dot = Zs_dot.reshape(-1, N2, dim)

    if (if_noisy_data == 1):
        # Additive Gaussian noise: one scalar sample per trajectory frame,
        # broadcast over all coordinates of that frame.
        Zs = np.array(Zs)
        Zs_dot = np.array(Zs_dot)

        np.random.seed(100)
        for i in range(len(Zs)):
            Zs[i] += np.random.normal(0,1,1)
            Zs_dot[i] += np.random.normal(0,1,1)

        Zs = jnp.array(Zs)
        Zs_dot = jnp.array(Zs_dot)

    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Zs), len(Zs), replace=False)
    allZs = Zs[mask]
    allZs_dot = Zs_dot[mask]

    Ntr = int(0.75*len(Zs))
    Nts = len(Zs) - Ntr

    Zs = allZs[:Ntr]
    Zs_dot = allZs_dot[:Ntr]

    Zst = allZs[Ntr:]
    Zst_dot = allZs_dot[Ntr:]

    ################################################
    ################## SYSTEM ######################
    ################################################

    def phi(x):
        # Holonomic constraint: each link has unit length (anchor at origin).
        X = jnp.vstack([x[:1, :]*0, x])
        return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0

    constraints = get_constraints(N, dim, phi)

    ################################################
    ################### ML Model ###################
    ################################################

    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)

    # Embedding sizes for the graph network.
    Ef = 1  # eij dim
    Nf = dim
    Oh = 1

    Eei = 5
    Nei = 5

    hidden = hidden
    nhidden = nhidden

    def get_layers(in_, out_):
        # MLP layer sizes: nhidden hidden layers of width `hidden`.
        return [in_] + [hidden]*nhidden + [out_]

    def mlp(in_, out_, key, **kwargs):
        return initialize_mlp(get_layers(in_, out_), key, **kwargs)

    # # fne_params = mlp(Oh, Nei, key)
    fneke_params = initialize_mlp([Oh, Nei], key)
    fne_params = initialize_mlp([Oh, Nei], key)

    fb_params = mlp(Ef, Eei, key)
    fv_params = mlp(Nei+Eei, Nei, key)
    fe_params = mlp(Nei, Eei, key)

    ff1_params = mlp(Eei, 1, key)
    ff2_params = mlp(Nei, 1, key)
    ff3_params = mlp(dim+Nei, 1, key)
    ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])

    Hparams = dict(fb=fb_params,
                   fv=fv_params,
                   fe=fe_params,
                   ff1=ff1_params,
                   ff2=ff2_params,
                   ff3=ff3_params,
                   fne=fne_params,
                   fneke=fneke_params,
                   ke=ke_params)

    def H_energy_fn(params, graph):
        # Graph-network Hamiltonian H = T + V.
        if (if_act_search == 1):
            g, V, T = cal_graph(params, graph, eorder=eorder, useT=True, act_fn=models.SoftPlus)
        else:
            g, V, T = cal_graph(params, graph, eorder=eorder, useT=True, mpass=mpass)
        return T + V

    R, V = jnp.split(Zs[0], 2, axis=0)

    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def energy_fn(species):
        # Close over a template graph; `apply` refreshes node features
        # in place for each (R, V) query.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return H_energy_fn(params, state_graph)
        return apply

    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Hmodel(x, v, params):
        return apply_fn(x, v, params["H"])

    params = {"H": Hparams}

    def nndrag(v, params):
        # Learned, always-dissipative drag: -|NN(v)| * v.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, p, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, p, params):
            return vmap(nndrag, in_axes=(0, None))(p.reshape(-1), params["drag"]).reshape(-1, 1)

    # Drag-network parameters are initialised regardless of ifdrag.
    params["drag"] = initialize_mlp([1, 5, 5, 1], key)

    # NOTE(review): drag=None and constraints=None are passed here, so the
    # drag/constraint definitions above are not used by the trained model.
    zdot_model, lamda_force_model = get_zdot_lambda(
        N, dim, hamiltonian=Hmodel, drag=None, constraints=None,external_force=None)

    v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))

    ################################################
    ################## ML Training #################
    ################################################

    @jit
    def loss_fn(params, Rs, Vs, Zs_dot):
        # MSE between predicted and ground-truth phase-space derivatives.
        pred = v_zdot_model(Rs, Vs, params)
        return MSE(pred, Zs_dot)

    def gloss(*args):
        return value_and_grad(loss_fn)(*args)

    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)

    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @ jit
    def opt_update(i, grads_, opt_state):
        # NaN gradients are zeroed before the Adam update.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        # grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)

    def batching(*args, size=None):
        # Split each array into equally sized batches, choosing the batch
        # count that wastes the fewest trailing samples.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L

        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs

    Rs, Vs = jnp.split(Zs, 2, axis=1)
    Rst, Vst = jnp.split(Zst, 2, axis=1)

    bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                                 size=min(len(Rs), batch_size))

    print(f"training ...")

    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []
    ltarray = []

    start = time.time()
    train_time_arr = []

    last_loss = 1000

    for epoch in range(epochs):
        l = 0.0
        for data in zip(bRs, bVs, bZs_dot):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
            l += l_
        l = l/len(bRs)

        if epoch % 1 == 0:
            # opt_state, params, l = step(
            #     optimizer_step, (opt_state, params, 0), Rs, Vs, Zs_dot)
            larray += [l]
            ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
            print(
                f"Epoch: {epoch}/{epochs} Loss (MSE):  train={larray[-1]}, test={ltarray[-1]}")
        if epoch % 10 == 0:
            # print(
            #     f"Epoch: {epoch}/{epochs} Loss (MSE):  train={larray[-1]}, test={ltarray[-1]}")
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata={"savedat": epoch})
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata={"savedat": epoch})
            # Keep a separate checkpoint of the best-so-far training loss.
            if last_loss > larray[-1]:
                last_loss = larray[-1]
                savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                         params, metadata={"savedat": epoch})

        now = time.time()
        train_time_arr.append((now - start))

    fig, axs = panel(1, 1)
    plt.semilogy(larray, label="Training")
    plt.semilogy(ltarray, label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))

    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata={"savedat": epoch})
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata={"savedat": epoch})

    if (ifDataEfficiency == 0 and if_lr_search == 0 and if_act_search == 0 and if_mpass_search == 0 and if_hidden_search == 0 and if_nhidden_search == 0):
        np.savetxt(f"../{N}-pendulum-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-training-loss/hgnn-train.txt", larray, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")

    if (if_lr_search == 1):
        np.savetxt(f"../lr_search/{N}-pendulum-training-time/hgnn_{lr}.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../lr_search/{N}-pendulum-training-loss/hgnn-train_{lr}.txt", larray, delimiter = "\n")
        np.savetxt(f"../lr_search/{N}-pendulum-training-loss/hgnn-test_{lr}.txt", ltarray, delimiter = "\n")

    if (if_act_search == 1):
        np.savetxt(f"../act_search/{N}-pendulum-training-time/hgnn_softplus.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../act_search/{N}-pendulum-training-loss/hgnn-train_softplus.txt", larray, delimiter = "\n")
        np.savetxt(f"../act_search/{N}-pendulum-training-loss/hgnn-test_softplus.txt", ltarray, delimiter = "\n")

    if (if_mpass_search == 1):
        np.savetxt(f"../mpass_search/{N}-pendulum-training-time/hgnn_{mpass}.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/hgnn-train_{mpass}.txt", larray, delimiter = "\n")
        np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/hgnn-test_{mpass}.txt", ltarray, delimiter = "\n")

    if (if_hidden_search == 1):
        np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-time/hgnn_{hidden}.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/hgnn-train_{hidden}.txt", larray, delimiter = "\n")
        np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/hgnn-test_{hidden}.txt", ltarray, delimiter = "\n")

    if (if_nhidden_search == 1):
        np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-time/hgnn_{nhidden}.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/hgnn-train_{nhidden}.txt", larray, delimiter = "\n")
        np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/hgnn-test_{nhidden}.txt", ltarray, delimiter = "\n")
# Train with default hyper-parameters; the commented calls below were used
# for ad-hoc hyper-parameter sweeps.
main()
# main(lr=0.3)
# main(nhidden=4)
# main(nhidden=8)
# main(nhidden=16)
| 15,437 | 30.962733 | 353 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-GNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp,batch_MSE
from src.nve import nve
from src.utils import *
import time
# Double precision plus fail-fast NaN checking for the GNODE training.
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Collect the names under which *obj* itself appears in *namespace*."""
    return [label for label, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Echo every argument as 'name: value' using *namespace* for name lookup."""
    for obj in args:
        print(f"{namestr(obj, namespace)[0]}: {obj}")
# -------- experiment configuration (flat script, no CLI parsing) --------
N=3
epochs=10000
seed=42
rname=True
dt=1.0e-5
ifdrag=0
trainm=1
stride=1000
lr=0.3
withdata=None
datapoints=None
batch_size=100
# Ablation flags: each selects an output directory / filename suffix.
ifDataEfficiency = 0
if_lr_search = 0
if_act_search = 0
if_mpass_search = 0
mpass = 1
if_hidden_search = 0
hidden = 5
if_nhidden_search = 0
nhidden = 2
if_noisy_data = 1

print("Configs: ")
# At module level locals() is globals(), so pprint resolves these names.
pprint(N, epochs, seed, rname,
       dt, stride, lr, ifdrag, batch_size, ifDataEfficiency,
       namespace=locals())
randfilename = datetime.now().strftime(
    "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"

if (ifDataEfficiency == 1):
    # Data-efficiency runs take the dataset size from the command line.
    data_points = int(sys.argv[1])
    batch_size = int(data_points/100)

PSYS = f"{N}-Pendulum"
TAG = f"gnode"

# Output root chosen by the active experiment flag (first match wins).
if (ifDataEfficiency == 1):
    out_dir = f"../data-efficiency"
elif (if_lr_search == 1):
    out_dir = f"../lr_search"
elif (if_act_search == 1):
    out_dir = f"../act_search"
elif (if_mpass_search == 1):
    out_dir = f"../mpass_search"
elif (if_hidden_search == 1):
    out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
    out_dir = f"../mlp_nhidden_search"
elif (if_noisy_data == 1):
    out_dir = f"../noisy_data"
else:
    out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and create directories for) an output path for artifact *name*.

    The run-id prefix is "0", optionally suffixed by the value of the
    hyper-parameter being swept (elif order matters: the first active
    flag wins). Depends on the module-level config flags above.
    """
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    elif (if_lr_search == 1):
        rstring = "0_" + str(lr)
    elif (if_act_search == 1):
        rstring = "0_softplus"
    elif (if_mpass_search == 1):
        rstring = "0_" + str(mpass)
    elif (if_hidden_search == 1):
        rstring = "0_" + str(hidden)
    elif (if_nhidden_search == 1):
        rstring = "0_" + str(nhidden)
    else:
        rstring = "0"

    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"

    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the doubled slash introduced by the prefix join.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free-space displacement between two points: simply a - b."""
    delta = a - b
    return delta
def shift(R, dR, V):
    """Advance positions by dR; velocities are returned unchanged."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    """Decorator: resolve the wrapped function's first (filename) argument
    through the module-level _filename helper before calling it."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return func
# I/O helpers with filenames automatically routed through _filename.
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)

loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)

try:
    dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
    raise Exception("Generate dataset first.")

if datapoints is not None:
    dataset_states = dataset_states[:datapoints]

model_states = dataset_states[0]

print(
    f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")

N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)

Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
# Rs = Rs.reshape(-1, N, dim)
# Vs = Vs.reshape(-1, N, dim)
# Fs = Fs.reshape(-1, N, dim)

# Keep a singleton "graph" axis: (samples, 1, N, dim).
Rs = Rs.reshape(-1, 1, N, dim)
Vs = Vs.reshape(-1, 1, N, dim)
Fs = Fs.reshape(-1, 1, N, dim)

if (if_noisy_data == 1):
    # Additive Gaussian noise: one scalar draw per sample, broadcast
    # over all coordinates of that sample.
    Rs = np.array(Rs)
    Fs = np.array(Fs)
    Vs = np.array(Vs)

    np.random.seed(100)
    for i in range(len(Rs)):
        Rs[i] += np.random.normal(0,1,1)
        Vs[i] += np.random.normal(0,1,1)
        Fs[i] += np.random.normal(0,1,1)

    Rs = jnp.array(Rs)
    Fs = jnp.array(Fs)
    Vs = jnp.array(Vs)

# Shuffle, then 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]

Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr

Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]

Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]

print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
    # Jacobian of the holonomic pendulum constraints with respect to the
    # flattened positions; `dim` and `hconstraints` come from module scope.
    return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
Nei_ = 5 # for mass learning
hidden = hidden
nhidden = nhidden
def get_layers(in_, out_):
return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
    """Initialize an MLP whose sizes follow the standard hidden-layer layout."""
    layer_sizes = get_layers(in_, out_)
    return initialize_mlp(layer_sizes, key, **kwargs)
# # fne_params = mlp(Oh, Nei, key)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
fb_params = mlp(Ef, Eei, key) #
fv_params = mlp(Nei+Eei, Nei, key) #
fe_params = mlp(Nei, Eei, key) #
ff1_params = mlp(Eei, dim, key)
ff2_params = mlp(Nei, dim, key) #
ff3_params = mlp(Nei+dim+dim, dim, key)
ke_params = initialize_mlp([1+Nei, 5, 5, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_,5, 1], key, affine=[True]) #
Fparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params,
mass=mass_params)
params = {"Fqqdot": Fparams}
def graph_force_fn(params, graph):
    """Evaluate the graph-network force; activation-search runs use SoftPlus."""
    if if_act_search == 1:
        return cdgnode_cal_force_q_qdot(
            params, graph, eorder=eorder, useT=True, act_fn=models.SoftPlus)
    return cdgnode_cal_force_q_qdot(
        params, graph, eorder=eorder, useT=True, mpass=mpass)
R, V = Rs[0][0], Vs[0][0]
def force_fn(species):
    """Build an `apply(R, V, params)` closure over a pendulum graph template.

    The jraph template graph is created once; `apply` only refreshes the
    node positions/velocities before evaluating the graph force.
    """
    senders, receivers = [np.array(i)
                          for i in pendulum_connections(R.shape[0])]
    state_graph = jraph.GraphsTuple(
        nodes={"position": R, "velocity": V, "type": species},
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def apply(R, V, params):
        # Mutates the shared template's node dict in place.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = force_fn(species)
# v_apply_fn = vmap(apply_fn, in_axes=(0, 0, None))
def F_q_qdot(x, v, params):
    """Force model: apply the graph network with its own parameter bundle."""
    gnode_params = params["Fqqdot"]
    return apply_fn(x, v, gnode_params)
# print(qddot(R,V,params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = qddot
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=None,
non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
x=Rs[0]
v=Vs[0]
# F_q_qdot(x[0], v[0], params)
# acceleration_fn_model(x[0], v[0], params)
# hhhh = v_v_acceleration_fn_model(Rs, Vs, params)
# # print(hhhh)
# print(hhhh.shape)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    """MSE between model-predicted accelerations and the targets `Fs`."""
    predicted = v_v_acceleration_fn_model(Rs, Vs, params)
    return MSE(predicted, Fs)
def gloss(*args):
    """Return (loss, gradients) for the given (params, batch...) arguments."""
    loss_and_grad = value_and_grad(loss_fn)
    return loss_and_grad(*args)
def update(i, opt_state, params, loss__, *data):
    """Take one optimizer step on a batch.

    Returns the new optimizer state, the updated parameters, and the
    batch loss value.
    """
    loss_value, gradients = gloss(params, *data)
    new_opt_state = opt_update(i, gradients, opt_state)
    return new_opt_state, get_params(new_opt_state), loss_value
@ jit
def step(i, ps, *args):
    """JIT-compiled wrapper: unpack the (opt_state, params, loss) tuple."""
    opt_state, params, loss_ = ps
    return update(i, opt_state, params, loss_, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
    """Sanitize gradients (NaN/inf -> finite) before the Adam update."""
    clean_grads = jax.tree_map(jnp.nan_to_num, grads_)
    return opt_update_(i, clean_grads, opt_state)
def batching(*args, size=None):
    """Split each array in `args` into equal-size batches along axis 0.

    When `size` is given, the batch count is chosen so that batch sizes
    stay close to `size` while retaining as many samples as possible
    (any trailing remainder samples are dropped).  When `size` is None,
    each array becomes a single batch.

    Returns a list with one stacked `jnp.array` of shape
    (nbatches, size, ...) per input argument.
    """
    L = len(args[0])
    if size is not None:  # was `size != None`; identity check is the idiom
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L/nbatches1)
        size2 = int(L/nbatches2)
        # Pick whichever split keeps more samples overall.
        if size1*nbatches1 > size2*nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    newargs = []
    for arg in args:
        newargs += [jnp.array([arg[i*size:(i+1)*size]
                               for i in range(nbatches)])]
    return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count+=1
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 100 == 0:
metadata = {
"savedat": epoch,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0 and if_lr_search == 0 and if_act_search == 0 and if_mpass_search == 0):
np.savetxt("../3-pendulum-training-time/gnode.txt", train_time_arr, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/gnode-train.txt", larray, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/gnode-test.txt", ltarray, delimiter = "\n")
if (if_lr_search == 1):
np.savetxt(f"../lr_search/{N}-pendulum-training-time/gnode_{lr}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../lr_search/{N}-pendulum-training-loss/gnode-train_{lr}.txt", larray, delimiter = "\n")
np.savetxt(f"../lr_search/{N}-pendulum-training-loss/gnode-test_{lr}.txt", ltarray, delimiter = "\n")
if (if_act_search == 1):
np.savetxt(f"../act_search/{N}-pendulum-training-time/gnode_softplus.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../act_search/{N}-pendulum-training-loss/gnode-train_softplus.txt", larray, delimiter = "\n")
np.savetxt(f"../act_search/{N}-pendulum-training-loss/gnode-test_softplus.txt", ltarray, delimiter = "\n")
if (if_mpass_search == 1):
np.savetxt(f"../mpass_search/{N}-pendulum-training-time/gnode_{mpass}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/gnode-train_{mpass}.txt", larray, delimiter = "\n")
np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/gnode-test_{mpass}.txt", ltarray, delimiter = "\n")
if (if_hidden_search == 1):
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-time/gnode_{hidden}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/gnode-train_{hidden}.txt", larray, delimiter = "\n")
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/gnode-test_{hidden}.txt", ltarray, delimiter = "\n")
if (if_nhidden_search == 1):
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-time/gnode_{nhidden}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/gnode-train_{nhidden}.txt", larray, delimiter = "\n")
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/gnode-test_{nhidden}.txt", ltarray, delimiter = "\n")
| 16,262 | 29.060998 | 121 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-HGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """Return the names in `namespace` bound to exactly `obj` (identity test)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving names by identity.

    NOTE: the default `namespace` is captured when the function is defined
    (module globals at def time), not at call time.
    """
    for arg in args:
        label = namestr(arg, namespace)[0]
        print(f"{label}: {arg}")
def main(N=4, dim=3, dt=1.0e-3, stride=100, useN=4, withdata=None, datapoints=100, grid=False, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, ifdrag, namespace=locals())
PSYS = f"{N}-body"
TAG = f"hgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    """Build (and create directories for) an output path for `name`.

    The stem of `name` is suffixed with the run configuration
    (ifdrag / trainm), and the directory prefix is chosen from the
    system tag, the data-efficiency flag, and the optionally different
    `trained` system size.  Side effects: makes the directory tree and
    prints the resolved path.
    """
    # Data files only encode the drag flag; model outputs also encode trainm.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    # Insert `part` between the file stem and its extension.
    name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
    # Run-string: "0" for model outputs, "2" for data; data-efficiency
    # runs additionally encode the number of training points.
    rstring = "0" if (tag != "data" ) else "2"
    if (ifDataEfficiency == 1):
        rstring = "2_" + str(data_points)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/2_test/"
    elif (trained is not None):
        # Model trained on a different system size than evaluated on.
        psys = f"{trained}-{PSYS.split('-')[1]}"
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the doubled slashes introduced by the f-strings above.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def OUT(f):
    """Decorator: resolve the `file` argument through `_filename` first."""
    @wraps(f)
    def func(file, *args, tag=TAG, trained=None, **kwargs):
        resolved = _filename(file, tag=tag, trained=trained)
        return f(resolved, *args, **kwargs)
    return func
def _fileexist(f):
    """Report whether `f` exists on disk; always False when `redo` is set."""
    return False if redo else os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
z_out, zdot_out = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
print(f"Total number of training data points: {len(dataset_states)}x{z_out.shape}")
N, dim = xout.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# def pot_energy_orig(x):
# dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
# return jax.vmap(partial(src.hamiltonian.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
    """Ground-truth gravitational potential energy over all particle pairs."""
    # Pairwise Euclidean distances over the fully connected edge list.
    dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
    # Pair potential summed over directed edges; /2 undoes double counting.
    # NOTE(review): `lnn` has no visible import in this script — presumably
    # re-exported by one of the `from ... import *` lines; confirm.
    return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, p, params):
return kin_energy(p) + pot_energy_orig(x)
# def phi(x):
# X = jnp.vstack([x[:1, :]*0, x])
# return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
# constraints = get_constraints(N, dim, phi)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, p, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, p, params):
return -0.1*p.reshape(-1, 1)
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=None)
def zdot_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot(x, p, params)
def z0(x, p):
return jnp.vstack([x, p])
def get_forward_sim(params=None, zdot_func=None, runs=10):
def fn(R, V):
t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
_z_out = ode.odeint(zdot_func, z0(R, V), t, params)
return _z_out[0::stride]
return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=maxtraj*runs)
# z_out = sim_orig(R, V)
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
def H_energy_fn(params, graph):
g, g_PE, g_KE = cal_graph(params, graph, eorder=eorder,
useT=True)
return g_PE + g_KE
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def energy_fn(species):
# senders, receivers = [np.array(i)
# for i in Spring_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
# jax.tree_util.tree_map(lambda a: print(a.shape), state_graph.nodes)
return H_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
return apply_fn(x, v, params["H"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot_model(x, p, params)
params = loadfile(f"trained_model.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
# z_model_out = sim_model(R, V)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-frame Euclidean norm: flattens all but the leading axis."""
    squared = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(squared.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error between two trajectories, per frame."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed error: actual minus predicted."""
    return ya - yp

def AbsErr(*args):
    """Magnitude of the signed error."""
    signed = Err(*args)
    return jnp.abs(signed)
def caH_energy_fn(lag=None, params=None):
def fn(states):
KE = vmap(kin_energy)(states.velocity)
H = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = (H - KE)
# return jnp.array([H]).T
return jnp.array([PE, KE, H, KE+PE]).T
return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
def net_force_fn(force=None, params=None):
def fn(states):
zdot_out = vmap(force, in_axes=(0, 0, None))(
states.position, states.velocity, params)
_, force_out = jnp.split(zdot_out, 2, axis=1)
return force_out
return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
z_out, _ = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[ind*69]
V = pout[ind*69]
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
t += end - start
if saveovito:
if ind < 1:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<1:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"(HGNN) {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}.png")) # , dpi=500)
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
# , dpi=500)
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
nexp["Perr"] += [RelErr(actual_traj.velocity,
pred_traj.velocity)+1e-30]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"HGNN {N}-Spring Exp {ind}"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png")) # , dpi=500)
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)+1e-30]
if ind%10==0:
savefile("trajectories.pkl", trajectories)
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
    """Plot per-trajectory error curves and their geometric-mean band.

    `nexp[key]` holds one error array per trajectory.  When `key2` is
    given its flattened values are used as the x axis instead of the
    time-step index.  Saves two figures via `_filename`.
    """
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    filepart = f"{key}"
    for i in range(len(nexp[key])):
        # Drop the first two samples (transient at trajectory start).
        y = nexp[key][i].flatten()[2:]
        if key2 is None:
            x = range(len(y))
        else:
            x = nexp[key2][i].flatten()
            filepart = f"{filepart}_{key2}"
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.savefig(_filename(f"RelError_{filepart}.png"))
    # Second figure: geometric mean across trajectories with a
    # +/- 2-sigma band computed in log space.
    fig, axs = panel(1, 1)
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)[2:]
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)[2:]
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png")) # , dpi=500)
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-nbody-zerr/hgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/hgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/hgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/hgnn.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
| 19,693 | 33.795053 | 245 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-HGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """Names in `namespace` whose value is the very object `obj`."""
    found = []
    for candidate in namespace:
        if namespace[candidate] is obj:
            found.append(candidate)
    return found
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>" using identity lookup.

    NOTE: the default `namespace` binds to module globals at definition
    time, not at call time.
    """
    for arg in args:
        name = namestr(arg, namespace)[0]
        print(f"{name}: {arg}")
def main(N=5, dim=2, dt=1.0e-3, stride=100, useN=5, withdata=None, datapoints=100, grid=False, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, ifdrag, namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"hgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
# rstring = randfilename if (rname and (tag != "data")) else (
# "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
rstring = "0" if (tag != "data" ) else "2"
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
elif (trained is not None):
psys = f"{trained}-{PSYS.split('-')[1]}"
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained),
*args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
z_out, zdot_out = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
print(
f"Total number of training data points: {len(dataset_states)}x{z_out.shape}")
N, dim = xout.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
def pot_energy_orig(x):
dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
return jax.vmap(partial(src.hamiltonian.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, p, params):
return kin_energy(p) + pot_energy_orig(x)
# def phi(x):
# X = jnp.vstack([x[:1, :]*0, x])
# return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
# constraints = get_constraints(N, dim, phi)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, p, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, p, params):
return -0.1*p.reshape(-1, 1)
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=None)
def zdot_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot(x, p, params)
def z0(x, p):
return jnp.vstack([x, p])
def get_forward_sim(params=None, zdot_func=None, runs=10):
def fn(R, V):
t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
_z_out = ode.odeint(zdot_func, z0(R, V), t, params)
return _z_out[0::stride]
return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=maxtraj*runs)
# z_out = sim_orig(R, V)
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
def H_energy_fn(params, graph):
g, g_PE, g_KE = cal_graph(params, graph, eorder=eorder,
useT=True)
return g_PE + g_KE
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def energy_fn(species):
# senders, receivers = [np.array(i)
# for i in Spring_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
# jax.tree_util.tree_map(lambda a: print(a.shape), state_graph.nodes)
return H_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params):
return apply_fn(x, v, params["H"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot_model(x, p, params)
params = loadfile(f"trained_model.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
# z_model_out = sim_model(R, V)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Euclidean norm of each leading-axis slice (remaining axes flattened)."""
    flat_sq = jnp.square(a).reshape(a.shape[0], -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Per-frame symmetric relative error between `ya` and `yp`."""
    diff_norm = norm(ya - yp)
    return diff_norm / (norm(ya) + norm(yp))
def Err(ya, yp):
    """Signed difference: actual minus predicted."""
    return ya - yp

def AbsErr(*args):
    """Absolute value of the signed difference."""
    return jnp.abs(Err(*args))
def caH_energy_fn(lag=None, params=None):
def fn(states):
KE = vmap(kin_energy)(states.velocity)
H = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = (H - KE)
# return jnp.array([H]).T
return jnp.array([PE, KE, H, KE+PE]).T
return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
def net_force_fn(force=None, params=None):
def fn(states):
zdot_out = vmap(force, in_axes=(0, 0, None))(
states.position, states.velocity, params)
_, force_out = jnp.split(zdot_out, 2, axis=1)
return force_out
return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
t = 0.0
for ind in range(len(dataset_states)):
if ind > maxtraj:
break
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
z_out, _ = dataset_states[ind]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
t += end - start
if saveovito:
if ind < 1:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<1:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"(HGNN) {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}.png")) # , dpi=500)
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
# , dpi=500)
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
nexp["Perr"] += [RelErr(actual_traj.velocity,
pred_traj.velocity)+1e-30]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"HGNN {N}-Spring Exp {ind}"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png")) # , dpi=500)
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)+1e-30]
if ind%10==0:
savefile("trajectories.pkl", trajectories)
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
filepart = f"{key}"
for i in range(len(nexp[key])):
y = nexp[key][i].flatten()[2:]
if key2 is None:
x = range(len(y))
else:
x = nexp[key2][i].flatten()
filepart = f"{filepart}_{key2}"
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.ylabel(yl)
plt.xlabel(xl)
plt.savefig(_filename(f"RelError_{filepart}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)[2:]
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)[2:]
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png")) # , dpi=500)
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/hgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/hgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/hgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/hgnn.txt", [t/maxtraj], delimiter = "\n")
# main(N = 20)
main(N = 5)
| 19,637 | 33.452632 | 245 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-FGNN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in `namespace` bound to exactly `obj` (identity test)."""
    return [nm for nm, val in namespace.items() if val is obj]
def pprint(*args, namespace=globals()):
    """Print each value as '<bound name>: <value>', resolving names via `namespace`."""
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, withdata=None, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search = 0, nhidden = 2, if_mpass_search = 0, mpass = 1, if_lr_search = 0, lr = 0.001, if_act_search = 0, if_noisy_data=0):
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"fgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_hidden_search == 1):
out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
out_dir = f"../mlp_nhidden_search"
elif (if_mpass_search == 1):
out_dir = f"../mpass_search"
elif (if_lr_search == 1):
out_dir = f"../lr_search"
elif (if_act_search == 1):
out_dir = f"../act_search"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
    """Build (and create directories for) an output path for this experiment.

    The stem of `name` gets a `_{ifdrag}` (data) or `_{ifdrag}_{trainm}`
    (model) suffix; the directory encodes system, tag and the active
    hyper-parameter sweep. Side effect: makes the directory and prints
    the final path.
    """
    # Data files are keyed only by drag flag; trained artifacts also by trainm.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    # `trained` swaps in a different particle count, e.g. evaluate a
    # model trained on useN pendulums on the current system.
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS
    # Insert `part` between the stem and the extension.
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]
    # Sub-directory per hyper-parameter search; "0" is the default run.
    rstring = "0"
    if (if_hidden_search == 1):
        rstring = "1_" + str(hidden)
    elif (if_nhidden_search == 1):
        rstring = "1_" + str(nhidden)
    elif (if_mpass_search == 1):
        rstring = "1_" + str(mpass)
    elif (if_lr_search == 1):
        rstring = "1_" + str(lr)
    elif (if_act_search == 1):
        rstring = "1_" + str("softplus")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    # Free-space displacement (no periodic-boundary wrapping).
    return a - b
def shift(R, dR, V):
    # Advance positions by dR; velocities pass through unchanged.
    return R+dR, V
def OUT(f):
    # Decorator: route the first `file` argument through `_filename` so every
    # wrapped I/O helper (load/save) targets the experiment's output directory.
    @wraps(f)
    def func(file, *args, tag=TAG, trained=None, **kwargs):
        return f(_filename(file, tag=tag, trained=trained),
                 *args, **kwargs)
    return func
def _fileexist(f):
    # When `redo` is set, report files as missing so results get regenerated.
    return False if redo else os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
    # Ground-truth Lagrangian L = T - V; `params` is unused here.
    return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
    # Jacobian of the holonomic pendulum constraints w.r.t. the flattened
    # position vector (positions reshaped to (-1, dim) for `hconstraints`).
    return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
    # Constant unit downward force on particle 1. Apparently unused: the
    # acceleration function below is built with external_force=None.
    F = 0*R
    # NOTE(review): jax.ops.index_update was removed in newer JAX releases;
    # the modern equivalent is F.at[1, 1].set(-1.0) — confirm the pinned
    # JAX version before upgrading.
    F = jax.ops.index_update(F, (1, 1), -1.0)
    return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_delta(params, graph, mpass=1)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
#sim_model = get_forward_sim(
# params=params, force_fn=force_fn_model, runs=runs)
def get_forward_sim_FGN(params = None, run = runs):
@jit
def fn(R, V):
return predition2(R, V, params, acceleration_fn_model, dt, masses, stride=stride, runs=run)
return fn
sim_model = get_forward_sim_FGN(params=params, run=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
    # Symmetric relative error per time step: ||ya-yp|| / (||ya|| + ||yp||),
    # using the per-sample Euclidean `norm` defined above.
    return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
    # Signed error (actual minus predicted).
    return ya-yp
def AbsErr(*args):
    # Element-wise absolute error |ya - yp|.
    return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
    # Build a jitted evaluator that returns energy columns [PE, KE, L, TE]
    # for every step of a trajectory; PE is recovered as -(L - KE).
    @jit
    def fn(states):
        KE = vmap(kin_energy)(states.velocity)
        L = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        PE = -(L - KE)
        return jnp.array([PE, KE, L, KE+PE]).T
    return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
    # Build a jitted evaluator that maps `force` over every step of a
    # trajectory (positions, velocities) with fixed `params`.
    @jit
    def fn(states):
        return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
    return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t=0.0
skip = 0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj}")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# R = dataset_states[ind].position[0]
# V = dataset_states[ind].velocity[0]
try:
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end-start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs)
ylabel("Energy", ax=axs)
title = f"(FGN) {N}-Pendulum Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"FGNN {N}-Pendulum Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"FGNN {N}-Pendulum Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
savefile(f"error_parameter.pkl", nexp)
except:
print("skipped")
#if skip < 20:
skip += 1
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
    """Plot per-trajectory error curves for `nexp[key]`, then a geometric
    mean ± 2σ (in log space) summary plot; both saved via `_filename`.

    If `key2` is given it supplies the x-axis values per trajectory and is
    appended to the output file name.
    """
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    filepart = f"{key}"
    # One curve per trajectory.
    for i in range(len(nexp[key])):
        y = nexp[key][i].flatten()
        if key2 is None:
            x = range(len(y))
        else:
            x = nexp[key2][i].flatten()
            filepart = f"{filepart}_{key2}"
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.savefig(_filename(f"RelError_{filepart}.png"))
    # Summary: geometric mean with a 2-sigma band computed in log space.
    fig, axs = panel(1, 1)
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
np.savetxt("../pendulum-zerr/fgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt("../pendulum-herr/fgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt("../pendulum-simulation-time/fgnn.txt", [t/maxtraj], delimiter = "\n")
# main(N = 3)
# main(N = 4)
# main(N = 10)
main(N = 20)
| 19,478 | 32.354452 | 370 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-HGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    # All names in `namespace` bound to exactly `obj` (identity, not equality).
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    # Print each value as "<first bound name>: <value>".
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def wrap_main(f):
    """Wrap `f` so each call echoes its arguments to stdout and forwards
    them unchanged, additionally passing `config=(args, kwargs)` so the
    callee can persist the exact invocation."""
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=4, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=0):
    # CLI entry point: forwards every option by keyword to `main` through
    # `wrap_main`, which logs the call and injects the `config` record.
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat, error_fn=error_fn,
                           dt=dt, ifdrag=ifdrag, stride=stride, trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=0):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-body"
TAG = f"hgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"2" if (tag == "data") or (withdata == None) else f"{withdata}")
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
z_out, zdot_out = model_states
print(
f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
N2, dim = z_out.shape[-2:]
N = N2//2
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
array = jnp.array([jnp.array(i) for i in dataset_states])
Zs = array[:, 0, :, :, :]
Zs_dot = array[:, 1, :, :, :]
Zs = Zs.reshape(-1, N2, dim)
Zs_dot = Zs_dot.reshape(-1, N2, dim)
if (if_noisy_data == 1):
Zs = np.array(Zs)
Zs_dot = np.array(Zs_dot)
np.random.seed(100)
for i in range(len(Zs)):
Zs[i] += np.random.normal(0,1,1)
Zs_dot[i] += np.random.normal(0,1,1)
Zs = jnp.array(Zs)
Zs_dot = jnp.array(Zs_dot)
mask = np.random.choice(len(Zs), len(Zs), replace=False)
allZs = Zs[mask]
allZs_dot = Zs_dot[mask]
Ntr = int(0.75*len(Zs))
Nts = len(Zs) - Ntr
Zs = allZs[:Ntr]
Zs_dot = allZs_dot[:Ntr]
Zst = allZs[Ntr:]
Zst_dot = allZs_dot[Ntr:]
# def phi(x):
# X = jnp.vstack([x[:1, :]*0, x])
# return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
# constraints = get_constraints(N, dim, phi)
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# if grid:
# print("It's a grid?")
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# print("It's a random?")
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R, V = jnp.split(Zs[0], 2, axis=0)
def dist(*args):
    # Euclidean length of the displacement between the two endpoints.
    return jnp.sqrt(jnp.square(displacement(*args)).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
#species = jnp.zeros(N, dtype=int)
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
def nndrag(v, params):
    # Learned drag force: an MLP supplies the magnitude, and the sign is
    # forced opposite to v so the force is always dissipative.
    return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Zs_dot):
    # Mean-squared error between predicted and target phase-space rates
    # over a batch of states.
    pred = v_zdot_model(Rs, Vs, params)
    return MSE(pred, Zs_dot)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(
partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each input array into equally sized minibatches.

    Returns one stacked array per input, shaped (nbatches, size, ...).
    Trailing samples that do not fill a whole batch are dropped so the
    stacked result stays rectangular.
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        # Two candidate batch counts; keep whichever covers more samples.
        count_hi = int((total - 0.5) // size) + 1
        count_lo = max(1, count_hi - 1)
        per_hi = int(total / count_hi)
        per_lo = int(total / count_lo)
        if per_hi * count_hi > per_lo * count_lo:
            size, nbatches = per_hi, count_hi
        else:
            size, nbatches = per_lo, count_lo
    return [
        jnp.array([arg[b * size:(b + 1) * size] for b in range(nbatches)])
        for arg in args
    ]
Rs, Vs = jnp.split(Zs, 2, axis=1)
Rst, Vst = jnp.split(Zst, 2, axis=1)
bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Zs_dot)]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
def print_loss():
    # Report the most recent train/test losses; reads epoch, larray and
    # ltarray from the enclosing scope (closure), so it always prints the
    # latest appended values.
    print(
        f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bZs_dot):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-body-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
Main()
| 15,645 | 29.982178 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-LGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is identically *obj*."""
    matches = []
    for name, value in namespace.items():
        if value is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    # Print each value labelled by its variable name, found by identity
    # lookup in *namespace*.
    # NOTE(review): the default binds globals() at definition time (a
    # classic mutable/early-bound default); callers must pass
    # namespace=locals() to label local variables, as done elsewhere.
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def wrap_main(f):
    """Wrap *f* so each call first echoes its arguments, then invokes
    *f* with an extra ``config=(args, kwargs)`` keyword recording them."""
    def wrapped(*args, **kwargs):
        captured = (args, kwargs)
        print("Configs: ")
        print(f"Args: ")
        for positional in args:
            print(positional)
        print(f"KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=captured)
    return wrapped
def Main(N=4, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=0):
    # Thin entry point: logs the call configuration via wrap_main, then
    # runs the training driver main() with every argument forwarded by
    # keyword (defaults here override main()'s own defaults).
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat, error_fn=error_fn,
                           dt=dt, ifdrag=ifdrag, stride=stride, trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                           withdata=withdata, datapoints=datapoints, batch_size=batch_size, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-body"
TAG = f"lgn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# if grid:
# print("It's a grid?")
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# print("It's a random?")
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R, V = Rs[0], Vs[0]
def dist(*args):
    # Euclidean length of the displacement between two positions
    # (args are forwarded to displacement(a, b) = a - b).
    disp = displacement(*args)
    return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1+2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(
partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each input array into equally sized minibatches.

    Returns one stacked array per input, shaped (nbatches, size, ...).
    Trailing samples that do not fill a whole batch are dropped so the
    stacked result stays rectangular.
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        # Two candidate batch counts; keep whichever covers more samples.
        count_hi = int((total - 0.5) // size) + 1
        count_lo = max(1, count_hi - 1)
        per_hi = int(total / count_hi)
        per_lo = int(total / count_lo)
        if per_hi * count_hi > per_lo * count_lo:
            size, nbatches = per_hi, count_hi
        else:
            size, nbatches = per_lo, count_lo
    return [
        jnp.array([arg[b * size:(b + 1) * size] for b in range(nbatches)])
        for arg in args
    ]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 100000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
now = time.time()
train_time_arr.append((now - start))
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
np.savetxt(f"../5-spring-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
np.savetxt(f"../{N}-body-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/lgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")
Main()
| 16,521 | 30.530534 | 162 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-LGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
#import matplotlib.pyplot as plt
#from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List the names bound to *obj* (by identity) inside *namespace*."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    # Print each value labelled by the first variable name bound to it
    # in *namespace*.
    # NOTE(review): globals() is evaluated once at definition time; pass
    # namespace=locals() explicitly for local variables (the caller does).
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=3, epochs=10000, seed=42, rname=False,dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_lr_search = 0, if_act_search = 0, mpass = 1, if_mpass_search = 0, if_hidden_search = 0, hidden = 5, if_nhidden_search = 0, nhidden = 2, if_noisy_data = 1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname,
dt, stride, lr, ifdrag, batch_size,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"lgnn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_lr_search == 1):
out_dir = f"../lr_search"
elif (if_act_search == 1):
out_dir = f"../act_search"
elif (if_mpass_search == 1):
out_dir = f"../mpass_search"
elif (if_hidden_search == 1):
out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
out_dir = f"../mlp_nhidden_search"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and create the directory for) an output artifact path.

    The run subdirectory `rstring` encodes which hyperparameter sweep
    produced the file; "data" artifacts always resolve under ../results.
    """
    rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
    # Sweep modes override the run id so each swept value gets its own dir.
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    elif (if_lr_search == 1):
        rstring = "0_" + str(lr)
    elif (if_act_search == 1):
        rstring = "0_softplus"
    elif (if_mpass_search == 1):
        rstring = "0_" + str(mpass)
    elif (if_hidden_search == 1):
        rstring = "0_" + str(hidden)
    elif (if_nhidden_search == 1):
        rstring = "0_" + str(nhidden)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the doubled slash introduced by the prefix concatenation.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
hidden = hidden
nhidden = nhidden
def get_layers(in_, out_):
    # MLP layout: input -> `nhidden` hidden layers of width `hidden` -> output.
    return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
    # Initialize MLP weights for the layer sizes above.
    return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# # fne_params = mlp(Oh, Nei, key)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)
fb_params = mlp(Ef, Eei, key)
fv_params = mlp(Nei+Eei, Nei, key)
fe_params = mlp(Nei, Eei, key)
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key)
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
Lparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params)
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
if (if_act_search == 1):
g, V, T = cal_graph(params, graph, eorder=eorder, useT=True, act_fn=models.SoftPlus)
else:
g, V, T = cal_graph(params, graph, eorder=eorder, useT=True, mpass=mpass)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
if (if_act_search == 1):
g, V, T = cal_graph(params, graph, eorder=eorder, useT=True, act_fn=models.SoftPlus)
else:
g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
return kin_energy(graph.nodes["velocity"]) - V
R, V = Rs[0], Vs[0]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def energy_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
def gloss(*args):
return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each input array into equally sized minibatches.

    Returns one stacked array per input, shaped (nbatches, size, ...).
    Trailing samples that do not fill a whole batch are dropped so the
    stacked result stays rectangular.
    """
    total = len(args[0])
    if size is None:
        nbatches, size = 1, total
    else:
        # Two candidate batch counts; keep whichever covers more samples.
        count_hi = int((total - 0.5) // size) + 1
        count_lo = max(1, count_hi - 1)
        per_hi = int(total / count_hi)
        per_lo = int(total / count_lo)
        if per_hi * count_hi > per_lo * count_lo:
            size, nbatches = per_hi, count_hi
        else:
            size, nbatches = per_lo, count_lo
    return [
        jnp.array([arg[b * size:(b + 1) * size] for b in range(nbatches)])
        for arg in args
    ]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
start = time.time()
train_time_arr = []
last_loss= 1000
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
l = l/count
larray += [l]
if epoch % 1 == 0:
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): test={ltarray[-1]}, train={larray[-1]}")
if epoch % 10 == 0:
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata={"savedat": epoch})
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata={"savedat": epoch})
if (ifDataEfficiency == 0 and if_lr_search == 0 and if_act_search == 0 and if_mpass_search == 0 and if_hidden_search == 0 and if_noisy_data==0):
np.savetxt(f"../{N}-pendulum-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-training-loss/lgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")
if (if_lr_search == 1):
np.savetxt(f"../lr_search/{N}-pendulum-training-time/lgnn_{lr}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../lr_search/{N}-pendulum-training-loss/lgnn-train_{lr}.txt", larray, delimiter = "\n")
np.savetxt(f"../lr_search/{N}-pendulum-training-loss/lgnn-test_{lr}.txt", ltarray, delimiter = "\n")
if (if_act_search == 1):
np.savetxt(f"../act_search/{N}-pendulum-training-time/lgnn_softplus.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../act_search/{N}-pendulum-training-loss/lgnn-train_softplus.txt", larray, delimiter = "\n")
np.savetxt(f"../act_search/{N}-pendulum-training-loss/lgnn-test_softplus.txt", ltarray, delimiter = "\n")
if (if_mpass_search == 1):
np.savetxt(f"../mpass_search/{N}-pendulum-training-time/lgnn_{mpass}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/lgnn-train_{mpass}.txt", larray, delimiter = "\n")
np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/lgnn-test_{mpass}.txt", ltarray, delimiter = "\n")
if (if_hidden_search == 1):
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-time/lgnn_{hidden}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/lgnn-train_{hidden}.txt", larray, delimiter = "\n")
np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/lgnn-test_{hidden}.txt", ltarray, delimiter = "\n")
if (if_nhidden_search == 1):
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-time/lgnn_{nhidden}.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/lgnn-train_{nhidden}.txt", larray, delimiter = "\n")
np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/lgnn-test_{nhidden}.txt", ltarray, delimiter = "\n")
main()
| 16,955 | 32.117188 | 316 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CLGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
import time
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    # All names in *namespace* bound to exactly this object (identity test).
    return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
    # Print each value labelled by its variable name from *namespace*.
    # NOTE(review): early-bound globals() default — pass locals() for locals.
    for arg in args:
        print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, withdata=None, saveovito=1, trainm=1, runs=10, semilog=1, maxtraj=100, plotthings=False, redo=0, if_noisy_data=0):
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"clgn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
    """Build (and create the directory for) an artifact path for the
    post-processing run, inserting the drag/trainm suffix before the
    file extension and optionally pointing at a model trained on a
    different system size (*trained*)."""
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    if trained is not None:
        # Reuse artifacts of a model trained with `trained` bodies.
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]
    # NOTE(review): `randfilename` is referenced but never defined in this
    # scope — this branch raises NameError unless rname stays falsy
    # (default rname=0). Confirm before enabling rname.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"{withdata}")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained),
*args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=constraints,
non_conservative_forces=drag)
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t = 0.0
skip = 0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj}")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# R = dataset_states[ind].position[0]
# V = dataset_states[ind].velocity[0]
try:
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs)
ylabel("Energy", ax=axs)
title = f"(FGN) {N}-Pendulum Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"LGN {N}-Pendulum Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"LGN {N}-Pendulum Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
savefile(f"error_parameter.pkl", nexp)
except:
print("skipped")
#if skip < 20:
skip += 1
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
filepart = f"{key}"
for i in range(len(nexp[key])):
y = nexp[key][i].flatten()
if key2 is None:
x = range(len(y))
else:
x = nexp[key2][i].flatten()
filepart = f"{filepart}_{key2}"
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.ylabel(yl)
plt.xlabel(xl)
plt.savefig(_filename(f"RelError_{filepart}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
np.savetxt(f"../{N}-pendulum-zerr/clgn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/clgn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-simulation-time/clgn.txt", [t/maxtraj], delimiter = "\n")
# Entry point: evaluate the trained CLGN model on the 10-link pendulum.
main(N = 10)
# main(N = 4)
# main(N = 5)
| 19,008 | 32.116725 | 196 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-FGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return the names in *namespace* bound to exactly *obj* (identity test)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print "name: value" for every argument, resolving the name via *namespace*.

    NOTE: the default namespace is captured at definition time (globals()).
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, withdata=None, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
    """Evaluate a trained FGNODE (full graph neural ODE) model on the
    N-pendulum system.

    Rolls out `maxtraj` trajectories with the ground-truth constrained
    dynamics and with the learned model, then saves trajectories, OVITO
    dumps, rollout errors (Zerr on positions, Herr on total energy) and
    summary plots/text files.

    useN: system size the loaded model was trained on; ifdrag selects the
    ground-truth drag term; ifDataEfficiency/if_noisy_data select the output
    directory (the data-efficiency mode also reads the dataset size from
    sys.argv[1]).
    """
    if (ifDataEfficiency == 1):
        # Data-efficiency study: dataset size comes from the command line.
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(dt, stride, ifdrag,
           namespace=locals())
    PSYS = f"{N}-Pendulum"
    TAG = f"fgnode"
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG, trained=None):
        # Build the full output path for `name`, inserting the drag/train
        # suffix before the file extension and creating directories on demand.
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            # Load artifacts of a model trained on a different system size.
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        name = ".".join(name.split(".")[:-1]) + \
            part + name.split(".")[-1]
        # NOTE(review): `randfilename` is not defined in this script; with the
        # default rname=0 that branch is never evaluated -- confirm before
        # enabling rname.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def displacement(a, b):
        # Free-space displacement (no periodic boundary handling).
        return a - b

    def shift(R, dR, V):
        # Integrator position update; velocity is passed through unchanged.
        return R+dR, V

    def OUT(f):
        # Wrap an I/O helper so it receives the resolved output filename.
        @wraps(f)
        def func(file, *args, tag=TAG, trained=None, **kwargs):
            return f(_filename(file, tag=tag, trained=trained),
                     *args, **kwargs)
        return func

    def _fileexist(f):
        if redo:
            # Force regeneration even when the file already exists.
            return False
        else:
            return os.path.isfile(f)

    loadmodel = OUT(src.models.loadmodel)
    savemodel = OUT(src.models.savemodel)
    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)
    save_ovito = OUT(src.io.save_ovito)
    fileexist = OUT(_fileexist)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)

    # dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
    # model_states = dataset_states[0]
    # R = model_states.position[0]
    # V = model_states.velocity[0]
    # print(
    #     f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
    # N, dim = model_states.position.shape[-2:]

    # Random initial configuration; species/masses are uniform.
    R, V = get_init(N, dim=dim, angles=(-90, 90))
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)

    ################################################
    ################## SYSTEM ######################
    ################################################
    pot_energy_orig = PEF
    kin_energy = partial(lnn._T, mass=masses)

    def Lactual(x, v, params):
        # Ground-truth Lagrangian L = T - V.
        return kin_energy(v) - pot_energy_orig(x)

    def constraints(x, v, params):
        # Jacobian of the holonomic pendulum constraints w.r.t. positions.
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)

    def external_force(x, v, params):
        # NOTE(review): unused below (external_force=None is passed), and
        # jax.ops.index_update was removed from newer JAX releases -- confirm
        # before reactivating.
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)

    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                                lagrangian=Lactual,
                                                non_conservative_forces=drag,
                                                constraints=constraints,
                                                external_force=None)

    def force_fn_orig(R, V, params, mass=None):
        # Acceleration by default; force when a mass vector is supplied.
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)

    def get_forward_sim(params=None, force_fn=None, runs=10):
        # JIT-compiled fixed-length rollout driven by the given force function.
        @jit
        def fn(R, V):
            return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
        return fn

    sim_orig = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=maxtraj*runs)

    def simGT():
        # Generate and cache one long ground-truth trajectory.
        print("Simulating ground truth ...")
        _traj = sim_orig(R, V)
        metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
        savefile("gt_trajectories.pkl",
                 _traj, metadata=metadata)
        return _traj

    # if fileexist("gt_trajectories.pkl"):
    #     print("Loading from saved.")
    #     full_traj, metadata = loadfile("gt_trajectories.pkl")
    #     full_traj = NVEStates(full_traj)
    #     if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
    #         print("Metadata doesnot match.")
    #         full_traj = NVEStates(simGT())
    # else:
    #     full_traj = NVEStates(simGT())

    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)

    # def L_energy_fn(params, graph):
    #     g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
    #     return T - V
    # if trainm:
    #     print("kinetic energy: learnable")
    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return T - V
    # else:
    #     print("kinetic energy: 0.5mv^2")
    #     kin_energy = partial(lnn._T, mass=masses)
    #     def L_energy_fn(params, graph):
    #         g, V, T = cal_graph(params, graph, eorder=eorder,
    #                             useT=True)
    #         return kin_energy(graph.nodes["velocity"]) - V

    def dist(*args):
        # Euclidean length of the displacement between two points.
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())

    R = jnp.array(R)
    V = jnp.array(V)
    species = jnp.array(species).reshape(-1, 1)
    dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def acceleration_fn(params, graph):
        # Learned per-node acceleration from the graph network.
        acc = fgn.cal_cacceleration(params, graph, mpass=1)
        return acc

    def acc_fn(species):
        # Build a closure that refreshes a template graph with the current
        # state and evaluates the learned model on it.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            # Mutate the captured graph in place, then evaluate the model.
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply

    apply_fn = acc_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def F_q_qdot(x, v, params): return apply_fn(x, v, params["L"])

    # Constrained neural-ODE acceleration built from the learned force.
    acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
                                               constraints=None,
                                               non_conservative_forces=None)

    #def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])

    # def nndrag(v, params):
    #     return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
    # if ifdrag == 0:
    #     print("Drag: 0.0")
    #     def drag(x, v, params):
    #         return 0.0
    # elif ifdrag == 1:
    #     print("Drag: -0.1*v")
    #     def drag(x, v, params):
    #         return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
    # acceleration_fn_model = accelerationFull(N, dim,
    #                                          lagrangian=Lmodel,
    #                                          constraints=constraints,
    #                                          non_conservative_forces=drag)

    def force_fn_model(R, V, params, mass=None):
        if mass is None:
            return acceleration_fn_model(R, V, params)
        else:
            return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)

    # Load weights of a model trained on a `useN`-link pendulum.
    params = loadfile(f"trained_model_low.dil", trained=useN)[0]
    sim_model = get_forward_sim(
        params=params, force_fn=force_fn_model, runs=runs)

    ################################################
    ############## forward simulation ##############
    ################################################
    def norm(a):
        # Per-frame L2 norm over all particle coordinates.
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))

    def RelErr(ya, yp):
        # Symmetric relative error, bounded in [0, 1].
        return norm(ya-yp) / (norm(ya) + norm(yp))

    def Err(ya, yp):
        return ya-yp

    def AbsErr(*args):
        return jnp.abs(Err(*args))

    def cal_energy_fn(lag=None, params=None):
        # Per-frame [PE, KE, L, TE] computed from a Lagrangian.
        @jit
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            L = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = -(L - KE)
            return jnp.array([PE, KE, L, KE+PE]).T
        return fn

    Es_fn = cal_energy_fn(lag=Lactual, params=None)
    # Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)

    def net_force_fn(force=None, params=None):
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn

    net_force_orig_fn = net_force_fn(force=force_fn_orig)
    net_force_model_fn = net_force_fn(
        force=force_fn_model, params=params)

    # Accumulators for per-trajectory results.
    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }

    trajectories = []
    sim_orig2 = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=runs)
    t = 0.0  # accumulated wall-clock time of model rollouts

    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")
        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs
        R, V = get_init(N, dim=dim, angles=(-90, 90))
        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]
        actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
        start = time.time()
        pred_traj = sim_model(R, V)
        end = time.time()
        t += end - start
        if saveovito:
            save_ovito(f"pred_{ind}.data", [
                state for state in NVEStates(pred_traj)], lattice="")
            save_ovito(f"actual_{ind}.data", [
                state for state in NVEStates(actual_traj)], lattice="")
        trajectories += [(actual_traj, pred_traj)]
        savefile("trajectories.pkl", trajectories)
        if plotthings:
            # NOTE(review): this raise makes the plotting code below it
            # unreachable (and Es_pred_fn is commented out above) -- confirm
            # whether plotthings is meant to be supported here.
            raise Warning("Cannot calculate energy in FGN")
            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")
                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                Es_pred = Es_pred - Es_pred[0] + Es[0]
                fig, axs = panel(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")
                xlabel("Time step", ax=axs)
                ylabel("Energy", ax=axs)
                title = f"(FGN) {N}-Pendulum Exp {ind}"
                plt.title(title)
                plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)
                fig, axs = panel(1+R.shape[0], 1, figsize=(20,
                                                           R.shape[0]*5), hshift=0.1, vs=0.35)
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Averaged over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                                                        r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Pendulum Exp {ind}")
                plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
        # Total energy is the last column of the [PE, KE, L, TE] table.
        Es = Es_fn(actual_traj)
        Eshat = Es_fn(pred_traj)
        H = Es[:, -1]
        Hhat = Eshat[:, -1]
        nexp["Herr"] += [RelErr(H, Hhat)]
        nexp["E"] += [Es, Eshat]
        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)]
        fig, axs = panel(1, 2, figsize=(20, 5))
        axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
        plt.legend(bbox_to_anchor=(1, 1), loc=2)
        axs[0].set_facecolor("w")
        xlabel("Time step", ax=axs[0])
        xlabel("Time step", ax=axs[1])
        ylabel("Energy", ax=axs[0])
        ylabel("Energy", ax=axs[1])
        title = f"FGNODE-traj {N}-Pendulum Exp {ind} Lmodel"
        axs[1].set_title(title)
        title = f"FGNODE-traj {N}-Pendulum Exp {ind} Lactual"
        axs[0].set_title(title)
        plt.savefig(_filename(title.replace(" ", "-")+f".png"))
        savefile(f"error_parameter.pkl", nexp)

    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        # Plot every trajectory's error curve, then the geometric-mean curve
        # with a +/- 2-sigma band computed in log space across trajectories.
        print(f"Plotting err for {key}")
        fig, axs = panel(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))
        fig, axs = panel(1, 1)
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))

    make_plots(nexp, "Zerr",
               yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
    make_plots(nexp, "Herr",
               yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")

    # Geometric means across trajectories, plus mean rollout wall time.
    gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
    gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )

    if (ifDataEfficiency == 0):
        np.savetxt(f"../{N}-pendulum-zerr/fgnode.txt", gmean_zerr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-herr/fgnode.txt", gmean_herr, delimiter = "\n")
        np.savetxt(f"../{N}-pendulum-simulation-time/fgnode.txt", [t/maxtraj], delimiter = "\n")
# Entry point: evaluate the trained FGNODE model on 4- and 5-link pendulums.
main(N = 4)
main(N = 5)
| 18,532 | 32.27289 | 219 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-FGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return the names in *namespace* bound to exactly *obj* (identity test)."""
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print "name: value" for every argument, resolving the name via *namespace*.

    NOTE: the default namespace is captured at definition time (globals()).
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def wrap_main(f):
    """Decorator: echo the call's positional/keyword arguments to stdout,
    then invoke *f* with an extra keyword ``config=(args, kwargs)`` so the
    wrapped function can persist its own configuration."""
    def fn(*args, **kwargs):
        cfg = (args, kwargs)
        for banner in ("Configs: ", "Args: "):
            print(banner)
        for positional in args:
            print(positional)
        print("KwArgs: ")
        for key in kwargs:
            print(key, ":", kwargs[key])
        return f(*args, **kwargs, config=cfg)
    return fn
def Main(N=4, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=1000, ifDataEfficiency = 0, if_noisy_data=0):
    """CLI entry point: forward every option to main() through wrap_main,
    which prints the configuration and injects config=(args, kwargs)."""
    opts = dict(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat,
                error_fn=error_fn, dt=dt, ifdrag=ifdrag, stride=stride,
                trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                withdata=withdata, datapoints=datapoints,
                batch_size=batch_size, ifDataEfficiency=ifDataEfficiency,
                if_noisy_data=if_noisy_data)
    return wrap_main(main)(**opts)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-body"
TAG = f"fgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# if grid:
# print("It's a grid?")
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# print("It's a random?")
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
R, V = Rs[0], Vs[0]
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
hidden_dim = [16, 16]
edgesize = 1
nodesize = 1 + 2*dim
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
)
def acceleration_fn(params, graph):
acc = fgn.cal_acceleration(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = jit(accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
# Mean-squared error between the batched model's predicted accelerations
# and the ground-truth accelerations Fs; jitted for speed.
# Reads v_acceleration_fn_model and MSE from the enclosing scope.
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(
partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each sequence in *args* into equal-length minibatches.

    Args:
        *args: sequences of equal length (indexable, sliceable).
        size: target batch size; when None, everything becomes one batch.

    Returns:
        One stacked jnp.ndarray of shape (nbatches, batch_size, ...) per
        input sequence.

    Note: batches are forced to equal length, so when len(args[0]) is not
    divisible by the chosen size, trailing samples are silently dropped.
    """
    L = len(args[0])
    if size is not None:  # fixed: `!= None` -> identity comparison (PEP 8)
        # Consider both the ceil and floor batch counts and keep whichever
        # covers more samples once every batch is forced to equal length.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    return [jnp.array([arg[i * size:(i + 1) * size]
                       for i in range(nbatches)])
            for arg in args]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
# Report the most recent train/test loss. Reads epoch, epochs, error_fn,
# larray and ltarray from the enclosing training-loop scope at call time,
# so each call reflects the loop's current state.
def print_loss():
print(
f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-body-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../{N}-body-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
Main()
| 16,177 | 30.413592 | 184 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-LGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import time
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every key in `namespace` whose value is identically `obj`."""
    matches = []
    for key in namespace:
        if namespace[key] is obj:
            matches.append(key)
    return matches
# Print "name: value" for each arg, recovering the variable name via
# namestr(). NOTE: the `namespace=globals()` default is evaluated once at
# definition time and binds THIS module's globals dict, so callers in other
# modules must pass namespace= explicitly. Raises IndexError if an arg is
# not bound to any name in the namespace (namestr returns an empty list).
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Free-space displacement between two positions: simply a - b
    (no periodic wrapping)."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
epochs=10000
seed=100
rname=False
dt=1.0e-3
ifdrag=0
stride=100
trainm=1
lr=0.001
withdata=None
datapoints=None
batch_size=20
ifDataEfficiency = 0
mpass=1
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"peridynamics"
TAG = f"LGNN"
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Advance positions by dR; velocities are passed through unchanged."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    """Decorator: resolve the first argument (a bare file name) through
    _filename() so that `f` receives the full run-directory path."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States(graphs).get_array()
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(allRs))
Nts = len(allRs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
################################################
################### ML Model ###################
################################################
dim = 3
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 8
Nei = 8
Nei_ = 5 ##Nei for mass
hidden = 8
nhidden = 2
def get_layers(in_, out_):
    """MLP layer widths: input size, `nhidden` hidden layers of width
    `hidden` (module-level settings), then the output size."""
    return [in_, *([hidden] * nhidden), out_]
def mlp(in_, out_, key, **kwargs):
    """Initialize an MLP whose layer sizes come from get_layers()."""
    sizes = get_layers(in_, out_)
    return initialize_mlp(sizes, key, **kwargs)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key) #
Nei = Nei
fb_params = mlp(Ef, Eei, key) #
fv_params = mlp(Nei+Eei, Nei, key) #
fe_params = mlp(Nei, Eei, key) #
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key) #
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_, 5, 1], key, affine=[True]) #
Lparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params,
mass=mass_params)
#params = {"Fqqdot": Fparams}
# def graph_force_fn(params, graph):
# _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
# useT=True)
# return _GForce
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
# def _force_fn(species):
# state_graph = graph
# def apply(R, V, params):
# state_graph.nodes.update(position=R)
# state_graph.nodes.update(velocity=V)
# return graph_force_fn(params, state_graph)
# return apply
# apply_fn = _force_fn(species)
# # v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
# apply_fn(R, V, Fparams)
# def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# acceleration_fn_model = F_q_qdot
# # acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# # constraints=None)
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=None,
useT=True, useonlyedge=False)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, mpass=mpass, eorder=None,
useT=True, useonlyedge=False)
return kin_energy(graph.nodes["velocity"]) - V
def energy_fn(species):
state_graph = graph
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
# Learnable drag: magnitude |NN(v)| from a small MLP, signed opposite to v
# so the force always opposes the motion.
# NOTE(review): `models` is not imported by name in this file (only
# `from src.models import MSE, initialize_mlp` is visible) — presumably
# `from src.utils import *` re-exports it; confirm before using ifdrag=1.
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
# print(R)
# #print(V)
# print(Lmodel(R, V, params))
print(acceleration_fn_model(R,V, params))
# sys.exit()
# print(Lmodel(R,V,params))
# sys.exit()
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
# Mean-squared error between the batched Lagrangian model's predicted
# accelerations and the ground-truth accelerations Fs.
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
# loss_fn(params, Rs[:1], Vs[:1], Fs[:1])
def gloss(*args):
return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each sequence in *args* into equal-length minibatches.

    Args:
        *args: equally long, sliceable sequences (e.g. jnp arrays).
        size: target batch size; None collapses everything into one batch.

    Returns:
        A list with one stacked jnp.ndarray of shape
        (nbatches, batch_size, ...) per input.

    Warning: batches are equal-length, so a remainder of
    len(args[0]) % batch_size samples is silently dropped.
    """
    L = len(args[0])
    if size is not None:  # fixed: identity comparison instead of `!= None`
        # Pick between ceil/floor batch counts, maximizing covered samples.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    return [jnp.array([arg[i * size:(i + 1) * size]
                       for i in range(nbatches)])
            for arg in args]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
start = time.time()
train_time_arr = []
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
for epoch in range(epochs):
l = 0.0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
larray += [l_]
ltarray += [loss_fn(params, Rst, Vst ,Fst)]
if epoch % 10 == 0:
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
if epoch % 10 == 0:
metadata = {
"savedat": epoch,
# "mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgnn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")
now = time.time()
train_time_arr.append((now - start))
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"perignode_trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"perignode_trained_model_{ifdrag}_{trainm}_low.dil",
params, metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../peridynamics-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgnn-train.txt", larray, delimiter = "\n")
np.savetxt("../peridynamics-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
| 13,916 | 27.344196 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-LGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List the names in `namespace` bound to exactly this object
    (identity comparison, not equality)."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
def wrap_main(f):
    """Wrap `f` so that the raw (args, kwargs) of each call are echoed to
    stdout and also forwarded to `f` as the extra `config` keyword.

    Fix: apply functools.wraps so the wrapper keeps f's name/docstring
    (the original wrapper clobbered the metadata).
    """
    @wraps(f)
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print("Args: ")  # plain strings: the f-prefix had no placeholders
        for i in args:
            print(i)
        print("KwArgs: ")
        for k, v in kwargs.items():
            print(k, ":", v)
        # Forward the original call, plus the captured config tuple.
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, if_noisy_data = 1):
"""CLI entry point (invoked via fire.Fire at the bottom of the file):
forwards every hyperparameter to main() through wrap_main, which also
prints the call and records it as the `config` keyword."""
return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, error_fn=error_fn, mpass=mpass,
dt=dt, ifdrag=ifdrag, trainm=trainm, stride=stride, lr=lr, datapoints=datapoints,
batch_size=batch_size, saveat=saveat, if_noisy_data = if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=100, config=None, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Pendulum"
TAG = f"lgn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
R, V = Rs[0], Vs[0]
species = jnp.array(species).reshape(-1, 1)
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = jit(acc_fn(species))
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
# Learnable drag force: an MLP gives the magnitude, jnp.abs and the minus
# sign guarantee it always opposes the velocity.
# NOTE(review): this file imports `from src.models import MSE,
# initialize_mlp` but never binds `models` directly — presumably
# `from src.utils import *` provides it; verify before running ifdrag=1.
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
print(acceleration_fn_model(R, V, params))
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
# LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
# grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each sequence in *args* into equal-length minibatches.

    Args:
        *args: equally long sliceable sequences.
        size: target batch size; None means a single batch of everything.

    Returns:
        One stacked jnp.ndarray of shape (nbatches, batch_size, ...) per
        input. Any remainder that does not fill a whole batch is dropped.
    """
    L = len(args[0])
    if size is not None:  # fixed: use identity comparison, not `!= None`
        # Evaluate both the ceil and floor batch counts; keep the split
        # covering more samples under the equal-batch-length constraint.
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L / nbatches1)
        size2 = int(L / nbatches2)
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    return [jnp.array([arg[i * size:(i + 1) * size]
                       for i in range(nbatches)])
            for arg in args]
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000000
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs}: train={larray[-1]}, test={ltarray[-1]}")
# print_loss()
start = time.time()
train_time_arr = []
for epoch in range(epochs):
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
if epoch % 1 == 0:
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
now = time.time()
train_time_arr.append((now - start))
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
# if epoch % 10000 == 0:
# savefile(f"trained_model_{ifdrag}_{trainm}_low_{epoch}.dil",
# params, metadata=metadata)
plt.clf()
fig, axs = plt.subplots(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
np.savetxt("../3-pendulum-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
fire.Fire(Main)
| 15,541 | 30.146293 | 142 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
from turtle import hideturtle
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
import time
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
class Datastate:
    """Turn a simulated trajectory into one-step training pairs.

    Keeps every frame except the last as the input state, and stores the
    finite difference to the following frame as the prediction target.
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        # Inputs: all frames but the final one.
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        self.index = 0
        # Targets: one-step change in position / velocity.
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
def namestr(obj, namespace):
    """Return every name in *namespace* bound to exactly the object *obj*
    (identity comparison, not equality)."""
    matches = []
    for name, value in namespace.items():
        if value is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as ``<name>: <value>``, resolving the name via
    `namestr` against *namespace* (bound to module globals at def time)."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def wrap_main(f):
    """Decorator: echo the call's positional/keyword arguments to stdout,
    then invoke *f* with the same arguments plus a ``config=(args, kwargs)``
    record of the raw call."""
    def wrapped(*args, **kwargs):
        captured = (args, kwargs)
        print("Configs: ")
        print("Args: ")
        for positional in args:
            print(positional)
        print("KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=captured)
    return wrapped
# CLI entry point (exposed through fire.Fire at the bottom of this script):
# forwards every hyper-parameter unchanged to `main` via `wrap_main`, which
# also records the raw (args, kwargs) call as `config`.
def Main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, if_lr_search = 0, if_act_search = 0, if_mpass_search=0, if_hidden_search = 0, hidden = 16, if_nhidden_search = 0, nhidden=2, if_noisy_data=1):
# NOTE(review): some defaults here differ from `main`'s own defaults
# (e.g. hidden=16 here vs hidden=5 there; batch_size=100 vs 1000) — the
# values here win since everything is passed explicitly; confirm intended.
return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, error_fn=error_fn, mpass=mpass,
dt=dt, ifdrag=ifdrag, trainm=trainm, stride=stride, lr=lr, datapoints=datapoints,
batch_size=batch_size, saveat=saveat, if_lr_search=if_lr_search, if_act_search=if_act_search, if_mpass_search=if_mpass_search, if_hidden_search=if_hidden_search, hidden=hidden, if_nhidden_search=if_nhidden_search, nhidden=nhidden, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, if_lr_search=0, if_act_search = 0, if_mpass_search=0, if_hidden_search = 0, hidden = 5, if_nhidden_search=0, nhidden=2, if_noisy_data = 1):
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Pendulum"
TAG = f"fgnn"
if (if_lr_search == 1):
out_dir = f"../lr_search"
elif (if_act_search == 1):
out_dir = f"../act_search"
elif (if_mpass_search == 1):
out_dir = f"../mpass_search"
elif (if_hidden_search == 1):
out_dir = f"../mlp_hidden_search"
elif (if_nhidden_search == 1):
out_dir = f"../mlp_nhidden_search"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
# rstring = randfilename if (rname and (tag != "data")) else (
# "0" if (tag == "data") or (withdata == None) else f"{withdata}")
if (if_lr_search == 1):
rstring = "1_" + str(lr)
elif (if_act_search == 1):
rstring = "1_softplus"
elif (if_mpass_search == 1):
rstring = "1_" + str(mpass)
elif (if_hidden_search == 1):
rstring = "1_" + str(hidden)
elif (if_nhidden_search == 1):
rstring = "1_" + str(nhidden)
else:
rstring = 0 if (tag != "data") else 1
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{1}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
    """Free-space displacement: the componentwise difference ``a - b``."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Integrator shift: advance positions by *dR*; velocities pass through."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Wrap *f* so its first argument (a bare file name) is routed through
    `_filename`, i.e. resolved inside the run's output directory."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# if len(dataset_states)*model_states.position.shape[0] != 10000:
# raise Exception("Invalid number of data points")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
# Rs = Rs.reshape(-1, N, dim)
# Vs = Vs.reshape(-1, N, dim)
# Fs = Fs.reshape(-1, N, dim)
Rs, Vs, Fs, Rds, Vds = States_modified().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
Rds = Rds.reshape(-1, N, dim)
Vds = Vds.reshape(-1, N, dim)
# mask = np.random.choice(len(Rs), len(Rs), replace=False)
# allRs = Rs[mask]
# allVs = Vs[mask]
# allFs = Fs[mask]
if (if_noisy_data == 1):
Rs = np.array(Rs)
Rds = np.array(Rds)
Fs = np.array(Fs)
Vs = np.array(Vs)
Vds = np.array(Vds)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Rds[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Vds[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Rds = jnp.array(Rds)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
Vds = jnp.array(Vds)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
allRds = Rds[mask]
allVds = Vds[mask]
# Ntr = int(0.75*len(Rs))
# Nts = len(Rs) - Ntr
# Rs = allRs[:Ntr]
# Vs = allVs[:Ntr]
# Fs = allFs[:Ntr]
# Rst = allRs[Ntr:]
# Vst = allVs[Ntr:]
# Fst = allFs[Ntr:]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rds = allRds[:Ntr]
Vds = allVds[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
Rdst = allRds[Ntr:]
Vdst = allVds[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
hidden_dim = [hidden for i in range(nhidden)]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
delta_params = initialize_mlp([ne, *hidden_dim, dim*2], key),
)
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
R, V = Rs[0], Vs[0]
species = jnp.array(species).reshape(-1, 1)
def dist(*args):
    """Euclidean distance ``||a - b||_2`` between two coordinate vectors."""
    a, b = args
    diff = a - b  # free-space displacement (inlined trivial `displacement`)
    return jnp.sqrt(jnp.square(diff).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
if (if_act_search == 1):
acc = fgn.cal_delta_temp(params, graph, mpass=1)
else:
acc = fgn.cal_delta(params, graph, mpass=mpass)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers]))
return acceleration_fn(params, state_graph)
return apply
apply_fn = jit(acc_fn(species))
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
# print(acceleration_fn_model(R, V, params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
# LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Rds, Vds):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, jnp.concatenate([Rds,Vds], axis=2))
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
# grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
# bRs, bVs, bFs = batching(Rs, Vs, Fs,
# size=min(len(Rs), batch_size))
bRs, bVs, bRds, bVds = batching(Rs, Vs, Rds, Vds,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
# larray += [loss_fn(params, Rs, Vs, Fs)]
# ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs}): train={larray[-1]}, test={ltarray[-1]}")
# print_loss()
start = time.time()
train_time_arr = []
for epoch in range(epochs):
for data in zip(bRs, bVs, bRds, bVds):
optimizer_step += 1
opt_state, params, l_ = step(optimizer_step, (opt_state, params, 0), *data)
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
if epoch % 1 == 0:
larray += [loss_fn(params, Rs, Vs, Rds, Vds)]
ltarray += [loss_fn(params, Rst, Vst, Rdst, Vdst)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
now = time.time()
train_time_arr.append((now - start))
fig, axs = plt.subplots(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
# Persist wall-clock training time and train/test loss curves into the
# directory matching the active experiment mode.
# BUG FIX: this used to be a broken mix of an if/elif chain followed by two
# independent `if` statements — when e.g. the lr-search branch ran, the
# trailing `else` of the nhidden check ALSO fired and wrote the default
# "../3-pendulum-*" files (overwriting results, or crashing if the dirs do
# not exist).  A single elif chain makes the branches mutually exclusive,
# mirroring the out_dir selection at the top of main().
if if_lr_search == 1:
    np.savetxt(f"../lr_search/{N}-pendulum-training-time/fgnn_{lr}.txt", train_time_arr, delimiter="\n")
    np.savetxt(f"../lr_search/{N}-pendulum-training-loss/fgnn-train_{lr}.txt", larray, delimiter="\n")
    np.savetxt(f"../lr_search/{N}-pendulum-training-loss/fgnn-test_{lr}.txt", ltarray, delimiter="\n")
elif if_act_search == 1:
    np.savetxt(f"../act_search/{N}-pendulum-training-time/fgnn_softplus.txt", train_time_arr, delimiter="\n")
    np.savetxt(f"../act_search/{N}-pendulum-training-loss/fgnn-train_softplus.txt", larray, delimiter="\n")
    np.savetxt(f"../act_search/{N}-pendulum-training-loss/fgnn-test_softplus.txt", ltarray, delimiter="\n")
elif if_mpass_search == 1:
    np.savetxt(f"../mpass_search/{N}-pendulum-training-time/fgnn_{mpass}.txt", train_time_arr, delimiter="\n")
    np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/fgnn-train_{mpass}.txt", larray, delimiter="\n")
    np.savetxt(f"../mpass_search/{N}-pendulum-training-loss/fgnn-test_{mpass}.txt", ltarray, delimiter="\n")
elif if_hidden_search == 1:
    np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-time/fgnn_{hidden}.txt", train_time_arr, delimiter="\n")
    np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/fgnn-train_{hidden}.txt", larray, delimiter="\n")
    np.savetxt(f"../mlp_hidden_search/{N}-pendulum-training-loss/fgnn-test_{hidden}.txt", ltarray, delimiter="\n")
elif if_nhidden_search == 1:
    np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-time/fgnn_{nhidden}.txt", train_time_arr, delimiter="\n")
    np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/fgnn-train_{nhidden}.txt", larray, delimiter="\n")
    np.savetxt(f"../mlp_nhidden_search/{N}-pendulum-training-loss/fgnn-test_{nhidden}.txt", ltarray, delimiter="\n")
else:
    np.savetxt("../3-pendulum-training-time/fgnn.txt", train_time_arr, delimiter="\n")
    np.savetxt("../3-pendulum-training-loss/fgnn-train.txt", larray, delimiter="\n")
    np.savetxt("../3-pendulum-training-loss/fgnn-test.txt", ltarray, delimiter="\n")
# Main()
main()
| 20,161 | 32.942761 | 286 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-HGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List the keys of *namespace* whose value is identically *obj*."""
    return [key for key, val in namespace.items() if val is obj]
def pprint(*args, namespace=globals()):
    """Print ``name: value`` for each argument, looking the name up with
    `namestr` in *namespace* (module globals captured at def time)."""
    for item in args:
        name = namestr(item, namespace)[0]
        print(f"{name}: {item}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"hgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
# rstring = randfilename if (rname and (tag != "data")) else (
# "0" if (tag == "data") or (withdata == None) else f"{withdata}")
rstring = "0" if (tag != "data" ) else "2"
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
    """Relative displacement from *b* to *a* (open boundaries, no wrapping)."""
    delta = a - b
    return delta
def shift(R, dR, V):
    """Move positions by *dR*; return them with the unchanged velocities."""
    new_R = R + dR
    return new_R, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
    """True iff *f* exists on disk — unless the enclosing ``redo`` flag is
    set, in which case pretend it does not so results get regenerated."""
    return (not redo) and os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
z_out, zdot_out = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
senders = jnp.array(senders)
receivers = jnp.array(receivers)
# R = model_states.position[0]
# V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{z_out.shape}")
N, dim = xout.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
return jax.vmap(partial(src.hamiltonian.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, p, params):
return kin_energy(p) + pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
# Constant external load: a unit force in the -y direction on particle 1,
# zero everywhere else; returned flattened to a column vector.
# NOTE(review): `jax.ops.index_update` was removed from newer JAX releases
# (the replacement is `F.at[(1, 1)].set(-1.0)`) — confirm the pinned JAX
# version before upgrading.  `R` is the module-level initial position array.
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=None)
def zdot_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot(x, p, params)
def z0(x, p):
    """Stack positions over momenta into a single phase-space state ``z``."""
    return jnp.vstack((x, p))
def get_forward_sim(params=None, zdot_func=None, runs=10):
def fn(R, V):
t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
_z_out = ode.odeint(zdot_func, z0(R, V), t, params)
return _z_out[0::stride]
return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot_model(x, p, params)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-sample L2 norm: flatten all but the leading axis and return a
    length-``len(a)`` vector of Euclidean norms."""
    flat_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error ``||ya-yp|| / (||ya|| + ||yp||)`` per sample."""
    numerator = norm(ya - yp)
    denominator = norm(ya) + norm(yp)
    return numerator / denominator
def Err(ya, yp):
    """Signed pointwise error: actual minus predicted."""
    signed = ya - yp
    return signed
def AbsErr(*args):
    """Absolute value of the signed error produced by `Err`."""
    signed = Err(*args)
    return jnp.abs(signed)
def caH_energy_fn(lag=None, params=None):
def fn(states):
KE = vmap(kin_energy)(states.velocity)
H = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = (H - KE)
# return jnp.array([H]).T
return jnp.array([PE, KE, H, KE+PE]).T
return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
def net_force_fn(force=None, params=None):
def fn(states):
zdot_out = vmap(force, in_axes=(0, 0, None))(
states.position, states.velocity, params)
_, force_out = jnp.split(zdot_out, 2, axis=1)
return force_out
return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
if ind > maxtraj+skip:
break
_ind = ind*runs
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
z_out, _ = dataset_states[ind]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
try:
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
#raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"HGN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"HGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"HGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"HGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
print("skipped")
if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/hgn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/hgn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/hgn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/hgn.txt", [t/maxtraj], delimiter = "\n")
# main(N = 20)
main(N = 5)
| 21,151 | 32.8432 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp,batch_MSE
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in `namespace` whose value is identically `obj`.

    Identity (`is`), not equality, is used, so only the exact object matches.
    """
    matches = []
    for name in namespace:
        if namespace[name] is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as "<name>: <value>", resolving the name via
    `namestr` in `namespace`.

    NOTE: the default namespace is captured once, at definition time.
    """
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
# ---- Run configuration (hard-coded; the CLI `main` wrapper is commented out) ----
N=3                    # number of pendulum bobs
epochs=10000           # training epochs
seed=42                # RNG seed for numpy / jax
rname=True             # timestamped run directory (overridden inside _filename)
dt=1.0e-5              # integrator time step
ifdrag=0               # 0: no drag, 1: learned drag
trainm=1               # train kinetic-energy term flag
stride=1000            # saved-state stride of the ground-truth integrator
lr=0.001               # Adam learning rate
withdata=None
datapoints=None        # optional cap on the number of trajectories used
batch_size=100
ifDataEfficiency = 0   # 1: data-efficiency study (reads dataset size from argv)
if_noisy_data = 1      # 1: inject Gaussian noise into the loaded dataset
# def main(N=2, epochs=10000, seed=42, rname=True,
#         dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=1000):
if (ifDataEfficiency == 1):
    data_points = int(sys.argv[1])
    batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname,
       dt, stride, lr, ifdrag, batch_size,
       namespace=locals())
# Timestamp-based run label (only used when rname is honoured by _filename).
randfilename = datetime.now().strftime(
    "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Pendulum"
TAG = f"cgnode"
# Output root depends on which experiment variant is active.
if (ifDataEfficiency == 1):
    out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
    out_dir = f"../noisy_data"
else:
    out_dir = f"../results"
def _filename(name, tag=TAG):
    """Build (and create the directory for) an output path for `name`.

    Data files always resolve under ``../results/{PSYS}-data/0/``; every
    other tag resolves under ``{out_dir}/{PSYS}-{tag}/{rstring}/``.
    """
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    # NOTE(review): the next line unconditionally discards the rstring
    # computed above, so runs are never timestamped — confirm intentional.
    rstring = "0"
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free-space displacement vector from `b` to `a` (no periodic wrap)."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Advance positions by `dR`; velocities pass through unchanged."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator: route a function's first argument (a file name) through
    `_filename` so all I/O lands in the run's output directory."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        resolved = _filename(file, tag=tag)
        return f(resolved, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# Load the pre-generated ground-truth trajectories.
try:
    dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:  # NOTE(review): bare except hides the real I/O error behind this message
    raise Exception("Generate dataset first.")
if datapoints is not None:
    dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
    f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# Infer system size and dimensionality from the stored states.
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
# Rs = Rs.reshape(-1, N, dim)
# Vs = Vs.reshape(-1, N, dim)
# Fs = Fs.reshape(-1, N, dim)
# Keep a singleton axis so batched vmap over trajectories works downstream.
Rs = Rs.reshape(-1, 1, N, dim)
Vs = Vs.reshape(-1, 1, N, dim)
Fs = Fs.reshape(-1, 1, N, dim)
# Optionally corrupt the dataset with Gaussian noise (noisy-data experiment).
if (if_noisy_data == 1):
    Rs = np.array(Rs)
    Fs = np.array(Fs)
    Vs = np.array(Vs)
    np.random.seed(100)
    for i in range(len(Rs)):
        # NOTE(review): np.random.normal(0,1,1) draws a SINGLE sample that is
        # broadcast over the whole frame, so every entry of Rs[i] gets the
        # same offset — confirm this (rather than per-element noise) is intended.
        Rs[i] += np.random.normal(0,1,1)
        Vs[i] += np.random.normal(0,1,1)
        Fs[i] += np.random.normal(0,1,1)
    Rs = jnp.array(Rs)
    Fs = jnp.array(Fs)
    Vs = jnp.array(Vs)
# Shuffle, then split 75/25 into train and test sets.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
print(f"training data shape(Rs): {Rs.shape}")
print(f"test data shape(Rst): {Rst.shape}")
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
    """Jacobian of the pendulum holonomic constraints h(x) w.r.t. positions.

    `v` and `params` are accepted only for interface compatibility; they
    are unused.
    """
    return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
Nei_ = 5 # for mass learning
hidden = 5
nhidden = 2
def get_layers(in_, out_):
    """Layer sizes: input, `nhidden` hidden layers of width `hidden`, output."""
    sizes = [in_]
    sizes.extend([hidden] * nhidden)
    sizes.append(out_)
    return sizes
def mlp(in_, out_, key, **kwargs):
    """Initialise an MLP with the standard layer layout from `get_layers`."""
    return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# # fne_params = mlp(Oh, Nei, key)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key) #
# Nei = Nei+dim+dim
fb_params = mlp(Ef, Eei, key) #
fv_params = mlp(Nei+Eei, Nei, key) #
fe_params = mlp(Nei, Eei, key) #
ff1_params = mlp(Eei, dim, key)
ff2_params = mlp(Nei, dim, key) #
ff3_params = mlp(Nei+dim+dim, dim, key)
ke_params = initialize_mlp([1+Nei, 5, 5, 1], key, affine=[True])
mass_params = initialize_mlp([Nei_,5, 1], key, affine=[True]) #
Fparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params,
mass=mass_params)
params = {"Fqqdot": Fparams}
def graph_force_fn(params, graph):
    # Node-wise force prediction from the graph network; `eorder` maps each
    # directed edge to its reverse so the pendulum edges are symmetrised.
    _GForce = cdgnode_cal_force_q_qdot(params, graph, eorder=eorder,
                                       useT=True)
    return _GForce
R, V = Rs[0][0], Vs[0][0]
def force_fn(species):
    """Build an `apply(R, V, params)` closure over a reusable pendulum graph.

    The jraph GraphsTuple is constructed once (from the module-level R, V
    templates) and its node features are mutated in place on every call.
    """
    senders, receivers = [np.array(i)
                          for i in pendulum_connections(R.shape[0])]
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def apply(R, V, params):
        # Refresh node features, then evaluate the graph force model.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = force_fn(species)
# v_apply_fn = vmap(apply_fn, in_axes=(0, 0, None))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# print(qddot(R,V,params))
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = qddot
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=constraints,
non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
x=Rs[0]
v=Vs[0]
# F_q_qdot(x[0], v[0], params)
# acceleration_fn_model(x[0], v[0], params)
# hhhh = v_v_acceleration_fn_model(Rs, Vs, params)
# # print(hhhh)
# print(hhhh.shape)
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    """MSE between model-predicted accelerations and target accelerations Fs."""
    pred = v_v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)
def gloss(*args):
    # Loss value together with its gradient w.r.t. the first argument (params).
    return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    # `loss__` is unused; kept for interface compatibility with `step`.
    value, grads_ = gloss(params, *data)
    # `opt_update` is the NaN-sanitising wrapper defined below (resolved at call time).
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
    # Convenience wrapper so (opt_state, params, loss) can be passed as one tuple.
    return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
    # Replace NaNs/Infs in the gradients before handing them to Adam.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    # grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
    """Split each array in `args` into equally sized minibatches.

    Given a requested batch `size`, the function considers both
    ceil-ish (`nbatches1`) and one-fewer (`nbatches2`) batch counts and
    keeps whichever covers more samples; any trailing samples that do
    not fill the chosen grid are dropped.  With ``size=None`` everything
    becomes a single batch.

    Returns a list with one stacked array of shape
    ``(nbatches, size, ...)`` per input array.
    """
    L = len(args[0])
    if size is not None:  # idiomatic identity check (was `size != None`)
        nbatches1 = int((L - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(L/nbatches1)
        size2 = int(L/nbatches2)
        # Pick the (size, nbatches) pair that keeps more of the data.
        if size1*nbatches1 > size2*nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = L
    newargs = []
    for arg in args:
        newargs += [jnp.array([arg[i*size:(i+1)*size]
                               for i in range(nbatches)])]
    return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
# ---- Main training loop: minibatch Adam on the acceleration MSE ----
for epoch in range(epochs):
    l = 0.0
    count = 0
    for data in zip(bRs, bVs, bFs):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)
        l += l_
        count+=1
    # opt_state, params, l = step(
    #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
    l = l/count  # mean training loss over minibatches for this epoch
    larray += [l]
    # Test loss is evaluated on the full held-out set every epoch.
    ltarray += [loss_fn(params, Rst, Vst, Fst)]
    if epoch % 10 == 0:
        print(
            f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
    # Periodic checkpoint: model, loss history, best-so-far model, loss plot.
    if epoch % 100 == 0:
        metadata = {
            "savedat": epoch,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
        plt.clf()
        fig, axs = panel(1, 1)
        plt.semilogy(larray, label="Training")
        plt.semilogy(ltarray, label="Test")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    now = time.time()
    train_time_arr.append((now - start))  # cumulative wall time since training began
# ---- Final loss plot, model dump, and timing export ----
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
         params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
         (larray, ltarray), metadata=metadata)
# NOTE(review): filename differs from the in-loop best checkpoint
# ("trained_model_low_<...>.dil" vs "..._low.dil" here) — confirm intentional.
if last_loss > larray[-1]:
    last_loss = larray[-1]
    savefile(f"trained_model_{ifdrag}_{trainm}_low.dil",
             params, metadata=metadata)
if (ifDataEfficiency == 0):
    np.savetxt("../3-pendulum-training-time/cgnode.txt", train_time_arr, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/cgnode-train.txt", larray, delimiter = "\n")
    np.savetxt("../3-pendulum-training-loss/cgnode-test.txt", ltarray, delimiter = "\n")
# fire.Fire(main)
# x=R
# v=V
# F_q_qdot(x, v, params)
# nn = np.prod(R.shape)
# params1 = initialize_mlp([nn, 100, nn], key)
# params = {
# 0: params,
# 1: params1
# }
# def MLP_(params, s, v):
# xx = s + v
# return forward_pass(params, xx, activation_fn=SquarePlus)
# def qddot(x, v, params):
# S = F_q_qdot(x,v,params[0])
# return (S.flatten() - MLP_(params[1], S.flatten(), v.flatten())).reshape(-1, dim)
| 14,134 | 26.661448 | 114 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CHGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
#from sympy import fu
import time
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn1, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.hamiltonian import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=3, dt=1.0e-5, useN=3, withdata=None, datapoints=100, mpass=1, grid=False, stride=1000, ifdrag=0, seed=42, rname=0, saveovito=0, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
if useN is None:
useN = N
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"chgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    def _filename(name, tag=TAG, trained=None):
        """Build (and create the directory for) an output path for `name`.

        A ``_{ifdrag}[_{trainm}]`` suffix is spliced in before the file
        extension; `trained` substitutes a different system size into the
        directory name so models trained on another N can be loaded.
        """
        if tag == "data":
            part = f"_{ifdrag}."
        else:
            part = f"_{ifdrag}_{trainm}."
        if trained is not None:
            psys = f"{trained}-{PSYS.split('-')[1]}"
        else:
            psys = PSYS
        # Insert the suffix just before the extension.
        name = ".".join(name.split(".")[:-1]) + \
            part + name.split(".")[-1]
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dim=2
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# senders = jnp.array(senders)
# receivers = jnp.array(receivers)
# z_out, zdot_out = model_states
# R,V = jnp.split(z_out[0], 2, axis=0)
# print(
# f"Total number of training data points: {len(dataset_states)}x{z_out.shape[0]}")
# N2, dim = z_out.shape[-2:]
# N=int(N2/2)
# species = jnp.zeros(N, dtype=int)
# masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
pot_energy_orig = PEF
kin_energy = partial(src.hamiltonian._T, mass=masses)
    def Hactual(x, v, params):
        """Ground-truth Hamiltonian: kinetic plus potential energy (params unused)."""
        return kin_energy(v) + pot_energy_orig(x)
def phi(x):
X = jnp.vstack([x[:1, :]*0, x])
return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0
constraints = get_constraints(N, dim, phi)
    def external_force(x, v, params):
        # NOTE(review): relies on jax.ops.index_update (removed in newer JAX)
        # and on the free variable R from the enclosing scope; appears unused
        # in this script — confirm before relying on it.
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)
    # Ground-truth drag model selected by the `ifdrag` flag.
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")
        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=constraints)
    def zdot_func(z, t, params):
        """ODE right-hand side for `ode.odeint`: split z into (x, p) and
        evaluate the constrained Hamiltonian dynamics (`t` unused)."""
        x, p = jnp.split(z, 2)
        return zdot(x, p, params)
    def z0(x, p):
        """Stack positions on top of momenta into one phase-space array."""
        return jnp.vstack([x, p])
# t = jnp.linspace(0.0, runs*dt, runs)
# ode.odeint(zdot_func, z0(R, V), t)
    def get_forward_sim(params=None, zdot_func=None, runs=10):
        """Return a simulator that integrates `zdot_func` for runs*stride fine
        steps and keeps every `stride`-th state (i.e. `runs` saved frames)."""
        def fn(R, V):
            t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
            _z_out = ode.odeint(zdot_func, z0(R, V), t, params)
            return _z_out[0::stride]
        return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=runs)
# z_out = sim_orig(R, V)
# print(z_out)
# def simGT():
# print("Simulating ground truth ...")
# _traj = sim_orig(R, V)
# metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
# savefile("gt_trajectories.pkl",
# _traj, metadata=metadata)
# return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
    def dist(*args):
        """Euclidean length of the displacement between two points."""
        disp = displacement(*args)
        return jnp.sqrt(jnp.square(disp).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
    def acceleration_fn(params, graph):
        # Graph-network evaluation (fgn1.cal_l) with a single message pass.
        acc = fgn1.cal_l(params, graph, mpass=1)
        return acc
    def acc_fn(species):
        """Build an `apply(R, V, params)` closure over a reusable pendulum
        graph; node features and edge distances are refreshed on every call."""
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={"dij": dij},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})
        def apply(R, V, params):
            # Mutate the captured graph in place, then evaluate the model.
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                     )
            return acceleration_fn(params, state_graph)
        return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["H"])
params = loadfile(f"trained_model.dil", trained=useN)[0]
    def nndrag(v, params):
        """Learned drag: always opposes the velocity (magnitude from an MLP)."""
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
zdot_model, lamda_force = get_zdot_lambda(N, dim, hamiltonian=Hmodel, drag=None, constraints=constraints)
zdot_model = jit(zdot_model)
    def zdot_model_func(z, t, params):
        """ODE right-hand side using the learned Hamiltonian (`t` unused)."""
        x, p = jnp.split(z, 2)
        return zdot_model(x, p, params)
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
print(sim_model(R,V).shape)
print(sim_orig(R,V).shape)
################################################
############## forward simulation ##############
################################################
    def norm(a):
        """Per-frame L2 norm: flattens all but the leading axis of `a`."""
        a2 = jnp.square(a)
        n = len(a2)
        a3 = a2.reshape(n, -1)
        return jnp.sqrt(a3.sum(axis=1))
    def RelErr(ya, yp):
        """Symmetric relative error between trajectories `ya` and `yp`."""
        return norm(ya-yp) / (norm(ya) + norm(yp))
    def Err(ya, yp):
        """Signed difference."""
        return ya-yp
    def AbsErr(*args):
        """Absolute difference."""
        return jnp.abs(Err(*args))
    def caH_energy_fn(lag=None, params=None):
        """Return a function mapping a trajectory to per-frame energy columns
        [PE, KE, H, KE+PE], with PE recovered as H minus the kinetic term."""
        def fn(states):
            KE = vmap(kin_energy)(states.velocity)
            H = vmap(lag, in_axes=(0, 0, None)
                     )(states.position, states.velocity, params)
            PE = (H - KE)
            # return jnp.array([H]).T
            return jnp.array([PE, KE, H, KE+PE]).T
        return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
# Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
# Es_pred_fn(pred_traj)
    def net_force_fn(force=None, params=None):
        """Return a function extracting the momentum-derivative (force) half
        of the phase-space derivative `force` along a trajectory."""
        def fn(states):
            zdot_out = vmap(force, in_axes=(0, 0, None))(
                states.position, states.velocity, params)
            _, force_out = jnp.split(zdot_out, 2, axis=1)
            return force_out
        return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
skip=0
t=0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# z_out, _ = dataset_states[ind]
# xout, pout = jnp.split(z_out, 2, axis=1)
# R = xout[0]
# V = pout[0]
# try:
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
nexp["simulation_time"] += [end-start]
t += end -start
if saveovito:
if ind<1:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<1:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 1, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
# # axs[0].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs[0])
# ylabel("Energy", ax=axs[0])
# title = f"(HGNN) {N}-Pendulum Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(
# " ", "-")+f"_{key}.png")) # , dpi=500)
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
# , dpi=500)
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 1, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[0].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
ylabel("Energy", ax=axs[0])
title = f"HGNN {N}-Pendulum Exp {ind}"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png")) # , dpi=500)
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
herrrr = RelErr(H, Hhat)
herrrr = herrrr.at[0].set(herrrr[1])
nexp["Herr"] += [herrrr]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
zerrrr = RelErr(actual_traj.position, pred_traj.position)
zerrrr = zerrrr.at[0].set(zerrrr[1])
nexp["Zerr"] += [zerrrr]
# actual_traj.velocity[1:]
# print(actual_traj.velocity[1:], pred_traj.velocity[1:])
# perrrr = RelErr(actual_traj.velocity[1:], pred_traj.velocity[1:])
# perrrr = perrrr.at[0].set(perrrr[1])
# nexp["Perr"] += [perrrr]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [ac_mom - pr_mom]
if ind%1==0:
savefile("trajectories.pkl", trajectories)
savefile(f"error_parameter.pkl", nexp)
# except:
# print("skipped")
# if skip < 20:
# skip += 1
print(f'skipped loop: {skip}')
    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        """Plot per-trajectory curves of `key` (optionally against `key2`),
        then a geometric-mean curve with a +/- 2 sigma (log-space) band."""
        print(f"Plotting err for {key}")
        fig, axs = panel(1, 1)
        filepart = f"{key}"
        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
                filepart = f"{filepart}_{key2}"
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)
        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError_{filepart}.png"))  # , dpi=500)
        # Geometric mean across experiments with a 2-sigma band (log space).
        fig, axs = panel(1, 1)
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)
        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std_{key}.png"))  # , dpi=500)
make_plots(
nexp, "Zerr", yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-pendulum-zerr/chgn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/chgn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-simulation-time/chgn.txt", [t/maxtraj], delimiter = "\n")
# main(N = 4)
main(N = 5)
| 20,725 | 32.537217 | 248 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-data.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
#from shadow.plot import *
#from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from psystems.npendulum import PEF, get_init, hconstraints
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import fire
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def main(N=3, dim=2, saveat=100, nconfig=100, ifdrag=0, runs=100):
tag = f"{N}-Pendulum-data"
seed = 42
out_dir = f"../noisy_data"
rname = False
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0_10000"
filename_prefix = f"{out_dir}/{tag}/{rstring}/"
def _filename(name):
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
    def displacement(a, b):
        """Free-space displacement vector from `b` to `a`."""
        return a - b
    def shift(R, dR, V):
        """Advance positions by `dR`; velocities pass through unchanged."""
        return R+dR, V
    def OUT(f):
        """Decorator: route a function's first argument (a file name)
        through `_filename` so all I/O lands in the run directory."""
        @wraps(f)
        def func(file, *args, **kwargs):
            return f(_filename(file), *args, **kwargs)
        return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
init_confs = [get_init(N, dim=dim) for i in range(nconfig)]
print("Saving init configs...")
savefile(f"initial-configs_{ifdrag}.pkl", init_confs)
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
dt = 1.0e-5
stride = 1000
lr = 0.001
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
    def Lactual(x, v, params):
        """Ground-truth Lagrangian L = T - V (params unused)."""
        return kin_energy(v) - pot_energy_orig(x)
    def constraints(x, v, params):
        """Jacobian of the rigid-rod constraints h(x); `v`, `params` unused."""
        return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
    def external_force(x, v, params):
        # NOTE(review): uses jax.ops.index_update (removed in newer JAX) and
        # the free variable R, which is only bound later in the data loop;
        # not passed to the acceleration builder below — confirm it is dead.
        F = 0*R
        F = jax.ops.index_update(F, (1, 1), -1.0)
        return F.reshape(-1, 1)
    # Ground-truth drag model selected by the `ifdrag` flag.
    if ifdrag == 0:
        print("Drag: 0.0")
        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")
        def drag(x, v, params):
            return -0.1*v.reshape(-1, 1)
    # Constrained Euler-Lagrange accelerations from the ground-truth Lagrangian.
    acceleration_fn_orig = lnn.accelerationFull(N, dim,
                                                lagrangian=Lactual,
                                                non_conservative_forces=drag,
                                                constraints=constraints,
                                                external_force=None)
    def force_fn_orig(R, V, params, mass=None):
        """Acceleration, or force (= acceleration * mass) when `mass` is given."""
        if mass is None:
            return acceleration_fn_orig(R, V, params)
        else:
            return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
    @jit
    def forward_sim(R, V):
        # Integrate one trajectory, saving every `stride`-th of runs*stride steps.
        return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
    @jit
    def v_forward_sim(init_conf):
        # Vectorised simulation over a batch of (R, V) initial conditions.
        return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
################################################
############### DATA GENERATION ################
################################################
print("Data generation ...")
ind = 0
dataset_states = []
t = 0.0   # accumulated wall-clock simulation time
for R, V in init_confs:
    ind += 1
    print(f"{ind}/{len(init_confs)}", end='\r')
    start = time.time()
    model_states = forward_sim(R, V)
    end = time.time()
    t += end - start
    dataset_states += [model_states]
    # Periodic checkpoint of the dataset every `saveat` trajectories.
    if ind % saveat == 0:
        print(f"{ind} / {len(init_confs)}")
        print("Saving datafile...")
        savefile(f"model_states_{ifdrag}.pkl", dataset_states)
# Final save (covers trajectories after the last checkpoint).
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
def cal_energy(states):
    # Per-frame energies of a trajectory: columns are [PE, KE, Lagrangian, total].
    KE = vmap(kin_energy)(states.velocity)
    PE = vmap(pot_energy_orig)(states.position)
    L = vmap(Lactual, in_axes=(0, 0, None))(
        states.position, states.velocity, None)
    return jnp.array([PE, KE, L, KE+PE]).T
print("plotting energy...")
ind = 0
# Plot energies for at most the first 10 trajectories.
for states in dataset_states:
    ind += 1
    Es = cal_energy(states)
    fig, axs = plt.subplots(1, 1, figsize=(20, 5))
    plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
    plt.legend(bbox_to_anchor=(1, 1))
    plt.ylabel("Energy")
    plt.xlabel("Time step")
    title = f"{N}-Pendulum random state {ind} {ifdrag}"
    plt.title(title)
    plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
    if ind >= 10:
        break
# Record mean wall-clock time per trajectory.
np.savetxt("../3-pendulum-simulation-time/simulation.txt", [t/nconfig], delimiter = "\n")
fire.Fire(main)
| 5,891 | 28.024631 | 97 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-GNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in *namespace* bound (by identity) to *obj*."""
    return [key for key, val in namespace.items() if val is obj]

def pprint(*args, namespace=globals()):
    """Print each argument as '<name>: <value>', looking names up in *namespace*."""
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Per-snapshot columns parsed from the peridynamics dump files.
# NOTE(review): `id` and `type` shadow Python builtins — kept for compatibility.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
# Read 251 evenly spaced snapshots (steps 0..5000); each file is whitespace-
# separated columns packed into a single CSV column, hence the str.split below.
for num in (np.linspace(0,5000,251).astype('int')):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
    split_df = df.iloc[1:,0].str.split(expand=True)
    # Column layout: 0-2 accel, 3 damage, 4 id, 5 mass, 6-8 position,
    # 9 type, 10-12 velocity, 13 volume.
    acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    damage += [np.array(split_df[[3]]).astype('float64')]
    id += [np.array(split_df[[4]]).astype('float64')]
    mass += [np.array(split_df[[5]]).astype('float64')]
    position += [np.array(split_df[[6,7,8]]).astype('float64')]
    type += [np.array(split_df[[9]]).astype('float64')]
    velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
    volume += [np.array(split_df[[13]]).astype('float64')]
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Reference (undeformed) positions, rescaled by 1.1 — presumably the lattice
# stretch used during data generation; TODO confirm.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement between two coordinate arrays (plain difference a - b)."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
# Build the (static) neighbourhood graph from the reference configuration.
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
# Run/evaluation settings (flat script — these act as globals).
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=100          # frames per predicted trajectory
semilog=1
maxtraj=10        # number of trajectories to evaluate
plotthings=False
redo=0
mpass=1
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
#        dt, stride, lr, ifdrag, batch_size,
#        namespace=locals())
PSYS = f"peridynamics"
TAG = f"GNODE"
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    """Build (and create the directory for) an output path of the form
    {out_dir}/{psys}-{tag}/{rstring}/{stem}_{ifdrag}[_{trainm}].{ext}.

    Side effect: os.makedirs on the parent directory.
    """
    # Data files are keyed only by ifdrag; model files also by trainm.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    # `trained` lets results trained on another system be filed separately.
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[0]}"
    else:
        psys = PSYS
    # Splice `part` between the stem and the extension.
    name = ".".join(name.split(".")[:-1]) + \
        part + name.split(".")[-1]
    # Run directory: timestamp when rname is truthy, else "0" (optionally
    # suffixed by the withdata marker).
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Integrator shift: move positions by dR and keep velocities as-is."""
    new_R = R + dR
    return new_R, V
def OUT(f):
    """Decorator: resolve `f`'s first argument via _filename(..., tag=tag)
    before delegating; `tag` defaults to the module-level TAG."""
    @wraps(f)
    def inner(file, *args, tag=TAG, **kwargs):
        path = _filename(file, tag=tag)
        return f(path, *args, **kwargs)
    return inner
# Project I/O helpers with paths resolved through _filename().
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
#     graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
#     raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
# Ground-truth trajectories used as the evaluation reference.
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []
import pandas as pd
# Same packed-column format as the training dump (see ingest loop above).
for num in range(1000):
    dataf_name = f"env_1_step_{num}.jld.data"
    df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
    split_df = df.iloc[1:,0].str.split(expand=True)
    origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
    origin_mass += [np.array(split_df[[5]]).astype('float64')]
    origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
    origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)
################################################
################### ML Model ###################
################################################
# def graph_force_fn(params, graph):
#     _GForce = a_gnode_cal_force_q_qdot(params, graph, eorder=None,
#                                        useT=True)
#     return _GForce
R, V = Rs[0], Vs[0]
# Convert the make_graph dict into jraph.GraphsTuple kwargs: drop the extra
# keys, add `globals`, and remove self-loop edges (sender == receiver).
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
mask = my_graph0_disc['senders'] != my_graph0_disc['receivers']
my_graph0_disc.update({"senders": my_graph0_disc['senders'][mask]})
my_graph0_disc.update({"receivers": my_graph0_disc['receivers'][mask]})
my_graph0_disc.update({"n_edge": mask.sum()})
graph = jraph.GraphsTuple(**my_graph0_disc)
def graph_force_fn(params, graph):
    """Per-node force prediction from the graph using the CDGNODE head
    (kinetic-energy branch enabled, no edge ordering)."""
    return a_cdgnode_cal_force_q_qdot(params, graph, eorder=None, useT=True)
def _force_fn(species):
    # Close over the static graph; `species` is unused here (topology is fixed).
    state_graph = graph
    def apply(R, V, params):
        # Mutates the shared graph's node arrays in place before evaluation.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = _force_fn(species)
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
acceleration_fn_model = F_q_qdot
# Batched versions: over frames, and over trajectories of frames.
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
#v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
#                                            constraints=None)
def force_fn_model(R, V, params, mass=None):
    # NOTE(review): both branches are identical because the mass scaling is
    # commented out — the `mass` argument is effectively ignored.
    if mass is None:
        return acceleration_fn_model(R, V, params)
    else:
        return acceleration_fn_model(R, V, params)#*mass.reshape(-1, 1)
        # return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
# Load the best (lowest-loss) trained parameters.
params = loadfile(f"perignode_trained_model_low.dil")[0]
def get_forward_sim(params=None, force_fn=None, runs=10):
    # Jitted rollout of `runs` saved frames via the project integrator.
    @jit
    def fn(R, V):
        return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
    return fn
sim_model = get_forward_sim(params=params, force_fn=force_fn_model, runs=runs)
# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-row Euclidean norm: flatten everything but the leading axis of
    *a* and return the L2 norm of each row (shape (len(a),))."""
    flat_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flat_sq.sum(axis=1))
def RelErr(ya, yp):
    """Relative error ||ya - yp|| / (||ya|| + ||yp||), per frame."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed error: actual minus predicted."""
    delta = ya - yp
    return delta
def AbsErr(ya, yp):
    """Magnitude of the error per frame: ||ya - yp||."""
    residual = ya - yp
    return norm(residual)
# Accumulators for per-trajectory predictions and error curves.
nexp = {
    "z_pred": [],
    "z_actual": [],
    "Zerr": [],
    "AbsZerr":[],
    "Perr": [],
    "AbsPerr": []
}
import time
t=0.0   # accumulated rollout wall-clock time
for ind in range(maxtraj):
    print(f"Simulating trajectory {ind}/{maxtraj} ...")
    # Reference data is stored as consecutive `runs`-frame trajectories;
    # start each rollout from the first frame of trajectory `ind`.
    R, V = Rs[runs*ind], Vs[runs*ind]
    start = time.time()
    pred_traj = sim_model(R, V)
    end = time.time()
    t+= end - start
    # ll = [state for state in NVEStates(pred_traj)]
    # save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
    # if ind>20:
    #     break
    sim_size = runs
    nexp["z_pred"] += [pred_traj.position]
    nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
    nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
    # nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
    # Absolute error of the norms (not the norm of the error) — kept as-is.
    nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]
    # Squared total-momentum magnitude per frame (unit masses assumed).
    ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
    pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
    # nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
    # Momentum error skips the first 6 frames — presumably to drop the
    # transient; TODO confirm.
    nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])])
    nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)])
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err"):
    """Plot the error curves stored under nexp[key]: one figure with every
    trajectory's curve, and one with the geometric mean ± 2 (log-)std band.

    NOTE: the parameter `key` shadows the global PRNG key.
    """
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    for i in range(len(nexp[key])):
        if semilog:
            plt.semilogy(nexp[key][i].flatten())
        else:
            plt.plot(nexp[key][i].flatten())
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_{key}.png"))
    # Second figure: statistics computed in log space across trajectories.
    fig, axs = panel(1, 1)
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
# Position and momentum relative-error plots.
make_plots(nexp, "Zerr",
           yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
           yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")
# Mean rollout wall-clock time per trajectory.
np.savetxt(f"../peridynamics-simulation-time/gnode.txt", [t/maxtraj], delimiter = "\n")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
#            yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 11,310 | 27.854592 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-LGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Collect the names in *namespace* whose value is *obj* (identity test)."""
    matches = []
    for name, value in namespace.items():
        if value is obj:
            matches.append(name)
    return matches

def pprint(*args, namespace=globals()):
    """Echo each argument as '<name>: <value>' via namestr lookup."""
    for item in args:
        label = namestr(item, namespace)[0]
        print(f"{label}: {item}")
def main(N=5, epochs=10000, seed=42, rname=False, saveat=10,
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_noisy_data = 1):
    """Train an LGNN on the N-body spring system dataset.

    Key flags: ifdrag selects the drag model, trainm toggles a learnable
    kinetic energy, ifDataEfficiency/if_noisy_data select the output tree.
    """
    # Data-efficiency mode: dataset size comes from the command line.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(N, epochs, seed, rname,
           dt, stride, lr, ifdrag, batch_size,
           namespace=locals())
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-Spring"
    TAG = f"lgnn"
    # Output root depends on the experiment variant.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"
    def _filename(name, tag=TAG):
        # Resolve an output path; data files always come from ../results.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
def displacement(a, b):
    """Plain difference displacement between two configurations."""
    return a - b

def shift(R, dR, V):
    """Shift positions by dR, leaving velocities untouched."""
    moved = R + dR
    return moved, V
def OUT(f):
    """Decorator routing `f`'s first argument through _filename(tag=...)."""
    @wraps(f)
    def dispatch(file, *args, tag=TAG, **kwargs):
        target = _filename(file, tag=tag)
        return f(target, *args, **kwargs)
    return dispatch
# Project I/O helpers with paths resolved through _filename().
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
    dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
    # NOTE(review): bare except hides the real failure mode; kept as-is.
    raise Exception("Generate dataset first.")
if datapoints is not None:
    dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
    f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Flatten the trajectory list into per-frame (R, V, F) arrays.
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
    # Corrupt the data: one N(0,1) scalar drawn per frame and broadcast
    # over all particles/dimensions of that frame.
    Rs = np.array(Rs)
    Fs = np.array(Fs)
    Vs = np.array(Vs)
    np.random.seed(100)
    for i in range(len(Rs)):
        Rs[i] += np.random.normal(0,1,1)
        Vs[i] += np.random.normal(0,1,1)
        Fs[i] += np.random.normal(0,1,1)
    Rs = jnp.array(Rs)
    Fs = jnp.array(Fs)
    Vs = jnp.array(Vs)
# Shuffle frames, then 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# Graph topology: square grid or chain of springs.
if grid:
    print("It's a grid?")
    a = int(np.sqrt(N))
    senders, receivers = get_connections(a, a)
    eorder = edge_order(len(senders))
else:
    print("It's a random?")
    # senders, receivers = get_fully_connected_senders_and_receivers(N)
    print("Creating Chain")
    _, _, senders, receivers = chain(N)
    eorder = edge_order(len(senders))
# Embedding / hidden sizes for the LGNN MLPs.
Ef = 1  # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
hidden = 5
nhidden = 2
def get_layers(in_, out_):
    # MLP layer sizes: input, `nhidden` hidden layers of width `hidden`, output.
    return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
    # Convenience constructor for a standard-shape MLP.
    return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# Per-module parameters of the LGNN (edge/node embeddings, message passing,
# potential heads ff1-ff3, kinetic-energy head ke).
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)
fb_params = mlp(Ef, Eei, key)
fv_params = mlp(Nei+Eei, Nei, key)
fe_params = mlp(Nei, Eei, key)
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key)
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
Lparams = dict(fb=fb_params,
               fv=fv_params,
               fe=fe_params,
               ff1=ff1_params,
               ff2=ff2_params,
               ff3=ff3_params,
               fne=fne_params,
               fneke=fneke_params,
               ke=ke_params)
# Lagrangian L = T - V from the graph; kinetic energy either learned or analytic.
if trainm:
    print("kinetic energy: learnable")
    def L_energy_fn(params, graph):
        g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                            useT=True, useonlyedge=True)
        return T - V
else:
    print("kinetic energy: 0.5mv^2")
    kin_energy = partial(lnn._T, mass=masses)
    def L_energy_fn(params, graph):
        g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
                            useT=True, useonlyedge=True)
        return kin_energy(graph.nodes["velocity"]) - V
# Smoke-test graph built from the first training frame.
R, V = Rs[0], Vs[0]
state_graph = jraph.GraphsTuple(nodes={
    "position": R,
    "velocity": V,
    "type": species,
},
    edges={},
    senders=senders,
    receivers=receivers,
    n_node=jnp.array([N]),
    n_edge=jnp.array([senders.shape[0]]),
    globals={})
# Trigger one evaluation to validate shapes before training.
L_energy_fn(Lparams, state_graph)
def energy_fn(species):
    # Build a reusable graph and return an (R, V, params) -> L closure that
    # mutates the graph's node arrays in place.
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return L_energy_fn(params, state_graph)
    return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
def nndrag(v, params):
    # Learned drag: always anti-parallel to v (magnitude from a small MLP).
    return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
    print("Drag: 0.0")
    def drag(x, v, params):
        return 0.0
elif ifdrag == 1:
    print("Drag: nn")
    def drag(x, v, params):
        # Apply the learned drag component-wise over the flattened velocity.
        return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
    params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# Acceleration from the learned Lagrangian via Euler-Lagrange (no constraints).
acceleration_fn_model = accelerationFull(N, dim,
                                         lagrangian=Lmodel,
                                         constraints=None,
                                         non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
    # MSE between predicted and target accelerations over a batch.
    pred = v_acceleration_fn_model(Rs, Vs, params)
    return MSE(pred, Fs)
def gloss(*args):
    return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
    """ Compute the gradient for a batch and update the parameters """
    value, grads_ = gloss(params, *data)
    opt_state = opt_update(i, grads_, opt_state)
    return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
    # Jitted wrapper unpacking the (opt_state, params, _) tuple.
    return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
    # Sanitize gradients (NaN -> 0) and clip to [-1000, 1000] before Adam.
    grads_ = jax.tree_map(jnp.nan_to_num, grads_)
    grads_ = jax.tree_map(
        partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
    return opt_update_(i, grads_, opt_state)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
# Pre-batch the training set once (rectangular batches).
bRs, bVs, bFs = batching(Rs, Vs, Fs,
                         size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []        # train loss per epoch
ltarray = []       # test loss per epoch
last_loss = 1000   # best train loss seen so far (for *_low checkpoint)
start = time.time()
train_time_arr = []
for epoch in range(epochs):
    l = 0.0
    count = 0
    for data in zip(bRs, bVs, bFs):
        optimizer_step += 1
        opt_state, params, l_ = step(
            optimizer_step, (opt_state, params, 0), *data)
        l += l_
        count += 1
    # opt_state, params, l_ = step(
    #     optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
    l = l/count   # mean batch loss for this epoch
    if epoch % 1 == 0:
        larray += [l]
        ltarray += [loss_fn(params, Rst, Vst, Fst)]
        print(
            f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
    # Periodic checkpoint plus best-so-far checkpoint.
    if epoch % saveat == 0:
        metadata = {
            "savedat": epoch,
            "mpass": mpass,
            "grid": grid,
            "ifdrag": ifdrag,
            "trainm": trainm,
        }
        savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                 params, metadata=metadata)
        savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                 (larray, ltarray), metadata=metadata)
        if last_loss > larray[-1]:
            last_loss = larray[-1]
            savefile(f"trained_model_{ifdrag}_{trainm}_low.dil",
                     params, metadata=metadata)
    now = time.time()
    train_time_arr.append((now - start))   # cumulative time since training start
# Loss curves (log scale) and final model/loss checkpoints.
fig, axs = plt.subplots(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
    "savedat": epoch,
    "mpass": mpass,
    "grid": grid,
    "ifdrag": ifdrag,
    "trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
         params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
         (larray, ltarray), metadata=metadata)
# Timing/loss traces for the benchmark comparisons (default runs only).
if (ifDataEfficiency == 0):
    np.savetxt(f"../5-spring-training-time/lgnn.txt", train_time_arr, delimiter = "\n")
    np.savetxt(f"../5-spring-training-loss/lgnn-train.txt", larray, delimiter = "\n")
    np.savetxt(f"../5-spring-training-loss/lgnn-test.txt", ltarray, delimiter = "\n")

main()
| 14,856 | 29.382413 | 173 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-FGNODE.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
#import matplotlib.pyplot as plt
from shadow.plot import *
# from sklearn.metrics import r2_score
# from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints, pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Names in *namespace* referring (by identity) to *obj*."""
    return [n for n, v in namespace.items() if v is obj]

def pprint(*args, namespace=globals()):
    """Print '<name>: <value>' for each argument, resolving names via namestr."""
    for entry in args:
        print(f"{namestr(entry, namespace)[0]}: {entry}")
def wrap_main(f):
    """Wrap *f* so each call echoes its positional and keyword arguments,
    then forwards them to *f* together with config=(args, kwargs)."""
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print("Args: ")
        for positional in args:
            print(positional)
        print("KwArgs: ")
        for name, value in kwargs.items():
            print(name, ":", value)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10, dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_noisy_data = 1):
    """CLI entry point: echo the configuration via wrap_main and run main().

    Fix: the original forwarded a hard-coded ``ifDataEfficiency = 0`` to
    main(), silently ignoring the caller's ``ifDataEfficiency`` argument;
    it is now passed through like every other parameter.
    """
    return wrap_main(main)(N=N, epochs=epochs, seed=seed, rname=rname, error_fn=error_fn, mpass=mpass,
                           dt=dt, ifdrag=ifdrag, trainm=trainm, stride=stride, lr=lr, datapoints=datapoints,
                           batch_size=batch_size, saveat=saveat, ifDataEfficiency=ifDataEfficiency, if_noisy_data=if_noisy_data)
def main(N=3, epochs=10000, seed=42, rname=False, error_fn="L2error", mpass=1, saveat=10,
         dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, ifDataEfficiency = 0, if_noisy_data=1):
    """Train an FGNODE model on the N-pendulum dataset.

    `config` carries the (args, kwargs) tuple injected by wrap_main.
    """
    # Data-efficiency mode: dataset size comes from the command line.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    randfilename = datetime.now().strftime(
        "%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-Pendulum"
    TAG = f"fgnode"
    # Output root depends on the experiment variant.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"
    def _filename(name, tag=TAG):
        # Resolve an output path; data files always come from ../results.
        rstring = randfilename if (rname and (tag != "data")) else (
            "0" if (tag == "data") or (withdata == None) else f"{withdata}")
        if (ifDataEfficiency == 1):
            rstring = "0_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename
def displacement(a, b):
    """Displacement vector from b to a."""
    return a - b

def shift(R, dR, V):
    """Translate positions by dR; velocities are returned unchanged."""
    updated = R + dR
    return updated, V
def OUT(f):
    """Decorator: resolve the first argument through _filename(tag=...)."""
    @wraps(f)
    def routed(file, *args, tag=TAG, **kwargs):
        return f(_filename(file, tag=tag), *args, **kwargs)
    return routed
# Project I/O helpers with paths resolved through _filename().
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
# Persist the run configuration (injected by wrap_main).
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
    dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
    # NOTE(review): bare except hides the real failure mode; kept as-is.
    raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
    dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
# Flatten the trajectory list into per-frame (R, V, F) arrays.
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
    # Corrupt the data: one N(0,1) scalar per frame, broadcast over the frame.
    Rs = np.array(Rs)
    Fs = np.array(Fs)
    Vs = np.array(Vs)
    np.random.seed(100)
    for i in range(len(Rs)):
        Rs[i] += np.random.normal(0,1,1)
        Vs[i] += np.random.normal(0,1,1)
        Fs[i] += np.random.normal(0,1,1)
    Rs = jnp.array(Rs)
    Fs = jnp.array(Fs)
    Vs = jnp.array(Vs)
# Shuffle frames, then 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
#     return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
    # Jacobian of the holonomic pendulum constraints w.r.t. flattened
    # positions (hconstraints comes from psystems.npendulum).
    return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
#     F = 0*R
#     F = jax.ops.index_update(F, (1, 1), -1.0)
#     return F.reshape(-1, 1)
# def drag(x, v, params):
#     return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
#                                             lagrangian=Lactual,
#                                             non_conservative_forces=None,
#                                             constraints=constraints,
#                                             external_force=None)
# def force_fn_orig(R, V, params, mass=None):
#     if mass is None:
#         return acceleration_fn_orig(R, V, params)
#     else:
#         return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
#     return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
# Pendulum chain topology and edge ordering.
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# FGN architecture sizes: edge/node raw feature dims and embedding widths.
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
    ee_params=initialize_mlp([edgesize, ee], key),
    ne_params=initialize_mlp([nodesize, ne], key),
    e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
    n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
    g_params=initialize_mlp([ne, *hidden_dim, 1], key),
    acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
    mass_params = initialize_mlp([ne, 5, 1], key),
)
# if trainm:
#     print("kinetic energy: learnable")
#     def L_energy_fn(params, graph):
#         g, V, T = cal_graph(params, graph, eorder=eorder,
#                             useT=True)
#         return T - V
# else:
#     print("kinetic energy: 0.5mv^2")
#     kin_energy = partial(lnn._T, mass=masses)
#     def L_energy_fn(params, graph):
#         g, V, T = cal_graph(params, graph, eorder=eorder,
#                             useT=True)
#         return kin_energy(graph.nodes["velocity"]) - V
# Template graph built from the first training frame.
R, V = Rs[0], Vs[0]
species = jnp.array(species).reshape(-1, 1)
def dist(*args):
    # Euclidean length of the displacement between two points.
    disp = displacement(*args)
    return jnp.sqrt(jnp.square(disp).sum())
# Pairwise sender-receiver distances used as the edge feature.
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
    "position": R,
    "velocity": V,
    "type": species,
},
    edges={"dij": dij},
    senders=senders,
    receivers=receivers,
    n_node=jnp.array([N]),
    n_edge=jnp.array([senders.shape[0]]),
    globals={})
def acceleration_fn(params, graph):
    # Per-node acceleration from the FGN with one message-passing round.
    acc = fgn.cal_cacceleration(params, graph, mpass=1)
    return acc
def acc_fn(species):
    # Build a reusable graph and return an (R, V, params) -> acc closure;
    # node and edge features are mutated in place on every call.
    senders, receivers = [np.array(i)
                          for i in pendulum_connections(R.shape[0])]
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={"dij": dij},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    def apply(R, V, params):
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        # Recompute edge distances for the current positions.
        state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
                                 )
        return acceleration_fn(params, state_graph)
    return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["L"])
# Wrap the raw network acceleration in the GNODE formulation (no constraints).
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
                                           constraints=None,
                                           non_conservative_forces=None)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
v_v_acceleration_fn_model = vmap(v_acceleration_fn_model, in_axes=(0, 0, None))
params = {"L": Lparams}
# Debug probe: one forward evaluation with the fresh parameters.
print("here")
print(acceleration_fn_model(R, V, params))
print("dom")
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
#v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
# LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
# grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs}: train={larray[-1]}, test={ltarray[-1]}")
# print_loss()
start = time.time()
train_time_arr = []
for epoch in range(epochs):
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
if epoch % 1 == 0:
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1,1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
now = time.time()
train_time_arr.append((now - start))
plt.clf()
fig, axs = panel(1,1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if (ifDataEfficiency == 0):
np.savetxt("../3-pendulum-training-time/fgnode.txt", train_time_arr, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/fgnode-train.txt", larray, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/fgnode-test.txt", ltarray, delimiter = "\n")
Main()
| 16,215 | 30.610136 | 216 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-HGNN.py | ################################################
################## IMPORT ######################
################################################
from posixpath import split
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import ode
# from shadow.plot import *
# from shadow.plot import panel
import matplotlib.pyplot as plt
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """Return every key in *namespace* whose value is the exact object *obj* (identity match)."""
    matches = []
    for key in namespace:
        if namespace[key] is obj:
            matches.append(key)
    return matches
def pprint(*args, namespace=globals()):
    """Print each value as "<name>: <value>", recovering <name> from *namespace* by identity."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
def main(N = 4, epochs = 10000, seed = 42, rname = False, saveat = 100, dt = 1.0e-3, stride = 100, ifdrag = 0, trainm = 1, grid = False, mpass = 1, lr = 0.001, withdata = None, datapoints = None, batch_size = 100, ifDataEfficiency = 0, if_noisy_data = 0):
    """Train a Hamiltonian graph neural network (HGNN) on pre-generated N-body data.

    Loads saved trajectories, builds a graph-based Hamiltonian model over a
    fully connected particle graph, fits it to state derivatives with Adam,
    and periodically saves the model parameters and loss curves.
    """
    # Data-efficiency experiments take the number of training points from argv.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(N, epochs, seed, rname, dt, lr, ifdrag, batch_size, namespace=locals())
    randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-body"
    TAG = f"hgnn"
    # Output root depends on the experiment flavour.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Build (and create on disk) the run-specific output path for `name`.
        rstring = "2" if (tag == "data") else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route a file-based I/O helper through _filename().
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func

    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        # NOTE(review): bare except hides the underlying I/O error; consider
        # narrowing and chaining (`raise ... from err`).
        raise Exception("Generate dataset first.")
    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]
    model_states = dataset_states[0]
    z_out, zdot_out = model_states
    print(f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
    # Each state stacks positions and momenta: first N rows are q, last N are p.
    N2, dim = z_out.shape[-2:]
    N = N2//2
    array = jnp.array([jnp.array(i) for i in dataset_states])
    Zs = array[:, 0, :, :, :]
    Zs_dot = array[:, 1, :, :, :]
    Zs = Zs.reshape(-1, N2, dim)
    Zs_dot = Zs_dot.reshape(-1, N2, dim)
    if (if_noisy_data == 1):
        # Perturb every sample with a Gaussian draw (a single scalar broadcast
        # over the sample) using a fixed seed for reproducibility.
        Zs = np.array(Zs)
        Zs_dot = np.array(Zs_dot)
        np.random.seed(100)
        for i in range(len(Zs)):
            Zs[i] += np.random.normal(0,1,1)
            Zs_dot[i] += np.random.normal(0,1,1)
        Zs = jnp.array(Zs)
        Zs_dot = jnp.array(Zs_dot)
    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Zs), len(Zs), replace=False)
    allZs = Zs[mask]
    allZs_dot = Zs_dot[mask]
    Ntr = int(0.75*len(Zs))
    Nts = len(Zs) - Ntr
    Zs = allZs[:Ntr]
    Zs_dot = allZs_dot[:Ntr]
    Zst = allZs[Ntr:]
    Zst_dot = allZs_dot[Ntr:]

    ################################################
    ################### ML Model ###################
    ################################################
    # Fully connected interaction graph over the N particles.
    senders, receivers = get_fully_connected_senders_and_receivers(N)
    eorder = get_fully_edge_order(N)
    Ef = 1  # edge feature dim (e_ij)
    Nf = dim
    Oh = 1
    Eei = 5  # edge embedding dim
    Nei = 5  # node embedding dim
    hidden = 5
    nhidden = 2

    def get_layers(in_, out_):
        # MLP layer sizes: input, `nhidden` hidden layers, output.
        return [in_] + [hidden]*nhidden + [out_]

    def mlp(in_, out_, key, **kwargs):
        return initialize_mlp(get_layers(in_, out_), key, **kwargs)

    # Per-module MLP parameters for the graph network.
    fneke_params = initialize_mlp([Oh, Nei], key)
    fne_params = initialize_mlp([Oh, Nei], key)
    fb_params = mlp(Ef, Eei, key)
    fv_params = mlp(Nei+Eei, Nei, key)
    fe_params = mlp(Nei, Eei, key)
    ff1_params = mlp(Eei, 1, key)
    ff2_params = mlp(Nei, 1, key)
    ff3_params = mlp(dim+Nei, 1, key)
    ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
    Hparams = dict(fb=fb_params,
                   fv=fv_params,
                   fe=fe_params,
                   ff1=ff1_params,
                   ff2=ff2_params,
                   ff3=ff3_params,
                   fne=fne_params,
                   fneke=fneke_params,
                   ke=ke_params)

    def H_energy_fn(params, graph):
        # Hamiltonian H = T + V computed by the graph network.
        g, V, T = cal_graph(params, graph, eorder=eorder,
                            useT=True)
        return T + V

    R, V = jnp.split(Zs[0], 2, axis=0)
    species = jnp.zeros(N, dtype=int)
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})
    # Warm-up call (sanity-check that the graph/parameters are consistent).
    H_energy_fn(Hparams, state_graph)

    def energy_fn(species):
        # Close over a template graph; `apply` swaps in fresh positions/velocities.
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return H_energy_fn(params, state_graph)
        return apply

    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Hmodel(x, v, params):
        return apply_fn(x, v, params["H"])

    params = {"H": Hparams}

    def nndrag(v, params):
        # NOTE(review): `models` is not imported by this script; this path is
        # only reached when ifdrag == 1 and would raise NameError — confirm.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, v, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, v, params):
            return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
        params["drag"] = initialize_mlp([1, 5, 5, 1], key)

    # State derivative from the learned Hamiltonian (unconstrained system).
    zdot_model, lamda_force_model = get_zdot_lambda(
        N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
    v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))

    ################################################
    ################## ML Training #################
    ################################################
    @jit
    def loss_fn(params, Rs, Vs, Zs_dot):
        # MSE between predicted and reference state derivatives.
        pred = v_zdot_model(Rs, Vs, params)
        return MSE(pred, Zs_dot)

    def gloss(*args):
        return value_and_grad(loss_fn)(*args)

    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)

    # `optimizers` is presumably re-exported by one of the star imports
    # (src.utils / src.md) — TODO confirm.
    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @ jit
    def opt_update(i, grads_, opt_state):
        # Sanitize gradients (NaN -> 0) and clip before the Adam update.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
        return opt_update_(i, grads_, opt_state)

    def batching(*args, size=None):
        # Split each array into near-equal batches of roughly `size` samples;
        # picks whichever of two candidate batch counts covers more data.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs

    Rs, Vs = jnp.split(Zs, 2, axis=1)
    Rst, Vst = jnp.split(Zst, 2, axis=1)
    bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                                 size=min(len(Rs), batch_size))
    print(f"training ...")
    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []   # per-epoch training loss
    ltarray = []  # per-epoch test loss
    start = time.time()
    train_time_arr = []
    for epoch in range(epochs):
        l = 0.0
        for data in zip(bRs, bVs, bZs_dot):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
            l += l_
        l = l/len(bRs)
        if epoch % 1 == 0:
            larray += [l]
            ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
            print(
                f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
        if epoch % saveat == 0:
            # Periodic checkpoint of parameters and loss history.
            metadata = {
                "savedat": epoch,
                "mpass": mpass,
                "grid": grid,
                "ifdrag": ifdrag,
                "trainm": trainm,
            }
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata=metadata)
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata=metadata)
        now = time.time()
        train_time_arr.append((now - start))

    # Final loss curves and model snapshot.
    fig, axs = plt.subplots(1, 1)
    plt.semilogy(larray, label="Training")
    plt.semilogy(ltarray, label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    metadata = {
        "savedat": epoch,
        "mpass": mpass,
        "grid": grid,
        "ifdrag": ifdrag,
        "trainm": trainm,
    }
    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata={"savedat": epoch})
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata={"savedat": epoch})
    if ifDataEfficiency ==0:
        np.savetxt(f"../{N}-body-training-time/hgnn.txt", train_time_arr, delimiter = "\n")
        np.savetxt(f"../{N}-body-training-loss/hgnn-train.txt", larray, delimiter = "\n")
        np.savetxt(f"../{N}-body-training-loss/hgnn-test.txt", ltarray, delimiter = "\n")
main()
| 12,752 | 28.727273 | 255 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-data-HGNN.py | ################################################
################## IMPORT ######################
################################################
import sys
import fire
import os
from datetime import datetime
from functools import partial, wraps
from psystems.npendulum import get_init
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
# from shadow.plot import panel
import matplotlib.pyplot as plt
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def ps(*args):
    """Debug helper: print the .shape attribute of each array-like argument, one per line."""
    shapes = (item.shape for item in args)
    for shape in shapes:
        print(shape)
# N = 1
# dim = 2
# nconfig = 100
# saveat = 10
# ifdrag = 0
# dt = 0.01
# runs = 1000
def main(N=3, dim=2, nconfig=100, saveat=10, ifdrag=0, dt=0.01, runs=100):
    """Generate N-pendulum training data by integrating the constrained Hamiltonian dynamics.

    Samples `nconfig` random initial configurations, integrates each for
    `runs` steps of size `dt`, saves the (state, state-derivative) pairs,
    and plots a few trajectories with their constraint forces.
    """
    tag = f"{N}-Pendulum-data"
    seed = 42
    out_dir = f"../results"
    rname = False
    # Run directory name: timestamp when rname, else "2_<total points>".
    rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "2_" + str(nconfig * (runs))
    filename_prefix = f"{out_dir}/{tag}/{rstring}/"

    def _filename(name):
        # Build (and create on disk) the run-specific output path for `name`.
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route a file-based I/O helper through _filename().
        @wraps(f)
        def func(file, *args, **kwargs):
            return f(_filename(file), *args, **kwargs)
        return func

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    init_confs = [get_init(N, dim=dim) for i in range(nconfig)]
    print("Saving init configs...")
    savefile = OUT(src.io.savefile)
    savefile(f"initial-configs_{ifdrag}.pkl", init_confs)

    ################################################
    ################## SYSTEM ######################
    ################################################
    def drag(x, p, params):
        # Defined but not passed to get_zdot_lambda below (drag=None).
        return -0.1 * (p*p).sum()

    def KE(p):
        # Kinetic energy p^2 / 2 (unit masses).
        return (p*p).sum()/2

    def V(x, params):
        # Gravitational potential: proportional to the sum of y-coordinates.
        return 10*x[:, 1].sum()

    def hamiltonian(x, p, params): return KE(p) + V(x, params)

    def phi(x):
        # Holonomic constraints: unit distance between consecutive bobs
        # (and between the first bob and the fixed origin).
        X = jnp.vstack([x[:1, :]*0, x])
        return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0

    constraints = get_constraints(N, dim, phi)
    zdot, lamda_force = get_zdot_lambda(
        N, dim, hamiltonian, drag=None, constraints=constraints)

    def zdot_func(z, t):
        # ODE right-hand side in the stacked z = [x; p] representation.
        x, p = jnp.split(z, 2)
        return zdot(x, p, None)

    def get_z(x, p):
        return jnp.vstack([x, p])

    ################################################
    ############### DATA GENERATION ################
    ################################################
    def zz(out, ind=None):
        # Split a stacked trajectory into (positions, momenta); `ind` picks one half.
        if ind is None:
            x, p = jnp.split(out, 2, axis=1)
            return x, p
        else:
            return jnp.split(out, 2, axis=1)[ind]

    t = jnp.linspace(0.0, runs*dt, runs)
    print("Data generation ...")
    ind = 0
    dataset_states = []
    for x, p in init_confs:
        z_out = ode.odeint(zdot_func, get_z(x, p), t)
        xout, pout = zz(z_out)
        zdot_out = jax.vmap(zdot, in_axes=(0, 0, None))(xout, pout, None)
        ind += 1
        print(f"{ind}/{len(init_confs)}", end='\r')
        model_states = z_out, zdot_out
        dataset_states += [model_states]
        if ind % saveat == 0:
            # Periodic checkpoint of the dataset accumulated so far.
            print(f"{ind} / {len(init_confs)}")
            print("Saving datafile...")
            savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    print("Saving datafile...")
    savefile(f"model_states_{ifdrag}.pkl", dataset_states)

    # Plot the first few trajectories and their constraint forces.
    print("plotting traj and Forces...")
    ind = 0
    for states in dataset_states:
        z_out, _ = states
        xout, pout = zz(z_out)
        ind += 1
        fig, axs = plt.subplots(1, 2, figsize=(10, 5))
        for i in range(N):
            axs[0].scatter(xout[:, i, 0], xout[:, i, 1], c=t,
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[0].set_xlabel("X-position")
        axs[0].set_ylabel("Y-position")
        axs[0].axis("square")
        force = jax.vmap(lamda_force, in_axes=(0, 0, None))(xout, pout, None)
        for i in range(N):
            axs[1].scatter(force[:, N+i, 0], force[:, N+i, 1], c=t,
                           s=10*(i+1), label=f"pend: {i+1}")
        axs[1].set_xlabel(r"F$_x$ (constraints)")
        axs[1].set_ylabel(r"F$_y$ (constraints)")
        axs[1].axis("square")
        title = f"{N}-Pendulum random state {ind} {ifdrag}"
        plt.suptitle(title, va="bottom")
        plt.savefig(_filename(title.replace(" ", "_")+".png"), dpi=300)
        if ind >= 10:
            break
fire.Fire(main)
| 5,374 | 27.141361 | 101 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CHGNN.py | ################################################
################## IMPORT ######################
################################################
from posixpath import split
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
from shadow.plot import *
# from shadow.plot import panel
#import matplotlib.pyplot as plt
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
from src.hamiltonian import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
def namestr(obj, namespace):
    """List the names in *namespace* bound to exactly *obj* (identity, not equality)."""
    return list(filter(lambda name: namespace[name] is obj, namespace))
def pprint(*args, namespace=globals()):
    """Echo "name: value" for every argument; the name is recovered from *namespace* by identity."""
    for item in args:
        print("{}: {}".format(namestr(item, namespace)[0], item))
def main(N = 3, epochs = 10000, seed = 42, rname = False,
         dt = 1.0e-5, ifdrag = 0, trainm = 1, stride=1000, lr = 0.001, withdata = None, datapoints = None, batch_size = 100, ifDataEfficiency = 0, if_noisy_data=1):
    """Train a constrained Hamiltonian graph network (CHGNN) on N-pendulum data.

    Loads saved pendulum trajectories, builds a graph-based Hamiltonian with
    explicit holonomic constraints, fits it to state derivatives with Adam,
    and saves the model (including a lowest-loss snapshot) and loss curves.
    """
    # Data-efficiency experiments take the number of training points from argv.
    if (ifDataEfficiency == 1):
        data_points = int(sys.argv[1])
        batch_size = int(data_points/100)
    print("Configs: ")
    pprint(N, epochs, seed, rname, dt, lr, ifdrag, batch_size, namespace=locals())
    randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
    PSYS = f"{N}-Pendulum"
    TAG = f"chgnn"
    # Output root depends on the experiment flavour.
    if (ifDataEfficiency == 1):
        out_dir = f"../data-efficiency"
    elif (if_noisy_data == 1):
        out_dir = f"../noisy_data"
    else:
        out_dir = f"../results"

    def _filename(name, tag=TAG):
        # Build (and create on disk) the run-specific output path for `name`.
        rstring = "2" if (tag == "data") else "0"
        if (ifDataEfficiency == 1):
            rstring = "2_" + str(data_points)
        if (tag == "data"):
            filename_prefix = f"../results/{PSYS}-{tag}/{2}/"
        else:
            filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
        file = f"{filename_prefix}/{name}"
        os.makedirs(os.path.dirname(file), exist_ok=True)
        filename = f"{filename_prefix}/{name}".replace("//", "/")
        print("===", filename, "===")
        return filename

    def OUT(f):
        # Decorator: route a file-based I/O helper through _filename().
        @wraps(f)
        def func(file, *args, tag=TAG, **kwargs):
            return f(_filename(file, tag=tag), *args, **kwargs)
        return func

    loadfile = OUT(src.io.loadfile)
    savefile = OUT(src.io.savefile)

    ################################################
    ################## CONFIG ######################
    ################################################
    np.random.seed(seed)
    key = random.PRNGKey(seed)
    try:
        dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
    except:
        # NOTE(review): bare except hides the underlying I/O error.
        raise Exception("Generate dataset first.")
    if datapoints is not None:
        dataset_states = dataset_states[:datapoints]
    model_states = dataset_states[0]
    z_out, zdot_out = model_states
    print(
        f"Total number of data points: {len(dataset_states)}x{z_out.shape[0]}")
    # Each state stacks positions and momenta: first N rows are q, last N are p.
    N2, dim = z_out.shape[-2:]
    N = N2//2
    species = jnp.zeros(N, dtype=int)
    masses = jnp.ones(N)
    array = jnp.array([jnp.array(i) for i in dataset_states])
    Zs = array[:, 0, :, :, :]
    Zs_dot = array[:, 1, :, :, :]
    Zs = Zs.reshape(-1, N2, dim)
    Zs_dot = Zs_dot.reshape(-1, N2, dim)
    if (if_noisy_data == 1):
        # Perturb every sample with a Gaussian draw (fixed seed).
        Zs = np.array(Zs)
        Zs_dot = np.array(Zs_dot)
        np.random.seed(100)
        for i in range(len(Zs)):
            Zs[i] += np.random.normal(0,1,1)
            Zs_dot[i] += np.random.normal(0,1,1)
        Zs = jnp.array(Zs)
        Zs_dot = jnp.array(Zs_dot)
    # Shuffle, then 75/25 train/test split.
    mask = np.random.choice(len(Zs), len(Zs), replace=False)
    allZs = Zs[mask]
    allZs_dot = Zs_dot[mask]
    Ntr = int(0.75*len(Zs))
    Nts = len(Zs) - Ntr
    Zs = allZs[:Ntr]
    Zs_dot = allZs_dot[:Ntr]
    Zst = allZs[Ntr:]
    Zst_dot = allZs_dot[Ntr:]

    ################################################
    ################## SYSTEM ######################
    ################################################
    def phi(x):
        # Holonomic constraints: unit distance between consecutive bobs
        # (and between the first bob and the fixed origin).
        X = jnp.vstack([x[:1, :]*0, x])
        return jnp.square(X[:-1, :] - X[1:, :]).sum(axis=1) - 1.0

    constraints = get_constraints(N, dim, phi)

    ################################################
    ################### ML Model ###################
    ################################################
    senders, receivers = pendulum_connections(N)
    eorder = edge_order(N)
    Ef = 1  # edge feature dim (e_ij)
    Nf = dim
    Oh = 1
    Eei = 5  # edge embedding dim
    Nei = 5  # node embedding dim
    hidden = 5
    nhidden = 2

    def get_layers(in_, out_):
        # MLP layer sizes: input, `nhidden` hidden layers, output.
        return [in_] + [hidden]*nhidden + [out_]

    def mlp(in_, out_, key, **kwargs):
        return initialize_mlp(get_layers(in_, out_), key, **kwargs)

    # Per-module MLP parameters for the graph network.
    fneke_params = initialize_mlp([Oh, Nei], key)
    fne_params = initialize_mlp([Oh, Nei], key)
    fb_params = mlp(Ef, Eei, key)
    fv_params = mlp(Nei+Eei, Nei, key)
    fe_params = mlp(Nei, Eei, key)
    ff1_params = mlp(Eei, 1, key)
    ff2_params = mlp(Nei, 1, key)
    ff3_params = mlp(dim+Nei, 1, key)
    ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
    Hparams = dict(fb=fb_params,
                   fv=fv_params,
                   fe=fe_params,
                   ff1=ff1_params,
                   ff2=ff2_params,
                   ff3=ff3_params,
                   fne=fne_params,
                   fneke=fneke_params,
                   ke=ke_params)

    def H_energy_fn(params, graph):
        # Hamiltonian H = T + V computed by the graph network.
        g, V, T = cal_graph(params, graph, eorder=eorder,
                            useT=True)
        return T + V

    R, V = jnp.split(Zs[0], 2, axis=0)
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species,
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([N]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def energy_fn(species):
        # Close over a template graph; `apply` swaps in fresh positions/velocities.
        senders, receivers = [np.array(i)
                              for i in pendulum_connections(R.shape[0])]
        state_graph = jraph.GraphsTuple(nodes={
            "position": R,
            "velocity": V,
            "type": species
        },
            edges={},
            senders=senders,
            receivers=receivers,
            n_node=jnp.array([R.shape[0]]),
            n_edge=jnp.array([senders.shape[0]]),
            globals={})

        def apply(R, V, params):
            state_graph.nodes.update(position=R)
            state_graph.nodes.update(velocity=V)
            return H_energy_fn(params, state_graph)
        return apply

    apply_fn = energy_fn(species)
    v_apply_fn = vmap(apply_fn, in_axes=(None, 0))

    def Hmodel(x, v, params):
        return apply_fn(x, v, params["H"])

    params = {"H": Hparams}

    def nndrag(v, params):
        # NOTE(review): `models` is not imported by this script; this path is
        # only reached when ifdrag == 1 and would raise NameError — confirm.
        return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v

    if ifdrag == 0:
        print("Drag: 0.0")

        def drag(x, p, params):
            return 0.0
    elif ifdrag == 1:
        print("Drag: -0.1*v")

        def drag(x, p, params):
            return vmap(nndrag, in_axes=(0, None))(p.reshape(-1), params["drag"]).reshape(-1, 1)
        params["drag"] = initialize_mlp([1, 5, 5, 1], key)

    # State derivative from the learned Hamiltonian with pendulum constraints.
    # NOTE(review): drag=None here, so the drag network above is never used
    # in the dynamics even when ifdrag == 1 — confirm intent.
    zdot_model, lamda_force_model = get_zdot_lambda(
        N, dim, hamiltonian=Hmodel, drag=None, constraints=constraints,external_force=None)
    v_zdot_model = vmap(zdot_model, in_axes=(0, 0, None))

    ################################################
    ################## ML Training #################
    ################################################
    @jit
    def loss_fn(params, Rs, Vs, Zs_dot):
        # MSE between predicted and reference state derivatives.
        pred = v_zdot_model(Rs, Vs, params)
        return MSE(pred, Zs_dot)

    def gloss(*args):
        return value_and_grad(loss_fn)(*args)

    def update(i, opt_state, params, loss__, *data):
        """ Compute the gradient for a batch and update the parameters """
        value, grads_ = gloss(params, *data)
        opt_state = opt_update(i, grads_, opt_state)
        return opt_state, get_params(opt_state), value

    @ jit
    def step(i, ps, *args):
        return update(i, *ps, *args)

    opt_init, opt_update_, get_params = optimizers.adam(lr)

    @ jit
    def opt_update(i, grads_, opt_state):
        # Sanitize gradients (NaN -> 0) before the Adam update.
        grads_ = jax.tree_map(jnp.nan_to_num, grads_)
        return opt_update_(i, grads_, opt_state)

    def batching(*args, size=None):
        # Split each array into near-equal batches of roughly `size` samples;
        # picks whichever of two candidate batch counts covers more data.
        L = len(args[0])
        if size != None:
            nbatches1 = int((L - 0.5) // size) + 1
            nbatches2 = max(1, nbatches1 - 1)
            size1 = int(L/nbatches1)
            size2 = int(L/nbatches2)
            if size1*nbatches1 > size2*nbatches2:
                size = size1
                nbatches = nbatches1
            else:
                size = size2
                nbatches = nbatches2
        else:
            nbatches = 1
            size = L
        newargs = []
        for arg in args:
            newargs += [jnp.array([arg[i*size:(i+1)*size]
                                   for i in range(nbatches)])]
        return newargs

    Rs, Vs = jnp.split(Zs, 2, axis=1)
    Rst, Vst = jnp.split(Zst, 2, axis=1)
    bRs, bVs, bZs_dot = batching(Rs, Vs, Zs_dot,
                                 size=min(len(Rs), batch_size))
    print(f"training ...")
    opt_state = opt_init(params)
    epoch = 0
    optimizer_step = -1
    larray = []   # per-epoch training loss
    ltarray = []  # per-epoch test loss
    start = time.time()
    train_time_arr = []
    last_loss = 1000  # best (lowest) training loss seen so far
    for epoch in range(epochs):
        l = 0.0
        for data in zip(bRs, bVs, bZs_dot):
            optimizer_step += 1
            opt_state, params, l_ = step(
                optimizer_step, (opt_state, params, 0), *data)
            l += l_
        l = l/len(bRs)
        if epoch % 1 == 0:
            larray += [l]
            ltarray += [loss_fn(params, Rst, Vst, Zst_dot)]
            print(
                f"Epoch: {epoch}/{epochs} Loss (MSE): train={larray[-1]}, test={ltarray[-1]}")
        if epoch % 10 == 0:
            # Periodic checkpoint of parameters and loss history.
            savefile(f"trained_model_{ifdrag}_{trainm}.dil",
                     params, metadata={"savedat": epoch})
            savefile(f"loss_array_{ifdrag}_{trainm}.dil",
                     (larray, ltarray), metadata={"savedat": epoch})
        if last_loss > larray[-1]:
            # New best training loss: keep a separate "low" snapshot.
            last_loss = larray[-1]
            savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
                     params, metadata={"savedat": epoch})
        now = time.time()
        train_time_arr.append((now - start))

    # Final loss curves and model snapshot.
    fig, axs = panel(1, 1)
    plt.semilogy(larray, label="Training")
    plt.semilogy(ltarray, label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
    params = get_params(opt_state)
    savefile(f"trained_model_{ifdrag}_{trainm}.dil",
             params, metadata={"savedat": epoch})
    savefile(f"loss_array_{ifdrag}_{trainm}.dil",
             (larray, ltarray), metadata={"savedat": epoch})
    if (ifDataEfficiency == 0):
        np.savetxt("../3-pendulum-training-time/chgnn.txt", train_time_arr, delimiter = "\n")
        np.savetxt("../3-pendulum-training-loss/chgnn-train.txt", larray, delimiter = "\n")
        np.savetxt("../3-pendulum-training-loss/chgnn-test.txt", ltarray, delimiter = "\n")
# Train for the 4- and 5-link pendulum systems back-to-back.
# NOTE(review): no `if __name__ == "__main__"` guard — importing this module
# triggers both training runs as a side effect.
main(N = 4)
main(N = 5)
| 12,543 | 26.569231 | 164 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CLGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
#import matplotlib.pyplot as plt
#from torch import batch_norm_gather_stats_with_counts
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Reverse lookup: all keys of *namespace* whose bound value is exactly *obj*."""
    found = [name for name, val in namespace.items() if val is obj]
    return found
def pprint(*args, namespace=globals()):
    """Pretty-print each argument prefixed by the variable name it carries in *namespace*."""
    for obj in args:
        name = namestr(obj, namespace)[0]
        print(f"{name}: {obj}")
# Train a constrained Lagrangian graph neural network (CLGNN) on N-pendulum
# trajectory data: loads a pre-generated dataset, optionally corrupts it with
# Gaussian noise, builds a graph-based Lagrangian model, and fits the
# predicted accelerations to the dataset accelerations with Adam.
# Side effects: reads ../results/<N>-Pendulum-data, writes model/loss files
# and a loss plot under out_dir, and (optionally) timing/loss .txt files.
def main(N=3, epochs=10000, seed=42, rname=False,
dt=1.0e-5, ifdrag=0, trainm=1, stride=1000, lr=0.001, datapoints=None, batch_size=100, ifDataEfficiency = 0, if_noisy_data = 1):
# Data-efficiency mode: dataset size comes from the command line and the
# batch size is rescaled to keep 100 batches per epoch.
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(N, epochs, seed, rname,
dt, stride, lr, ifdrag, batch_size,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"clgnn"
# Output root depends on the experiment variant being run.
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
# Build an output path under out_dir/<system>-<tag>/<run-id>/ and make sure
# the directory exists.  tag=="data" always reads from ../results.
def _filename(name, tag=TAG):
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
else:
filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
# Free-space displacement and a simple Euler shift used by the integrator.
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
# Decorator: route the first (filename) argument of an I/O helper through
# _filename so all artifacts land in the run directory.
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(
f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N is re-derived from the data; dim is the spatial dimension.
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
# Noisy-data experiment: add a single Gaussian draw per frame (broadcast
# over all particles/dims) to positions, velocities and accelerations.
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
# Shuffle frames, then 75/25 train/test split.
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# Jacobian of the pendulum holonomic constraints w.r.t. positions; this is
# what makes the learned dynamics "constrained".
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# MLP width hyper-parameters for the graph network sub-modules.
Ef = 1 # eij dim
Nf = dim
Oh = 1
Eei = 5
Nei = 5
hidden = 5
nhidden = 2
def get_layers(in_, out_):
return [in_] + [hidden]*nhidden + [out_]
def mlp(in_, out_, key, **kwargs):
return initialize_mlp(get_layers(in_, out_), key, **kwargs)
# # fne_params = mlp(Oh, Nei, key)
fneke_params = initialize_mlp([Oh, Nei], key)
fne_params = initialize_mlp([Oh, Nei], key)
fb_params = mlp(Ef, Eei, key)
fv_params = mlp(Nei+Eei, Nei, key)
fe_params = mlp(Nei, Eei, key)
ff1_params = mlp(Eei, 1, key)
ff2_params = mlp(Nei, 1, key)
ff3_params = mlp(dim+Nei, 1, key)
ke_params = initialize_mlp([1+Nei, 10, 10, 1], key, affine=[True])
Lparams = dict(fb=fb_params,
fv=fv_params,
fe=fe_params,
ff1=ff1_params,
ff2=ff2_params,
ff3=ff3_params,
fne=fne_params,
fneke=fneke_params,
ke=ke_params)
# L = T - V: either learn T from the graph (trainm) or use analytic 0.5mv^2.
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, eorder=eorder,
useT=True)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, eorder=eorder,
useT=True)
return kin_energy(graph.nodes["velocity"]) - V
R, V = Rs[0], Vs[0]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# Close over a template graph; apply() overwrites its node fields in place
# before evaluating the Lagrangian (the graph topology never changes).
def energy_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
# Learned drag: always non-positive and proportional to v in sign.
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
# Acceleration from the Euler-Lagrange equations with constraints + drag.
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=constraints,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
def gloss(*args):
return value_and_grad(loss_fn)(*args)
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
# Wrap the optimizer update to scrub NaN gradients before applying them.
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
# grads_ = jax.tree_map(partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
# Split equal-length arrays into near-equal batches; picks the batch count
# whose total coverage (size*nbatches) is larger.
def batching(*args, size=None):
L = len(args[0])
if size != None:
nbatches1 = int((L - 0.5) // size) + 1
nbatches2 = max(1, nbatches1 - 1)
size1 = int(L/nbatches1)
size2 = int(L/nbatches2)
if size1*nbatches1 > size2*nbatches2:
size = size1
nbatches = nbatches1
else:
size = size2
nbatches = nbatches2
else:
nbatches = 1
size = L
newargs = []
for arg in args:
newargs += [jnp.array([arg[i*size:(i+1)*size]
for i in range(nbatches)])]
return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
start = time.time()
train_time_arr = []
last_loss= 1000
# Main training loop: average the per-batch loss, track test loss every
# epoch, checkpoint every 10 epochs and whenever train loss improves.
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# opt_state, params, l = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
larray += [l]
if epoch % 1 == 0:
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print(
f"Epoch: {epoch}/{epochs} Loss (MSE): test={ltarray[-1]}, train={larray[-1]}")
if epoch % 10 == 0:
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata={"savedat": epoch})
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
now = time.time()
train_time_arr.append((now - start))
# Final artifacts: loss curves plot plus last model/loss checkpoints.
fig, axs = panel(1, 1)
plt.semilogy(larray, label="Training")
plt.semilogy(ltarray, label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata={"savedat": epoch})
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata={"savedat": epoch})
if (ifDataEfficiency == 0):
np.savetxt("../3-pendulum-training-time/chgnn.txt", train_time_arr, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/chgnn-train.txt", larray, delimiter = "\n")
np.savetxt("../3-pendulum-training-loss/chgnn-test.txt", ltarray, delimiter = "\n")
main()
| 13,992 | 29.092473 | 137 | py |
benchmarking_graph | benchmarking_graph-main/scripts/peridynamics-FGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
from sklearn.metrics import r2_score
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List the names in *namespace* bound to the identical object *obj*."""
    return [alias for alias, value in namespace.items() if value is obj]
# Print each argument as "<variable-name>: <value>" by reverse lookup in
# *namespace*.  The default ``globals()`` is captured at definition time;
# pass ``namespace=locals()`` for local variables.
def pprint(*args, namespace=globals()):
for arg in args:
# First alias wins; IndexError if the object has no name in namespace.
print(f"{namestr(arg, namespace)[0]}: {arg}")
# import pickle
# data = pickle.load(open('../results/LJ-data/0/graphs_dicts.pkl','rb'))[0]
# dd = data[0]['nodes']['position']
# data[1]
# Load 251 peridynamics snapshots (steps 0..5000, every 20th) from the
# whitespace-separated .jld.data dumps.  Column layout per row (after the
# header row skipped by iloc[1:]): 0-2 acceleration, 3 damage, 4 id, 5 mass,
# 6-8 position, 9 type, 10-12 velocity, 13 volume.
# NOTE(review): ``id`` and ``type`` shadow Python builtins here — works, but
# renaming (e.g. node_id/node_type) would be safer.
acceleration = []
damage = []
id = []
mass = []
position = []
type = []
velocity = []
volume = []
import pandas as pd
for num in (np.linspace(0,5000,251).astype('int')):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-data/datafiles/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
damage += [np.array(split_df[[3]]).astype('float64')]
id += [np.array(split_df[[4]]).astype('float64')]
mass += [np.array(split_df[[5]]).astype('float64')]
position += [np.array(split_df[[6,7,8]]).astype('float64')]
type += [np.array(split_df[[9]]).astype('float64')]
velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
volume += [np.array(split_df[[13]]).astype('float64')]
# Stack into (frames, N, 3) arrays for positions/velocities/accelerations.
Rs = jnp.array(position)
Vs = jnp.array(velocity)
Fs = jnp.array(acceleration)
# Reference (undeformed) configuration, rescaled by 1.1 — presumably to
# undo an initial stretch; TODO confirm against the data generator.
o_position = position[0]/1.1
N,dim = o_position.shape
species = jnp.zeros(N, dtype=int)
def displacement(a, b):
    """Displacement from *b* to *a* under open (non-periodic) boundaries."""
    diff = a - b
    return diff
# make_graph(o_position,displacement[0],species=species,atoms={0: 125},V=velocity[0],A=acceleration[0],mass=mass[0],cutoff=3.0)
my_graph0_disc = make_graph(o_position,displacement,atoms={0: 125},cutoff=3.0)
senders = my_graph0_disc['senders']
receivers = my_graph0_disc['receivers']
dt=1.0e-3
# useN=None
withdata=None
datapoints=None
# mpass=1
# grid=False
stride=100
ifdrag=0
seed=42
rname=0
saveovito=1
trainm=1
runs=100
semilog=1
maxtraj=10
plotthings=True
redo=0
# def main(N=5, epochs=10000, seed=42, rname=True, dt=1.0e-3, ifdrag=0, stride=100, trainm=1, lr=0.001, withdata=None, datapoints=None, batch_size=100):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
PSYS = f"peridynamics"
TAG = f"FGNODE"
out_dir = f"../results"
randfilename = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
# Build the output path for an artifact: inserts a suffix encoding ifdrag
# (and trainm for non-data tags) before the file extension, selects the run
# directory, and creates it on disk.  Uses module globals PSYS, TAG, out_dir,
# ifdrag, trainm, rname, withdata, randfilename.
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
# ``trained`` lets results of a model trained on another system size be
# addressed, e.g. "5-peridynamics".
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[0]}"
else:
psys = PSYS
# Splice the suffix in front of the final extension.
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = randfilename if (rname and (tag != "data")) else (
"0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
# def displacement(a, b):
# return a - b
def shift(R, dR, V):
    """Integrator shift: advance positions by dR; velocities pass through."""
    updated = R + dR
    return updated, V
# Decorator: reroute the first (filename) argument of an I/O helper through
# _filename so every artifact lands in the run's output directory.
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, **kwargs):
return f(_filename(file, tag=tag), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# try:
# graphs = loadfile(f"env_1_step_0.jld.data", tag="data")
# except:
# raise Exception("Generate dataset first.")
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# peridynamics_sim
origin_acceleration = []
origin_mass = []
origin_position = []
origin_velocity = []
import pandas as pd
for num in range(1000):
dataf_name = f"env_1_step_{num}.jld.data"
df = pd.read_csv(f'../results/peridynamics-MCGNODE/test/{dataf_name}')
split_df = df.iloc[1:,0].str.split(expand=True)
origin_acceleration += [(np.array(split_df[[0,1,2]]).astype('float64'))]
origin_mass += [np.array(split_df[[5]]).astype('float64')]
origin_position += [np.array(split_df[[6,7,8]]).astype('float64')]
origin_velocity += [np.array(split_df[[10,11,12]]).astype('float64')]
origin_Rs = jnp.array(origin_position)
origin_Vs = jnp.array(origin_velocity)
origin_Fs = jnp.array(origin_acceleration)
origin_mass = jnp.array(origin_mass)
print(origin_Rs.shape)
#sys.exit()
################################################
################### ML Model ###################
################################################
def dist(*args):
    """Euclidean length of the displacement between two points."""
    d = displacement(*args)
    return jnp.sqrt((d * d).sum())
# dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
R, V = Rs[0], Vs[0]
my_graph0_disc.pop("e_order")
my_graph0_disc.pop("atoms")
my_graph0_disc.update({"globals": None})
graph = jraph.GraphsTuple(**my_graph0_disc)
# Predict per-node accelerations with the full-graph network (fgn) using a
# single message-passing step.
def acceleration_fn(params, graph):
acc = fgn.cal_acceleration(params, graph, mpass=1)
return acc
# Build an apply(R, V, params) closure over the module-level template graph.
# The closure mutates the captured graph's node/edge dicts in place (fixed
# topology), recomputing pairwise distances dij for the current positions.
def acc_fn(species):
state_graph = graph
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# acceleration_fn_model = acceleration_GNODE(N, dim, F_q_qdot,
# constraints=None)
# Force/acceleration wrapper passed to the integrator.  Both branches are
# intentionally identical: the mass scaling is disabled (commented out), so
# the model output is treated directly as acceleration regardless of mass.
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)
# return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"perignode_trained_model_low.dil")[0]
# Factory returning a jitted rollout fn(R, V) that integrates the given
# force function for ``runs`` saved frames (``stride`` internal steps each),
# closing over params, dt and masses from the enclosing scope.
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_model = get_forward_sim(params=params, force_fn=force_fn_model, runs=runs)
# my_sim = sim_model(R, V)
# v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
# v_acceleration_fn_model(Rs[:10], Vs[:10], params)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-sample L2 norm: flatten every axis after the first and take
    sqrt of the summed squares, returning one value per leading entry."""
    flattened_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flattened_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error ||ya-yp|| / (||ya|| + ||yp||), per sample."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed error: actual minus predicted."""
    difference = ya - yp
    return difference
def AbsErr(ya, yp):
    """Per-sample L2 magnitude of the error ya - yp."""
    residual = ya - yp
    return norm(residual)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"AbsZerr":[],
"Perr": [],
"AbsPerr": []
}
import time
# Roll out ``maxtraj`` learned trajectories, timing the forward simulation,
# and accumulate position/momentum errors against the ground-truth arrays
# origin_Rs/origin_Vs (each trajectory occupies ``runs`` consecutive frames).
t=0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
R, V = Rs[runs*ind], Vs[runs*ind]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+= end - start
# ll = [state for state in NVEStates(pred_traj)]
# save_ovito(f"pred_{ind}.data",[state for state in NVEStates(pred_traj)], lattice="")
# if ind>20:
# break
sim_size = runs
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [origin_Rs[runs*ind:runs+runs*ind]]
nexp["Zerr"] += [RelErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
# nexp["AbsZerr"] += [AbsErr(origin_Rs[runs*ind:runs+runs*ind], pred_traj.position)]
nexp["AbsZerr"] += [jnp.abs(norm(origin_Rs[runs*ind:runs+runs*ind]) - norm(pred_traj.position))]
# Squared total momentum per frame (velocities summed over particles).
ac_mom = jnp.square(origin_Vs[runs*ind:runs+runs*ind].sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
# nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind], pred_traj.velocity)])
# NOTE(review): the first 6 frames are skipped here — presumably to drop
# transients; confirm against the evaluation protocol.
nexp["Perr"] += ([RelErr(origin_Vs[runs*ind:runs+runs*ind][6:], pred_traj.velocity[6:])])
nexp["AbsPerr"] += ([jnp.abs(ac_mom - pr_mom)])
savefile(f"error_parameter.pkl", nexp)
# Plot the error curves stored under nexp[key]: first every trajectory
# individually, then the geometric mean with a 2-sigma (log-space) band.
# Uses module globals ``semilog`` (y-scale switch) and ``panel``/``plt``.
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
# Aggregate plot: statistics computed in log space, mapped back via exp.
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||\hat{p}-p||_2}{||\hat{p}||_2+||p||_2}$")
np.savetxt(f"../peridynamics-simulation-time/fgnode.txt", [t/maxtraj], delimiter = "\n")
# make_plots(nexp, "AbsZerr", yl=r"${||\hat{z}-z||_2}$")
# make_plots(nexp, "Herr",
# yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
# make_plots(nexp, "AbsHerr", yl=r"${||H(\hat{z})-H(z)||_2}$")
| 10,997 | 27.866142 | 152 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-data-FGNN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
import matplotlib.pyplot as plt
# from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.nbody import (get_fully_connected_senders_and_receivers, get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
from pyexpat import model
from statistics import mode
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
#create a new state for storing data
class Datastate:
    """Pairs each trajectory frame with its next-step deltas.

    Keeps all but the final frame of the supplied state trajectory and
    stores the one-step finite differences of position and velocity,
    which serve as regression targets for neural ODE training.
    """

    def __init__(self, model_states):
        pos = model_states.position
        vel = model_states.velocity
        # Inputs: every frame except the last (which has no successor).
        self.position = pos[:-1]
        self.velocity = vel[:-1]
        self.force = model_states.force[:-1]
        self.mass = model_states.mass[:-1]
        self.index = 0
        # Targets: per-step change to the next frame.
        self.change_position = pos[1:] - pos[:-1]
        self.change_velocity = vel[1:] - vel[:-1]
# Generate n-body (gravitational) trajectory data: simulate each initial
# configuration with the analytic Lagrangian dynamics, wrap the results in
# Datastate objects, pickle them, and plot energy traces for the first few.
# Writes under ../results/<N>-body-data/1 (train) or 1_test.
def main(N1=4, N2=1, dim=3, grid=False, saveat=100, runs=10001, nconfig=1, ifdrag=0, train = False):
if N2 is None:
N2 = N1
N = N1*N2
tag = f"{N}-body-data"
seed = 42
out_dir = f"../results"
rname = False
rstring = "1" if train else "1_test"
filename_prefix = f"{out_dir}/{tag}/{rstring}/"
# Resolve an artifact path inside the run directory and create it on disk.
def _filename(name):
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
# Decorator: route the first (filename) argument of each I/O helper
# through _filename.
def OUT(f):
@wraps(f)
def func(file, *args, **kwargs):
return f(_filename(file), *args, **kwargs)
return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# init_confs = [chain(N)[:2]
# for i in range(nconfig)]
# _, _, senders, receivers = chain(N)
# Initial (R, V) configurations and the fully connected interaction graph.
init_confs = get_init_conf(train)
senders, receivers = get_fully_connected_senders_and_receivers(N)
# if grid:
# senders, receivers = get_connections(N1, N2)
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# print("Creating Chain")
R, V = init_confs[0]
print("Saving init configs...")
savefile(f"initial-configs_{ifdrag}.pkl",
init_confs, metadata={"N1": N1, "N2": N2})
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
dt = 1.0e-3
stride = 100
lr = 0.001
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
# return vmap(partial(lnn.SPRING, stiffness=1.0, length=1.0))(dr).sum()
# Pairwise gravitational potential; /2 corrects for double-counted pairs
# in the fully connected sender/receiver lists.
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
# Ground-truth acceleration from the Euler-Lagrange equations (no
# constraints or external forces for this system).
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
@jit
def forward_sim(R, V):
return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=runs)
@jit
def v_forward_sim(init_conf):
return vmap(lambda x: forward_sim(x[0], x[1]))(init_conf)
################################################
############### DATA GENERATION ################
################################################
print("Data generation ...")
ind = 0
dataset_states = []
# Simulate every initial configuration; checkpoint the pickle every
# ``saveat`` trajectories and once more at the end.
for R, V in init_confs:
ind += 1
print(f"{ind}/{len(init_confs)}", end='\r')
model_states = forward_sim(R, V)
dataset_states += [Datastate(model_states)]
if ind % saveat == 0:
print(f"{ind} / {len(init_confs)}")
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
print("Saving datafile...")
savefile(f"model_states_{ifdrag}.pkl", dataset_states)
# Per-frame energies, stacked as columns [PE, KE, L, TE].
def cal_energy(states):
KE = vmap(kin_energy)(states.velocity)
PE = vmap(pot_energy_orig)(states.position)
L = vmap(Lactual, in_axes=(0, 0, None))(
states.position, states.velocity, None)
return jnp.array([PE, KE, L, KE+PE]).T
print("plotting energy...")
ind = 0
# Sanity plots + OVITO dumps for at most the first 10 trajectories.
for states in dataset_states:
ind += 1
Es = cal_energy(states)
fig, axs = plt.subplots(1, 1, figsize=(20, 5))
plt.plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
plt.legend(bbox_to_anchor=(1, 1))
plt.ylabel("Energy")
plt.xlabel("Time step")
title = f"{N}-nbody random state {ind}"
plt.title(title)
plt.savefig(
_filename(title.replace(" ", "_")+".png"), dpi=300)
save_ovito(f"dataset_{ind}.data", [
state for state in NVEStates(states)], lattice="")
if ind >= 10:
break
fire.Fire(main)
| 7,313 | 29.60251 | 107 | py |
benchmarking_graph | benchmarking_graph-main/scripts/n-body-HGN-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
import time
# from psystems.nsprings import (chain, edge_order, get_connections,
# get_fully_connected_senders_and_receivers,
# get_fully_edge_order, get_init)
from psystems.nbody import (get_fully_connected_senders_and_receivers,get_fully_edge_order, get_init_conf)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
from src.hamiltonian import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Reverse lookup: all keys in *namespace* whose value is exactly *obj*."""
    hits = []
    for alias in namespace:
        if namespace[alias] is obj:
            hits.append(alias)
    return hits
# Print "<variable-name>: <value>" for each argument via reverse lookup.
# The ``globals()`` default binds at definition time; use
# ``namespace=locals()`` inside functions (as main() does below).
def pprint(*args, namespace=globals()):
for arg in args:
# Prints the first alias found; IndexError for unnamed objects.
print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=4, dt=1.0e-3, useN=4, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-body"
TAG = f"hgn"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
# rstring = randfilename if (rname and (tag != "data")) else (
# "0" if (tag == "data") or (withdata == None) else f"{withdata}")
rstring = "0"
if (ifDataEfficiency == 1):
rstring = "2_" + str(data_points)
if (tag == "data"):
filename_prefix = f"../results/{PSYS}-{tag}/2_test/"
else:
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained), *args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
z_out, zdot_out = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[0]
V = pout[0]
# if grid:
# a = int(np.sqrt(N))
# senders, receivers = get_connections(a, a)
# eorder = edge_order(len(senders))
# else:
# # senders, receivers = get_fully_connected_senders_and_receivers(N)
# # eorder = get_fully_edge_order(N)
# print("Creating Chain")
# _, _, senders, receivers = chain(N)
# eorder = edge_order(len(senders))
senders, receivers = get_fully_connected_senders_and_receivers(N)
eorder = get_fully_edge_order(N)
senders = jnp.array(senders)
receivers = jnp.array(receivers)
# R = model_states.position[0]
# V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{z_out.shape}")
N, dim = xout.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
# def pot_energy_orig(x):
# dr = jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1)
# return jax.vmap(partial(src.hamiltonian.SPRING, stiffness=1.0, length=1.0))(dr).sum()
def pot_energy_orig(x):
dr = jnp.sqrt(jnp.square(x[senders, :] - x[receivers, :]).sum(axis=1))
return vmap(partial(lnn.GRAVITATIONAL, Gc = 1))(dr).sum()/2
kin_energy = partial(src.hamiltonian._T, mass=masses)
def Hactual(x, p, params):
return kin_energy(p) + pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
zdot, lamda_force = get_zdot_lambda(
N, dim, hamiltonian=Hactual, drag=drag, constraints=None)
def zdot_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot(x, p, params)
def z0(x, p):
    """Assemble the initial phase-space state by stacking positions on top
    of momenta along the leading axis."""
    stacked_state = jnp.vstack([x, p])
    return stacked_state
def get_forward_sim(params=None, zdot_func=None, runs=10):
def fn(R, V):
t = jnp.linspace(0.0, runs*stride*dt, runs*stride)
_z_out = ode.odeint(zdot_func, z0(R, V), t, params)
return _z_out[0::stride]
return fn
sim_orig = get_forward_sim(
params=None, zdot_func=zdot_func, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# useT=True, useonlyedge=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
    """Euclidean length of the displacement between the given points."""
    separation = displacement(*args)
    squared = jnp.square(separation)
    return jnp.sqrt(squared.sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_lgn(params, graph, mpass=1)
return acc
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Hmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=None,
# non_conservative_forces=drag)
zdot_model, lamda_force_model = get_zdot_lambda(
N, dim, hamiltonian=Hmodel, drag=drag, constraints=None)
def zdot_model_func(z, t, params):
x, p = jnp.split(z, 2)
return zdot_model(x, p, params)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, zdot_func=zdot_model_func, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-sample L2 norm: flattens all but the leading axis of `a` and
    returns the square root of each row's summed squares."""
    flattened_sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(flattened_sq.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error ||ya - yp|| / (||ya|| + ||yp||), per frame."""
    scale = norm(ya) + norm(yp)
    return norm(ya - yp) / scale
def Err(ya, yp):
    """Signed pointwise error: actual minus predicted."""
    difference = ya - yp
    return difference
def AbsErr(*args):
    """Element-wise absolute value of the signed error `Err(...)`."""
    signed = Err(*args)
    return jnp.abs(signed)
# Factory: builds a per-trajectory energy evaluator for Hamiltonian `lag`.
def caH_energy_fn(lag=None, params=None):
# `states` carries batched position/velocity arrays, one row per frame.
def fn(states):
# Kinetic energy per frame via the precomputed kin_energy closure.
KE = vmap(kin_energy)(states.velocity)
# Total Hamiltonian H(q, p) per frame; `params` is shared across frames.
H = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
# Potential energy recovered as H - KE.
PE = (H - KE)
# return jnp.array([H]).T
# Columns: [PE, KE, H, KE+PE]; one row per trajectory frame.
return jnp.array([PE, KE, H, KE+PE]).T
return fn
Es_fn = caH_energy_fn(lag=Hactual, params=None)
Es_pred_fn = caH_energy_fn(lag=Hmodel, params=params)
# Factory: builds a batched evaluator of the momentum half of `force`
# (a zdot-style function returning stacked [xdot, pdot]).
def net_force_fn(force=None, params=None):
def fn(states):
# Evaluate the phase-space derivative for every frame in the trajectory.
zdot_out = vmap(force, in_axes=(0, 0, None))(
states.position, states.velocity, params)
# Keep only the second half (pdot), i.e. the net force per particle.
_, force_out = jnp.split(zdot_out, 2, axis=1)
return force_out
return fn
net_force_orig_fn = net_force_fn(force=zdot)
net_force_model_fn = net_force_fn(force=zdot_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
}
trajectories = []
sim_orig2 = get_forward_sim(params=None, zdot_func=zdot_func, runs=runs)
skip = 0
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj} ...")
z_out, _ = dataset_states[0]
xout, pout = jnp.split(z_out, 2, axis=1)
R = xout[ind]
V = pout[ind]
try:
z_actual_out = sim_orig2(R, V) # full_traj[start_:stop_]
x_act_out, p_act_out = jnp.split(z_actual_out, 2, axis=1)
zdot_act_out = jax.vmap(zdot, in_axes=(0, 0, None))(
x_act_out, p_act_out, None)
_, force_act_out = jnp.split(zdot_act_out, 2, axis=1)
my_state = States()
my_state.position = x_act_out
my_state.velocity = p_act_out
my_state.force = force_act_out
my_state.mass = jnp.ones(x_act_out.shape[0])
actual_traj = my_state
start = time.time()
z_pred_out = sim_model(R, V)
x_pred_out, p_pred_out = jnp.split(z_pred_out, 2, axis=1)
zdot_pred_out = jax.vmap(zdot_model, in_axes=(
0, 0, None))(x_pred_out, p_pred_out, params)
_, force_pred_out = jnp.split(zdot_pred_out, 2, axis=1)
my_state_pred = States()
my_state_pred.position = x_pred_out
my_state_pred.velocity = p_pred_out
my_state_pred.force = force_pred_out
my_state_pred.mass = jnp.ones(x_pred_out.shape[0])
pred_traj = my_state_pred
end = time.time()
t += end - start
if saveovito:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
#raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"HGN {N}-Spring Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(
" ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)+1e-30]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)+1e-30]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"HGN {N}-Spring Exp {ind} pred traj"
axs[1].set_title(title)
title = f"HGN {N}-Spring Exp {ind} actual traj"
axs[0].set_title(title)
plt.savefig(
_filename(f"HGN {N}-Spring Exp {ind}".replace(" ", "-")+f"_actualH.png"))
except:
print("skipped")
if skip < 20:
skip += 1
savefile(f"error_parameter.pkl", nexp)
# Plot per-trajectory error curves for nexp[key], then a geometric-mean
# summary with a +/-2-sigma band computed in log space.
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
# One curve per trajectory; log y-axis when `semilog` is set.
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
# Statistics taken in log space: exp(mean(log)) is the geometric mean,
# and exp(mean +/- 2*std) gives the (asymmetric) confidence band.
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
# make_plots(nexp, "Perr",
# yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-nbody-zerr/lgnn.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-herr/lgnn.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-perr/lgnn.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-nbody-simulation-time/lgnn.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
| 21,326 | 33.1232 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Return every name in `namespace` bound to exactly this object
    (identity comparison, not equality)."""
    matches = []
    for candidate in namespace:
        if namespace[candidate] is obj:
            matches.append(candidate)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as `name: value`, where `name` is the first
    matching binding found in `namespace` (default: this module's globals,
    captured at definition time)."""
    for value in args:
        label = namestr(value, namespace)[0]
        print(f"{label}: {value}")
# N=5
# dim=2
# dt=1.0e-5
# useN=5
# stride=1000
# ifdrag=0
# seed=42
# rname=0
# saveovito=1
# trainm=0
# runs=100
# semilog=1
# maxtraj=100
# plotthings=True
# redo=0
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"cgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
# Build the output path for `name`, encoding drag/training flags in the
# file name and system/tag/run-string in the directory, and make sure the
# directory exists before returning the path.
def _filename(name, tag=TAG, trained=None):
# Raw data files encode only the drag flag; model outputs also encode trainm.
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
# A model trained on a different particle count can be evaluated here:
# substitute its N into the system prefix.
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
# Splice `part` in just before the file extension.
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
# Timestamped run directory when rname is set, otherwise a fixed "0".
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
# Collapse the doubled slashes introduced by the prefix concatenation.
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
    """Free-space (non-periodic) displacement vector from `b` to `a`."""
    diff = a - b
    return diff
def shift(R, dR, V):
    """Integrator shift: advance positions by `dR`, pass velocities through."""
    advanced = R + dR
    return advanced, V
def OUT(f):
    """Decorator: route the first (file-name) argument of `f` through
    `_filename` so all outputs land in the experiment directory."""
    @wraps(f)
    def wrapper(file, *args, tag=TAG, trained=None, **kwargs):
        resolved = _filename(file, tag=tag, trained=trained)
        return f(resolved, *args, **kwargs)
    return wrapper
def _fileexist(f):
    """Report whether `f` exists on disk; always False when `redo` is set,
    forcing results to be regenerated."""
    return False if redo else os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
    """Constant external force field: a unit pull in -y on particle index 1,
    zero on every other entry; returned flattened to a column vector.

    `x`, `v` and `params` are accepted for interface compatibility with the
    other force callbacks but are not used; the shape comes from the closure
    variable `R`.
    """
    F = 0 * R
    # jax.ops.index_update was deprecated and later removed from JAX; the
    # functional F.at[...].set(...) form is the supported equivalent and
    # produces the same out-of-place update.
    F = F.at[1, 1].set(-1.0)
    return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
def graph_force_fn(params, graph):
_GForce = cdgnode_cal_force_q_qdot(params, graph, eorder=eorder,
useT=True)
return _GForce
def _force_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return graph_force_fn(params, state_graph)
return apply
apply_fn = _force_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=constraints,
non_conservative_forces=None)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
print(F_q_qdot(R, V, params))
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t=0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj}")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# R = dataset_states[ind].position[0]
# V = dataset_states[ind].velocity[0]
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+= end - start
nexp["simulation_time"] += [end-start]
if saveovito:
if ind<5:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
if plotthings:
if ind<5:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 2, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
# # axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs)
# ylabel("Energy", ax=axs)
# title = f"{N}-Pendulum Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"{N}-Pendulum Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"{N}-Pendulum Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
else:
pass
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
# nexp["Perr"] += [RelErr(actual_traj.velocity,
# pred_traj.velocity)]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [ac_mom - pr_mom]
if ind%10==0:
savefile(f"error_parameter.pkl", nexp)
savefile("trajectories.pkl", trajectories)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
filepart = f"{key}"
for i in range(len(nexp[key])):
y = nexp[key][i].flatten()
if key2 is None:
x = range(len(y))
else:
x = nexp[key2][i].flatten()
filepart = f"{filepart}_{key2}"
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.ylabel(yl)
plt.xlabel(xl)
plt.savefig(_filename(f"RelError_{filepart}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-pendulum-zerr/cgnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/cgnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-simulation-time/cgnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
main(N = 5)
| 18,293 | 32.261818 | 204 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Pendulum-CFGNODE-post.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
# from sklearn.metrics import r2_score
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV, acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """List every name in `namespace` whose value is identically `obj`."""
    return [candidate for candidate, value in namespace.items()
            if value is obj]
def pprint(*args, namespace=globals()):
    """Echo each argument as `name: value`, resolving `name` via the first
    matching binding in `namespace` (default: module globals captured at
    definition time)."""
    for value in args:
        print(f"{namestr(value, namespace)[0]}: {value}")
def main(N=3, dim=2, dt=1.0e-5, useN=3, stride=1000, ifdrag=0, seed=100, rname=0, withdata=None, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=1):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"cfgnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
# rstring = randfilename if (rname and (tag != "data")) else (
# "0" if (tag == "data") or (withdata == None) else f"{withdata}")
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
if (ifDataEfficiency == 1):
rstring = "0_" + str(data_points)
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained),
*args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
    """Constant external force field: a unit pull in -y on particle index 1,
    zero on every other entry; returned flattened to a column vector.

    `x`, `v` and `params` are accepted for interface compatibility with the
    other force callbacks but are not used; the shape comes from the closure
    variable `R`.
    """
    F = 0 * R
    # jax.ops.index_update was deprecated and later removed from JAX; the
    # functional F.at[...].set(...) form is the supported equivalent and
    # produces the same out-of-place update.
    F = F.at[1, 1].set(-1.0)
    return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return T - V
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder,
# useT=True)
# return kin_energy(graph.nodes["velocity"]) - V
def dist(*args):
disp = displacement(*args)
return jnp.sqrt(jnp.square(disp).sum())
R = jnp.array(R)
V = jnp.array(V)
species = jnp.array(species).reshape(-1, 1)
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def acceleration_fn(params, graph):
acc = fgn.cal_cacceleration(params, graph, mpass=1)
return acc
def acc_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["L"])
acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
constraints=constraints,
non_conservative_forces=None)
#def acceleration_fn_model(x, v, params): return apply_fn(x, v, params["L"])
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: -0.1*v")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
# acceleration_fn_model = accelerationFull(N, dim,
# lagrangian=Lmodel,
# constraints=constraints,
# non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
    """Model force; with per-particle masses given, scale acceleration to force."""
    acc = acceleration_fn_model(R, V, params)
    if mass is None:
        return acc
    return acc * mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Per-sample Euclidean norm: flatten all but the leading axis."""
    flat = jnp.reshape(jnp.square(a), (len(a), -1))
    return jnp.sqrt(flat.sum(axis=1))
def RelErr(ya, yp):
    """Symmetric relative error per frame: |ya-yp| / (|ya| + |yp|)."""
    denominator = norm(ya) + norm(yp)
    return norm(ya - yp) / denominator
def Err(ya, yp):
    """Signed error (actual minus predicted)."""
    difference = ya - yp
    return difference
def AbsErr(*args):
    """Elementwise magnitude of the signed error."""
    signed = Err(*args)
    return jnp.abs(signed)
def cal_energy_fn(lag=None, params=None):
    """Build a jitted per-trajectory energy evaluator for Lagrangian `lag`.

    Returns fn(states) -> array with one row per frame: [PE, KE, L, TE].
    """
    @jit
    def fn(states):
        # Kinetic energy of each frame from its velocities.
        KE = vmap(kin_energy)(states.velocity)
        # Lagrangian L(x, v, params) evaluated frame-by-frame.
        L = vmap(lag, in_axes=(0, 0, None)
                 )(states.position, states.velocity, params)
        # By definition L = KE - PE, so PE = KE - L.
        PE = -(L - KE)
        # Columns: potential, kinetic, Lagrangian, total energy.
        return jnp.array([PE, KE, L, KE+PE]).T
    return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
    """Return a jitted function mapping a trajectory of states to per-frame forces."""
    @jit
    def fn(states):
        per_frame = vmap(force, in_axes=(0, 0, None))
        return per_frame(states.position, states.velocity, params)
    return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(
force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
t = 0.0
for ind in range(maxtraj):
print(f"Simulating trajectory {ind}/{maxtraj}")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R, V = get_init(N, dim=dim, angles=(-90, 90))
# R = dataset_states[ind].position[0]
# V = dataset_states[ind].velocity[0]
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+=end - start
if saveovito:
if ind<5:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
raise Warning("Cannot calculate energy in FGN")
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
Es_pred = Es_pred_fn(traj)
Es_pred = Es_pred - Es_pred[0] + Es[0]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs)
ylabel("Energy", ax=axs)
title = f"(FGN) {N}-Pendulum Exp {ind}"
plt.title(title)
plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Pendulum Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"CFGNODE-traj {N}-Pendulum Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"CFGNODE-traj {N}-Pendulum Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
savefile(f"error_parameter.pkl", nexp)
def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
    """Plot per-trajectory error curves for `key` (optionally against `key2`),
    then the geometric-mean curve with a 2-sigma log-space band.

    Figures are saved through the enclosing scope's _filename/panel/semilog.
    """
    print(f"Plotting err for {key}")
    fig, axs = panel(1, 1)
    # BUG FIX: the suffix used to be appended inside the loop, once per
    # trajectory, producing names like "Zerr_T_T_T...". Build it once.
    filepart = f"{key}" if key2 is None else f"{key}_{key2}"
    for i in range(len(nexp[key])):
        y = nexp[key][i].flatten()
        if key2 is None:
            x = range(len(y))
        else:
            x = nexp[key2][i].flatten()
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.savefig(_filename(f"RelError_{filepart}.png"))

    # Aggregate across trajectories in log space (geometric statistics).
    fig, axs = panel(1, 1)
    mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
    std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
    up_b = jnp.exp(mean_ + 2*std_)
    low_b = jnp.exp(mean_ - 2*std_)
    y = jnp.exp(mean_)
    x = range(len(mean_))
    if semilog:
        plt.semilogy(x, y)
    else:
        plt.plot(x, y)
    plt.fill_between(x, low_b, up_b, alpha=0.5)
    plt.ylabel(yl)
    plt.xlabel("Time")
    plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||z_1-z_2||_2}{||z_1||_2+||z_2||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(z_1)-H(z_2)||_2}{||H(z_1)||_2+||H(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-pendulum-zerr/cfgnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-herr/cfgnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-pendulum-simulation-time/cfgnode.txt", [t/maxtraj], delimiter = "\n")
main(N = 4)
main(N = 5)
| 18,719 | 32.074205 | 219 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-GNODE-post.py | ################################################
################## IMPORT ######################
################################################
import imp
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
# from sympy import fu
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order, get_init)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn1
from src.graph1 import *
from src.lnn1 import acceleration, accelerationFull, accelerationTV,acceleration_GNODE
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
import time
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """Names in `namespace` bound to the identical object `obj`."""
    matches = []
    for name, value in namespace.items():
        if value is obj:
            matches.append(name)
    return matches
def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value', resolving the name by identity."""
    for arg in args:
        label = namestr(arg, namespace)[0]
        print(f"{label}: {arg}")
def main(N=5, dt=1.0e-3, useN=5, withdata=None, datapoints=100, mpass=1, grid=False, stride=100, ifdrag=0, seed=42, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0, ifDataEfficiency = 0, if_noisy_data=0):
if (ifDataEfficiency == 1):
data_points = int(sys.argv[1])
batch_size = int(data_points/100)
if useN is None:
useN = N
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Spring"
TAG = f"gnode"
if (ifDataEfficiency == 1):
out_dir = f"../data-efficiency"
elif (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
def _filename(name, tag=TAG, trained=None):
    """Resolve `name` to an output path under the experiment directory,
    creating parent directories as a side effect.

    tag: "data" selects the shared dataset directory; anything else
         selects the per-model results directory.
    trained: when given, substitutes a different system size into the
         results path (loading a model trained on another N).
    """
    # Encode drag (and, for models, the trainm flag) into the file stem.
    if tag == "data":
        part = f"_{ifdrag}."
    else:
        part = f"_{ifdrag}_{trainm}."
    if trained is not None:
        psys = f"{trained}-{PSYS.split('-')[1]}"
    else:
        psys = PSYS
    # Splice `part` between the stem and the original extension.
    name = ".".join(name.split(".")[:-1]) + part + name.split(".")[-1]
    # Run-identifier segment: a timestamped name when rname is set,
    # otherwise "0" (optionally suffixed with the withdata marker).
    rstring = randfilename if (rname and (tag != "data")) else ("0" if (tag == "data") or (withdata == None) else f"0_{withdata}")
    if (ifDataEfficiency == 1):
        rstring = "0_" + str(data_points)
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Free-space displacement vector from b to a."""
    return a - b


def shift(R, dR, V):
    """Advance positions by dR; velocities pass through unchanged."""
    return R + dR, V
def OUT(f):
    """Decorator: resolve the leading `file` argument through _filename."""
    @wraps(f)
    def func(file, *args, tag=TAG, trained=None, **kwargs):
        resolved = _filename(file, tag=tag, trained=trained)
        return f(resolved, *args, **kwargs)
    return func
def _fileexist(f):
    """False when redo is requested (forces regeneration); else check on disk."""
    return (not redo) and os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
model_states = dataset_states[0]
if grid:
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
# senders, receivers = get_fully_connected_senders_and_receivers(N)
# eorder = get_fully_edge_order(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
R = model_states.position[0]
V = model_states.velocity[0]
print(
f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
# parameters = [[dict(length=1.0)]]
# pot_energy_orig = map_parameters(
# lnn.SPRING, displacement, species, parameters)
def pot_energy_orig(x):
    """Total ground-truth spring potential over the chain's edges."""
    # Squared pair separation along every edge (senders/receivers come
    # from the chain connectivity built in the enclosing scope).
    # NOTE(review): `dr` is the *squared* distance here -- lnn1.SPRING
    # apparently expects that convention; TODO confirm against lnn1.
    dr = jnp.square(x[senders] - x[receivers]).sum(axis=1)
    return vmap(partial(lnn1.SPRING, stiffness=1.0, length=1.0))(dr).sum()
kin_energy = partial(lnn1._T, mass=masses)
def Lactual(x, v, params):
    """Ground-truth Lagrangian L = T - V (params unused)."""
    kinetic = kin_energy(v)
    potential = pot_energy_orig(x)
    return kinetic - potential
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
    """Constant unit force in -y on particle 1, as a column vector.

    (Unused in this script: accelerationFull is called with
    external_force=None below.)

    Fixed: jax.ops.index_update was removed from JAX; the .at[...].set(...)
    functional-update API is its drop-in replacement.
    """
    F = 0 * R
    F = F.at[1, 1].set(-1.0)
    return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn1.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=None,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
    """Ground-truth force; scales acceleration by mass when masses are given."""
    acc = acceleration_fn_orig(R, V, params)
    if mass is None:
        return acc
    return acc * mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
    """Build a jitted forward simulator: (R, V) -> trajectory of `runs` frames."""
    @jit
    def fn(R, V):
        # `predition` (sic -- external helper name) integrates with the
        # velocity-Verlet-style stepper using dt/stride from the closure.
        return predition(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
    return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs+1)
def simGT():
    """Simulate the full ground-truth trajectory and cache it to disk."""
    print("Simulating ground truth ...")
    _traj = sim_orig(R, V)
    # The metadata key lets a later run detect a parameter mismatch.
    metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
    savefile("gt_trajectories.pkl",
             _traj, metadata=metadata)
    return _traj
# print(sim_orig(R, V))
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
# ################################################
# ################### ML Model ###################
# ################################################
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
def graph_force_fn(params, graph):
    """Node-wise force predicted by the GNODE decoder (no edge ordering)."""
    return a_cdgnode_cal_force_q_qdot(params, graph, eorder=None, useT=True)
def _force_fn(species):
    """Close over a template graph and return apply(R, V, params) -> forces.

    The same GraphsTuple is reused across calls; only the node positions
    and velocities are refreshed before each forward pass.
    """
    # senders, receivers = [np.array(i)
    #                       for i in get_fully_connected_senders_and_receivers(N)]
    state_graph = jraph.GraphsTuple(nodes={
        "position": R,
        "velocity": V,
        "type": species
    },
        edges={},
        senders=senders,
        receivers=receivers,
        n_node=jnp.array([R.shape[0]]),
        n_edge=jnp.array([senders.shape[0]]),
        globals={})

    def apply(R, V, params):
        # Mutate the node-feature dict inside the closed-over tuple,
        # then evaluate the graph network.
        state_graph.nodes.update(position=R)
        state_graph.nodes.update(velocity=V)
        return graph_force_fn(params, state_graph)
    return apply
apply_fn = _force_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def F_q_qdot(x, v, params): return apply_fn(x, v, params["Fqqdot"])
# x=R
# v=V
# F_q_qdot(R, V, params)
# def nndrag(v, params):
# return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
# if ifdrag == 0:
# print("Drag: 0.0")
# def drag(x, v, params):
# return 0.0
# elif ifdrag == 1:
# print("Drag: nn")
# def drag(x, v, params):
# return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = F_q_qdot
# acceleration_fn_model = acceleration_GNODE(N, dim,F_q_qdot,
# constraints=None,
# non_conservative_forces=None)
def force_fn_model(R, V, params, mass=None):
    """Predicted acceleration, scaled by per-particle mass when provided."""
    acc = acceleration_fn_model(R, V, params)
    return acc if mass is None else acc * mass.reshape(-1, 1)
params = loadfile(f"trained_model_low.dil", trained=useN)[0]
# print(R.shape,V.shape)
# print(F_q_qdot(R, V, params))
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
# sim_model(R, V)
################################################
############## forward simulation ##############
################################################
def norm(a):
    """Euclidean norm of each leading-axis sample."""
    sq = jnp.square(a).reshape(len(a), -1)
    return jnp.sqrt(jnp.sum(sq, axis=1))
def normp(a):
    """Alias of norm(): per-sample Euclidean norm.

    Was a byte-for-byte duplicate of norm(); delegate instead so the two
    cannot drift apart. Kept as a separate name for the Perr code path.
    """
    return norm(a)
def RelErr(ya, yp):
    """Symmetric relative error per frame, based on norm()."""
    total = norm(ya) + norm(yp)
    return norm(ya - yp) / total


def RelErrp(ya, yp):
    """Same metric computed with normp() (kept separate for momentum errors)."""
    total = normp(ya) + normp(yp)
    return normp(ya - yp) / total


def Err(ya, yp):
    """Signed error (actual minus predicted)."""
    return ya - yp


def AbsErr(*args):
    """Elementwise absolute error."""
    return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
# Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
def net_force_fn(force=None, params=None):
@jit
def fn(states):
return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
return fn
net_force_orig_fn = net_force_fn(force=force_fn_orig)
net_force_model_fn = net_force_fn(force=force_fn_model, params=params)
nexp = {
"z_pred": [],
"z_actual": [],
"Zerr": [],
"Herr": [],
"E": [],
"Perr": [],
"simulation_time": [],
}
trajectories = []
sim_orig2 = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=runs)
skip_count = 0
t=0
for ind in range(maxtraj):
try:
print(f"Simulating trajectory {ind}/{len(dataset_states)} ...")
# R = full_traj[_ind].position
# V = full_traj[_ind].velocity
# start_ = _ind+1
# stop_ = start_+runs
R = dataset_states[ind].position[0]
V = dataset_states[ind].velocity[0]
actual_traj = sim_orig2(R, V) # full_traj[start_:stop_]
start = time.time()
pred_traj = sim_model(R, V)
end = time.time()
t+= end-start
nexp["simulation_time"] += [end-start]
if saveovito:
if ind<5:
save_ovito(f"pred_{ind}.data", [
state for state in NVEStates(pred_traj)], lattice="")
save_ovito(f"actual_{ind}.data", [
state for state in NVEStates(actual_traj)], lattice="")
else:
pass
trajectories += [(actual_traj, pred_traj)]
savefile("trajectories.pkl", trajectories)
if plotthings:
if ind<5:
for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
print(f"plotting energy ({key})...")
Es = Es_fn(traj)
# Es_pred = Es_pred_fn(traj)
# Es_pred = Es_pred - Es_pred[0] + Es[0]
# fig, axs = panel(1, 2, figsize=(20, 5))
# axs[0].plot(Es, label=["PE", "KE", "L", "TE"],
# lw=6, alpha=0.5)
# # axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
# plt.legend(bbox_to_anchor=(1, 1), loc=2)
# axs[0].set_facecolor("w")
# xlabel("Time step", ax=axs[0])
# xlabel("Time step", ax=axs[1])
# ylabel("Energy", ax=axs[0])
# ylabel("Energy", ax=axs[1])
# title = f"LGNN {N}-Spring Exp {ind}"
# plt.title(title)
# plt.savefig(_filename(title.replace(
# " ", "-")+f"_{key}_traj.png"))
net_force_orig = net_force_orig_fn(traj)
net_force_model = net_force_model_fn(traj)
fig, axs = panel(1+R.shape[0], 1, figsize=(20,
R.shape[0]*5), hshift=0.1, vs=0.35)
for i, ax in zip(range(R.shape[0]+1), axs):
if i == 0:
ax.text(0.6, 0.8, "Averaged over all particles",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(net_force_model.sum(
axis=1), "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
else:
ax.text(0.6, 0.8, f"For particle {i}",
transform=ax.transAxes, color="k")
ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
ax.plot(
net_force_model[:, i-1, :], "--", color="k")
ax.plot([], "--", c="k", label="Predicted")
ax.legend(loc=2, bbox_to_anchor=(1, 1),
labelcolor="markerfacecolor")
ax.set_ylabel("Net force")
ax.set_xlabel("Time step")
ax.set_title(f"{N}-Spring Exp {ind}")
plt.savefig(_filename(f"net_force_Exp_{ind}_{key}.png"))
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
fig, axs = panel(1, 2, figsize=(20, 5))
axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
plt.legend(bbox_to_anchor=(1, 1), loc=2)
axs[0].set_facecolor("w")
xlabel("Time step", ax=axs[0])
xlabel("Time step", ax=axs[1])
ylabel("Energy", ax=axs[0])
ylabel("Energy", ax=axs[1])
title = f"{N}-Spring Exp {ind} Lmodel"
axs[1].set_title(title)
title = f"{N}-Spring Exp {ind} Lactual"
axs[0].set_title(title)
plt.savefig(_filename(title.replace(" ", "-")+f".png"))
else:
pass
# Es = Es_fn(actual_traj) #jnp.array([PE, KE, L, KE+PE]).T
# H = Es[:, -1]
# L = Es[:, 2]
# Eshat = Es_fn(pred_traj)
# KEhat = Eshat[:, 1]
# Lhat = Eshat[:, 2]
# k = L[5]/Lhat[5]
# print(f"scalling factor: {k}")
# Lhat = Lhat*k
# Hhat = 2*KEhat - Lhat
Es = Es_fn(actual_traj)
Eshat = Es_fn(pred_traj)
H = Es[:, -1]
Hhat = Eshat[:, -1]
nexp["Herr"] += [RelErr(H, Hhat)]
nexp["E"] += [Es, Eshat]
nexp["z_pred"] += [pred_traj.position]
nexp["z_actual"] += [actual_traj.position]
nexp["Zerr"] += [RelErr(actual_traj.position,
pred_traj.position)]
# nexp["Perr"] += [jnp.square(actual_traj.velocity.sum(axis=1) -
# pred_traj.velocity.sum(axis=1)).sum(axis=1)]#/(jnp.square(actual_traj.velocity.sum(axis=1)).sum(axis=1)+jnp.square(pred_traj.velocity.sum(axis=1)).sum(axis=1))]
ac_mom = jnp.square(actual_traj.velocity.sum(1)).sum(1)
pr_mom = jnp.square(pred_traj.velocity.sum(1)).sum(1)
nexp["Perr"] += [jnp.absolute(ac_mom - pr_mom)]
savefile(f"error_parameter.pkl", nexp)
except:
skip_count += 1
pass
print(f'skipped loop: {skip_count}')
def make_plots(nexp, key, yl="Err"):
print(f"Plotting err for {key}")
fig, axs = panel(1, 1)
for i in range(len(nexp[key])):
if semilog:
plt.semilogy(nexp[key][i].flatten())
else:
plt.plot(nexp[key][i].flatten())
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_{key}.png"))
fig, axs = panel(1, 1)
mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)
up_b = jnp.exp(mean_ + 2*std_)
low_b = jnp.exp(mean_ - 2*std_)
y = jnp.exp(mean_)
x = range(len(mean_))
if semilog:
plt.semilogy(x, y)
else:
plt.plot(x, y)
plt.fill_between(x, low_b, up_b, alpha=0.5)
plt.ylabel(yl)
plt.xlabel("Time")
plt.savefig(_filename(f"RelError_std_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
make_plots(nexp, "Perr",
yl=r"$\frac{||P(z_1)-P(z_2)||_2}{||P(z_1)||_2+||P(z_2)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
gmean_perr = jnp.exp( jnp.log(jnp.array(nexp["Perr"])).mean(axis=0) )
if (ifDataEfficiency == 0):
np.savetxt(f"../{N}-spring-zerr/gnode.txt", gmean_zerr, delimiter = "\n")
np.savetxt(f"../{N}-spring-herr/gnode.txt", gmean_herr, delimiter = "\n")
np.savetxt(f"../{N}-spring-perr/gnode.txt", gmean_perr, delimiter = "\n")
np.savetxt(f"../{N}-spring-simulation-time/gnode.txt", [t/maxtraj], delimiter = "\n")
# main(N = 20)
main(N = 5)
| 20,974 | 33.958333 | 247 | py |
benchmarking_graph | benchmarking_graph-main/scripts/Spring-LGN.py | ################################################
################## IMPORT ######################
################################################
import json
import sys
from datetime import datetime
from functools import partial, wraps
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from shadow.plot import *
#from sklearn.metrics import r2_score
import time
from psystems.nsprings import (chain, edge_order, get_connections,
get_fully_connected_senders_and_receivers,
get_fully_edge_order)
# from statistics import mode
# from sympy import LM
# from torch import batch_norm_gather_stats_with_counts
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import fgn, lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import nve
from src.utils import *
# config.update("jax_enable_x64", True)
# config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
    """All keys of `namespace` whose value is the very object `obj`."""
    return [key for key, value in namespace.items() if value is obj]
def pprint(*args, namespace=globals()):
    """Print each argument as 'name: value' (name found by identity lookup)."""
    for arg in args:
        names = namestr(arg, namespace)
        print(f"{names[0]}: {arg}")
def wrap_main(f):
    """Wrap `f` so the raw (args, kwargs) are echoed to stdout and also
    forwarded to `f` as the extra keyword argument `config`."""
    def fn(*args, **kwargs):
        config = (args, kwargs)
        print("Configs: ")
        print("Args: ")
        for positional in args:
            print(positional)
        print("KwArgs: ")
        for key, value in kwargs.items():
            print(key, ":", value)
        return f(*args, **kwargs, config=config)
    return fn
def Main(N=5, epochs=10000, seed=42, rname=False, saveat=10, error_fn="L2error",
         dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001,
         withdata=None, datapoints=None, batch_size=100, if_noisy_data=1):
    """CLI entry point: forward every option to main() through wrap_main."""
    options = dict(N=N, epochs=epochs, seed=seed, rname=rname, saveat=saveat,
                   error_fn=error_fn, dt=dt, ifdrag=ifdrag, stride=stride,
                   trainm=trainm, grid=grid, mpass=mpass, lr=lr,
                   withdata=withdata, datapoints=datapoints,
                   batch_size=batch_size, if_noisy_data=if_noisy_data)
    return wrap_main(main)(**options)
def main(N=3, epochs=10000, seed=42, rname=True, saveat=10, error_fn="L2error",
dt=1.0e-3, ifdrag=0, stride=100, trainm=1, grid=False, mpass=1, lr=0.001, withdata=None, datapoints=None, batch_size=1000, config=None, if_noisy_data=1):
# print("Configs: ")
# pprint(N, epochs, seed, rname,
# dt, stride, lr, ifdrag, batch_size,
# namespace=locals())
randfilename = datetime.now().strftime(
"%m-%d-%Y_%H-%M-%S") + f"_{datapoints}"
PSYS = f"{N}-Spring"
TAG = f"lgn"
if (if_noisy_data == 1):
out_dir = f"../noisy_data"
else:
out_dir = f"../results"
def _filename(name, tag=TAG):
    """Resolve `name` to an output path for this experiment, creating
    parent directories as a side effect and echoing the result."""
    # Run identifier: a timestamped name when rname is set (non-data tags),
    # else "0", optionally replaced by the withdata marker.
    rstring = randfilename if (rname and (tag != "data")) else (
        "0" if (tag == "data") or (withdata == None) else f"{withdata}")
    if (tag == "data"):
        filename_prefix = f"../results/{PSYS}-{tag}/{0}/"
    else:
        filename_prefix = f"{out_dir}/{PSYS}-{tag}/{rstring}/"
    file = f"{filename_prefix}/{name}"
    os.makedirs(os.path.dirname(file), exist_ok=True)
    # Collapse the doubled slash introduced by the prefix join.
    filename = f"{filename_prefix}/{name}".replace("//", "/")
    print("===", filename, "===")
    return filename
def displacement(a, b):
    """Displacement vector pointing from b toward a (free boundary)."""
    return a - b


def shift(R, dR, V):
    """Apply the position update dR; velocities are returned unchanged."""
    return R + dR, V
def OUT(f):
    """Decorator mapping the first `file` argument through _filename."""
    @wraps(f)
    def func(file, *args, tag=TAG, **kwargs):
        path = _filename(file, tag=tag)
        return f(path, *args, **kwargs)
    return func
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
savefile(f"config_{ifdrag}_{trainm}.pkl", config)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
try:
dataset_states = loadfile(f"model_states_{ifdrag}.pkl", tag="data")[0]
except:
raise Exception("Generate dataset first. Use *-data.py file.")
if datapoints is not None:
dataset_states = dataset_states[:datapoints]
model_states = dataset_states[0]
print(f"Total number of data points: {len(dataset_states)}x{model_states.position.shape[0]}")
N, dim = model_states.position.shape[-2:]
species = jnp.zeros((N, 1), dtype=int)
masses = jnp.ones((N, 1))
Rs, Vs, Fs = States().fromlist(dataset_states).get_array()
Rs = Rs.reshape(-1, N, dim)
Vs = Vs.reshape(-1, N, dim)
Fs = Fs.reshape(-1, N, dim)
if (if_noisy_data == 1):
Rs = np.array(Rs)
Fs = np.array(Fs)
Vs = np.array(Vs)
np.random.seed(100)
for i in range(len(Rs)):
Rs[i] += np.random.normal(0,1,1)
Vs[i] += np.random.normal(0,1,1)
Fs[i] += np.random.normal(0,1,1)
Rs = jnp.array(Rs)
Fs = jnp.array(Fs)
Vs = jnp.array(Vs)
mask = np.random.choice(len(Rs), len(Rs), replace=False)
allRs = Rs[mask]
allVs = Vs[mask]
allFs = Fs[mask]
Ntr = int(0.75*len(Rs))
Nts = len(Rs) - Ntr
Rs = allRs[:Ntr]
Vs = allVs[:Ntr]
Fs = allFs[:Ntr]
Rst = allRs[Ntr:]
Vst = allVs[Ntr:]
Fst = allFs[Ntr:]
################################################
################## SYSTEM ######################
################################################
# pot_energy_orig = PEF
# kin_energy = partial(lnn._T, mass=masses)
# def Lactual(x, v, params):
# return kin_energy(v) - pot_energy_orig(x)
# def constraints(x, v, params):
# return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
# def external_force(x, v, params):
# F = 0*R
# F = jax.ops.index_update(F, (1, 1), -1.0)
# return F.reshape(-1, 1)
# def drag(x, v, params):
# return -0.1*v.reshape(-1, 1)
# acceleration_fn_orig = lnn.accelerationFull(N, dim,
# lagrangian=Lactual,
# non_conservative_forces=None,
# constraints=constraints,
# external_force=None)
# def force_fn_orig(R, V, params, mass=None):
# if mass is None:
# return acceleration_fn_orig(R, V, params)
# else:
# return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
# @jit
# def forward_sim(R, V):
# return predition(R, V, None, force_fn_orig, shift, dt, masses, stride=stride, runs=10)
################################################
################### ML Model ###################
################################################
if grid:
print("It's a grid?")
a = int(np.sqrt(N))
senders, receivers = get_connections(a, a)
eorder = edge_order(len(senders))
else:
print("It's a random?")
# senders, receivers = get_fully_connected_senders_and_receivers(N)
print("Creating Chain")
_, _, senders, receivers = chain(N)
eorder = edge_order(len(senders))
R, V = Rs[0], Vs[0]
def dist(*args):
    """Scalar Euclidean distance derived from the displacement field."""
    delta = displacement(*args)
    return jnp.sqrt(jnp.square(delta).sum())
dij = vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
# if trainm:
# print("kinetic energy: learnable")
# def L_energy_fn(params, graph):
# L = fgn.cal_energy(params, graph, mpass=mpass)
# return L
# else:
# print("kinetic energy: 0.5mv^2")
# kin_energy = partial(lnn._T, mass=masses)
# raise Warning("KE = 0.5mv2 not implemented")
# # def L_energy_fn(params, graph):
# # g, V, T = cal_graph(params, graph, mpass=mpass, eorder=eorder,
# # useT=True, useonlyedge=True)
# # return kin_energy(graph.nodes["velocity"]) - V
hidden_dim = [16, 16]
edgesize = 1
nodesize = 5
ee = 8
ne = 8
Lparams = dict(
ee_params=initialize_mlp([edgesize, ee], key),
ne_params=initialize_mlp([nodesize, ne], key),
e_params=initialize_mlp([ee+2*ne, *hidden_dim, ee], key),
n_params=initialize_mlp([2*ee+ne, *hidden_dim, ne], key),
g_params=initialize_mlp([ne, *hidden_dim, 1], key),
acc_params=initialize_mlp([ne, *hidden_dim, dim], key),
lgn_params = initialize_mlp([ne, *hidden_dim, 1], key),
)
def acceleration_fn(params, graph):
    """Evaluate the learned Lagrangian graph network on `graph`."""
    return fgn.cal_lgn(params, graph, mpass=1)
def acc_fn(species):
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={"dij": dij},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
state_graph.edges.update(dij=vmap(dist, in_axes=(0, 0))(R[senders], R[receivers])
)
return acceleration_fn(params, state_graph)
return apply
apply_fn = acc_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
params = {"L": Lparams}
#print(acceleration_fn_model(R, V, params))
# print("lag: ", Lmodel(R, V, params))
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: nn")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
params["drag"] = initialize_mlp([1, 5, 5, 1], key)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=None,
non_conservative_forces=drag)
v_acceleration_fn_model = vmap(acceleration_fn_model, in_axes=(0, 0, None))
################################################
################## ML Training #################
################################################
#LOSS = getattr(src.models, error_fn)
@jit
def loss_fn(params, Rs, Vs, Fs):
pred = v_acceleration_fn_model(Rs, Vs, params)
return MSE(pred, Fs)
@jit
def gloss(*args):
return value_and_grad(loss_fn)(*args)
opt_init, opt_update_, get_params = optimizers.adam(lr)
@ jit
def opt_update(i, grads_, opt_state):
grads_ = jax.tree_map(jnp.nan_to_num, grads_)
grads_ = jax.tree_map(
partial(jnp.clip, a_min=-1000.0, a_max=1000.0), grads_)
return opt_update_(i, grads_, opt_state)
@jit
def update(i, opt_state, params, loss__, *data):
""" Compute the gradient for a batch and update the parameters """
value, grads_ = gloss(params, *data)
opt_state = opt_update(i, grads_, opt_state)
return opt_state, get_params(opt_state), value
@ jit
def step(i, ps, *args):
return update(i, *ps, *args)
def batching(*args, size=None):
    """Split each array-like in ``args`` into equal-size mini-batches.

    All arguments must have the same leading length. When ``size`` is given,
    two candidate splits are considered (enough batches of ``size``, or one
    fewer batch) and the one that covers more samples is kept; samples beyond
    ``nbatches * size`` are dropped. When ``size`` is None the whole data is
    one batch.

    Returns a list with one stacked ``jnp`` array per input, each of shape
    ``(nbatches, size, ...)``.
    """
    n_samples = len(args[0])
    if size is not None:  # idiom fix: compare to None with `is not`
        # Candidate batch counts: ceil-ish count for the requested size,
        # and the next smaller count (never below 1).
        nbatches1 = int((n_samples - 0.5) // size) + 1
        nbatches2 = max(1, nbatches1 - 1)
        size1 = int(n_samples / nbatches1)
        size2 = int(n_samples / nbatches2)
        # Keep whichever split covers more of the data.
        if size1 * nbatches1 > size2 * nbatches2:
            size = size1
            nbatches = nbatches1
        else:
            size = size2
            nbatches = nbatches2
    else:
        nbatches = 1
        size = n_samples
    newargs = []
    for arg in args:
        newargs += [jnp.array([arg[i * size:(i + 1) * size]
                               for i in range(nbatches)])]
    return newargs
bRs, bVs, bFs = batching(Rs, Vs, Fs,
size=min(len(Rs), batch_size))
print(f"training ...")
opt_state = opt_init(params)
epoch = 0
optimizer_step = -1
larray = []
ltarray = []
last_loss = 1000
start = time.time()
train_time_arr = []
larray += [loss_fn(params, Rs, Vs, Fs)]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
def print_loss():
print(
f"Epoch: {epoch}/{epochs} Loss (mean of {error_fn}): train={larray[-1]}, test={ltarray[-1]}")
print_loss()
for epoch in range(epochs):
l = 0.0
count = 0
for data in zip(bRs, bVs, bFs):
optimizer_step += 1
opt_state, params, l_ = step(
optimizer_step, (opt_state, params, 0), *data)
l += l_
count += 1
# optimizer_step += 1
# opt_state, params, l_ = step(
# optimizer_step, (opt_state, params, 0), Rs, Vs, Fs)
l = l/count
if epoch % 1 == 0:
larray += [l]
ltarray += [loss_fn(params, Rst, Vst, Fst)]
print_loss()
now = time.time()
train_time_arr.append((now - start))
if epoch % saveat == 0:
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
if last_loss > larray[-1]:
last_loss = larray[-1]
savefile(f"trained_model_low_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
np.savetxt(f"../5-spring-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
plt.clf()
fig, axs = panel(1, 1)
plt.semilogy(larray[1:], label="Training")
plt.semilogy(ltarray[1:], label="Test")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(_filename(f"training_loss_{ifdrag}_{trainm}.png"))
metadata = {
"savedat": epoch,
"mpass": mpass,
"grid": grid,
"ifdrag": ifdrag,
"trainm": trainm,
}
params = get_params(opt_state)
savefile(f"trained_model_{ifdrag}_{trainm}.dil",
params, metadata=metadata)
savefile(f"loss_array_{ifdrag}_{trainm}.dil",
(larray, ltarray), metadata=metadata)
np.savetxt(f"../5-spring-training-time/lgn.txt", train_time_arr, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-train.txt", larray, delimiter = "\n")
np.savetxt(f"../5-spring-training-loss/lgn-test.txt", ltarray, delimiter = "\n")
fire.Fire(Main)
| 16,453 | 30.045283 | 162 | py |
benchmarking_graph | benchmarking_graph-main/scripts/psystems/nbody.py | import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
def get_fully_connected_senders_and_receivers(num_particles: int, self_edges: bool = False,):
    """Sender/receiver index arrays for a fully connected particle graph.

    Order matches tiling the particle indices: senders cycle fastest,
    receivers repeat. Self-loops are dropped unless ``self_edges`` is True.
    """
    indices = np.arange(num_particles)
    senders = np.tile(indices, num_particles)
    receivers = np.repeat(indices, num_particles)
    if not self_edges:
        keep = senders != receivers
        senders, receivers = senders[keep], receivers[keep]
    return senders, receivers
def get_fully_edge_order(N):
    """Index permutation pairing each directed edge with its reverse.

    For a fully connected graph without self-loops (edges enumerated
    sender-major), entry (j, i) maps to the position of edge (i, j) in the
    flat edge list of length N*(N-1).
    """
    indices = []
    for j in range(N):
        for i in range(N):
            if i == j:
                continue
            # Edge (i, j) lives in sender-i's block; skip the missing (i, i) slot.
            offset = j - 1 if j > i else j
            indices.append(i * (N - 1) + offset)
    return np.array(indices)
def get_init_conf(train = True):
R = [
[1.0,
0.0,
0.0,],
[9.0,
0.0,
0.0,],
[11.0,
0.0,
0.0,],
[-1.0,
0.0,
0.0,],]
V = [[0.0,
0.05,
0.0,],
[0.0,
-0.05,
0.0,],
[0.0,
0.65,
0.0,],
[0.0,
-0.65,
0.0],]
if (not train):
return [(-jnp.array(R), jnp.array(V))]
return [(jnp.array(R), jnp.array(V))]
def get_count(s, i):
    """Return the number of occurrences of ``i`` in the sequence ``s``.

    Replaces the manual counter loop with the standard ``sum`` idiom.
    """
    return sum(1 for item in s if item == i)
def check(i, j, senders, receivers):
    """Return True if no edge connects particles ``i`` and ``j``.

    Edges are given as parallel sender/receiver lists; both directions are
    considered. Fixes the original's shadowing of the builtin ``bool`` and
    returns early instead of breaking out of the loop.
    """
    for s, r in zip(senders, receivers):
        if (s == i and r == j) or (s == j and r == i):
            return False
    return True
def get_init_ab(a, b, L=1, dim=2):
    """Random configuration of ``a`` particles, each linked to ~``b`` neighbours.

    Positions are uniform in [0, 2L)^dim; velocities are small and shifted
    to zero mean. Each particle is greedily connected (bidirectionally) to
    its nearest unconnected neighbours until it has at least ``b`` edges.
    Returns (R, V, senders, receivers).
    """
    R = jnp.array(np.random.rand(a, dim))*L*2
    V = jnp.array(np.random.rand(*R.shape)) / 10
    V = V - V.mean(axis=0)  # remove net drift so total momentum is ~0
    senders = []
    receivers = []
    for i in range(a):
        # Edges already incident to i (i appearing as a sender).
        c = get_count(senders, i)
        if c >= b:
            pass
        else:
            neigh = b-c  # how many more neighbours i still needs
            # Squared distance from particle i to every particle (incl. itself).
            s = ((R - R[i])**2).sum(axis=1)
            ind = np.argsort(s)
            new = []
            for j in ind:
                # Link only to nearest particles not already connected to i.
                if check(i, j, senders, receivers) and (neigh > 0) and j != i:
                    new += [j]
                    neigh -= 1
            # Append each new edge in both directions.
            senders += new + [i]*len(new)
            receivers += [i]*len(new) + new
            print(i, senders, receivers)
    return R, V, jnp.array(senders, dtype=int), jnp.array(receivers, dtype=int)
def plot_conf(R, senders, receivers, s=500, **kwargs):
    """Scatter-plot particle positions and draw a line for every edge.

    Marker size shrinks as the particle count grows; extra kwargs are
    forwarded to ``plt.scatter``. Shows the figure; returns nothing.
    """
    plt.scatter(R[:, 0], R[:, 1], s=s/np.sqrt(len(R)), **kwargs)
    # End points of each edge segment.
    Ri = R[senders]
    Rf = R[receivers]
    for a, b in zip(Ri, Rf):
        plt.plot([a[0], b[0]], [a[1], b[1]])
    plt.show()
| 2,826 | 23.37069 | 93 | py |
benchmarking_graph | benchmarking_graph-main/scripts/psystems/npendulum.py | import jax.numpy as jnp
import numpy as np
def pendulum_connections(P):
    """Directed edges linking consecutive bobs (i, i+1) of a P-link pendulum.

    Returns (senders, receivers): each undirected link appears once per
    direction, with all down-chain edges listed before the up-chain ones.
    """
    lower = list(range(P - 1))
    upper = list(range(1, P))
    senders = jnp.array(lower + upper, dtype=int)
    receivers = jnp.array(upper + lower, dtype=int)
    return (senders, receivers)
def edge_order(P):
    """Permutation swapping the two directed halves of the pendulum edge list.

    With n = P - 1 links the edges are stored as [forward..., backward...];
    this returns indices [n..2n-1] followed by [0..n-1].
    """
    n = P - 1
    second_half = list(range(n, 2 * n))
    first_half = list(range(n))
    return jnp.array(second_half + first_half, dtype=int)
def get_θ(angles=(0, 360)):
    # Draw one random integer angle uniformly from [angles[0], angles[1]).
    # NOTE(review): values look like degrees (0..360) — confirm the caller
    # converts before trigonometry.
    return np.random.choice(np.arange(*angles))
def get_L():
    # return np.random.choice(np.arange(1, 2, 0.1))
    # Link length is currently fixed at 1.0; the commented line above
    # previously sampled a random length in [1, 2).
    return np.random.choice([1.0])
def get_init(P, *args, **kwargs):
    """Initial state for a P-link pendulum; thin wrapper over get_init_pendulum."""
    return get_init_pendulum(P, *args, **kwargs)
def get_init_pendulum(P, dim=2, **kwargs):
    """Random initial configuration of a P-link pendulum, starting at rest.

    Each link gets a length from get_L() and an angle from get_θ(**kwargs);
    bob positions are accumulated link by link from the origin. Returns
    (R, V) with V = 0. For dim != 2 a zero z-coordinate is appended.
    """
    Ls = [get_L() for i in range(P)]
    θs = [get_θ(**kwargs) for i in range(P)]
    last = [0.0, 0.0]
    pos = []
    for l, θ in zip(Ls, θs):
        # NOTE(review): θ comes from get_θ as an integer in [0, 360) but is fed
        # to np.sin/np.cos, which expect radians — confirm this is intended.
        last = [last[0] + l*np.sin(θ), last[1] - l*np.cos(θ)]
        if dim == 2:
            pos += [last]
        else:
            pos += [last+[0.0]]
    R = jnp.array(pos)
    return R, 0*R
def PEF(R, g=10.0, mass=jnp.array([1.0])):
    """Gravitational potential energy sum(m_i * g * y_i).

    The y-coordinate is column 1 of R. A length-1 ``mass`` is broadcast to
    one mass per particle using its first entry.
    """
    n = len(R)
    if len(mass) != n:
        mass = jnp.ones(n) * mass[0]
    heights = R[:, 1]
    return (mass * g * heights).sum()
def hconstraints(R, l=jnp.array([1.0])):
    """Holonomic rod-length constraints |r_i - r_{i-1}|^2 - l_i^2.

    The first bob is anchored at the origin. A length-1 ``l`` is broadcast
    to one rod length per bob using its first entry. Zero entries mean the
    constraints are satisfied.
    """
    n = len(R)
    if len(l) != n:
        l = jnp.ones(n) * l[0]
    # Previous bob of each bob; the origin for the first one.
    anchors = jnp.vstack([0 * R[:1], R[:-1]])
    sq_dist = jnp.square(R - anchors).sum(axis=1)
    return sq_dist - l**2
| 1,375 | 28.276596 | 92 | py |
benchmarking_graph | benchmarking_graph-main/scripts/psystems/nsprings.py | import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
def get_fully_connected_senders_and_receivers(num_particles: int, self_edges: bool = False,):
    """Sender/receiver indices of a fully connected particle graph.

    Equivalent to flattening a meshgrid of the particle indices: senders
    cycle fastest while receivers repeat. Self-loops are removed unless
    ``self_edges`` is True.
    """
    idx = np.arange(num_particles)
    senders = np.tile(idx, num_particles)
    receivers = np.repeat(idx, num_particles)
    if not self_edges:
        keep = senders != receivers
        senders, receivers = senders[keep], receivers[keep]
    return senders, receivers
def get_fully_edge_order(N):
    """Permutation pairing each directed edge with its reverse edge.

    Edges of the fully connected self-loop-free graph are enumerated
    sender-major; entry (j, i) gives the flat position of edge (i, j).
    """
    order = []
    for j in range(N):
        for i in range(N):
            if i == j:
                continue
            # Account for the skipped (i, i) slot inside sender-i's block.
            col = j - 1 if j > i else j
            order.append(i * (N - 1) + col)
    return np.array(order)
def get_connections(a, b):
    """Bidirectional nearest-neighbour edges of an a-by-b grid of particles.

    Particles are indexed i*b + j. Edges join consecutive indices within
    each block of b, and stride-b neighbours across blocks; each undirected
    edge appears once per direction.
    """
    src = []
    dst = []
    # Consecutive indices within each of the a blocks of size b.
    for block in range(a):
        src.extend(range(block * b, (block + 1) * b - 1))
        dst.extend(range(block * b + 1, (block + 1) * b))
    # Stride-b neighbours across blocks.
    for offset in range(b):
        line = [j * b + offset for j in range(a)]
        src.extend(line[:-1])
        dst.extend(line[1:])
    return (jnp.array(src + dst, dtype=int).flatten(),
            jnp.array(dst + src, dtype=int).flatten())
def edge_order(N):
    """Permutation swapping the two directed halves of an N-entry edge list."""
    half = N // 2
    return jnp.array(list(range(half, N)) + list(range(half)), dtype=int)
def get_init(N, *args, **kwargs):
    """Initial spring-grid state for ~N particles.

    Chooses grid dimensions a, b from N (the configuration actually holds
    a*b particles, which can exceed N) and delegates to get_init_spring.
    Returns (N, (R, V)).
    """
    a = int(np.sqrt(N) - 0.1) + 1
    b = int(N/a - 0.1) + 1
    return N, get_init_spring(a, b, *args, **kwargs)
def get_count(s, i):
    """Return the number of occurrences of ``i`` in the sequence ``s``.

    Replaces the manual counter loop with the standard ``sum`` idiom.
    """
    return sum(1 for item in s if item == i)
def check(i, j, senders, receivers):
    """Return True if no edge connects particles ``i`` and ``j``.

    Edges are parallel sender/receiver lists; both directions count.
    Fixes the original's shadowing of the builtin ``bool`` and returns
    early instead of breaking out of the loop.
    """
    for s, r in zip(senders, receivers):
        if (s == i and r == j) or (s == j and r == i):
            return False
    return True
def get_init_ab(a, b, L=1, dim=2):
    """Random configuration of ``a`` particles, each linked to ~``b`` neighbours.

    Positions are uniform in [0, 2L)^dim; velocities are small and shifted
    to zero mean. Each particle is greedily connected (bidirectionally) to
    its nearest unconnected neighbours until it has at least ``b`` edges.
    Returns (R, V, senders, receivers).
    """
    R = jnp.array(np.random.rand(a, dim))*L*2
    V = jnp.array(np.random.rand(*R.shape)) / 10
    V = V - V.mean(axis=0)  # remove net drift so total momentum is ~0
    senders = []
    receivers = []
    for i in range(a):
        # Edges already incident to i (i appearing as a sender).
        c = get_count(senders, i)
        if c >= b:
            pass
        else:
            neigh = b-c  # how many more neighbours i still needs
            # Squared distance from particle i to every particle (incl. itself).
            s = ((R - R[i])**2).sum(axis=1)
            ind = np.argsort(s)
            new = []
            for j in ind:
                # Link only to nearest particles not already connected to i.
                if check(i, j, senders, receivers) and (neigh > 0) and j != i:
                    new += [j]
                    neigh -= 1
            # Append each new edge in both directions.
            senders += new + [i]*len(new)
            receivers += [i]*len(new) + new
            print(i, senders, receivers)
    return R, V, jnp.array(senders, dtype=int), jnp.array(receivers, dtype=int)
def get_init_spring(a, b, L=1, dim=2, grid=True):
    """Random initial (R, V) for an a-by-b spring system.

    With ``grid=True`` particles sit on an a-by-b lattice of spacing L, each
    jittered by up to 0.1*L along the lattice diagonal; velocities are small
    and zero-mean. With ``grid=False`` positions are uniform in [0, 2L)^dim
    with smaller Gaussian velocities. For dim != 2 a zero z is appended in
    the grid case.
    """
    if grid:
        def rand():
            return np.random.rand()
        Rs = []
        for i in range(a):
            for j in range(b):
                # Same jitter on both coordinates, in [-0.1*L, 0.1*L).
                l = 0.2*L*(rand()-0.5)
                if dim == 2:
                    Rs += [[i*L-l, j*L-l]]
                else:
                    Rs += [[i*L-l, j*L-l, 0.0]]
        R = jnp.array(Rs, dtype=float)
        V = np.random.rand(*R.shape) / 10
        V = V - V.mean(axis=0)  # zero net momentum
        return R, V
    else:
        R = jnp.array(np.random.rand(a*b, dim))*L*2
        V = jnp.array(np.random.randn(*R.shape)) / 100
        V = V - V.mean(axis=0)  # zero net momentum
        return R, V
def plot_conf(R, senders, receivers, s=500, **kwargs):
    """Scatter-plot particle positions and draw a line for every edge.

    Marker size shrinks as the particle count grows; extra kwargs are
    forwarded to ``plt.scatter``. Shows the figure; returns nothing.
    """
    plt.scatter(R[:, 0], R[:, 1], s=s/np.sqrt(len(R)), **kwargs)
    # End points of each edge segment.
    Ri = R[senders]
    Rf = R[receivers]
    for a, b in zip(Ri, Rf):
        plt.plot([a[0], b[0]], [a[1], b[1]])
    plt.show()
def chain(N, L=2, dim=2):
    """Random ring of N particles: edges (i, i+1) plus the wrap-around (N-1, 0).

    Positions are uniform in [0, L)^dim; velocities are small and shifted to
    zero mean. Each edge is returned once per direction as
    (R, V, senders, receivers).
    """
    R = jnp.array(np.random.rand(N, dim)) * L
    V = jnp.array(np.random.rand(*R.shape)) / 10
    V = V - V.mean(axis=0)
    forward_src = [N - 1] + list(range(N - 1))
    forward_dst = list(range(N))
    senders = jnp.array(forward_src + forward_dst, dtype=int)
    receivers = jnp.array(forward_dst + forward_src, dtype=int)
    return R, V, senders, receivers
| 4,025 | 28.822222 | 111 | py |
CTDE.jl | CTDE.jl-master/doc/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CTDE documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 31 20:57:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CTDE'
copyright = '2016, Drew Dolgert'
author = 'Drew Dolgert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'CTDE v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CTDEdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CTDE.tex', 'CTDE Documentation',
'Drew Dolgert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ctde', 'CTDE Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CTDE', 'CTDE Documentation',
author, 'CTDE', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,417 | 31.253425 | 80 | py |
imbalanceCXR | imbalanceCXR-master/train_model.py | # coding: utf-8
import os,sys, pickle
sys.path.insert(0,".")
import numpy as np
import torch
import torchvision, torchvision.transforms
from imbalanceCXR.configure_datasets import parseDatasets
import random
from imbalanceCXR.train_utils import train
from imbalanceCXR.test_utils import valid_epoch
from imbalanceCXR.utils import getModel, getCriterions
import torchxrayvision as xrv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', type=str, default="", help='')
parser.add_argument('--output_dir', type=str, default="./output/", help='Path where outputs will be saved')
parser.add_argument('--dataset', type=str, default="chex", help='Chest X-ray Datasets to use')
parser.add_argument('--model', type=str, default="densenet121", help='Deep Learning arquitecture to train')
parser.add_argument('--cuda', type=bool, default=False, help='Use GPU')
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train')
parser.add_argument('--batch_size', type=int, default=64, help='Train and valid batch size')
parser.add_argument('--test_batch_size', type=int, default=64, help='Test batch size')
parser.add_argument('--shuffle', type=bool, default=True, help='If True, data CSVs are shuffled')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--threads', type=int, default=4, help='Number of threads')
parser.add_argument('--loss_function', type=str, default='NLL', help='Loss function for training. Default is negative log likelihood (NLL).\
"WNLL" for weighted NLL. "focal" for Focal Loss.')
parser.add_argument('--data_aug', type=bool, default=True, help='Whether to apply image transformations')
parser.add_argument('--data_aug_rot', type=int, default=45, help='If data_aug is True, degrees of rotation')
parser.add_argument('--data_aug_trans', type=float, default=0.15, help='If data_aug is True, proportion of translation')
parser.add_argument('--data_aug_scale', type=float, default=0.15, help='If data_aug is True, proportion of scale')
parser.add_argument('--save_all_models', type=bool, default=False, help='If True, all epochs are saved. If False, save only best epochs according to selection_metric')
parser.add_argument('--save_preds',type=bool,default=False,help='If True, save the targets and preds of the validation set')
parser.add_argument('--selection_metric',type=str,default='roc',help=' "roc" o "pr". Which AUC to use for model selection and checkpoint saving')
parser.add_argument('--n_seeds',type=bool,default=False,help='If True, "seed" integer is considered as the number of seeds to use. '
'Range from 0 to "seed"-1 will be used as seed. The whole configured proccess will be performed "seed" times')
parser.add_argument('--seed', type=int, default=0, help='If n_seeds is True, seed determines the number of times the experiment is repeated. Otherwise it determines the specific spliting seed to use for the experiment')
parser.add_argument('--only_test', type=str, default=False, help='Skip training')
parser.add_argument('--only_train', type=str, default=False, help='Skip testing')
cfg = parser.parse_args()
print(cfg)
assert cfg.loss_function in ['NLL','WNLL','focal']
assert cfg.selection_metric in ['roc','pr']
data_aug = None
if cfg.data_aug:
data_aug = torchvision.transforms.Compose([
xrv.datasets.ToPILImage(),
torchvision.transforms.RandomAffine(cfg.data_aug_rot,
translate=(cfg.data_aug_trans, cfg.data_aug_trans),
scale=(1.0-cfg.data_aug_scale, 1.0+cfg.data_aug_scale)),
torchvision.transforms.ToTensor()
])
print(data_aug)
transforms = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),xrv.datasets.XRayResizer(224)])
datas, datas_names = parseDatasets(cfg.dataset,transforms,data_aug)
print("dataset names", datas_names)
for d in datas:
xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, d)
if cfg.n_seeds:
seed_list = range(cfg.seed)
else:
seed_list = [cfg.seed]
for _seed in seed_list:
cfg.seed = _seed
print('------------STARTING SEED {}-------------'.format(cfg.seed))
#cut out training sets
train_datas = []
test_datas = []
for i, dataset in enumerate(datas):
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
torch.manual_seed(cfg.seed)
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size],
generator=torch.Generator().manual_seed(42))
#disable data aug
test_dataset.data_aug = None
#fix labels
train_dataset.labels = dataset.labels[train_dataset.indices]
test_dataset.labels = dataset.labels[test_dataset.indices]
train_dataset.csv = dataset.csv.iloc[train_dataset.indices]
test_dataset.csv = dataset.csv.iloc[test_dataset.indices]
train_dataset.pathologies = dataset.pathologies
test_dataset.pathologies = dataset.pathologies
train_datas.append(train_dataset)
test_datas.append(test_dataset)
if len(datas) == 0:
raise Exception("no dataset")
elif len(datas) == 1:
train_dataset = train_datas[0]
test_dataset = test_datas[0]
else:
print("merge datasets")
train_dataset = xrv.datasets.Merge_Dataset(train_datas)
test_dataset = xrv.datasets.Merge_Dataset(test_datas)
# Setting the seed
np.random.seed(cfg.seed)
random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if cfg.cuda:
torch.cuda.manual_seed_all(cfg.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print("train_dataset.labels.shape", train_dataset.labels.shape)
print("test_dataset.labels.shape", test_dataset.labels.shape)
print('positives test', np.nansum(test_dataset.labels, axis=0))
print("train_dataset",train_dataset)
print("test_dataset",test_dataset)
# create models
num_classes = train_dataset.labels.shape[1]
model = getModel(cfg.model,num_classes)
device = 'cuda' if cfg.cuda else 'cpu'
dataset_name = "{}-{}-seed{}-{}".format(cfg.dataset, cfg.model, cfg.seed, cfg.loss_function)
os.makedirs(cfg.output_dir + '/valid', exist_ok=True)
print(xrv.datasets.default_pathologies)
if not cfg.only_test:
train(model, train_dataset, dataset_name, cfg)
print("Done training")
if not cfg.only_train:
print("Loading best weights")
weights_file = cfg.output_dir+ f'/{dataset_name}-best_{cfg.selection_metric}.pt'
model.load_state_dict(torch.load(weights_file))
model.to(device)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=cfg.test_batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.threads, pin_memory=cfg.cuda)
print("Starting test")
with open(os.path.join(cfg.output_dir, f'{dataset_name}-priors.pkl'), "rb") as f:
priors_dict = pickle.load(f)
os.makedirs(cfg.output_dir+'/test', exist_ok=True)
criterions_test, priors_test = getCriterions(test_loader)
priors_dict['test'] = priors_test
with open(os.path.join(cfg.output_dir, f'{dataset_name}-priors.pkl'), "wb") as f:
pickle.dump(priors_dict,f)
test_aucroc,test_aucpr, test_performance_metrics, test_thresholds, _, _ = valid_epoch(name='test',
epoch=0,
model=model,
device=device,
data_loader=test_loader,
criterions=criterions_test,
priors=priors_dict,
dataset_name=dataset_name,
cfg=cfg)
print('AUCROC {} - AUCPR {}'.format(test_aucroc,test_aucpr))
with open(cfg.output_dir + '/test/' + f'{dataset_name}-test-performance-metrics.pkl', 'wb') as f:
pickle.dump(test_performance_metrics, f)
with open(cfg.output_dir + '/test/' + f'{dataset_name}-test-thresholds.pkl', "wb") as f:
pickle.dump(test_thresholds, f)
| 8,990 | 45.107692 | 219 | py |
imbalanceCXR | imbalanceCXR-master/imbalanceCXR/utils.py | import os, sys
sys.path.insert(0,"..")
import torchvision.models as torch_mod
import numpy as np
import torch
import torchxrayvision as xrv
from focal_loss.focal_loss import FocalLoss
from tqdm import tqdm as tqdm_base
from matplotlib import pyplot as plt
def plotBrierMetrics(means, stds, sorted_pathologies, to_plot_metrics=None,
                     plot_type='combined',labels=None):
    """Plot per-pathology Brier-score metrics with error bars.

    means/stds map metric name -> per-pathology values aligned with
    ``sorted_pathologies``. ``to_plot_metrics`` selects and orders the
    metrics (default: all four). ``plot_type`` is 'points', 'bars' or
    'combined' (points for Brier/balanced Brier, bars for Brier+/Brier-).
    ``labels``, if given, become the x tick labels. Returns the figure.

    Fix: the original assigned ``marker = 'v'`` (instead of ``marker_``) for
    'balancedBrier', so that metric silently reused the previous marker and
    plotting it standalone raised NameError.
    """
    styles_dict = {
        'brierPos': {'label': 'Brier+', 'color': 'mediumseagreen', 'lw': '1', 'ls': '--'},
        'brierNeg': {'label': 'Brier-', 'color': 'lightcoral', 'lw': '1', 'ls': '-.'},
        'brier': {'label': 'Brier', 'color': 'dodgerblue', 'lw': '1'},
        'balancedBrier': {'label': 'Balanced Brier', 'color': 'deepskyblue', 'lw': '1'},
    }
    if to_plot_metrics is None:
        to_plot_metrics = styles_dict.keys()
    fig, ax = plt.subplots(1, 1, figsize=(15, 3))
    x = 2.5 * np.arange(len(sorted_pathologies))
    width = 0.4  # the width of the bars
    acc_count = 0
    metric_count = 0
    for i, metric in enumerate(to_plot_metrics):
        if plot_type == 'combined':
            combined = True
            alpha_bars = 0.5
            # In combined mode, overall metrics are points, +/- metrics bars.
            if metric == 'brier' or metric == 'balancedBrier':
                plot_type = 'points'
            else:
                plot_type = 'bars'
        else:
            combined = False
            alpha_bars = 0.8
        if plot_type == 'points':
            width = 0.3
            horiz = x + metric_count * width + width
            err_color = styles_dict[metric]['color']
            if metric == 'brier' or metric == 'balancedBrier':
                if metric == 'brier':
                    marker_ = 'o'
                else:
                    marker_ = 'v'  # fixed: was `marker = 'v'`
                horiz = x + acc_count * width
                fs = 14
                lw = 2
                ls = '--'
                style = 'normal'
                points = ax.scatter(horiz, means[metric], label=styles_dict[metric]['label'],
                                    marker=marker_,
                                    color=styles_dict[metric]['color'])
                ax.plot(horiz, means[metric],
                        ls=ls, lw=lw,
                        color=styles_dict[metric]['color'])
            else:
                horiz = x + acc_count * width
                fs = 12
                lw = 1
                ls = '-.'
                style = 'italic'
                if metric == 'brierPos':
                    marker_ = 'x'
                if metric == 'brierNeg':
                    marker_ = 'v'
                points = ax.scatter(horiz, means[metric], label=styles_dict[metric]['label'],
                                    marker=marker_,
                                    color=styles_dict[metric]['color'])
            # Vertical +/- one-standard-deviation whiskers.
            for x_, y_, std in zip(horiz, means[metric], stds[metric]):
                ax.vlines(x_, ymin=y_ - std, ymax=y_ + std,
                          color=styles_dict[metric]['color'], alpha=0.3,
                          lw=1)
        if plot_type == 'bars':
            width = 0.3
            horiz = x - width / 2 + metric_count * width
            err_color = 'lightgray'  # styles_dict[metric]['color']
            ax.bar(horiz, means[metric], width, alpha=alpha_bars, edgecolor='gray',
                   label=styles_dict[metric]['label'],
                   color=styles_dict[metric]['color'],
                   yerr=stds[metric], ecolor=err_color)
        metric_count += 1
        if combined:
            # Restore combined mode so the next metric is re-dispatched.
            plot_type = 'combined'
    ax.set_ylim((0, 1))
    ax.set_ylabel('Brier metrics', fontsize=14)
    # ax.set_xticks(x + metric_count*width/4)
    ax.set_xticks(x)
    if labels is not None:
        ax.set_xticklabels(labels, fontsize=12)
    ax.legend(fontsize='large', markerscale=2)
    fig.tight_layout()
    return fig
def plotDiscriminationMetrics(means, stds, sorted_pathologies, to_plot_metrics=None,
                              plot_type='combined',labels=None):
    """Plot per-pathology discrimination metrics with error whiskers.

    means/stds map metric name -> per-pathology values aligned with
    ``sorted_pathologies``. ``to_plot_metrics`` selects and orders the
    metrics (default: all five). ``plot_type`` is 'points', 'bars' or
    'combined' (in combined mode the AUC metrics are drawn as points and
    the threshold metrics as bars). ``labels``, if given, become the x tick
    labels. Returns the matplotlib figure.
    """
    styles_dict = {
        'AUC-ROC': {'label': 'AUC-ROC', 'color': 'darkgreen', 'lw': '2'},
        'AUC-PR': {'label': 'AUC-PR', 'color': 'firebrick', 'lw': '2'},
        'recall': {'label': 'Recall', 'color': 'orange', 'lw': '2'},
        'precision': {'label': 'Precision', 'color': 'purple', 'lw': '2'},
        'specificity': {'label': 'Specificity', 'color': 'deepskyblue', 'lw': '2'},
    }
    if to_plot_metrics is None:
        to_plot_metrics = styles_dict.keys()
    fig, ax = plt.subplots(1, 1, figsize=(15, 3))
    x = 2.5 * np.arange(len(sorted_pathologies))
    width = 0.4  # the width of the bars
    metric_count = 0
    for i, metric in enumerate(to_plot_metrics):
        # Marker/typography per metric family (fs/ls/style feed the
        # commented-out ax.text annotations below).
        if 'AUC' in metric:
            marker_ = 'o'
            fs = 14
            lw = 2
            ls = '--'
            style = 'normal'
        else:
            fs = 12
            lw = 1
            ls = '-.'
            style = 'italic'
            if metric == 'recall':
                marker_ = 'x'
            if metric == 'precision':
                marker_ = 'v'
            if metric == 'specificity':
                marker_ = '*'
        if plot_type == 'combined':
            combined = True
            alpha_bars = 0.5
            # In combined mode, AUCs are points and the rest are bars.
            if 'AUC' in metric:
                plot_type = 'points'
            else:
                plot_type = 'bars'
        else:
            combined = False
            alpha_bars = 0.8
        if plot_type == 'points':
            width = 0.3
            horiz = x + metric_count * width + width
            err_color = styles_dict[metric]['color']
            points = ax.scatter(horiz, means[metric], label=styles_dict[metric]['label'],
                                marker=marker_,
                                color=styles_dict[metric]['color'])
            ax.plot(horiz, means[metric], alpha=0.3,
                    ls=ls, lw=lw,
                    color=styles_dict[metric]['color'])
            # Vertical +/- one-standard-deviation whiskers.
            for x_, y_, std in zip(horiz, means[metric], stds[metric]):
                # ax.text(x_,y_+std+0.02,
                #         f'{y_:.2f}',horizontalalignment='center',style=style,
                #         color='blue',#styles_dict[metric]['color'],
                #         fontsize=fs)
                ax.vlines(x_, ymin=y_ - std, ymax=y_ + std,
                          color=styles_dict[metric]['color'], alpha=0.2,
                          lw=0.5)
        if plot_type == 'bars':
            width = 0.3
            horiz = x + metric_count * width
            err_color = 'lightgray'  # styles_dict[metric]['color']
            ax.bar(horiz, means[metric], width, alpha=alpha_bars, edgecolor='gray',
                   label=styles_dict[metric]['label'],
                   color=styles_dict[metric]['color'],
                   yerr=stds[metric], ecolor=err_color)
        metric_count += 1
        if combined:
            # Restore combined mode so the next metric is re-dispatched.
            plot_type = 'combined'
    ax.set_ylabel('Discrimination metrics', fontsize=14)
    ax.set_xticks(x + (metric_count / 2) * width)
    # ax.set_xticks(x + metric_count*width+width)
    if labels is not None:
        ax.set_xticklabels(labels, fontsize=12)
    ax.set_ylim((0, 1.1))
    ax.legend()
    fig.tight_layout()
    return fig
def getModel(modelName, num_classes):
    """Build a classification network for single-channel (grayscale) CXR input.

    Args:
        modelName: substring-matched architecture identifier
            ("densenet*", "resnet101" or "shufflenet_v2_x2_0").
        num_classes: number of output pathologies.

    Returns:
        A torch model whose first convolution accepts 1-channel images.

    Raises:
        Exception: if modelName matches no supported architecture.
    """
    if "densenet" in modelName:
        densenet_params = xrv.models.get_densenet_params(modelName)
        return xrv.models.DenseNet(num_classes=num_classes, in_channels=1,
                                   **densenet_params)
    if "resnet101" in modelName:
        net = torch_mod.resnet101(num_classes=num_classes, pretrained=False)
        # Replace the RGB stem so the network accepts single-channel images.
        net.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        return net
    if "shufflenet_v2_x2_0" in modelName:
        net = torch_mod.shufflenet_v2_x2_0(num_classes=num_classes, pretrained=False)
        # Replace the RGB stem so the network accepts single-channel images.
        net.conv1[0] = torch.nn.Conv2d(1, 24, kernel_size=3, stride=2, padding=1, bias=False)
        return net
    raise Exception("no model")
def tqdm(*args, **kwargs):
    """Create a tqdm progress bar, first tearing down any lingering bar
    instances so nested/stale bars do not corrupt the terminal output."""
    if hasattr(tqdm_base, '_instances'):
        # Copy to a list: _decr_instances mutates the registry while we iterate.
        for stale_bar in list(tqdm_base._instances):
            tqdm_base._decr_instances(stale_bar)
    return tqdm_base(*args, **kwargs)
def getCriterions(loader):
    """Build per-pathology loss functions plus label-prevalence statistics.

    Args:
        loader: a DataLoader whose dataset exposes a `labels` array of shape
            (n_samples, n_pathologies) with NaN marking unknown labels.

    Returns:
        (criterions_dict, priors_dict) where criterions_dict maps
        'NLL'/'WNLL'/'focal' to a list with one loss per pathology, and
        priors_dict holds the per-pathology label counts and priors.
    """
    num_pathologies = loader.dataset[0]["lab"].shape[0]
    labels = loader.dataset.labels
    n_valid = np.count_nonzero(~np.isnan(labels), axis=0)
    n_pos = np.count_nonzero(labels == 1, axis=0)
    n_neg = np.count_nonzero(labels == 0, axis=0)
    # Positive-class weight for the class-weighted BCE ('WNLL') loss.
    pos_weights = torch.Tensor(n_neg / (n_pos + 1e-7))
    criterions_dict = {
        'NLL': [torch.nn.BCEWithLogitsLoss()] * num_pathologies,
        'WNLL': [torch.nn.BCEWithLogitsLoss(pos_weight=pos_weights[p])
                 for p in range(num_pathologies)],
        'focal': [FocalLoss(alpha=2, gamma=5)] * num_pathologies,
    }
    priors_dict = {'n_total': n_valid,
                   'n_pos': n_pos,
                   'n_neg': n_neg,
                   'priors_pos': n_pos / n_valid,
                   'priors_neg': n_neg / n_valid}
    return criterions_dict, priors_dict
| 9,802 | 36.849421 | 96 | py |
imbalanceCXR | imbalanceCXR-master/imbalanceCXR/train_utils.py | import os, sys
sys.path.insert(0,"..")
import pickle
import pprint
import random
from glob import glob
from os.path import exists, join
import numpy as np
import torch
from torch.optim.lr_scheduler import StepLR
from imbalanceCXR.utils import getCriterions, tqdm
from imbalanceCXR.test_utils import valid_epoch
def train(model, dataset, dataset_name, cfg):
    """Train `model` on an 80/20 random split of `dataset`.

    Handles seeding, checkpoint resume, per-epoch validation, and saving of
    best-ROC / best-PR weights plus priors/metrics pickles under
    cfg.output_dir.

    Returns:
        (metrics, best_metric_roc, weights_for_best_validauc)
    """
    print("Our config:")
    pprint.pprint(cfg)
    device = 'cuda' if cfg.cuda else 'cpu'
    if not torch.cuda.is_available() and cfg.cuda:
        device = 'cpu'
        print("WARNING: cuda was requested but is not available, using cpu instead.")
    print(f'Using device: {device}')
    print(cfg.output_dir)
    if not exists(cfg.output_dir):
        os.makedirs(cfg.output_dir, exist_ok=True)
    # Setting the seed
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    if cfg.cuda:
        torch.cuda.manual_seed_all(cfg.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    # Dataset
    train_size = int(0.8 * len(dataset))
    valid_size = len(dataset) - train_size
    torch.manual_seed(cfg.seed)
    # NOTE(review): the split generator is hard-coded to seed 42, so cfg.seed
    # does not influence which samples land in train vs valid — confirm this
    # is intentional.
    train_dataset, valid_dataset = torch.utils.data.random_split(dataset, [train_size, valid_size],
                                                                 generator=torch.Generator().manual_seed(42))
    # disable data aug
    # NOTE(review): valid_dataset is a torch Subset; setting .data_aug on the
    # Subset does not reach the wrapped dataset (which is shared with the
    # train split), so augmentation may still be active at validation time —
    # verify.
    valid_dataset.data_aug = None
    # fix labels
    train_dataset.labels = dataset.labels[train_dataset.indices]
    valid_dataset.labels = dataset.labels[valid_dataset.indices]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=cfg.batch_size,
                                               shuffle=cfg.shuffle,
                                               num_workers=cfg.threads,
                                               pin_memory=cfg.cuda)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=cfg.batch_size,
                                               shuffle=cfg.shuffle,
                                               num_workers=cfg.threads,
                                               pin_memory=cfg.cuda)
    # Optimizer
    optim = torch.optim.Adam(model.parameters(), lr=cfg.lr, weight_decay=1e-5, amsgrad=True)
    scheduler = StepLR(optim, step_size=40, gamma=0.1)
    criterions_train, priors_train = getCriterions(train_loader)
    criterions_valid, priors_valid = getCriterions(valid_loader)
    priors_dict = {'train': priors_train,
                   'valid': priors_valid}
    # Persist the label priors so test-time calibration can reuse them.
    with open(join(cfg.output_dir, f'{dataset_name}-priors.pkl'), "wb") as f:
        pickle.dump(priors_dict, f)
    start_epoch = 0
    best_metric_roc = 0.
    best_metric_pr = 0.
    weights_for_best_validauc = None
    metrics = []
    weights_files = glob(join(cfg.output_dir, f'{dataset_name}-e*.pt'))  # Find all weights files
    if len(weights_files):
        # Find most recent epoch
        epochs = np.array(
            [int(w[len(join(cfg.output_dir, f'{dataset_name}-e')):-len('.pt')].split('-')[0]) for w in weights_files])
        start_epoch = epochs.max()
        weights_file = [weights_files[i] for i in np.argwhere(epochs == np.amax(epochs)).flatten()][0]
        model.load_state_dict(torch.load(weights_file))
        print("Resuming training at epoch {0}.".format(start_epoch))
        print("Weights loaded: {0}".format(weights_file))
        # Restore the metric history and the running "best" values so the
        # best-checkpoint logic continues from where it left off.
        with open(join(cfg.output_dir, f'{dataset_name}-metrics.pkl'), 'rb') as f:
            metrics = pickle.load(f)
        best_metric_roc = metrics[-1]['best_metric_roc']
        best_metric_pr = metrics[-1]['best_metric_pr']
    model.to(device)
    for epoch in range(start_epoch, cfg.num_epochs):
        avg_loss = train_epoch(cfg=cfg,
                               epoch=epoch,
                               model=model,
                               device=device,
                               optimizer=optim,
                               train_loader=train_loader,
                               scheduler=scheduler,
                               criterions=criterions_train)
        aucroc_valid, aucpr_valid, current_performance_metrics, thresholds, _, _ = valid_epoch(
            name='valid',
            epoch=epoch,
            model=model,
            device=device,
            data_loader=valid_loader,
            criterions=criterions_valid,
            priors=priors_dict,
            dataset_name=dataset_name,
            cfg=cfg,
        )
        # Append this epoch's detailed metrics to the on-disk history.
        if os.path.exists(join(cfg.output_dir, f'{dataset_name}-performance-metrics.pkl')):
            with open(join(cfg.output_dir, f'{dataset_name}-performance-metrics.pkl'), 'rb') as f:
                performance_metrics = pickle.load(f)
            performance_metrics.append(current_performance_metrics)
        else:  # First epoch
            performance_metrics = [current_performance_metrics]
        with open(join(cfg.output_dir, f'{dataset_name}-performance-metrics.pkl'), 'wb') as f:
            pickle.dump(performance_metrics, f)
        # Track the best mean AUC-ROC checkpoint (plus its Youden thresholds).
        if np.mean(aucroc_valid) > best_metric_roc:
            best_metric_roc = np.mean(aucroc_valid)
            print('new best roc ', best_metric_roc)
            weights_for_best_validauc = model.state_dict()
            torch.save(model.state_dict(), join(cfg.output_dir, f'{dataset_name}-best_roc.pt'))
            with open(join(cfg.output_dir, f'{dataset_name}-best-thresholds_roc.pkl'), "wb") as f:
                pickle.dump(thresholds, f)
        # Track the best mean AUC-PR checkpoint separately.
        if np.mean(aucpr_valid) > best_metric_pr:
            best_metric_pr = np.mean(aucpr_valid)
            print('new best pr ', best_metric_pr)
            weights_for_best_validauc = model.state_dict()
            torch.save(model.state_dict(), join(cfg.output_dir, f'{dataset_name}-best_pr.pt'))
        stat = {
            "epoch": epoch + 1,
            "trainloss": avg_loss,
            "validaucroc": aucroc_valid,
            "validaucpr": aucpr_valid,
            'best_metric_roc': best_metric_roc,
            'best_metric_pr': best_metric_pr
        }
        metrics.append(stat)
        with open(join(cfg.output_dir, f'{dataset_name}-metrics.pkl'), 'wb') as f:
            pickle.dump(metrics, f)
        if cfg.save_all_models:
            torch.save(model.state_dict(), join(cfg.output_dir, f'{dataset_name}-e{epoch + 1}.pt'))
    return metrics, best_metric_roc, weights_for_best_validauc
def train_epoch(cfg, epoch, model, device, optimizer,
                train_loader, criterions, scheduler=None, limit=None):
    """Run one optimization pass over `train_loader`.

    The loss is summed over pathologies, skipping NaN (unknown) labels for
    each pathology column. Returns the mean per-batch loss.
    """
    model.train()
    batch_losses = []
    progress = tqdm(train_loader)
    criterion_per_pathology = criterions[cfg.loss_function]
    for step, batch in enumerate(progress):
        if limit and (step > limit):
            print("breaking out")
            break
        optimizer.zero_grad()
        imgs = batch["img"].float().to(device)
        labels = batch["lab"].to(device)
        preds = model(imgs)
        total_loss = torch.zeros(1).to(device).float()
        for col in range(labels.shape[1]):
            col_pred = preds[:, col]
            col_label = labels[:, col]
            # Drop samples whose label for this pathology is unknown (NaN).
            known = ~torch.isnan(col_label)
            col_pred = col_pred[known]
            col_label = col_label[known]
            if len(col_label) > 0:
                total_loss += criterion_per_pathology[col](col_pred.float(), col_label.float())
        total_loss = total_loss.sum()
        total_loss.backward()
        batch_losses.append(total_loss.detach().cpu().numpy())
        progress.set_description(f'Epoch {epoch + 1} - Train - Loss = {np.mean(batch_losses):4.4f}')
        optimizer.step()
    if scheduler:
        scheduler.step()
    return np.mean(batch_losses)
| 7,923 | 36.201878 | 118 | py |
imbalanceCXR | imbalanceCXR-master/imbalanceCXR/configure_datasets.py | import torchxrayvision as xrv
# Filesystem locations of each supported dataset. The first group points at a
# hospital SMB share; the rest are cluster paths. Adjust these for your
# environment before running.
NIH_IMAGES = "/run/user/1000/gvfs/smb-share:server=lxestudios.hospitalitaliano.net,share=pacs/T-Rx/NIH/"
CHEXPERT_IMAGES = "/run/user/1000/gvfs/smb-share:server=lxestudios.hospitalitaliano.net,share=pacs/T-Rx/CheXpert-v1.0-small"
CHEXPERT_CSV = "/run/user/1000/gvfs/smb-share:server=lxestudios.hospitalitaliano.net,share=pacs/T-Rx/CheXpert-v1.0-small/train_renamed_ubuntu.csv"
NIH_GOOGLE_IMAGES = "/home/mila/c/cohenjos/data/images-224-NIH"
PADCHEST_IMAGES = "/home/mila/c/cohenjos/data/images-224-PC"
MIMIC_IMAGES = "/lustre04/scratch/cohenjos/MIMIC/images-224/files"
MIMIC_CSV = "/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-chexpert.csv.gz"
MIMIC_METADATA = "/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz"
OPENI_IMAGES = "/lustre03/project/6008064/jpcohen/OpenI/images/"
RSNA_IMAGES = "/lustre03/project/6008064/jpcohen/kaggle-pneumonia/stage_2_train_images_jpg"
def parseDatasets(datasets, transforms, data_aug):
    """Instantiate every torchxrayvision dataset whose key appears in `datasets`.

    Args:
        datasets: container of dataset keys ("nih", "pc", "chex", "google",
            "mimic_ch", "openi", "rsna"); membership is tested with `in`.
        transforms: transform pipeline passed to each dataset.
        data_aug: data-augmentation pipeline passed to each dataset.

    Returns:
        (datasets_list, names_list), in the fixed order above.
    """
    selected = []
    selected_names = []

    def _add(name, ds):
        # Keep the dataset and its key in lockstep.
        selected.append(ds)
        selected_names.append(name)

    if "nih" in datasets:
        _add("nih", xrv.datasets.NIH_Dataset(
            imgpath=NIH_IMAGES,
            transform=transforms, data_aug=data_aug,
            views=['PA',
                   'AP'
                   ],
            unique_patients=True,
        ))
    if "pc" in datasets:
        _add("pc", xrv.datasets.PC_Dataset(
            imgpath=PADCHEST_IMAGES,
            transform=transforms, data_aug=data_aug))
    if "chex" in datasets:
        _add("chex", xrv.datasets.CheX_Dataset(
            imgpath=CHEXPERT_IMAGES,
            csvpath=CHEXPERT_CSV,
            transform=transforms, data_aug=data_aug,
            views=['PA', 'AP'],
            unique_patients=True,
        ))
    if "google" in datasets:
        _add("google", xrv.datasets.NIH_Google_Dataset(
            imgpath=NIH_GOOGLE_IMAGES,
            transform=transforms, data_aug=data_aug))
    if "mimic_ch" in datasets:
        _add("mimic_ch", xrv.datasets.MIMIC_Dataset(
            imgpath=MIMIC_IMAGES,
            csvpath=MIMIC_CSV,
            metacsvpath=MIMIC_METADATA,
            transform=transforms, data_aug=data_aug))
    if "openi" in datasets:
        _add("openi", xrv.datasets.Openi_Dataset(
            imgpath=OPENI_IMAGES,
            transform=transforms, data_aug=data_aug))
    if "rsna" in datasets:
        _add("rsna", xrv.datasets.RSNA_Pneumonia_Dataset(
            imgpath=RSNA_IMAGES,
            transform=transforms, data_aug=data_aug))
    return selected, selected_names
| 2,968 | 40.236111 | 146 | py |
imbalanceCXR | imbalanceCXR-master/imbalanceCXR/test_utils.py | import os, sys
sys.path.insert(0,"..")
import pickle
from os.path import join
import numpy as np
import torch
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score, brier_score_loss, log_loss, roc_curve, precision_recall_curve
from sklearn.calibration import calibration_curve
from sklearn.metrics import auc as sklearnAUC
from imbalanceCXR.utils import tqdm
# Calibration support is optional: if the dca_plda-style helpers cannot be
# imported, set a flag so valid_epoch degrades gracefully and skips
# calibration instead of crashing.
try:
    from imbalanceCXR.calibration import logregCal, PAV
    CALIBRATION_AVAILABLE = True
except Exception as e:
    print(e, "Couldnt import logregCal, wont apply calibration")
    CALIBRATION_AVAILABLE = False
def getCalibrationErrors(labels, probs,
                         bin_upper_bounds=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1),
                         save_details_pathology=None):
    """Compute expected (ECE) and maximum (MCE) calibration error.

    Args:
        labels: binary ground-truth labels, shape (n,).
        probs: predicted positive-class probabilities, shape (n,).
        bin_upper_bounds: upper edge of each probability bin. Assumed to be
            the uniform deciles so that the bin assignment below matches
            sklearn's calibration_curve (which always uses uniform bins).
        save_details_pathology: optional path; when given, per-bin accuracies,
            confidences and sample counts are pickled there.

    Returns:
        (ece, mce) tuple of floats.
    """
    probs = np.asarray(probs).flatten()
    num_bins = len(bin_upper_bounds)
    # Assign samples to bins exactly the way calibration_curve does
    # (searchsorted over the interior edges). The previous np.digitize-based
    # binning disagreed with calibration_curve — e.g. probs == 1.0 landed in
    # an 11th bin — so the per-bin weights could misalign with the curve
    # output and raise a shape error (silently swallowed by the caller).
    edges = np.asarray(bin_upper_bounds, dtype=float)
    bin_indices = np.searchsorted(edges[:-1], probs)
    counts = np.bincount(bin_indices, minlength=num_bins)
    nonzero = counts != 0
    # calibration_curve returns one entry per *non-empty* bin only.
    accuracies_sklearn, confidences_sklearn = calibration_curve(labels, probs, n_bins=num_bins)
    if save_details_pathology:
        with open(save_details_pathology, 'wb') as f:
            pickle.dump([accuracies_sklearn, confidences_sklearn, counts], f)
    calibration_errors = accuracies_sklearn - confidences_sklearn
    # Weight each non-empty bin by its share of the samples.
    weighting = counts[nonzero] / float(len(probs))
    weighted_calibration_errors = np.abs(calibration_errors) * weighting
    ece = np.sum(weighted_calibration_errors)
    # MCE is the maximum *absolute* gap; taking np.max over signed errors
    # could prefer a small positive gap over a large negative one.
    mce = np.max(np.abs(calibration_errors))
    return ece, mce
def getCalibrationMetrics(labels, probs, save_details_pathology=None):
    """Compute calibration metrics overall and per class.

    Args:
        labels: binary ground-truth labels, shape (n,).
        probs: predicted positive-class probabilities, shape (n,).
        save_details_pathology: forwarded to getCalibrationErrors.

    Returns:
        (Npos, ece, mce, ecePos, mcePos, eceNeg, mceNeg,
         brier, brierPos, brierNeg, nll)
    """
    positive_labels = labels[labels == 1]
    Npos = len(positive_labels)
    positive_preds = probs[labels == 1]
    negative_labels = labels[labels == 0]
    negative_preds = probs[labels == 0]
    # Calibration errors
    try:
        ece, mce = getCalibrationErrors(labels, probs, save_details_pathology=save_details_pathology)
        ecePos, mcePos = getCalibrationErrors(positive_labels, positive_preds)
        eceNeg, mceNeg = getCalibrationErrors(negative_labels, negative_preds)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; restrict to ordinary errors. ECE/MCE become NaN when a
        # class is empty or the binning fails.
        ece, mce, ecePos, mcePos, eceNeg, mceNeg = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
    # Brier scores
    # NOTE(review): brier_score_loss on an empty class (no positives or no
    # negatives) will raise here and is NOT caught — confirm callers only pass
    # pathologies with both classes present.
    assert len(positive_labels) + len(negative_labels) == len(labels)
    brierPos = brier_score_loss(positive_labels, positive_preds)
    brierNeg = brier_score_loss(negative_labels, negative_preds)
    brier = brier_score_loss(labels, probs)
    # Negative log likelihood
    nll = log_loss(labels, probs)
    return Npos, ece, mce, ecePos, mcePos, eceNeg, mceNeg, brier, brierPos, brierNeg, nll
def getMetrics(y_true, y_pred, metrics_results, YI_thresholds_roc, save_details_pathology=None, costs_thr=None):
    """Append discrimination and calibration metrics for one pathology.

    Mutates and returns `metrics_results` (dict of metric-name -> list) and
    `YI_thresholds_roc` (list of Youden-index thresholds).
    """
    fpr, tpr, roc_thresholds = roc_curve(y_true, y_pred)
    # Youden index: the ROC operating point maximizing TPR - FPR.
    YI_thresholds_roc.append(roc_thresholds[np.argmax(tpr - fpr)])
    precision, recall, _ = precision_recall_curve(y_true, y_pred)
    auc_precision_recall = sklearnAUC(recall, precision)
    (Npos, ece, mce, ecePos, mcePos, eceNeg, mceNeg,
     brier, brierPos, brierNeg, nllSklearn) = getCalibrationMetrics(
        y_true, y_pred, save_details_pathology=save_details_pathology)
    new_values = {
        'AUC-ROC': roc_auc_score(y_true, y_pred),
        'f1score-0.5': f1_score(y_true, y_pred > 0.5),
        'accuracy-0.5': accuracy_score(y_true, y_pred > 0.5),
        'AUC-PR': auc_precision_recall,
        'Npos': Npos,
        'ECE': ece,
        'MCE': mce,
        'ECE+': ecePos,
        'MCE+': mcePos,
        'ECE-': eceNeg,
        'MCE-': mceNeg,
        'brier': brier,
        'brier+': brierPos,
        'brier-': brierNeg,
        'balancedBrier': brierPos + brierNeg,
        'nllSklearn': nllSklearn,
    }
    for key, value in new_values.items():
        metrics_results[key].append(value)
    try:
        if costs_thr is not None:
            # Metrics at the cost-derived operating threshold.
            metrics_results['f1score-costsTh'].append(f1_score(y_true, y_pred > costs_thr))
            metrics_results['accuracy-costsTh'].append(accuracy_score(y_true, y_pred > costs_thr))
    except Exception as e:
        print(e)
        print(costs_thr)
    return metrics_results, YI_thresholds_roc
def valid_epoch(name, epoch, model, device, data_loader, criterions, priors=None,
                limit=None, cfg=None, dataset_name='', save_preds=False):
    """Run inference over `data_loader`, computing losses, (optionally)
    calibrating the per-pathology posteriors, and aggregating metrics.

    `name` selects the mode: 'test' loads the calibrator fit at validation
    time; anything else fits and saves a fresh calibrator.

    Returns:
        (mean AUC-ROC, mean AUC-PR, metrics_results, thresholds,
         pathology_outputs, pathology_targets)
    """
    if cfg is not None:
        save_preds = cfg.save_preds
    model.eval()
    # Per-pathology accumulators, keyed by pathology index.
    n_count = {}
    pathology_outputs = {}
    pathology_targets = {}
    pathology_outputs_sigmoid = {}
    pathology_outputs_sigmoid_calibrated = {}
    avg_loss_results = dict.fromkeys(criterions.keys())
    for loss_function in avg_loss_results.keys():
        avg_loss_results[loss_function] = {}
        for pathology in range(data_loader.dataset[0]["lab"].shape[0]):
            avg_loss_results[loss_function][pathology] = torch.zeros(1).to(device).double()
    for pathology in range(data_loader.dataset[0]["lab"].shape[0]):
        pathology_outputs[pathology] = []
        pathology_targets[pathology] = []
        n_count[pathology] = 0
        pathology_outputs_sigmoid[pathology] = []
        pathology_outputs_sigmoid_calibrated[pathology] = []
    cost_ratio = 1 / 1  # Cost of false positives over cost of false negatives. TODO: Make it configurable for each pathology
    with torch.no_grad():
        t = tqdm(data_loader)
        for batch_idx, samples in enumerate(t):
            if limit and (batch_idx > limit):
                print("breaking out")
                break
            images = samples["img"].to(device)
            targets = samples["lab"].to(device)
            outputs = model(images)
            for pathology in range(len(pathology_targets)):
                pathology_output = outputs[:, pathology]
                pathology_target = targets[:, pathology]
                mask = ~torch.isnan(pathology_target)  # We use the samples where this pathology is labeled (non-NaN)
                pathology_output = pathology_output[mask]
                pathology_target = pathology_target[mask]
                pathology_output_sigmoid = torch.sigmoid(pathology_output).detach().cpu().numpy()
                pathology_outputs_sigmoid[pathology].append(pathology_output_sigmoid)
                pathology_outputs[pathology].append(pathology_output.detach().cpu().numpy())
                pathology_targets[pathology].append(pathology_target.detach().cpu().numpy())
                if len(pathology_target) > 0:
                    # Accumulate every configured loss for this pathology.
                    for loss_function, criterion in criterions.items():
                        criterion_pathology = criterion[pathology]
                        batch_loss_pathology = criterion_pathology(pathology_output.double(), pathology_target.double())
                        avg_loss_results[loss_function][pathology] += batch_loss_pathology
                    n_count[pathology] += len(pathology_target)
            # Free batch tensors promptly (helps on GPU).
            del images
            del outputs
            del samples
            del targets
    txt = ''
    print('ncounts: ', n_count)
    # Normalize accumulated losses to per-sample averages.
    for loss_function, losses in avg_loss_results.items():
        txt += f'\n{loss_function}:'
        for pathology in range(len(pathology_targets)):
            avg_loss_results[loss_function][pathology] /= n_count[pathology]
            txt += f'{pathology}: {avg_loss_results[loss_function][pathology].item()}'
    t.set_description(f'Epoch {epoch + 1} - {txt}')
    # Once we infered all batches and summed their losses, we unify predictions to average loss per pathology
    if name == 'test':
        # Reuse the calibrator fit on the validation split.
        with open(join(cfg.output_dir, 'valid', f'{dataset_name}-calibrator_parameters.pkl'), 'rb') as f:
            calibration_parameters = pickle.load(f)
    else:
        calibration_parameters = {}
    for pathology in range(len(pathology_targets)):
        pathology_outputs[pathology] = np.concatenate(pathology_outputs[pathology])
        pathology_outputs_sigmoid[pathology] = np.concatenate(pathology_outputs_sigmoid[pathology])
        pathology_targets[pathology] = np.concatenate(pathology_targets[pathology])
        targets = pathology_targets[pathology]
        if CALIBRATION_AVAILABLE:
            if len(targets) > 0:
                # Calibration with dca_plda package: convert posteriors to
                # log-likelihood ratios by removing the training prior.
                epsilon = 1e-100
                positive_posteriors = pathology_outputs_sigmoid[pathology]
                negative_posteriors = 1 - pathology_outputs_sigmoid[pathology]
                train_positive_prior = priors['train']['priors_pos'][pathology]
                train_negative_prior = priors['train']['priors_neg'][pathology]
                LLR = np.log((positive_posteriors + epsilon) / (negative_posteriors + epsilon)) - np.log(
                    (train_positive_prior + epsilon) / (train_negative_prior + epsilon))
                tar = LLR[targets == 1]
                non = LLR[targets == 0]
                print('Len tar {} Len non {}'.format(len(tar), len(non)))
                ptar = priors['valid']['priors_pos'][pathology]
                theta = np.log(cost_ratio * (1 - ptar) / ptar)
                ptar_hat = 1 / (1 + np.exp(theta))
                if name == 'test':
                    # Apply linear calibrator that was fit with validation set
                    a = calibration_parameters[pathology]['a']
                    b = calibration_parameters[pathology]['b']
                    k = calibration_parameters[pathology]['k']
                    # Fit PAV algorithm as reference of perfectly calibrated version of the model
                    sc = np.concatenate((tar, non))
                    la = np.zeros_like(sc, dtype=int)
                    la[:len(tar)] = 1.0
                    calibration_parameters[pathology]["pav"] = PAV(sc, la)
                else:
                    # Fit a linear calibrator to the validation set
                    a, b = logregCal(tar, non, ptar_hat, return_params=True)
                    k = -np.log((1 - ptar) / ptar)
                    print('a {:.2f} b {:.2f} k {:.2f}'.format(a, b, k))
                    calibration_parameters[pathology] = {'a': a, 'b': b, 'k': k}
                pathology_outputs_sigmoid_calibrated[pathology] = 1 / (1 + np.exp(-(a * LLR + b) + k))
            else:
                print('Not calibrating pathology ', pathology)
    if name != 'test':
        # NOTE(review): this open() runs before the os.makedirs below, which is
        # only executed when save_preds is set — confirm cfg.output_dir/<name>
        # exists by this point.
        with open(join(cfg.output_dir, name, f'{dataset_name}-calibrator_parameters.pkl'), 'wb') as f:
            pickle.dump(calibration_parameters, f)
    if save_preds:
        os.makedirs(join(cfg.output_dir, name), exist_ok=True)
        results_dict = {'targets': pathology_targets,
                        'probas': pathology_outputs_sigmoid,
                        'logits': pathology_outputs,
                        'calibrated_probas': pathology_outputs_sigmoid_calibrated}
        with open(join(cfg.output_dir, name, f'{dataset_name}-predictions.pkl'), 'wb') as f:
            pickle.dump(results_dict, f)
    metrics = ['Npos',
               'ECE', 'MCE',
               'ECE+', 'MCE+',
               'ECE-', 'MCE-',
               'brier', 'brier+', 'brier-', 'balancedBrier',
               'AUC-ROC', 'AUC-PR',
               'f1score-0.5', 'f1score-costsTh',
               'accuracy-0.5', 'accuracy-costsTh',
               'nllSklearn']
    metrics_results = {}
    for metric in metrics:
        metrics_results[metric] = []
    YI_thresholds_roc = []
    for pathology in range(len(pathology_targets)):
        # Metrics are only defined when both classes are present.
        if len(np.unique(pathology_targets[pathology])) > 1:
            y_true, y_pred = np.array(pathology_targets[pathology], dtype=np.int64), pathology_outputs_sigmoid[
                pathology]
            metrics_results, YI_thresholds_roc = getMetrics(y_true, y_pred,
                                                            metrics_results,
                                                            YI_thresholds_roc)
        else:
            for metric in metrics:
                metrics_results[metric].append(np.nan)
        # NOTE(review): this assignment runs once per pathology and overwrites
        # the same key, so only the LAST pathology's average loss survives in
        # metrics_results — confirm whether a per-pathology list was intended.
        for loss_function, criterion in criterions.items():
            metrics_results[loss_function] = avg_loss_results[loss_function][pathology]
    metrics_means = {}
    for metric in metrics:
        metrics_results[metric] = np.asarray(metrics_results[metric])
        # Mean over pathologies, ignoring NaN entries.
        metrics_means[metric] = np.mean(metrics_results[metric][~np.isnan(metrics_results[metric])])
    thresholds = np.array(YI_thresholds_roc)
    if 'test' not in name:
        print(f'Epoch {epoch + 1} - {name}')
        print_string = ''
        for metric, mean in metrics_means.items():
            print_string += f' Avg {metric}={mean:4.4f} '
        print(print_string)
    if CALIBRATION_AVAILABLE:
        # Recompute the same metrics on the calibrated posteriors, thresholded
        # at the Bayes decision point derived from the cost ratio and prior.
        metrics_results_calibrated = {}
        for metric in metrics:
            metrics_results_calibrated[metric] = []
        thresholds_roc_calibrated = []
        for pathology in range(len(pathology_targets)):
            if len(np.unique(pathology_targets[pathology])) > 1:
                y_true, y_pred = np.array(pathology_targets[pathology], dtype=np.int64), \
                                 pathology_outputs_sigmoid_calibrated[pathology]
                ptar = priors['valid']['priors_pos'][pathology]
                Tau_bayes = cost_ratio * (1 - ptar) / ptar
                th_posteriors = Tau_bayes / (1 + Tau_bayes)
                print('\n{} - COSTS TH: {}'.format(pathology, th_posteriors))
                metrics_results_calibrated, thresholds_roc_calibrated = getMetrics(y_true, y_pred,
                                                                                   metrics_results_calibrated,
                                                                                   thresholds_roc_calibrated,
                                                                                   costs_thr=th_posteriors,
                                                                                   )
            else:
                for metric in metrics:
                    metrics_results_calibrated[metric].append(np.nan)
        # Add calibrated dictionary to metrics_results dictionary
        for oldkey in metrics:
            metrics_results_calibrated[oldkey + '_calibrated'] = metrics_results_calibrated.pop(oldkey)
        metrics_results.update(metrics_results_calibrated)
    # TODO: add calibration with PAV to find minimum Brier as reference for calibration performance
    if name == 'test':
        for pathology in range(len(pathology_targets)):
            if len(pathology_targets[pathology]) > 0:
                try:
                    pav = calibration_parameters[pathology]['pav']
                    llrs, ntar, nnon = pav.llrs()
                    # NOTE(review): `ptar`, `a`, `b` and `LLR` here are
                    # leftovers from the LAST iteration of the calibration
                    # loops above, not this pathology's values — this looks
                    # like a bug in the experimental PAV block; confirm before
                    # relying on these calibrated outputs.
                    k = -np.log((1 - ptar) / ptar)
                    pathology_outputs_sigmoid_calibrated[pathology] = 1 / (1 + np.exp(-(a * LLR + b) + k))
                    print(pathology, llrs)
                    print(n_count[pathology], ntar.sum() + nnon.sum())
                except Exception as e:
                    print('Error in pav llrs computation: ', e)
                # Use priors['test'] for ptar? (translated from Spanish)
                """
                for p in np.atleast_1d(ptar):
                    logitPost = llrs + logit(p)
                    Ctar, Cnon = softplus(-logitPost), softplus(logitPost)
                    min_cllr = p*(Ctar[ntar!=0] @ ntar[ntar!=0]) / ntar.sum() + (1-p)*(Cnon[nnon!=0] @ nnon[nnon!=0]) / nnon.sum()
                    min_cllr /= -p*np.log(p) - (1-p)*np.log(1-p)
                """
    return metrics_means['AUC-ROC'], metrics_means[
        'AUC-PR'], metrics_results, thresholds, pathology_outputs, pathology_targets
| 16,495 | 45.730878 | 137 | py |
Comp2Comp | Comp2Comp-master/setup.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from os import path
from setuptools import find_packages, setup
def get_version():
    """Read __version__ from comp2comp/__init__.py.

    Optionally appends the ABCTSEG_VERSION_SUFFIX environment variable, and —
    for nightly builds (BUILD_NIGHTLY=1) — a ".devYYMMDD" suffix, writing the
    resulting version back into __init__.py.

    Returns:
        str: the resolved version string.
    """
    init_py_path = path.join(path.abspath(path.dirname(__file__)), "comp2comp", "__init__.py")
    # Use a context manager: the original open(...).readlines() leaked the
    # file handle.
    with open(init_py_path, "r") as f:
        init_py = f.readlines()
    version_line = [line.strip() for line in init_py if line.startswith("__version__")][0]
    version = version_line.split("=")[-1].strip().strip("'\"")
    # The following is used to build release packages.
    # Users should never use it.
    suffix = os.getenv("ABCTSEG_VERSION_SUFFIX", "")
    version = version + suffix
    if os.getenv("BUILD_NIGHTLY", "0") == "1":
        from datetime import datetime

        date_str = datetime.today().strftime("%y%m%d")
        version = version + ".dev" + date_str
        # Persist the nightly version back into __init__.py so the built
        # package reports it.
        new_init_py = [line for line in init_py if not line.startswith("__version__")]
        new_init_py.append('__version__ = "{}"\n'.format(version))
        with open(init_py_path, "w") as f:
            f.write("".join(new_init_py))
    return version
# Package metadata and dependencies for Comp2Comp.
setup(
    name="comp2comp",
    version=get_version(),
    author="StanfordMIMI",
    url="https://github.com/StanfordMIMI/Comp2Comp",
    description="Computed tomography to body composition.",
    packages=find_packages(exclude=("configs", "tests")),
    python_requires=">=3.6",
    install_requires=[
        "pydicom",
        "numpy==1.23.5",
        "h5py",
        "tabulate",
        "tqdm",
        "silx",
        "yacs",
        "pandas",
        "dosma",
        "opencv-python",
        "huggingface_hub",
        "pycocotools",
        "wget",
        "tensorflow>=2.0.0",
        # Pinned fork of TotalSegmentator, installed straight from git.
        "totalsegmentator @ git+https://github.com/StanfordMIMI/TotalSegmentator.git",
    ],
    extras_require={
        "all": ["shapely", "psutil"],
        "dev": [
            # Formatting
            "flake8",
            "isort",
            "black==22.8.0",
            "flake8-bugbear",
            "flake8-comprehensions",
            # Docs
            "mock",
            "sphinx",
            "sphinx-rtd-theme",
            "recommonmark",
            "myst-parser",
        ],
        # Extra dependency for the contrast-phase classifier.
        "contrast_phase": ["xgboost"],
    },
)
| 2,240 | 28.103896 | 94 | py |
Comp2Comp | Comp2Comp-master/comp2comp/models/models.py | import enum
import os
from pathlib import Path
from typing import Dict, Sequence
import wget
from keras.models import load_model
class Models(enum.Enum):
    """Registry of segmentation/classification models.

    Each member carries:
        model_name: identifier used for weight files and HuggingFace downloads.
        categories: mapping from tissue/vertebra name to output channel index.
        use_softmax: whether the model's output channels are softmax-normalized.
        windows: CT windowing presets the model supports.
    """

    ABCT_V_0_0_1 = (
        1,
        "abCT_v0.0.1",
        {"muscle": 0, "imat": 1, "vat": 2, "sat": 3},
        False,
        ("soft", "bone", "custom"),
    )
    STANFORD_V_0_0_1 = (
        2,
        "stanford_v0.0.1",
        # ("background", "muscle", "bone", "vat", "sat", "imat"),
        # Category name mapped to channel index
        {"muscle": 1, "vat": 3, "sat": 4, "imat": 5},
        True,
        ("soft", "bone", "custom"),
    )
    TS_SPINE_FULL = (
        3,
        "ts_spine_full",
        # Category name mapped to channel index
        {
            "L5": 18,
            "L4": 19,
            "L3": 20,
            "L2": 21,
            "L1": 22,
            "T12": 23,
            "T11": 24,
            "T10": 25,
            "T9": 26,
            "T8": 27,
            "T7": 28,
            "T6": 29,
            "T5": 30,
            "T4": 31,
            "T3": 32,
            "T2": 33,
            "T1": 34,
            "C7": 35,
            "C6": 36,
            "C5": 37,
            "C4": 38,
            "C3": 39,
            "C2": 40,
            "C1": 41,
        },
        False,
        (),
    )
    TS_SPINE = (
        4,
        "ts_spine",
        # Category name mapped to channel index
        {"L5": 18, "L4": 19, "L3": 20, "L2": 21, "L1": 22, "T12": 23},
        False,
        (),
    )
    STANFORD_SPINE_V_0_0_1 = (
        5,
        "stanford_spine_v0.0.1",
        # Category name mapped to channel index
        {"L5": 24, "L4": 23, "L3": 22, "L2": 21, "L1": 20, "T12": 19},
        False,
        (),
    )
    TS_HIP = (
        6,
        "ts_hip",
        # Category name mapped to channel index
        {"femur_left": 88, "femur_right": 89},
        False,
        (),
    )

    def __new__(
        cls,
        value: int,
        model_name: str,
        categories: Dict[str, int],
        use_softmax: bool,
        windows: Sequence[str],
    ):
        # Custom __new__ so each member stores its metadata as attributes.
        obj = object.__new__(cls)
        obj._value_ = value
        obj.model_name = model_name
        obj.categories = categories
        obj.use_softmax = use_softmax
        obj.windows = windows
        return obj

    def load_model(self, model_dir):
        """Load the model weights from `model_dir`, downloading them from
        Hugging Face on first use.

        Args:
            model_dir (str): directory where weight files are stored.

        Returns:
            keras.models.Model: the loaded model.
        """
        try:
            filename = Models.find_model_weights(self.model_name, model_dir)
        except Exception:
            # Weights not present yet — fetch them and retry the lookup.
            print("Downloading muscle/fat model from hugging face")
            Path(model_dir).mkdir(parents=True, exist_ok=True)
            wget.download(
                f"https://huggingface.co/stanfordmimi/stanford_abct_v0.0.1/resolve/main/{self.model_name}.h5",
                out=os.path.join(model_dir, f"{self.model_name}.h5"),
            )
            filename = Models.find_model_weights(self.model_name, model_dir)
            print("")
        print("Loading muscle/fat model from {}".format(filename))
        return load_model(filename)

    @staticmethod
    def model_from_name(model_name):
        """Return the Models member whose model_name matches, or None.

        Args:
            model_name (str): Model name.

        Returns:
            Models | None: matching member, or None if unknown.
        """
        for model in Models:
            if model.model_name == model_name:
                return model
        return None

    @staticmethod
    def find_model_weights(file_name, model_dir):
        """Return the path of the first file under `model_dir` whose name
        starts with `file_name`.

        Raises:
            FileNotFoundError: if no matching file exists. (Previously the
            function fell off the end and returned None, which bypassed the
            exception-based download fallback in load_model and crashed later
            when keras tried to open a None path.)
        """
        for root, _, files in os.walk(model_dir):
            for file in files:
                if file.startswith(file_name):
                    return os.path.join(root, file)
        raise FileNotFoundError(
            "No weights file starting with '{}' found under {}".format(file_name, model_dir)
        )
| 3,821 | 24.651007 | 110 | py |
Comp2Comp | Comp2Comp-master/comp2comp/contrast_phase/contrast_inf.py | import argparse
import os
import pickle
import sys
import nibabel as nib
import numpy as np
import scipy
import SimpleITK as sitk
from scipy import ndimage as ndi
def loadNiiToArray(path):
    """Load a NIfTI file with nibabel and return its voxel data as a numpy array."""
    return np.array(nib.load(path).dataobj)
def loadNiiWithSitk(path):
    """Read a NIfTI file via SimpleITK and return its voxels as a numpy array."""
    reader = sitk.ImageFileReader()
    reader.SetImageIO("NiftiImageIO")
    reader.SetFileName(path)
    return sitk.GetArrayFromImage(reader.Execute())
def loadNiiImageWithSitk(path):
    """Read a NIfTI file via SimpleITK as an image object, flipped along the
    Y axis so its orientation matches what Nibabel would produce."""
    reader = sitk.ImageFileReader()
    reader.SetImageIO("NiftiImageIO")
    reader.SetFileName(path)
    loaded = reader.Execute()
    # invert the image to be compatible with Nibabel
    return sitk.Flip(loaded, [False, True, False])
def keep_masked_values(arr, mask):
    """Return the elements of `arr` at positions where `mask` is non-zero.

    The result is a flat array of the selected values, in row-major order.
    """
    return arr[np.nonzero(mask)]
def get_stats(arr):
    """Return basic descriptive statistics of `arr`.

    Returns:
        (max, min, mean, median, std, variance) computed over all elements.
    """
    values = arr
    return (
        np.max(values),
        np.min(values),
        np.mean(values),
        np.median(values),
        np.std(values),
        np.var(values),
    )
def getMaskAnteriorAtrium(mask):
    """For every axial (z) slice, set all rows anterior to (i.e. above) the
    first mask row to 1. Slices containing no mask voxels are left unchanged.

    Returns a new array; the input is not modified.
    """
    filled = mask.copy()
    for z in range(mask.shape[-1]):
        rows, _ = np.nonzero(mask[:, :, z] == 1)
        if rows.size > 0:
            # rows is in row-major order, so rows[0] is the first occupied row.
            filled[:rows[0], :, z] = 1
    return filled
"""
Function from
https://stackoverflow.com/questions/46310603/how-to-compute-convex-hull-image-volume-in-3d-numpy-arrays/46314485#46314485
"""
def fill_hull(image):
    """Return a binary volume of the filled convex hull of image's non-zero voxels.

    Adapted from https://stackoverflow.com/questions/46310603 (see the
    attribution string above this function in the original file).

    Args:
        image: n-D array; non-zero entries define the point cloud.

    Returns:
        Array of the same shape with 1 inside (and on) the convex hull, 0 outside.
    """
    # `import scipy` alone does not guarantee the `spatial` subpackage is
    # available as scipy.spatial (only recent SciPy versions lazy-load
    # subpackages), so import it explicitly here.
    import scipy.spatial

    points = np.transpose(np.where(image))
    hull = scipy.spatial.ConvexHull(points)
    deln = scipy.spatial.Delaunay(points[hull.vertices])
    idx = np.stack(np.indices(image.shape), axis=-1)
    # find_simplex returns -1 for points outside the hull; +1 makes interior
    # (and boundary) voxels non-zero so np.nonzero selects them.
    out_idx = np.nonzero(deln.find_simplex(idx) + 1)
    out_img = np.zeros(image.shape)
    out_img[out_idx] = 1
    return out_img
def getClassBinaryMask(TSOutArray, classNum):
    """Return a float64 mask that is 1 where the labelmap equals `classNum`."""
    return np.where(TSOutArray == classNum, 1.0, 0.0)
def loadNiftis(TSNiftiPath, imageNiftiPath):
    """Load the TotalSegmentator labelmap and the CT scan as numpy arrays.

    Returns:
        (segmentation_array, scan_array)
    """
    segmentation = loadNiiToArray(TSNiftiPath)
    scan = loadNiiToArray(imageNiftiPath)
    return segmentation, scan
def selectSlice(scanImage, zslice):
    """Extract one axial (z) slice from a 3D SimpleITK image as a 2D image."""
    extract_size = list(scanImage.GetSize())
    extract_size[2] = 0  # a size of 0 along z collapses that dimension
    extractor = sitk.ExtractImageFilter()
    extractor.SetSize(extract_size)
    extractor.SetIndex([0, 0, zslice])
    return extractor.Execute(scanImage)
# function to apply windowing
def windowing(sliceImage, center=400, width=400):
    """Apply an intensity window and rescale the slice to 8-bit [0, 255]."""
    windowMinimum = center - (width / 2)
    windowMaximum = center + (width / 2)
    img_255 = sitk.Cast(
        sitk.IntensityWindowing(
            sliceImage,
            # NOTE(review): the computed lower bound is negated here, so the
            # defaults give a window of [-200, 600] instead of [200, 600].
            # This looks unintentional but changing it would alter the inputs
            # to the downstream classifier — confirm before fixing.
            windowMinimum=-windowMinimum,
            windowMaximum=windowMaximum,
            outputMinimum=0.0,
            outputMaximum=255.0,
        ),
        sitk.sitkUInt8,
    )
    return img_255
def selectSampleSlice(kidneyLMask, adRMask, scanImage):
    """Pick one representative axial slice for the left kidney and one for the
    right adrenal gland, window both, and return them as 8-bit SimpleITK images.

    Returns:
        (kidney_slice_image, adrenal_slice_image)
    """
    def _central_occupied_slice(mask):
        # z indices of axial slices containing at least one mask voxel; the
        # representative slice is the midpoint of the occupied range.
        occupied = np.where(mask.sum(axis=(0, 1)) > 0)[0]
        return int(occupied[0] + (occupied[-1] - occupied[0]) / 2)

    kidney_slice = selectSlice(scanImage, _central_occupied_slice(kidneyLMask))
    adrenal_slice = selectSlice(scanImage, _central_occupied_slice(adRMask))
    return windowing(kidney_slice), windowing(adrenal_slice)
def getFeatures(TSArray, scanArray):
    """Extract per-region HU statistics used by the contrast-phase classifier.

    Args:
        TSArray: TotalSegmentator label map (same grid as ``scanArray``).
        scanArray: CT scan values in HU.

    Returns:
        tuple: ``(stats, kidneyLMask, adRMask)`` where ``stats`` is a flat list
        of 6 statistics (max, min, mean, median, std, variance) for each of the
        7 regions (aorta, IVC, portal vein, kidney L, kidney R, renal pelvis L,
        renal pelvis R), followed by 6 derived aorta-portal / aorta-IVC
        differences — the exact feature order the model was trained on.
    """
    # Binary masks for the TotalSegmentator class ids of interest.
    aortaMask = getClassBinaryMask(TSArray, 7)
    IVCMask = getClassBinaryMask(TSArray, 8)
    portalMask = getClassBinaryMask(TSArray, 9)
    atriumMask = getClassBinaryMask(TSArray, 45)
    kidneyLMask = getClassBinaryMask(TSArray, 3)
    kidneyRMask = getClassBinaryMask(TSArray, 2)
    adRMask = getClassBinaryMask(TSArray, 11)
    # Remove the thoracic portions of the aorta and IVC (anterior to the atrium).
    anteriorAtriumMask = getMaskAnteriorAtrium(atriumMask)
    aortaMask = aortaMask * (anteriorAtriumMask == 0)
    IVCMask = IVCMask * (anteriorAtriumMask == 0)
    # Erode the vessels so only their centers contribute to the statistics.
    struct2 = np.ones((3, 3, 3))
    aortaMaskEroded = ndi.binary_erosion(aortaMask, structure=struct2).astype(aortaMask.dtype)
    IVCMaskEroded = ndi.binary_erosion(IVCMask, structure=struct2).astype(IVCMask.dtype)
    struct3 = np.ones((1, 1, 1))
    portalMaskEroded = ndi.binary_erosion(portalMask, structure=struct3).astype(portalMask.dtype)
    # Fall back to the un-eroded portal mask when erosion leaves too few voxels.
    if np.count_nonzero(portalMaskEroded) < 500:
        portalMaskEroded = portalMask
    # HU value arrays for every region, in the order the classifier expects.
    regionArrays = [
        keep_masked_values(scanArray, aortaMaskEroded),
        keep_masked_values(scanArray, IVCMaskEroded),
        keep_masked_values(scanArray, portalMaskEroded),
        keep_masked_values(scanArray, kidneyLMask),
        keep_masked_values(scanArray, kidneyRMask),
        _pelvisArray(scanArray, kidneyLMask),
        _pelvisArray(scanArray, kidneyRMask),
    ]
    # Each entry from get_stats: (max, min, mean, median, std, variance).
    regionStats = [get_stats(region) for region in regionArrays]
    stats = []
    for regionStat in regionStats:
        stats.extend(regionStat)
    aortaStats, IVCStats, portalStats = regionStats[0], regionStats[1], regionStats[2]
    # Derived features: aorta - portal and aorta - IVC differences of
    # max (index 0), min (index 1) and mean (index 2).
    stats.extend(
        [
            aortaStats[0] - portalStats[0],
            aortaStats[1] - portalStats[1],
            aortaStats[2] - portalStats[2],
            aortaStats[0] - IVCStats[0],
            aortaStats[1] - IVCStats[1],
            aortaStats[2] - IVCStats[2],
        ]
    )
    return stats, kidneyLMask, adRMask


def _pelvisArray(scanArray, kidneyMask):
    """Return scan values in the renal pelvis (kidney convex hull minus kidney).

    This implements the in-code TODO that asked for the pelvis processing to be
    factored out; it was previously duplicated for the left and right kidneys.
    """
    hull = fill_hull(kidneyMask)
    # Exclude the kidney itself, keeping only the concavity (pelvis region).
    hull = hull * (kidneyMask == 0)
    # Erode to drop partial-volume voxels at the hull boundary.
    struct = np.ones((3, 3, 3))
    hull = ndi.binary_erosion(hull, structure=struct).astype(hull.dtype)
    return keep_masked_values(scanArray, hull)
def loadModel():
    """Load the pickled XGBoost contrast-phase classifier shipped with Comp2Comp.

    Returns:
        The unpickled model object.
    """
    c2cPath = os.path.dirname(sys.path[0])
    filename = os.path.join(c2cPath, "comp2comp", "contrast_phase", "xgboost.pkl")
    # Use a context manager so the file handle is closed; the original
    # `pickle.load(open(...))` leaked it.
    with open(filename, "rb") as f:
        model = pickle.load(f)
    return model
def predict_phase(TS_path, scan_path, outputPath=None, save_sample=False):
    """Predict the contrast phase of a CT scan and write results to disk.

    Writes ``<outputPath>/metrics/phase_prediction.txt`` with the phase name
    and sample kidney/adrenal slice images under ``<outputPath>/images``.

    Args:
        TS_path: Path to the TotalSegmentator output NIfTI.
        scan_path: Path to the CT scan NIfTI.
        outputPath: Output directory; must not be None (``os.path.join`` would fail).
        save_sample: NOTE(review): currently unused — sample images are always
            written; confirm whether this flag should gate image output.

    Raises:
        KeyError: If the model predicts a class outside 0-3.
    """
    TS_array, image_array = loadNiftis(TS_path, scan_path)
    model = loadModel()
    featureArray, kidneyLMask, adRMask = getFeatures(TS_array, image_array)
    y_pred = model.predict([featureArray])
    # Map the class index to a phase name. A dict lookup replaces the original
    # if-chain, which left `pred_phase` unbound (NameError) for unexpected
    # classes; an unknown class now raises KeyError instead.
    phase_names = {0: "non-contrast", 1: "arterial", 2: "venous", 3: "delayed"}
    pred_class = int(np.asarray(y_pred).reshape(-1)[0])
    pred_phase = phase_names[pred_class]
    output_path_metrics = os.path.join(outputPath, "metrics")
    if not os.path.exists(output_path_metrics):
        os.makedirs(output_path_metrics)
    outputTxt = os.path.join(output_path_metrics, "phase_prediction.txt")
    with open(outputTxt, "w") as text_file:
        text_file.write(pred_phase)
    print(pred_phase)
    output_path_images = os.path.join(outputPath, "images")
    if not os.path.exists(output_path_images):
        os.makedirs(output_path_images)
    scanImage = loadNiiImageWithSitk(scan_path)
    sliceImageK, sliceImageA = selectSampleSlice(kidneyLMask, adRMask, scanImage)
    outJpgK = os.path.join(output_path_images, "sampleSliceKidney.png")
    sitk.WriteImage(sliceImageK, outJpgK)
    outJpgA = os.path.join(output_path_images, "sampleSliceAdrenal.png")
    sitk.WriteImage(sliceImageA, outJpgA)
if __name__ == "__main__":
    # Command-line entry point: predict the contrast phase of a single scan.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--TS_path", type=str, required=True, help="Input image")
    arg_parser.add_argument("--scan_path", type=str, required=True, help="Input image")
    arg_parser.add_argument(
        "--output_dir", type=str, required=False, default=None, help="Output .txt prediction"
    )
    arg_parser.add_argument(
        "--save_sample", type=bool, required=False, default=False, help="Save jpeg sample "
    )
    cli_args = arg_parser.parse_args()
    predict_phase(cli_args.TS_path, cli_args.scan_path, cli_args.output_dir, cli_args.save_sample)
| 13,957 | 30.436937 | 121 | py |
Comp2Comp | Comp2Comp-master/comp2comp/muscle_adipose_tissue/data.py | import math
from typing import List, Sequence
import keras.utils as k_utils
import numpy as np
import pydicom
from keras.utils.data_utils import OrderedEnqueuer
from tqdm import tqdm
def parse_windows(windows):
    """Parse windowing bounds provided by the user.

    Each entry may be a named preset (e.g. ``"soft"``) or an explicit
    ``(lower, upper)`` pair of HU bounds.

    Args:
        windows (list): List of preset names or (lower, upper) pairs.

    Returns:
        tuple: Tuple of ``(lower, upper)`` bounds.

    Raises:
        KeyError: If a preset name is not recognized.
    """
    # Presets are stored as (window width, window level).
    windowing = {
        "soft": (400, 50),
        "bone": (1800, 400),
        "liver": (150, 30),
        "spine": (250, 50),
        "custom": (500, 50),
    }
    vals = []
    for w in windows:
        # Strings are Sequences too, so exclude them explicitly; otherwise a
        # two-character preset name would be mis-parsed as a (lower, upper) pair.
        if not isinstance(w, str) and isinstance(w, Sequence) and len(w) == 2:
            assert_msg = "Expected tuple of (lower, upper) bound"
            assert isinstance(w[0], (float, int)), assert_msg
            assert isinstance(w[1], (float, int)), assert_msg
            assert w[0] < w[1], assert_msg
            vals.append(w)
            continue
        if w not in windowing:
            raise KeyError("Window {} not found".format(w))
        window_width, window_level = windowing[w]
        half_width = window_width / 2
        vals.append((window_level - half_width, window_level + half_width))
    return tuple(vals)
def _window(xs, bounds):
"""Apply windowing to an array of CT images.
Args:
xs (ndarray): NxHxW
bounds (tuple): (lower, upper) bounds
Returns:
ndarray: Windowed images.
"""
imgs = []
for lb, ub in bounds:
imgs.append(np.clip(xs, a_min=lb, a_max=ub))
if len(imgs) == 1:
return imgs[0]
elif xs.shape[-1] == 1:
return np.concatenate(imgs, axis=-1)
else:
return np.stack(imgs, axis=-1)
class Dataset(k_utils.Sequence):
    """Keras sequence yielding windowed CT pixel batches from DICOM files."""

    def __init__(self, files: List[str], batch_size: int = 16, windows=None):
        self._files = files
        self._batch_size = batch_size
        self.windows = windows

    def __len__(self):
        # Number of batches, counting the final partial batch.
        return math.ceil(len(self._files) / self._batch_size)

    def __getitem__(self, idx):
        start = idx * self._batch_size
        batch_files = self._files[start : start + self._batch_size]
        headers = [pydicom.read_file(f, force=True) for f in batch_files]
        # Shift raw pixel values into HU using the DICOM rescale intercept.
        arrays = [(h.pixel_array + int(h.RescaleIntercept)).astype("float32") for h in headers]
        params = [{"spacing": h.PixelSpacing, "image": arr} for h, arr in zip(headers, arrays)]
        stacked = np.stack(arrays, axis=0)
        # Windowing produces one channel per window; otherwise add a channel axis.
        if self.windows:
            stacked = _window(stacked, parse_windows(self.windows))
        else:
            stacked = stacked[..., np.newaxis]
        return stacked, params
def _swap_muscle_imap(xs, ys, muscle_idx: int, imat_idx: int, threshold=-30.0):
"""
If pixel labeled as muscle but has HU < threshold, change label to imat.
Args:
xs (ndarray): NxHxWxC
ys (ndarray): NxHxWxC
muscle_idx (int): Index of the muscle label.
imat_idx (int): Index of the imat label.
threshold (float): Threshold for HU value.
Returns:
ndarray: Segmentation mask with swapped labels.
"""
labels = ys.copy()
muscle_mask = (labels[..., muscle_idx] > 0.5).astype(int)
imat_mask = labels[..., imat_idx]
imat_mask[muscle_mask.astype(np.bool) & (xs < threshold)] = 1
muscle_mask[xs < threshold] = 0
labels[..., muscle_idx] = muscle_mask
labels[..., imat_idx] = imat_mask
return labels
def postprocess(xs: np.ndarray, ys: np.ndarray):
    """Built-in post-processing.

    TODO: Make this configurable.

    Args:
        xs (ndarray): NxHxW image values.
        ys (ndarray): NxHxWxC predicted labels.

    Returns:
        ndarray: Labels with one extra all-zero channel appended.
    """
    zero_channel = np.zeros_like(ys[..., :1])
    ys = np.concatenate([ys, zero_channel], axis=-1)
    # If muscle hu is < -30, assume it is imat.
    """
    if "muscle" in categories and "imat" in categories:
        ys = _swap_muscle_imap(
            xs,
            ys,
            muscle_idx=categories["muscle"],
            imat_idx=categories["imat"],
        )
    """
    return ys
def predict(
    model,
    dataset: Dataset,
    batch_size: int = 16,
    num_workers: int = 1,
    max_queue_size: int = 10,
    use_multiprocessing: bool = False,
):
    """Predict segmentation masks for a dataset.

    Args:
        model (keras.Model): Model to use for prediction.
        dataset (Dataset): Dataset to predict on.
        batch_size (int): Batch size.
        num_workers (int): Number of workers; 0 iterates the dataset directly.
        max_queue_size (int): Maximum queue size.
        use_multiprocessing (bool): Use multiprocessing.

    Returns:
        tuple: (inputs, post-processed predictions, per-image params),
        each a list with one entry per image.
    """
    enqueuer = None
    if num_workers > 0:
        enqueuer = OrderedEnqueuer(dataset, use_multiprocessing=use_multiprocessing, shuffle=False)
        enqueuer.start(workers=num_workers, max_queue_size=max_queue_size)
        output_generator = enqueuer.get()
    else:
        output_generator = iter(dataset)
    num_scans = len(dataset)
    xs = []
    ys = []
    params = []
    try:
        for _ in tqdm(range(num_scans)):
            x, p_dicts = next(output_generator)
            y = model.predict(x, batch_size=batch_size)
            image = np.stack([out["image"] for out in p_dicts], axis=0)
            y = postprocess(image, y)
            params.extend(p_dicts)
            xs.extend([x[i, ...] for i in range(len(x))])
            ys.extend([y[i, ...] for i in range(len(y))])
    finally:
        # Shut down the worker threads/processes; the original started the
        # enqueuer but never stopped it, leaking workers on every call.
        if enqueuer is not None:
            enqueuer.stop()
    return xs, ys, params
| 5,857 | 26.763033 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/muscle_adipose_tissue/muscle_adipose_tissue.py | import os
from time import perf_counter
from typing import List
import cv2
import h5py
import numpy as np
import pandas as pd
from keras import backend as K
from tqdm import tqdm
from comp2comp.inference_class_base import InferenceClass
from comp2comp.metrics.metrics import CrossSectionalArea, HounsfieldUnits
from comp2comp.models.models import Models
from comp2comp.muscle_adipose_tissue.data import Dataset, predict
class MuscleAdiposeTissueSegmentation(InferenceClass):
    """Muscle adipose tissue segmentation class.

    Runs a 2D segmentation model over a list of DICOM slices and attaches the
    raw predictions to each per-slice result dict.
    """

    def __init__(self, batch_size: int, model_name: str, model_dir: str = None):
        # NOTE(review): `model_dir` is accepted but never used here — the model
        # is loaded from `inference_pipeline.model_dir` in __call__; confirm.
        super().__init__()
        self.batch_size = batch_size
        self.model_name = model_name
        # Resolve the model-type descriptor (windows, categories, loader).
        self.model_type = Models.model_from_name(model_name)

    def forward_pass_2d(self, files):
        """Run the model over `files` and return per-slice result dicts with preds."""
        dataset = Dataset(files, windows=self.model_type.windows)
        num_workers = 1
        print("Computing segmentation masks using {}...".format(self.model_name))
        start_time = perf_counter()
        _, preds, results = predict(
            self.model,
            dataset,
            num_workers=num_workers,
            use_multiprocessing=num_workers > 1,
            batch_size=self.batch_size,
        )
        # Free the TF/Keras graph memory once inference is done.
        K.clear_session()
        print(
            f"Completed {len(files)} segmentations in {(perf_counter() - start_time):.2f} seconds."
        )
        # Attach raw predictions to the per-slice parameter dicts.
        for i in range(len(results)):
            results[i]["preds"] = preds[i]
        return results

    def __call__(self, inference_pipeline):
        """Pipeline entry point: returns {"images", "preds", "spacings"} lists."""
        # Expose model info to downstream pipeline stages.
        inference_pipeline.muscle_adipose_tissue_model_type = self.model_type
        inference_pipeline.muscle_adipose_tissue_model_name = self.model_name
        dicom_file_paths = inference_pipeline.dicom_file_paths
        # if dicom_file_names not an attribute of inference_pipeline, add it
        if not hasattr(inference_pipeline, "dicom_file_names"):
            inference_pipeline.dicom_file_names = [
                dicom_file_path.stem for dicom_file_path in dicom_file_paths
            ]
        self.model = self.model_type.load_model(inference_pipeline.model_dir)
        results = self.forward_pass_2d(dicom_file_paths)
        # Split the per-slice result dicts into parallel lists.
        images = []
        for result in results:
            images.append(result["image"])
        preds = []
        for result in results:
            preds.append(result["preds"])
        spacings = []
        for result in results:
            spacings.append(result["spacing"])
        return {"images": images, "preds": preds, "spacings": spacings}
class MuscleAdiposeTissuePostProcessing(InferenceClass):
    """Post-process muscle and adipose tissue segmentation."""

    def __init__(self):
        super().__init__()

    def preds_to_mask(self, preds):
        """Convert model predictions to a boolean mask.

        Args:
            preds (np.ndarray): Model predictions (per-channel scores).

        Returns:
            np.ndarray: Boolean mask, same shape as ``preds``.
        """
        if self.use_softmax:
            # softmax: one-hot encode the argmax over the channel axis.
            labels = np.zeros_like(preds, dtype=np.uint8)
            l_argmax = np.argmax(preds, axis=-1)
            for c in range(labels.shape[-1]):
                labels[l_argmax == c, c] = 1
            # `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
            return labels.astype(bool)
        else:
            # sigmoid: independent per-channel threshold.
            return preds >= 0.5

    def __call__(self, inference_pipeline, images, preds, spacings):
        """Post-process muscle and adipose tissue segmentation."""
        self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
        self.use_softmax = self.model_type.use_softmax
        self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
        return self.post_process(images, preds, spacings)

    def remove_small_objects(self, mask, min_size=10):
        """Drop connected components of `mask` smaller than `min_size` pixels."""
        mask = mask.astype(np.uint8)
        components, output, stats, centroids = cv2.connectedComponentsWithStats(
            mask, connectivity=8
        )
        sizes = stats[1:, -1]
        mask = np.zeros((output.shape))
        for i in range(0, components - 1):
            if sizes[i] >= min_size:
                mask[output == i + 1] = 1
        return mask

    def post_process(
        self,
        images,
        preds,
        spacings,
    ):
        """Convert predictions to masks, fill holes, and reassign low-HU muscle to IMAT."""
        categories = self.model_type.categories
        start_time = perf_counter()
        masks = [self.preds_to_mask(p) for p in preds]
        for i, _ in enumerate(masks):
            # Keep only channels from the model_type categories dict
            masks[i] = masks[i][..., [categories[cat] for cat in categories]]
        masks = self.fill_holes(masks)
        cats = list(categories.keys())
        file_idx = 0
        for mask, image in tqdm(zip(masks, images), total=len(masks)):
            muscle_mask = mask[..., cats.index("muscle")]
            imat_mask = mask[..., cats.index("imat")]
            # Muscle-labeled pixels in the fat HU range [-190, -30] are IMAT.
            imat_mask = (
                np.logical_and((image * muscle_mask) <= -30, (image * muscle_mask) >= -190)
            ).astype(int)
            imat_mask = self.remove_small_objects(imat_mask)
            mask[..., cats.index("imat")] += imat_mask
            mask[..., cats.index("muscle")][imat_mask == 1] = 0
            masks[file_idx] = mask
            images[file_idx] = image
            file_idx += 1
        print(f"Completed post-processing in {(perf_counter() - start_time):.2f} seconds.")
        return {"images": images, "masks": masks, "spacings": spacings}

    # function that fills in holes in a segmentation mask
    def _fill_holes(self, mask: np.ndarray, mask_id: int):
        """Fill in holes in a segmentation mask.

        Args:
            mask (ndarray): NxHxW binary mask for one category.
            mask_id (int): Label of the mask.

        Returns:
            ndarray: Filled mask.
        """
        int_mask = ((1 - mask) > 0.5).astype(np.int8)
        components, output, stats, _ = cv2.connectedComponentsWithStats(int_mask, connectivity=8)
        sizes = stats[1:, -1]
        components = components - 1
        # Larger threshold for SAT
        # TODO make this configurable / parameter
        if mask_id == 2:
            min_size = 200
        else:
            # min_size = 50  # Smaller threshold for everything else
            min_size = 20
        img_out = np.ones_like(mask)
        for i in range(0, components):
            if sizes[i] > min_size:
                img_out[output == i + 1] = 0
        return img_out

    def fill_holes(self, ys: List):
        """Take an array of size NxHxWxC and for each channel fill in holes.

        Args:
            ys (list): List of segmentation masks.
        """
        segs = []
        for n in range(len(ys)):
            ys_out = [self._fill_holes(ys[n][..., i], i) for i in range(ys[n].shape[-1])]
            segs.append(np.stack(ys_out, axis=2).astype(float))
        return segs
class MuscleAdiposeTissueComputeMetrics(InferenceClass):
    """Compute muscle and adipose tissue metrics."""

    def __init__(self):
        super().__init__()

    def __call__(self, inference_pipeline, images, masks, spacings):
        """Compute muscle and adipose tissue metrics."""
        self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
        self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
        return self.compute_metrics_all(images, masks, spacings)

    def compute_metrics_all(self, images, masks, spacings):
        """Compute metrics for every (image, mask, spacing) triple.

        Args:
            images (List[np.ndarray]): Images.
            masks (List[np.ndarray]): Masks.
            spacings (List): Per-image pixel spacings.

        Returns:
            Dict: {"images": ..., "results": per-image metric dicts}.
        """
        per_image = [
            self.compute_metrics(image, mask, spacing)
            for image, mask, spacing in zip(images, masks, spacings)
        ]
        return {"images": images, "results": per_image}

    def compute_metrics(self, x, mask, spacing):
        """Compute HU and cross-sectional-area results for one segmentation."""
        categories = self.model_type.categories
        hu_metric = HounsfieldUnits()
        # Area is reported in cm^2 only when a pixel spacing is available.
        csa_metric = CrossSectionalArea("cm^2" if spacing else "")
        hu_vals = hu_metric(mask, x, category_dim=-1)
        csa_vals = csa_metric(mask=mask, spacing=spacing, category_dim=-1)
        assert mask.shape[-1] == len(
            categories
        ), "{} categories found in mask, " "but only {} categories specified".format(
            mask.shape[-1], len(categories)
        )
        return {
            cat: {
                "mask": mask[..., idx],
                hu_metric.name(): hu_vals[idx],
                csa_metric.name(): csa_vals[idx],
            }
            for idx, cat in enumerate(categories.keys())
        }
class MuscleAdiposeTissueH5Saver(InferenceClass):
    """Save results to an HDF5 file."""

    def __init__(self):
        super().__init__()

    def __call__(self, inference_pipeline, results):
        """Save results to an HDF5 file."""
        self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
        self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
        self.output_dir = inference_pipeline.output_dir
        self.h5_output_dir = os.path.join(self.output_dir, "segmentations")
        os.makedirs(self.h5_output_dir, exist_ok=True)
        self.dicom_file_paths = inference_pipeline.dicom_file_paths
        self.dicom_file_names = inference_pipeline.dicom_file_names
        self.save_results(results)
        return {"results": results}

    def save_results(self, results):
        """Write one HDF5 file per slice, one uint8 dataset per tissue category."""
        category_names = list(self.model_type.categories.keys())
        for idx, result in enumerate(results):
            out_path = os.path.join(self.h5_output_dir, self.dicom_file_names[idx] + ".h5")
            with h5py.File(out_path, "w") as h5_file:
                for name in category_names:
                    h5_file.create_dataset(
                        name=name, data=np.array(result[name]["mask"], dtype=np.uint8)
                    )
class MuscleAdiposeTissueMetricsSaver(InferenceClass):
    """Save metrics to a CSV file."""

    def __init__(self):
        super().__init__()

    def __call__(self, inference_pipeline, results):
        """Save metrics to a CSV file."""
        self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
        self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
        self.output_dir = inference_pipeline.output_dir
        self.csv_output_dir = os.path.join(self.output_dir, "metrics")
        os.makedirs(self.csv_output_dir, exist_ok=True)
        self.dicom_file_paths = inference_pipeline.dicom_file_paths
        self.dicom_file_names = inference_pipeline.dicom_file_names
        self.save_results(results)
        return {}

    def save_results(self, results):
        """Write one CSV row per slice with HU and CSA for each tissue category."""
        category_names = list(self.model_type.categories.keys())
        # Column order must match the per-category (HU, CSA) pairs appended below.
        columns = [
            "File Name",
            "File Path",
            "Muscle HU",
            "Muscle CSA (cm^2)",
            "IMAT HU",
            "IMAT CSA (cm^2)",
            "SAT HU",
            "SAT CSA (cm^2)",
            "VAT HU",
            "VAT CSA (cm^2)",
        ]
        df = pd.DataFrame(columns=columns)
        for idx, result in enumerate(results):
            row = [self.dicom_file_names[idx], self.dicom_file_paths[idx]]
            for name in category_names:
                row.append(result[name]["Hounsfield Unit"])
                row.append(result[name]["Cross-sectional Area (cm^2)"])
            df.loc[idx] = row
        df.to_csv(
            os.path.join(self.csv_output_dir, "muscle_adipose_tissue_metrics.csv"), index=False
        )
| 11,794 | 34.42042 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/visualization/detectron_visualizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
import os
from enum import Enum, unique
from pathlib import Path
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from comp2comp.utils.colormap import random_color
from comp2comp.visualization.dicom import to_dicom
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
# Thresholds used by the Visualizer drawing heuristics.
# Presumably pixel-area thresholds for label/mask sizing — usage is in the
# draw_* methods; confirm before tuning.
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
# Default colors expressed as RGB fractions in [0, 1].
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
# Default for Visualizer.keypoint_threshold (assigned in Visualizer.__init__).
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    # Stored by Visualizer.__init__ as self._instance_mode.
    IMAGE = 0
    """
    Picks a random color for every instance and overlay segmentations with low opacity.
    """
    SEGMENTATION = 1
    """
    Let instances of the same category have similar colors
    (from metadata.thing_colors), and overlay them with
    high opacity. This provides more attention on the quality of segmentation.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, but convert all areas without masks to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    Lazy converter between binary-mask and polygon representations of one mask.

    Accepts a COCO RLE dict, a list of polygons, or a binary ndarray, and
    converts between representations on demand.

    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask
    """

    def __init__(self, mask_or_polygons, height, width):
        # Cached representations; computed lazily by the properties below.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width
        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return
        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return
        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            # shape[1] == 2 would suggest an Nx2 point array, not an HxW mask.
            assert m.shape[1] != 2, m.shape
            assert m.shape == (
                height,
                width,
            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
            self._mask = m.astype("uint8")
            return
        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Lazily rasterize polygons into a binary mask.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Lazily trace the binary mask into polygons.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        """Trace contours of `mask`; returns (list of flat polygons, has_holes)."""
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        # res[-1] is the hierarchy regardless of the cv2 version's return arity.
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        # A contour with a parent (column 3 >= 0) is an internal hole.
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        # Polygons need at least 3 points (6 coordinates) to be valid.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        """Rasterize polygons into an HxW binary mask via COCO RLE."""
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        """Return the mask area in pixels."""
        return self.mask.sum()

    def bbox(self):
        """Return the bounding box as [x0, y0, x1, y1]."""
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        # toBbox returns [x, y, w, h]; convert to corner coordinates.
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = (
segment_ids[sorted_idxs],
areas[sorted_idxs],
)
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None:
if class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
else:
labels = [str(i) for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(lbl, s * 100) for lbl, s in zip(labels, scores)]
if labels is not None and is_crowd is not None:
labels = [lbl + ("|crowd" if crowd else "") for lbl, crowd in zip(labels, is_crowd)]
return labels
class VisImage:
    """Matplotlib-backed canvas holding one RGB image plus drawn overlays."""

    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)

    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.

        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # Axes span the whole figure; no margins, ticks, or frame.
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        self.fig = fig
        self.ax = ax
        self.reset_image(img)

    def reset_image(self, img):
        """
        Args:
            img: same as in __init__
        """
        img = img.astype("uint8")
        # extent maps image pixels 1:1 onto the axes coordinate system.
        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")

    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.

        Returns:
            ndarray: the rendered image (also returned when nothing is written).

        NOTE(review): only .png/.jpg/.dcm extensions are written; any other
        extension silently writes nothing — confirm that is intended.
        """
        # if filepath is a png or jpg
        img = self.get_image()
        if filepath.endswith(".png") or filepath.endswith(".jpg"):
            self.fig.savefig(filepath)
        if filepath.endswith(".dcm"):
            to_dicom(img, Path(filepath))
        return img

    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        # Render the Agg canvas to an RGBA byte buffer.
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO()  # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()
        buffer = np.frombuffer(s, dtype="uint8")
        img_rgba = buffer.reshape(height, width, 4)
        # Drop the alpha channel; overlays are already composited.
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
    """
    Args:
        img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
            the height and width of the image respectively. C is the number of
            color channels. The image is required to be in RGB format since that
            is a requirement of the Matplotlib library. The image is also expected
            to be in the range [0, 255].
        metadata (Metadata): dataset metadata (e.g. class names and colors)
        scale (float): scale factor applied to the rendered output image.
        instance_mode (ColorMode): defines one of the pre-defined style for drawing
            instances on an image.
    """
    # Clamp to the valid pixel range and force uint8 for matplotlib rendering.
    self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
    # if metadata is None:
    #     metadata = MetadataCatalog.get("__nonexist__")
    # NOTE(review): metadata may be None here, yet several draw_* methods call
    # self.metadata.get(...) unconditionally — confirm callers always supply it.
    self.metadata = metadata
    self.output = VisImage(self.img, scale=scale)
    self.cpu_device = torch.device("cpu")
    # Heuristic font size: ~sqrt(image area)/90, floored at 10/scale so text
    # stays legible on small or heavily down-scaled images.
    self._default_font_size = max(
        np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
    )
    self._instance_mode = instance_mode
    self.keypoint_threshold = _KEYPOINT_THRESHOLD
def draw_instance_predictions(self, predictions):
    """
    Draw instance-level prediction results on an image.

    Args:
        predictions (Instances): the output of an instance detection/segmentation
            model. Following fields will be used to draw:
            "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").

    Returns:
        output (VisImage): image object with visualizations.
    """
    # All prediction fields are optional; missing ones are simply not drawn.
    boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
    scores = predictions.scores if predictions.has("scores") else None
    classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
    # NOTE(review): assumes self.metadata is not None (it defaults to None in
    # __init__) — confirm callers construct the Visualizer with metadata.
    labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
    keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
    if predictions.has("pred_masks"):
        masks = np.asarray(predictions.pred_masks)
        masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
    else:
        masks = None
    if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
        # Per-class colors, jittered so instances of the same class differ slightly.
        colors = [
            self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
        ]
        alpha = 0.8
    else:
        colors = None
        alpha = 0.5
    if self._instance_mode == ColorMode.IMAGE_BW:
        # Grayscale the image except where instance masks were predicted.
        self.output.reset_image(
            self._create_grayscale_image(
                (predictions.pred_masks.any(dim=0) > 0).numpy()
                if predictions.has("pred_masks")
                else None
            )
        )
        alpha = 0.3
    self.overlay_instances(
        masks=masks,
        boxes=boxes,
        labels=labels,
        keypoints=keypoints,
        assigned_colors=colors,
        alpha=alpha,
    )
    return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
    """
    Draw semantic segmentation predictions/labels.

    Args:
        sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
            Each value is the integer label of the pixel.
        area_threshold (int): segments with less than `area_threshold` are not drawn.
        alpha (float): the larger it is, the more opaque the segmentations are.

    Returns:
        output (VisImage): image object with visualizations.
    """
    if isinstance(sem_seg, torch.Tensor):
        sem_seg = sem_seg.numpy()
    class_ids, pixel_counts = np.unique(sem_seg, return_counts=True)
    # Draw larger segments first so smaller ones remain visible on top.
    order = np.argsort(-pixel_counts).tolist()
    for class_id in class_ids[order]:
        # Ignore ids without a corresponding stuff class (e.g. "ignore" labels).
        if class_id >= len(self.metadata.stuff_classes):
            continue
        try:
            rgb = [channel / 255 for channel in self.metadata.stuff_colors[class_id]]
        except (AttributeError, IndexError):
            # No color metadata for this class; draw_binary_mask picks a random color.
            rgb = None
        self.draw_binary_mask(
            (sem_seg == class_id).astype(np.uint8),
            color=rgb,
            edge_color=_OFF_WHITE,
            text=self.metadata.stuff_classes[class_id],
            alpha=alpha,
            area_threshold=area_threshold,
        )
    return self.output
def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
    """
    Draw panoptic prediction annotations or results.

    Args:
        panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
            segment.
        segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
            If it is a ``list[dict]``, each dict contains keys "id", "category_id".
            If None, category id of each pixel is computed by
            ``pixel // metadata.label_divisor``.
        area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
        alpha (float): blending coefficient; larger values are more opaque.

    Returns:
        output (VisImage): image object with visualizations.
    """
    pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
    if self._instance_mode == ColorMode.IMAGE_BW:
        # Grayscale everything outside the predicted segments.
        self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))
    # draw mask for all semantic segments first i.e. "stuff"
    for mask, sinfo in pred.semantic_masks():
        category_idx = sinfo["category_id"]
        try:
            mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
        except AttributeError:
            # Metadata carries no stuff_colors; let draw_binary_mask pick a color.
            mask_color = None
        text = self.metadata.stuff_classes[category_idx]
        self.draw_binary_mask(
            mask,
            color=mask_color,
            edge_color=_OFF_WHITE,
            text=text,
            alpha=alpha,
            area_threshold=area_threshold,
        )
    # draw mask for all instances second
    all_instances = list(pred.instance_masks())
    if len(all_instances) == 0:
        return self.output
    masks, sinfo = list(zip(*all_instances))
    category_ids = [x["category_id"] for x in sinfo]
    try:
        scores = [x["score"] for x in sinfo]
    except KeyError:
        # Ground-truth segments carry no "score" key.
        scores = None
    labels = _create_text_labels(
        category_ids,
        scores,
        self.metadata.thing_classes,
        [x.get("iscrowd", 0) for x in sinfo],
    )
    try:
        colors = [
            self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
        ]
    except AttributeError:
        colors = None
    self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
    return self.output

# Kept so older callers using the previous method name keep working.
draw_panoptic_seg_predictions = draw_panoptic_seg  # backward compatibility
def overlay_instances(
    self,
    *,
    boxes=None,
    labels=None,
    masks=None,
    keypoints=None,
    assigned_colors=None,
    alpha=0.5,
):
    """
    Overlay any combination of boxes, masks, labels and keypoints on the image.

    Args:
        boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
            or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
            or a :class:`RotatedBoxes`,
            or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
            for the N objects in a single image,
        labels (list[str]): the text to be displayed for each instance.
        masks (masks-like object): Supported types are:

            * :class:`detectron2.structures.PolygonMasks`,
              :class:`detectron2.structures.BitMasks`.
            * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
              The first level of the list corresponds to individual instances. The second
              level to all the polygon that compose the instance, and the third level
              to the polygon coordinates. The third level should have the format of
              [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
            * list[ndarray]: each ndarray is a binary mask of shape (H, W).
            * list[dict]: each dict is a COCO-style RLE.
        keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
            where the N is the number of instances and K is the number of keypoints.
            The last dimension corresponds to (x, y, visibility or score).
        assigned_colors (list[matplotlib.colors]): a list of colors, where each color
            corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
            for full list of formats that the colors are accepted in.
        alpha (float): blending coefficient for masks; larger is more opaque.

    Returns:
        output (VisImage): image object with visualizations.
    """
    # num_instances is derived from whichever inputs are given; all given
    # inputs must agree on the instance count.
    num_instances = 0
    if boxes is not None:
        boxes = self._convert_boxes(boxes)
        num_instances = len(boxes)
    if masks is not None:
        masks = self._convert_masks(masks)
        if num_instances:
            assert len(masks) == num_instances
        else:
            num_instances = len(masks)
    if keypoints is not None:
        if num_instances:
            assert len(keypoints) == num_instances
        else:
            num_instances = len(keypoints)
        keypoints = self._convert_keypoints(keypoints)
    if labels is not None:
        assert len(labels) == num_instances
    if assigned_colors is None:
        assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
    if num_instances == 0:
        return self.output
    # 5-column boxes are rotated boxes; delegate to the rotated-box path.
    if boxes is not None and boxes.shape[1] == 5:
        return self.overlay_rotated_instances(
            boxes=boxes, labels=labels, assigned_colors=assigned_colors
        )
    # Display in largest to smallest order to reduce occlusion.
    areas = None
    if boxes is not None:
        areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
    elif masks is not None:
        areas = np.asarray([x.area() for x in masks])
    if areas is not None:
        sorted_idxs = np.argsort(-areas).tolist()
        # Re-order overlapped instances in descending order.
        boxes = boxes[sorted_idxs] if boxes is not None else None
        labels = [labels[k] for k in sorted_idxs] if labels is not None else None
        masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
        assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
        keypoints = keypoints[sorted_idxs] if keypoints is not None else None
    for i in range(num_instances):
        color = assigned_colors[i]
        if boxes is not None:
            self.draw_box(boxes[i], edge_color=color)
        if masks is not None:
            for segment in masks[i].polygons:
                self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
        if labels is not None:
            # first get a box to anchor the label text
            if boxes is not None:
                x0, y0, x1, y1 = boxes[i]
                text_pos = (
                    x0,
                    y0,
                )  # if drawing boxes, put text on the box corner.
                horiz_align = "left"
            elif masks is not None:
                # skip small mask without polygon
                if len(masks[i].polygons) == 0:
                    continue
                x0, y0, x1, y1 = masks[i].bbox()
                # draw text in the center (defined by median) when box is not drawn
                # median is less sensitive to outliers.
                text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                horiz_align = "center"
            else:
                continue  # drawing the box confidence for keypoints isn't very useful.
            # for small objects, draw text at the side to avoid occlusion
            instance_area = (y1 - y0) * (x1 - x0)
            if (
                instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                or y1 - y0 < 40 * self.output.scale
            ):
                if y1 >= self.output.height - 5:
                    text_pos = (x1, y0)
                else:
                    text_pos = (x0, y1)
            # Scale the font with the instance height relative to the image size.
            height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                * 0.5
                * self._default_font_size
            )
            self.draw_text(
                labels[i],
                text_pos,
                color=lighter_color,
                horizontal_alignment=horiz_align,
                font_size=font_size,
            )
    # draw keypoints
    if keypoints is not None:
        for keypoints_per_instance in keypoints:
            self.draw_and_connect_keypoints(keypoints_per_instance)
    return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
    """
    Overlay rotated boxes (and optional labels) on the image.

    Args:
        boxes (ndarray): an Nx5 numpy array of
            (x_center, y_center, width, height, angle_degrees) format
            for the N objects in a single image.
        labels (list[str]): the text to be displayed for each instance.
        assigned_colors (list[matplotlib.colors]): a list of colors, where each color
            corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
            for full list of formats that the colors are accepted in.

    Returns:
        output (VisImage): image object with visualizations.
    """
    count = len(boxes)
    if assigned_colors is None:
        assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(count)]
    if count == 0:
        return self.output
    # Draw larger boxes first so smaller ones are not hidden underneath.
    if boxes is not None:
        order = np.argsort(-(boxes[:, 2] * boxes[:, 3])).tolist()
        boxes = boxes[order]
        labels = [labels[k] for k in order] if labels is not None else None
        colors = [assigned_colors[k] for k in order]
    for idx in range(count):
        self.draw_rotated_box_with_label(
            boxes[idx],
            edge_color=colors[idx],
            label=labels[idx] if labels is not None else None,
        )
    return self.output
def draw_and_connect_keypoints(self, keypoints):
    """
    Draws keypoints of an instance and follows the rules for keypoint connections
    to draw lines between appropriate keypoints. This follows color heuristics for
    line color.

    Args:
        keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
            and the last dimension corresponds to (x, y, probability).

    Returns:
        output (VisImage): image object with visualizations.
    """
    # Map keypoint name -> (x, y) for keypoints above the confidence threshold.
    visible = {}
    keypoint_names = self.metadata.get("keypoint_names")
    for idx, keypoint in enumerate(keypoints):
        # draw keypoint
        x, y, prob = keypoint
        if prob > self.keypoint_threshold:
            self.draw_circle((x, y), color=_RED)
            if keypoint_names:
                keypoint_name = keypoint_names[idx]
                visible[keypoint_name] = (x, y)
    if self.metadata.get("keypoint_connection_rules"):
        # Connect pairs only when both endpoints passed the threshold.
        for kp0, kp1, color in self.metadata.keypoint_connection_rules:
            if kp0 in visible and kp1 in visible:
                x0, y0 = visible[kp0]
                x1, y1 = visible[kp1]
                color = tuple(x / 255.0 for x in color)
                self.draw_line([x0, x1], [y0, y1], color=color)
    # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
    # Note that this strategy is specific to person keypoints.
    # For other keypoints, it should just do nothing
    try:
        ls_x, ls_y = visible["left_shoulder"]
        rs_x, rs_y = visible["right_shoulder"]
        mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
    except KeyError:
        # Not a person skeleton (or shoulders not visible) — skip torso lines.
        pass
    else:
        # draw line from nose to mid-shoulder
        nose_x, nose_y = visible.get("nose", (None, None))
        if nose_x is not None:
            self.draw_line(
                [nose_x, mid_shoulder_x],
                [nose_y, mid_shoulder_y],
                color=_RED,
            )
        try:
            # draw line from mid-shoulder to mid-hip
            lh_x, lh_y = visible["left_hip"]
            rh_x, rh_y = visible["right_hip"]
        except KeyError:
            pass
        else:
            mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
            self.draw_line(
                [mid_hip_x, mid_shoulder_x],
                [mid_hip_y, mid_shoulder_y],
                color=_RED,
            )
    return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={
"facecolor": "black",
"alpha": 0.8,
"pad": 0.7,
"edgecolor": "none",
},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
    """
    Draw an (unfilled) axis-aligned rectangle.

    Args:
        box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
            are the coordinates of the image's top left corner. x1 and y1 are the
            coordinates of the image's bottom right corner.
        alpha (float): blending coefficient. Smaller values lead to more transparent boxes.
        edge_color: color of the outline of the box. Refer to `matplotlib.colors`
            for full list of formats that are accepted.
        line_style (string): the string to use to create the outline of the boxes.

    Returns:
        output (VisImage): image object with box drawn.
    """
    x0, y0, x1, y1 = box_coord
    # Line width scales with the default font size, but never thinner than 1.
    stroke = max(self._default_font_size / 4, 1)
    self.output.ax.add_patch(
        mpl.patches.Rectangle(
            (x0, y0),
            x1 - x0,
            y1 - y0,
            fill=False,
            edgecolor=edge_color,
            linewidth=stroke * self.output.scale,
            alpha=alpha,
            linestyle=line_style,
        )
    )
    return self.output
def draw_rotated_box_with_label(
    self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
):
    """
    Draw a rotated box with label on its top-left corner.

    Args:
        rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
            where cnt_x and cnt_y are the center coordinates of the box.
            w and h are the width and height of the box. angle represents how
            many degrees the box is rotated CCW with regard to the 0-degree box.
        alpha (float): blending coefficient. Smaller values lead to more transparent boxes.
        edge_color: color of the outline of the box. Refer to `matplotlib.colors`
            for full list of formats that are accepted.
        line_style (string): the string to use to create the outline of the boxes.
        label (string): label for rotated box. It will not be rendered when set to None.

    Returns:
        output (VisImage): image object with box drawn.
    """
    cnt_x, cnt_y, w, h, angle = rotated_box
    area = w * h
    # use thinner lines when the box is small
    linewidth = self._default_font_size / (
        6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
    )
    # Rotate the four corners of the axis-aligned box around the center.
    theta = angle * math.pi / 180.0
    c = math.cos(theta)
    s = math.sin(theta)
    rect = [
        (-w / 2, h / 2),
        (-w / 2, -h / 2),
        (w / 2, -h / 2),
        (w / 2, h / 2),
    ]
    # x: left->right ; y: top->down
    rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
    for k in range(4):
        j = (k + 1) % 4
        # One edge (k == 1) is dashed — presumably to make the box's
        # orientation visually unambiguous; confirm intent upstream.
        self.draw_line(
            [rotated_rect[k][0], rotated_rect[j][0]],
            [rotated_rect[k][1], rotated_rect[j][1]],
            color=edge_color,
            linestyle="--" if k == 1 else line_style,
            linewidth=linewidth,
        )
    if label is not None:
        text_pos = rotated_rect[1]  # topleft corner
        # Font size scales with box height relative to the image size.
        height_ratio = h / np.sqrt(self.output.height * self.output.width)
        label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
        font_size = (
            np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
        )
        self.draw_text(
            label,
            text_pos,
            color=label_color,
            font_size=font_size,
            rotation=angle,
        )
    return self.output
def draw_circle(self, circle_coord, color, radius=3):
    """
    Draw an (unfilled) circle outline.

    Args:
        circle_coord (list(int) or tuple(int)): contains the x and y coordinates
            of the center of the circle.
        color: color of the circle. Refer to `matplotlib.colors` for a full list of
            formats that are accepted.
        radius (int): radius of the circle.

    Returns:
        output (VisImage): image object with circle drawn.
    """
    # fill=False draws only the outline; `color` applies to the edge.
    # (The previous `x, y = circle_coord` unpacking was dead code and was removed.)
    self.output.ax.add_patch(
        mpl.patches.Circle(circle_coord, radius=radius, fill=False, color=color)
    )
    return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
    """
    Draw a polyline through the given points.

    Args:
        x_data (list[int]): a list containing x values of all the points being drawn.
            Length of list should match the length of y_data.
        y_data (list[int]): a list containing y values of all the points being drawn.
            Length of list should match the length of x_data.
        color: color of the line. Refer to `matplotlib.colors` for a full list of
            formats that are accepted.
        linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
            for a full list of formats that are accepted.
        linewidth (float or None): width of the line. When it's None,
            a default value will be computed and used.

    Returns:
        output (VisImage): image object with line drawn.
    """
    if linewidth is None:
        linewidth = self._default_font_size / 3
    # Never draw thinner than one pixel.
    width = max(linewidth, 1)
    self.output.ax.add_line(
        mpl.lines.Line2D(
            x_data,
            y_data,
            linewidth=width * self.output.scale,
            color=color,
            linestyle=linestyle,
        )
    )
    return self.output
def draw_binary_mask(
    self,
    binary_mask,
    color=None,
    *,
    edge_color=None,
    text=None,
    alpha=0.5,
    area_threshold=10,
):
    """
    Args:
        binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
            W is the image width. Each value in the array is either a 0 or 1 value of uint8
            type.
        color: color of the mask. Refer to `matplotlib.colors` for a full list of
            formats that are accepted. If None, will pick a random color.
        edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
            full list of formats that are accepted.
        text (str): if not None, will be drawn on the object.
        alpha (float): blending coefficient. Smaller values lead to more transparent masks.
        area_threshold (float): a connected component smaller than this area will not be shown.

    Returns:
        output (VisImage): image object with mask drawn.
    """
    if color is None:
        color = random_color(rgb=True, maximum=1)
    color = mplc.to_rgb(color)
    has_valid_segment = False
    binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
    mask = GenericMask(binary_mask, self.output.height, self.output.width)
    shape2d = (binary_mask.shape[0], binary_mask.shape[1])
    if not mask.has_holes:
        # draw polygons for regular masks
        for segment in mask.polygons:
            # Segment area in pixels, computed via COCO RLE utilities.
            area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
            if area < (area_threshold or 0):
                continue
            has_valid_segment = True
            segment = segment.reshape(-1, 2)
            self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
    else:
        # Masks with holes cannot be drawn as simple polygons; paint an RGBA
        # overlay instead (alpha is non-zero only where mask == 1).
        # TODO: Use Path/PathPatch to draw vector graphics:
        # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
        rgba = np.zeros(shape2d + (4,), dtype="float32")
        rgba[:, :, :3] = color
        rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
        has_valid_segment = True
        self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
    if text is not None and has_valid_segment:
        lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
        self._draw_text_in_mask(binary_mask, text, lighter_color)
    return self.output
def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
    """
    Draw a soft (probabilistic) mask as a translucent color overlay.

    Args:
        soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
        color: color of the mask. Refer to `matplotlib.colors` for a full list of
            formats that are accepted. If None, will pick a random color.
        text (str): if not None, will be drawn on the object.
        alpha (float): blending coefficient. Smaller values lead to more transparent masks.

    Returns:
        output (VisImage): image object with mask drawn.
    """
    if color is None:
        color = random_color(rgb=True, maximum=1)
    color = mplc.to_rgb(color)
    rows, cols = soft_mask.shape[0], soft_mask.shape[1]
    # Per-pixel opacity is the mask probability scaled by alpha.
    overlay = np.zeros((rows, cols) + (4,), dtype="float32")
    overlay[:, :, :3] = color
    overlay[:, :, 3] = soft_mask * alpha
    self.output.ax.imshow(overlay, extent=(0, self.output.width, self.output.height, 0))
    if text is not None:
        label_color = self._change_color_brightness(color, brightness_factor=0.7)
        # Threshold at 0.5 to pick where the text goes.
        self._draw_text_in_mask((soft_mask > 0.5).astype("uint8"), text, label_color)
    return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
    """
    Args:
        segment: numpy array of shape Nx2, containing all the points in the polygon.
        color: fill color of the polygon. Refer to `matplotlib.colors` for a full list of
            formats that are accepted.
        edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
            full list of formats that are accepted. If None, no explicit edge color
            is set and matplotlib's default is used.
        alpha (float): blending coefficient. Smaller values lead to more transparent masks.

    Returns:
        output (VisImage): image object with polygon drawn.
    """
    # (A dead, commented-out branch that derived a darker edge shade from the
    # fill color was removed; the docstring above now matches the behavior.)
    if edge_color is not None:
        # Force the edge fully opaque regardless of the fill alpha.
        edge_color = mplc.to_rgb(edge_color) + (1,)
    polygon = mpl.patches.Polygon(
        segment,
        fill=True,
        facecolor=mplc.to_rgb(color) + (alpha,),
        edgecolor=edge_color,
        linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
    )
    self.output.ax.add_patch(polygon)
    return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
    """
    Build a grayscale copy of the original image.

    Pixels covered by `mask` (if given) keep their original colors.
    """
    gray = self.img.astype("f4").mean(axis=2)
    gray = np.stack([gray] * 3, axis=2)
    if mask is not None:
        gray[mask] = self.img[mask]
    return gray
def _change_color_brightness(self, color, brightness_factor):
    """
    Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
    less or more saturation than the original color.

    Args:
        color: color of the polygon. Refer to `matplotlib.colors` for a full list of
            formats that are accepted.
        brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
            0 will correspond to no change, a factor in [-1.0, 0) range will result in
            a darker color and a factor in (0, 1.0] range will result in a lighter color.

    Returns:
        modified_color (tuple[double]): a tuple containing the RGB values of the
            modified color. Each value in the tuple is in the [0.0, 1.0] range.
    """
    assert brightness_factor >= -1.0 and brightness_factor <= 1.0
    # Work in HLS space: scale the lightness channel, then convert back.
    hue, lightness, saturation = colorsys.rgb_to_hls(*mplc.to_rgb(color))
    lightness = lightness + (brightness_factor * lightness)
    lightness = max(0.0, min(1.0, lightness))
    return colorsys.hls_to_rgb(hue, lightness, saturation)
def _convert_masks(self, masks_or_polygons):
    """
    Normalize masks given in various formats into a list of :class:`GenericMask`.

    Returns:
        list[GenericMask]:
    """
    raw = masks_or_polygons
    if isinstance(raw, torch.Tensor):
        raw = raw.numpy()
    # Pass GenericMask instances through untouched; wrap everything else.
    return [
        x
        if isinstance(x, GenericMask)
        else GenericMask(x, self.output.height, self.output.width)
        for x in raw
    ]
def _draw_text_in_mask(self, binary_mask, text, color):
    """
    Find proper places to draw text given a binary mask.

    Draws `text` on the largest connected component and on any other
    component larger than _LARGE_MASK_AREA_THRESH.
    """
    # TODO sometimes drawn on wrong objects. the heuristics here can improve.
    _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
    # stats row 0 is the background component; an empty slice means no foreground.
    if stats[1:, -1].size == 0:
        return
    largest_component_id = np.argmax(stats[1:, -1]) + 1
    # draw text on the largest component, as well as other very large components.
    for cid in range(1, _num_cc):
        if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
            # median is more stable than centroid
            # center = centroids[largest_component_id]
            center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
            self.draw_text(text, center, color=color)
def get_output(self):
    """
    Returns:
        output (VisImage): the image output containing all visualizations
            added so far.
    """
    return self.output
| 48,577 | 38.526444 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/dl_utils.py | import subprocess
from keras import Model
# from keras.utils import multi_gpu_model
# from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
def get_available_gpus(num_gpus: int = None):
    """Get gpu ids for gpus that are >95% free.

    Tensorflow does not support checking free memory on gpus.
    This is a crude method that relies on `nvidia-smi` to
    determine which gpus are occupied and which are free.

    Args:
        num_gpus: Number of requested gpus. If not specified,
            ids of all available gpu(s) are returned.

    Returns:
        List[int]: List of gpu ids that are free. Length
            will equal `num_gpus`, if specified. ``None`` when
            `nvidia-smi` fails (e.g. not installed).
    """
    # Built-in tensorflow gpu id.
    assert isinstance(num_gpus, (type(None), int))
    if num_gpus == 0:
        # Convention: -1 selects the CPU.
        return [-1]
    requested = num_gpus
    try:
        gpu_count = (
            len(subprocess.check_output("nvidia-smi --list-gpus", shell=True).decode().split("\n"))
            - 1
        )
        smi_out = subprocess.check_output("nvidia-smi | grep MiB", shell=True).decode()
    except subprocess.CalledProcessError:
        return None
    mib_tokens = [tok for tok in smi_out.split() if "MiB" in tok]
    # First 2 * gpu_count tokens correspond to memory for gpus
    # Order: (occupied-0, total-0, occupied-1, total-1, ...)
    mib_values = [float(tok[:-3]) for tok in mib_tokens]
    occupied_fraction = [
        mib_values[2 * gid] / mib_values[2 * gid + 1] for gid in range(gpu_count)
    ]
    free_gpus = [gid for gid, frac in enumerate(occupied_fraction) if frac < 0.05]
    if requested and requested > len(free_gpus):
        raise ValueError(
            "Requested {} gpus, only {} are free".format(requested, len(free_gpus))
        )
    return free_gpus[:requested] if requested else free_gpus
class ModelMGPU(Model):
    """Wrapper for distributing model across multiple gpus"""

    # NOTE(review): `multi_gpu_model` is never imported (both import lines at
    # the top of this module are commented out), so constructing this class
    # raises NameError at runtime — confirm whether this wrapper is still used.
    def __init__(self, ser_model, gpus):
        # Build the multi-gpu replica and adopt its attributes wholesale, while
        # keeping a reference to the original (serial) model for load/save.
        pmodel = multi_gpu_model(ser_model, gpus)  # noqa: F821
        self.__dict__.update(pmodel.__dict__)
        self._smodel = ser_model

    def __getattribute__(self, attrname):
        """Override load and save methods to be used from the serial-model. The
        serial-model holds references to the weights in the multi-gpu model.
        """
        # return Model.__getattribute__(self, attrname)
        # Any attribute whose name contains "load" or "save" is delegated to
        # the serial model so checkpoints are written without replica scoping.
        if "load" in attrname or "save" in attrname:
            return getattr(self._smodel, attrname)
        return super(ModelMGPU, self).__getattribute__(attrname)
| 2,610 | 34.767123 | 99 | py |
Comp2Comp | Comp2Comp-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'comp2comp'
copyright = '2023, StanfordMIMI'
author = 'StanfordMIMI'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

# Adapted from https://github.com/pyvoxel/pyvoxel
# NOTE: "sphinx.ext.githubpages" was previously listed twice; the duplicate
# entry has been removed.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinx.ext.napoleon",
    "sphinxcontrib.bibtex",
    "sphinx_rtd_theme",
    "m2r2",
]

# Generate autosummary stub pages automatically, including imported members.
autosummary_generate = True
autosummary_imported_members = True
bibtex_bibfiles = ["references.bib"]
templates_path = ['_templates']
exclude_patterns = []
pygments_style = "sphinx"

# -- HTML output --------------------------------------------------------------
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = "Comp2Compdoc"
html_static_path = ["_static"]
intersphinx_mapping = {"numpy": ("https://numpy.org/doc/stable/", None)}
html_theme_options = {"navigation_depth": 2}
source_suffix = [".rst", ".md"]
todo_include_todos = True
napoleon_use_ivar = True
napoleon_google_docstring = True
html_show_sourcelink = False
| 1,598 | 26.568966 | 85 | py |
igmspec | igmspec-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# igmspec documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 13 13:39:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../igmspec'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Napoleon settings
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# (The u'' prefixes are Python-2 era and harmless under Python 3.)
project = u'igmspec'
copyright = u'2016, Prochaska, and Associates'
author = u'Prochaska, and Associates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
# NOTE: keep `version` and `release` in sync when bumping.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Sphinx >= 5 no longer accepts `language = None` ("Invalid configuration
# value"); the documented way to say "English" is the explicit language code.
language = 'en'
# General-configuration odds and ends. Settings left commented out below are
# the sphinx-quickstart defaults, kept for reference.
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
# Builtin 'sphinxdoc' theme; the Read the Docs theme is kept above as a
# commented-out alternative.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Every page ('**') gets the local TOC, global TOC, prev/next links and the
# source link in the sidebar.
html_sidebars = {
    '**': ['localtoc.html', 'globaltoc.html', 'relations.html', 'sourcelink.html']
}
# Remaining HTML options — everything commented out is at its Sphinx default.
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'igmspecdoc'
# -- Options for LaTeX output ---------------------------------------------
# All keys are left commented out, so the LaTeX builder runs with defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'igmspec.tex', u'igmspec Documentation',
     u'Prochaska, and Associates', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'igmspec', u'igmspec Documentation',
     [author], 1)  # section 1: user commands
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is the sphinx-quickstart
# placeholder — replace it with a real description of igmspec.
texinfo_documents = [
    (master_doc, 'igmspec', u'igmspec Documentation',
     author, 'igmspec', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Use the named-dictionary form {name: (target, inventory)}: the bare
# {uri: None} form is deprecated and rejected by newer Sphinx releases.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
| 10,057 | 31.031847 | 80 | py |
neural-splines | neural-splines-main/fit-grid.py | import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
import tqdm
from scipy.ndimage import binary_erosion
from skimage.measure import marching_cubes
from neural_splines import load_point_cloud, point_cloud_bounding_box, fit_model_to_pointcloud, eval_model_on_grid, \
voxel_chunks, points_in_bbox, affine_transform_pointcloud, get_weights
def main():
    """Reconstruct a surface from an oriented point cloud with Neural Splines.

    The (scaled) bounding box of the input is split into ``cells_per_axis``^3
    overlapping cells.  A kernel ridge regression model is fit independently to
    the points of each cell and evaluated on that cell's slice of a shared
    voxel grid; overlapping predictions are blended using the partition-of-unity
    weights returned by ``get_weights``.  The zero level set of the blended grid
    is extracted with marching cubes, optionally trimmed far from the input
    samples, and written to ``--out``.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument("input_point_cloud", type=str, help="Path to the input point cloud to reconstruct.")
    argparser.add_argument("num_nystrom_samples", type=int, default=-1,
                           help="Number of Nyström samples to use for kernel ridge regression. "
                                "If negative, don't use Nyström sampling."
                                "This is the number of basis centers to use to represent the final function. "
                                "If this value is too small, the reconstruction can miss details in the input. "
                                "Values between 10-100 times sqrt(N) (where N = number of input points) are "
                                "generally good depending on the complexity of the input shape.")
    argparser.add_argument("grid_size", type=int,
                           help="When reconstructing the mesh, use this many voxels along the longest side of the "
                                "bounding box.")
    argparser.add_argument("cells_per_axis", type=int,
                           help="Number of cells per axis to split the input along")
    argparser.add_argument("--trim", type=float, default=-1.0,
                           help="If set to a positive value, trim vertices of the reconstructed mesh whose nearest "
                                "point in the input is greater than this value. The units of this argument are voxels "
                                "(where the grid_size determines the size of a voxel) Default is -1.0.")
    argparser.add_argument("--overlap", type=float, default=0.25,
                           help="By how much should each grid cell overlap as a fraction of the bounding "
                                "box diagonal. Default is 0.25")
    # FIX: added the missing trailing space after "a" so the help text doesn't
    # render as "abicubic spline".
    argparser.add_argument("--weight-type", type=str, default='trilinear',
                           help="How to interpolate predictions in overlapping cells. Must be one of 'trilinear' "
                                "or 'none', where 'trilinear' interpolates using a partition of unity defined using a "
                                "bicubic spline and 'none' does not interpolate overlapping cells. "
                                "Default is 'trilinear'.")
    argparser.add_argument("--min-pts-per-cell", type=int, default=0,
                           help="Ignore cells with fewer points than this value. Default is zero.")
    argparser.add_argument("--eps", type=float, default=0.05,
                           help="Perturbation amount for finite differencing in voxel units. i.e. we perturb points by "
                                "eps times the diagonal length of a voxel "
                                "(where the grid_size determines the size of a voxel). "
                                "To approximate the gradient of the function, we sample points +/- eps "
                                "along the normal direction.")
    argparser.add_argument("--scale", type=float, default=1.1,
                           help="Reconstruct the surface in a bounding box whose diameter is --scale times bigger than"
                                " the diameter of the bounding box of the input points. Defaults is 1.1.")
    argparser.add_argument("--regularization", type=float, default=1e-10,
                           help="Regularization penalty for kernel ridge regression. Default is 1e-10.")
    argparser.add_argument("--nystrom-mode", type=str, default="blue-noise",
                           help="How to generate nystrom samples. Default is 'k-means'. Must be one of "
                                "(1) 'random': choose Nyström samples at random from the input, "
                                "(2) 'blue-noise': downsample the input with blue noise to get Nyström samples, or "
                                "(3) 'k-means': use k-means clustering to generate Nyström samples. "
                                "Default is 'blue-noise'")
    argparser.add_argument("--voxel-downsample-threshold", type=int, default=150_000,
                           help="If the number of input points is greater than this value, downsample it by "
                                "averaging points and normals within voxels on a grid. The size of the voxel grid is "
                                "determined via the --grid-size argument. Default is 150_000."
                                "NOTE: This can massively speed up reconstruction for very large point clouds and "
                                "generally won't throw away any details.")
    argparser.add_argument("--kernel", type=str, default="neural-spline",
                           help="Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or "
                                "'linear-angle'. Default is 'neural-spline'."
                                "NOTE: The spherical laplace is a good approximation to the neural tangent kernel"
                                "(see https://arxiv.org/pdf/2007.01580.pdf for details)")
    argparser.add_argument("--seed", type=int, default=-1, help="Random number generator seed to use.")
    argparser.add_argument("--out", type=str, default="recon.ply", help="Path to file to save reconstructed mesh in.")
    # FIX: np.savez writes a .npz archive, so the produced file is
    # {out}.grid.npz (the help previously claimed .npy).
    argparser.add_argument("--save-grid", action="store_true",
                           help="If set, save the function evaluated on a voxel grid to {out}.grid.npz "
                                "where out is the value of the --out argument.")
    # NOTE(review): --save-points is accepted but never read anywhere below —
    # either wire it up or drop the option.
    argparser.add_argument("--save-points", action="store_true",
                           help="If set, save the tripled input points, their occupancies, and the Nyström samples "
                                "to an npz file named {out}.pts.npz where out is the value of the --out argument.")
    argparser.add_argument("--cg-max-iters", type=int, default=20,
                           help="Maximum number of conjugate gradient iterations. Default is 20.")
    argparser.add_argument("--cg-stop-thresh", type=float, default=1e-5,
                           help="Stop threshold for the conjugate gradient algorithm. Default is 1e-5.")
    argparser.add_argument("--dtype", type=str, default="float64",
                           help="Scalar type of the data. Must be one of 'float32' or 'float64'. "
                                "Warning: float32 may not work very well for complicated inputs.")
    argparser.add_argument("--outer-layer-variance", type=float, default=0.001,
                           help="Variance of the outer layer of the neural network from which the neural "
                                "spline kernel arises from. Default is 0.001.")
    argparser.add_argument("--use-abs-units", action="store_true",
                           help="If set, then use absolute units instead of voxel units for --eps and --trim.")
    argparser.add_argument("--verbose", action="store_true", help="Spam your terminal with debug information")
    args = argparser.parse_args()

    # Scalar dtype for all torch tensors used during fitting.
    if args.dtype == "float64":
        dtype = torch.float64
    elif args.dtype == "float32":
        dtype = torch.float32
    else:
        raise ValueError(f"invalid --dtype argument. Must be one of 'float32' or 'float64' but got {args.dtype}")

    # Seed both torch and numpy so runs are reproducible; if no seed was given,
    # draw one at random and print it so the run can be reproduced later.
    if args.seed > 0:
        seed = args.seed
    else:
        seed = np.random.randint(2 ** 32 - 1)
    torch.manual_seed(seed)
    np.random.seed(seed)
    print("Using random seed", seed)

    x, n = load_point_cloud(args.input_point_cloud, dtype=dtype)
    scaled_bbox = point_cloud_bounding_box(x, args.scale)

    # Per-axis voxel counts: the longest bounding-box side gets grid_size voxels.
    out_grid_size = torch.round(scaled_bbox[1] / scaled_bbox[1].max() * args.grid_size).to(torch.int32)
    voxel_size = scaled_bbox[1] / out_grid_size  # size of one voxel

    # Downsample points to grid resolution if there are enough points
    if x.shape[0] > args.voxel_downsample_threshold:
        print("Downsampling input point cloud to voxel resolution.")
        x, n, _ = pcu.downsample_point_cloud_voxel_grid(voxel_size, x.numpy(), n.numpy(),
                                                        min_bound=scaled_bbox[0],
                                                        max_bound=scaled_bbox[0] + scaled_bbox[1])
        x, n = torch.from_numpy(x), torch.from_numpy(n)

    # Voxel grid accumulating the blended predictions, plus a mask of which
    # voxels were covered by at least one fitted cell.
    out_grid = torch.zeros(*out_grid_size, dtype=torch.float32)
    out_mask = torch.zeros(*out_grid_size, dtype=torch.bool)

    print(f"Fitting {x.shape[0]} points using {args.cells_per_axis ** 3} cells")

    # Iterate over each grid cell
    tqdm_bar = tqdm.tqdm(total=args.cells_per_axis ** 3)
    current_num_points = 0  # The number of points in this cell (used to log to the tqdm bar)
    for cell_idx, cell_vmin, cell_vmax in voxel_chunks(out_grid_size, args.cells_per_axis):
        tqdm_bar.set_postfix({"Cell": str(cell_idx), "Num Points": current_num_points})

        # Bounding box of the cell in world coordinates
        cell_vox_size = cell_vmax - cell_vmin
        cell_bbox = scaled_bbox[0] + cell_vmin * voxel_size, cell_vox_size * voxel_size

        # If there are no (or too few) points in this region, then skip it
        mask_cell = points_in_bbox(x, cell_bbox)
        if mask_cell.sum() <= max(args.min_pts_per_cell, 0):
            tqdm_bar.update(1)
            continue

        # Amount of voxels by which to pad each cell in each direction
        cell_pad_vox = torch.round(0.5 * args.overlap * out_grid_size.to(torch.float64) / args.cells_per_axis)

        # Minimum and maximum voxel indices of the padded cell, clamped to the grid
        cell_pvmin = torch.maximum(cell_vmin - cell_pad_vox, torch.zeros(3).to(cell_vmin)).to(torch.int32)
        cell_pvmax = torch.minimum(cell_vmax + cell_pad_vox, torch.tensor(out_grid.shape).to(cell_vmin)).to(torch.int32)

        # Bounding box and point mask for the padded cell
        cell_pad_amount = cell_pad_vox * voxel_size
        padded_cell_bbox = cell_bbox[0] - cell_pad_amount, cell_bbox[1] + 2.0 * cell_pad_amount
        mask_padded_cell = points_in_bbox(x, padded_cell_bbox)

        # Center the cell so it lies in [-0.5, 0.5]^3
        tx = -padded_cell_bbox[0] - 0.5 * padded_cell_bbox[1], 1.0 / torch.max(padded_cell_bbox[1])
        x_cell = x[mask_padded_cell].clone()
        n_cell = n[mask_padded_cell].clone()
        x_cell = affine_transform_pointcloud(x_cell, tx)

        current_num_points = x_cell.shape[0]
        tqdm_bar.set_postfix({"Cell": str(cell_idx), "Num Points": current_num_points})

        # Cell trilinear blending weights, and index range for which voxels to reconstruct
        weights, idxmin, idxmax = get_weights(cell_vmin, cell_vmax, cell_pvmin, cell_pvmax, args.weight_type)

        # Finite differencing epsilon in world units
        if args.use_abs_units:
            eps_world_coords = args.eps
        else:
            eps_world_coords = args.eps * torch.norm(voxel_size).item()

        # Fit the model and evaluate it on the subset of voxels corresponding to this cell.
        # NOTE(review): verbosity_level semantics belong to fit_model_to_pointcloud;
        # 7 appears to be the "quiet" setting here — confirm against the library.
        cell_model, _ = fit_model_to_pointcloud(x_cell, n_cell,
                                                num_ny=args.num_nystrom_samples, eps=eps_world_coords,
                                                kernel=args.kernel, reg=args.regularization, ny_mode=args.nystrom_mode,
                                                cg_max_iters=args.cg_max_iters, cg_stop_thresh=args.cg_stop_thresh,
                                                outer_layer_variance=args.outer_layer_variance,
                                                verbosity_level=7 if not args.verbose else 0,
                                                normalize=False)
        cell_recon = eval_model_on_grid(cell_model, scaled_bbox, tx, out_grid_size,
                                        cell_vox_min=idxmin, cell_vox_max=idxmax, print_message=False)

        # Blend this cell's prediction into the shared grid over the padded range,
        # but mark only the unpadded cell as covered.
        w_cell_recon = weights * cell_recon
        out_grid[idxmin[0]:idxmax[0], idxmin[1]:idxmax[1], idxmin[2]:idxmax[2]] += w_cell_recon
        out_mask[cell_vmin[0]:cell_vmax[0], cell_vmin[1]:cell_vmax[1], cell_vmin[2]:cell_vmax[2]] = True
        tqdm_bar.update(1)

    # Voxels never covered by a cell get a positive value (treated as outside).
    out_grid[torch.logical_not(out_mask)] = 1.0

    if args.save_grid:
        np.savez(args.out + ".grid", grid=out_grid.detach().cpu().numpy(), mask=out_mask.detach().cpu().numpy(),
                 bbox=[b.numpy() for b in scaled_bbox])

    # Erode the mask so we don't get weird boundaries.
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    eroded_mask = binary_erosion(out_mask.numpy().astype(bool), np.ones([3, 3, 3]).astype(bool))
    v, f, n, c = marching_cubes(out_grid.numpy(), level=0.0, mask=eroded_mask, spacing=voxel_size,
                                gradient_direction='ascent')
    v += scaled_bbox[0].numpy() + 0.5 * voxel_size.numpy()

    # Possibly trim regions which don't contain samples
    if args.trim > 0.0:
        # Trim distance in world coordinates
        if args.use_abs_units:
            trim_dist_world = args.trim
        else:
            trim_dist_world = args.trim * torch.norm(voxel_size).item()
        nn_dist, _ = pcu.k_nearest_neighbors(v, x.numpy(), k=2)
        nn_dist = nn_dist[:, 1]
        # Keep only faces whose every vertex is within the trim distance of the input.
        f_mask = np.stack([nn_dist[f[:, i]] < trim_dist_world for i in range(f.shape[1])], axis=-1)
        f_mask = np.all(f_mask, axis=-1)
        f = f[f_mask]

    pcu.save_mesh_vfn(args.out, v, f, n)


if __name__ == "__main__":
    main()
| 13,776 | 60.231111 | 120 | py |
neural-splines | neural-splines-main/trim-surface.py | import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
from neural_splines.geometry import point_cloud_bounding_box
def main():
    """Drop faces of a reconstructed mesh that lie far from the input samples.

    Recomputes the voxel size exactly as fit.py / fit-grid.py do (so the trim
    distance can be given in voxel units), measures the distance from every
    mesh vertex to the input point cloud, and keeps only faces whose vertices
    all lie within the trim distance.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_points", type=str)
    parser.add_argument("mesh", type=str)
    parser.add_argument("grid_size", type=int,
                        help="When trimming the mesh, use this many voxels along the longest side of the "
                             "bounding box. This is used to determine the size of a voxel and "
                             "hence the units of distance to use. You should set this to the save value you used in "
                             "fit.py or fit-grid.py")
    parser.add_argument("trim_distance", type=float,
                        help="Trim vertices of the reconstructed mesh whose nearest "
                             "point in the input is greater than this value. The units of this argument are voxels "
                             "(where the cells_per_axis determines the size of a voxel) Default is -1.0.")
    parser.add_argument("--scale", type=float, default=1.1,
                        help="Pad the bounding box of the input point cloud by a factor if --scale. "
                             "i.e. the the diameter of the padded bounding box is --scale times bigger than the "
                             "diameter of the bounding box of the input points. Defaults is 1.1.")
    parser.add_argument("--out", type=str, default="trimmed.ply", help="Path to file to save trim mesh to.")
    parser.add_argument("--use-abs-units", action="store_true",
                        help="If set, then use absolute units instead of voxel units for the trim distance.")
    args = parser.parse_args()

    print(f"Loading input point cloud {args.input_points}")
    samples = pcu.load_mesh_v(args.input_points)

    # Reproduce the voxel sizing used during fitting: the longest side of the
    # scaled bounding box gets exactly grid_size voxels.
    bbox = point_cloud_bounding_box(torch.from_numpy(samples), args.scale)
    grid_dims = np.round(bbox[1].numpy() / bbox[1].max().item() * args.grid_size).astype(np.int32)
    voxel_size = bbox[1] / grid_dims  # size of one voxel

    print(f"Loading reconstructed mesh {args.mesh}")
    verts, faces, normals = pcu.load_mesh_vfn(args.mesh)

    print("Trimming mesh...")
    # Trim distance expressed in world coordinates.
    trim_dist_world = args.trim_distance if args.use_abs_units \
        else args.trim_distance * torch.norm(voxel_size).item()

    # Distance from each mesh vertex to the input cloud (second neighbor,
    # matching the original's nn_dist[:, 1] selection).
    nn_dists, _ = pcu.k_nearest_neighbors(verts, samples, k=2)
    nn_dists = nn_dists[:, 1]

    # A face survives only if every one of its vertices is close enough.
    keep = nn_dists[faces[:, 0]] < trim_dist_world
    for col in range(1, faces.shape[1]):
        keep &= nn_dists[faces[:, col]] < trim_dist_world
    faces = faces[keep]

    print("Saving trimmed mesh...")
    pcu.save_mesh_vfn(args.out, verts, faces, normals)
    print("Done!")


if __name__ == "__main__":
    main()
| 2,884 | 47.083333 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.