class BaseNetworkFactory(ABC):
def __init__(self, input_spec: InputSpec, network_config: NetworkConfig, conv_config: Optional[ConvolutionConfig]=None, net_weights: Optional[NetWeights]=None):
if (net_weights is None):
net_weights = NetWeights(x=NetWeight(1.0, 1.0, 1.0), v=NetWeight(1.0, 1.0, 1.0))
self.nw = net_weights
self.input_spec = input_spec
self.network_config = network_config
self.conv_config = conv_config
self.config = {'net_weights': self.nw, 'input_spec': self.input_spec, 'network_config': self.network_config}
if (conv_config is not None):
self.config.update({'conv_config': asdict(self.conv_config)})
def get_build_configs(self):
return {'xnet': {'net_weight': self.nw.x, 'xshape': self.input_spec.xshape, 'input_shapes': self.input_spec.xnet, 'network_config': self.network_config, 'conv_config': self.conv_config}, 'vnet': {'net_weight': self.nw.v, 'xshape': self.input_spec.xshape, 'input_shapes': self.input_spec.vnet, 'network_config': self.network_config}}
@abstractmethod
def build_networks(self, n: int=0, split_xnets: bool=True, group: str='U1'):
'Build Networks.'
pass
|
class TimePotentialSU3(nn.Module):
def __init__(self) -> None:
super(TimePotentialSU3, self).__init__()
self.full_eigdecomp = su3_to_eigs_cdesa
self.deepset = ComplexDeepTimeSet(1, 1, hidden_channels=64)
def forward(self, t: Tensor, x: Tensor) -> Tensor:
x = self.full_eigdecomp(x)
x = x.unsqueeze((- 1))
x = self.deepset(t, x)
return x
|
class SU3TimeEquivariantVectorField(nn.Module):
def __init__(self, func):
super(SU3TimeEquivariantVectorField, self).__init__()
self.func = func
def forward(self, t: Tensor, x: Tensor) -> Tensor:
return torch.autograd.grad(self.func(t, x).squeeze().sum(), x, create_graph=True, retain_graph=True)[0]
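# Note (added): `torch.autograd.grad` only returns a gradient if `x` is part of
# the autograd graph; callers are expected to pass `x` with `requires_grad=True`,
# e.g. `x = x.clone().requires_grad_(True)` before evaluating the field.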
|
class AmbientProjNN(nn.Module):
def __init__(self, func):
super(AmbientProjNN, self).__init__()
self.func = func
self.man = SUN()
def forward(self, t: Tensor, x: Tensor) -> Tensor:
return self.man.proju(x, self.func(t, x))
|
def linear_activation(x: Tensor) -> Tensor:
return x
|
def get_activation(act_fn: (str | Callable)) -> Callable:
if isinstance(act_fn, Callable):
return act_fn
act_fn = Activation(act_fn)
assert callable(act_fn)
return act_fn
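# Minimal usage sketch (illustrative, not from the source): both branches
# return a callable, either the function passed in or the result of the
# `Activation` wrapper used above.
# act = get_activation('relu')          # resolved from its string name
# act = get_activation(tf.math.tanh)    # already callable; returned unchanged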
|
def dummy_network(inputs: tuple[(Tensor, Tensor)], training: Optional[bool]=None) -> tuple[(Tensor, Tensor, Tensor)]:
(_, v) = inputs
return (tf.zeros_like(v), tf.zeros_like(v), tf.zeros_like(v))
|
def zero_weights(model: Model) -> Model:
for layer in model.layers:
if isinstance(layer, Model):
zero_weights(layer)
else:
weights = layer.get_weights()
if weights:
log.info(f'Zeroing layer: {layer}')
zeros = [np.zeros_like(w) for w in weights]
layer.set_weights(zeros)
return model
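# Sanity sketch (assumes some compiled Keras `Model` instance `model`): after
# zeroing, every weight array in every (possibly nested) layer is exactly zero.
# model = zero_weights(model)
# assert all((w == 0).all() for l in model.layers for w in l.get_weights())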
|
class ScaledTanh(Layer):
def __init__(self, out_features: int, name: Optional[str], **kwargs) -> None:
super(ScaledTanh, self).__init__(name=name, **kwargs)
self.out_features = out_features
self.dense = Dense(out_features)
def get_layer_weights(self) -> dict:
return {'coeff': self.coeff, 'dense/weight': self.dense.weights}
def get_weights_dict(self, sep: str='/', name: Optional[str]=None) -> dict:
name = (self.name if (name is None) else name)
weights = self.get_layer_weights()
return {sep.join([name, k]): v for (k, v) in weights.items()}
def get_config(self):
config = super(ScaledTanh, self).get_config()
config.update({'out_features': self.out_features})
return config
def build(self, input_shape):
'Create the state of the layer (weights).'
self.coeff = self.add_weight(name='coeff', shape=(1, self.out_features), initializer='zeros', trainable=True)
def call(self, x):
return (tf.math.exp(self.coeff) * tf.math.tanh(self.dense(x)))
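# Shape/behavior sketch (illustrative): with `coeff` initialized to zeros,
# exp(coeff) == 1, so the layer reduces to `tanh(Dense(x))` at initialization.
# st = ScaledTanh(out_features=4, name='scale')
# y = st(tf.ones((2, 8)))   # y.shape == (2, 4); values in (-1, 1) at init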
|
class ConvStack(Layer):
def __init__(self, xshape: Sequence[int], conv_config: ConvolutionConfig, activation_fn: (str | Callable), use_batch_norm: bool=False, name: Optional[str]=None, **kwargs) -> None:
super(ConvStack, self).__init__(name=name, **kwargs)
self.conv_config = conv_config
self.use_batch_norm = use_batch_norm
if (len(xshape) == 3):
(d, *latvol) = xshape
elif (len(xshape) == 4):
(_, d, *latvol) = xshape
elif (len(xshape) == 8):
d = xshape[1]
latvol = xshape[2:(- 2)]
else:
raise ValueError(f'Invalid value for xshape: {xshape}')
self.d = d
self.latvol = latvol
self.xdim = (d * np.cumprod(latvol)[(- 1)])
self.xshape = xshape
self.activation_fn = get_activation(activation_fn)
self.flatten = Flatten()
self.conv_layers = []
self._layers_names = []
idx = 0
if ((filters := getattr(conv_config, 'filters', None)) is not None):
if ((nfilters := len(filters)) > 0):
if ((conv_config.sizes is not None) and (nfilters == len(conv_config.sizes))):
for (idx, (f, n)) in enumerate(zip(filters, conv_config.sizes)):
self.conv_layers.append(PeriodicPadding((n - 1)))
cname = f'Conv2D-{idx}'
self._layers_names.append(cname)
self.conv_layers.append(Conv2D(filters=f, kernel_size=n, activation=self.activation_fn, name=cname))
if (((idx + 1) % 2) == 0):
p = (2 if (conv_config.pool is None) else conv_config.pool[idx])
self.conv_layers.append(MaxPooling2D((p, p), name=f'{name}/xPool{idx}'))
self.batch_norm = None
if use_batch_norm:
self.batch_norm = BatchNormalization((- 1))
self.output_layer = Dense(self.xdim, activation=self.activation_fn)
def get_layer_weights(self) -> dict:
weights = {}
for (name, layer) in zip(self._layers_names, self.conv_layers):
lweights = getattr(layer, 'weights', [])
if (len(lweights) > 0):
weights.update({f'{name}/weight': lweights})
(w, b) = self.output_layer.weights
weights.update({'DenseOutput.weight': w, 'DenseOutput.bias': b})
if (self.batch_norm is not None):
assert isinstance(self.batch_norm, tf.keras.layers.BatchNormalization)
(gamma, beta, mean, var) = self.batch_norm.weights
pre = 'batch_norm'
weights.update({f'{pre}/gamma': gamma, f'{pre}/beta': beta, f'{pre}/moving_mean': mean, f'{pre}/moving_variance': var})
return weights
def get_weights_dict(self, sep: str='/', name: Optional[str]=None) -> dict:
name = (self.name if (name is None) else name)
weights = self.get_layer_weights()
return {sep.join([name, k]): v for (k, v) in weights.items()}
def get_config(self):
config = super(ConvStack, self).get_config()
config.update({'xshape': self.xshape, 'conv_config': self.conv_config, 'activation_fn': self.activation_fn, 'use_batch_norm': self.use_batch_norm})
return config
def call(self, x: Tensor, training: Optional[bool]=None) -> Tensor:
if (x.shape != self.xshape):
if (len(x.shape) == 2):
try:
x = tf.reshape(x, (x.shape[0], (self.d + 2), *self.latvol))
except ValueError:
x = tf.reshape(x, (x.shape[0], self.d, *self.latvol))
if (x.shape[1] in [self.d, (self.d + 2)]):
x = tf.transpose(x, (0, 2, 3, 1))
for layer in self.conv_layers:
x = layer(x)
x = self.flatten(x)
if (self.batch_norm is not None):
x = self.batch_norm(x, training=training)
x = self.output_layer(x)
return x
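# Construction sketch (hypothetical values; the `ConvolutionConfig` fields
# `filters`, `sizes`, and `pool` follow their usage in `__init__` above):
# conv_cfg = ConvolutionConfig(filters=[8, 16], sizes=[3, 3], pool=[2, 2])
# stack = ConvStack(xshape=(2, 16, 16), conv_config=conv_cfg, activation_fn='relu')
# y = stack(tf.ones((4, 2 * 16 * 16)))   # reshaped internally; y.shape == (4, 512)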
|
class InputLayer(Model):
def __init__(self, xshape: Sequence[int], network_config: NetworkConfig, activation_fn: (str | Callable[([Tensor], Tensor)]), conv_config: Optional[ConvolutionConfig]=None, input_shapes: Optional[dict[(str, (int | Sequence[int]))]]=None, name: Optional[str]=None) -> None:
super(InputLayer, self).__init__(name=name)
self.xshape = xshape
self.activation_fn = get_activation(activation_fn)
self.net_config = network_config
self.units = self.net_config.units
self.xdim = np.cumprod(xshape[1:])[(- 1)]
self.flatten = Flatten()
if (input_shapes is None):
input_shapes = {'x': self.xdim, 'v': self.xdim}
self.input_shapes = {}
for (key, val) in input_shapes.items():
if isinstance(val, (list, tuple)):
self.input_shapes[key] = np.cumprod(val)[(- 1)]
elif isinstance(val, int):
self.input_shapes[key] = val
else:
raise ValueError(f'''Unexpected value in input_shapes!
input_shapes: {input_shapes}
val: {val}''')
self.conv_stack = None
self.conv_config = conv_config
filters = getattr(conv_config, 'filters', [])
if ((conv_config is not None) and (len(filters) > 0)):
self.conv_stack = ConvStack(xshape=xshape, conv_config=conv_config, activation_fn=self.activation_fn)
self.xlayer = Dense(self.net_config.units[0])
self.vlayer = Dense(self.net_config.units[0])
def get_weights_dict(self) -> dict:
weights = {}
filters = getattr(self.conv_config, 'filters', [])
if ((self.conv_config is not None) and (len(filters) > 0) and (self.conv_stack is not None)):
wd = self.conv_stack.get_weights_dict()
weights.update({f'{self.name}/ConvStack': wd})
(xw, xb) = self.xlayer.weights
(vw, vb) = self.vlayer.weights
weights.update({f'{self.name}/xlayer/w': xw, f'{self.name}/xlayer/b': xb, f'{self.name}/vlayer/w': vw, f'{self.name}/vlayer/b': vb})
return weights
def call(self, inputs: tuple[(Tensor, Tensor)]) -> Tensor:
(x, v) = inputs
if (self.conv_stack is not None):
x = self.conv_stack(x)
v = self.vlayer(self.flatten(v))
x = self.xlayer(self.flatten(x))
assert ((x is not None) and (v is not None))
return self.activation_fn((x + v))
|
class LeapfrogLayer(Model):
def __init__(self, xshape: Sequence[int], network_config: NetworkConfig, group: Optional[((U1Phase | SU3) | str)]=None, input_shapes: Optional[dict[(str, (int | Sequence[int]))]]=None, net_weight: Optional[NetWeight]=None, conv_config: Optional[ConvolutionConfig]=None, name: Optional[str]=None):
super(LeapfrogLayer, self).__init__(name=name)
if (net_weight is None):
net_weight = NetWeight(1.0, 1.0, 1.0)
self.xshape = xshape
self.nw = net_weight
self.net_config = network_config
self.xdim = np.cumprod(xshape[1:])[(- 1)]
act_fn = get_activation(self.net_config.activation_fn)
self.activation_fn = act_fn
self.g = group
if (group is not None):
if isinstance(group, str):
if (group.lower() in ['u1', 'u1phase']):
self.g = U1Phase()
elif (group.lower() == 'su3'):
self.g = SU3()
else:
raise ValueError(f'Unexpected str {group} for group')
self.input_layer = InputLayer(xshape=xshape, network_config=network_config, activation_fn=self.activation_fn, conv_config=conv_config, input_shapes=input_shapes)
self.units = self.net_config.units
self.hidden_layers = []
for (idx, units) in enumerate(self.units[1:]):
h = Dense(units, name=f'hidden.{idx}')
self.hidden_layers.append(h)
self.scale = ScaledTanh(self.xdim, name='scale')
self.transf = ScaledTanh(self.xdim, name='transf')
self.transl = Dense(self.xdim, name='transl')
self.dropout = None
if (self.net_config.dropout_prob > 0.0):
self.dropout = Dropout(self.net_config.dropout_prob)
self.batch_norm = None
if self.net_config.use_batch_norm:
self.batch_norm = BatchNormalization((- 1), name=f'{name}_batchnorm')
def get_layer_weights(self) -> dict:
weights = {}
weights.update({'input_layer': self.input_layer.get_weights_dict()})
for (idx, layer) in enumerate(self.hidden_layers):
(w, b) = layer.weights
weights[f'hidden_layers.{idx}.weight'] = w
weights[f'hidden_layers.{idx}.bias'] = b
weights.update({'scale': self.scale.get_weights_dict()})
(tw, tb) = self.transl.weights
weights.update({'transl.weight': tw, 'transl.bias': tb})
weights.update({'transf': self.transf.get_weights_dict()})
return weights
def get_weights_dict(self, sep: str='/', name: Optional[str]=None) -> dict:
name = (self.name if (name is None) else name)
weights = self.get_layer_weights()
return {sep.join([name, k]): v for (k, v) in weights.items()}
def set_net_weight(self, net_weight: NetWeight):
self.nw = net_weight
def call(self, inputs: tuple[(Tensor, Tensor)], training: bool=False) -> tuple[(Tensor, Tensor, Tensor)]:
z = self.input_layer(inputs)
for layer in self.hidden_layers:
z = self.activation_fn(layer(z))
if ((self.net_config.dropout_prob > 0.0) and (self.dropout is not None)):
z = self.dropout(z, training=training)
if (self.net_config.use_batch_norm and (self.batch_norm is not None)):
z = self.batch_norm(z, training=training)
s = tf.scalar_mul(self.nw.s, self.scale(z))
t = tf.scalar_mul(self.nw.t, self.transl(z))
q = tf.scalar_mul(self.nw.q, self.transf(z))
return (tf.cast(s, TF_FLOAT), tf.cast(t, TF_FLOAT), tf.cast(q, TF_FLOAT))
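# Output sketch (illustrative; `net_cfg` and the `(x, v)` pair are assumed to
# be built elsewhere): the layer maps phase-space inputs to the three leapfrog
# coefficients (s, t, q), each of shape (batch, xdim) and scaled by the
# corresponding NetWeight entries.
# net = LeapfrogLayer(xshape=(32, 128), network_config=net_cfg, group='U1')
# s, t, q = net((x, v), training=True)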
|
class NetworkFactory(BaseNetworkFactory):
def build_xnet(self, group: (SU3 | U1Phase), name: Optional[str]=None) -> LeapfrogLayer:
xname = ('xnet' if (name is None) else f'xnet/{name}')
return LeapfrogLayer(xshape=self.input_spec.xshape, network_config=self.network_config, group=group, input_shapes=self.input_spec.xnet, net_weight=self.nw.x, conv_config=self.conv_config, name=xname)
def build_vnet(self, group: (SU3 | U1Phase), name: Optional[str]=None) -> LeapfrogLayer:
vname = ('vnet' if (name is None) else f'vnet/{name}')
return LeapfrogLayer(xshape=self.input_spec.xshape, network_config=self.network_config, group=group, input_shapes=self.input_spec.vnet, net_weight=self.nw.v, conv_config=self.conv_config, name=vname)
def build_networks(self, n: int, split_xnets: bool, group: (U1Phase | SU3)) -> dict:
'Build LeapfrogNetwork.'
assert (n >= 1), 'Must build at least one network'
cfg = self.get_build_configs()
if (n == 1):
return {'xnet': LeapfrogLayer(**cfg['xnet'], name='xnet', group=group), 'vnet': LeapfrogLayer(**cfg['vnet'], name='vnet', group=group)}
xnet = {}
vnet = {}
for lf in range(n):
vnet[f'{lf}'] = LeapfrogLayer(**cfg['vnet'], name=f'vnet/{lf}', group=group)
if split_xnets:
xnet[f'{lf}'] = {'first': LeapfrogLayer(**cfg['xnet'], name=f'xnet/{lf}/first', group=group), 'second': LeapfrogLayer(**cfg['xnet'], name=f'xnet/{lf}/second', group=group)}
else:
xnet[f'{lf}'] = LeapfrogLayer(**cfg['xnet'], name=f'xnet/{lf}', group=group)
return {'xnet': xnet, 'vnet': vnet}
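# Usage sketch (assumes `input_spec` and `net_cfg` built elsewhere): one vnet
# per leapfrog step, and either one xnet or a ('first', 'second') pair when
# `split_xnets=True`.
# factory = NetworkFactory(input_spec=input_spec, network_config=net_cfg)
# nets = factory.build_networks(n=2, split_xnets=True, group='U1')
# nets['vnet']['0']            # LeapfrogLayer for leapfrog step 0
# nets['xnet']['0']['first']   # first-half xnet for leapfrog step 0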
|
def savefig(fig: plt.Figure, fname: str, outdir: os.PathLike):
pngfile = Path(outdir).joinpath(f'pngs/{fname}.png')
svgfile = Path(outdir).joinpath(f'svgs/{fname}.svg')
pngfile.parent.mkdir(exist_ok=True, parents=True)
svgfile.parent.mkdir(exist_ok=True, parents=True)
fig.savefig(svgfile, transparent=True, bbox_inches='tight')
fig.savefig(pngfile, transparent=True, bbox_inches='tight', dpi=300)
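# Usage sketch: mirrors the layout created above, writing both
# `<outdir>/pngs/<fname>.png` and `<outdir>/svgs/<fname>.svg`.
# fig, ax = plt.subplots()
# ax.plot(range(10))
# savefig(fig, 'loss', outdir='./plots')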
|
def plot_metrics(metrics: dict, title: Optional[str]=None, **kwargs):
outdir = Path(f'./plots-4dSU3/{title}')
outdir.mkdir(exist_ok=True, parents=True)
for (key, val) in metrics.items():
(fig, ax) = plot_metric(val, name=key, **kwargs)
if (title is not None):
ax.set_title(title)
console.log(f'Saving {key} to {outdir}')
savefig(fig, f'{key}', outdir=outdir)
plt.show()
|
def plot_metric(metric: (list | torch.Tensor), name: Optional[str]=None, **kwargs):
assert (len(metric) > 0)
if isinstance(metric[0], (int, float, bool, np.floating)):
y = np.stack(metric)
return plot_scalar(y, ylabel=name, **kwargs)
element_shape = metric[0].shape
if (len(element_shape) == 2):
y = grab_tensor(torch.stack(metric))
return plot_leapfrogs(y, ylabel=name)
if (len(element_shape) == 1):
y = grab_tensor(torch.stack(metric))
return plot_chains(y, ylabel=name, **kwargs)
if (len(element_shape) == 0):
y = grab_tensor(torch.stack(metric))
return plot_scalar(y, ylabel=name, **kwargs)
raise ValueError
|
def log_dict(writer: SummaryWriter, d: dict, step: Optional[int]=None, prefix: Optional[str]=None, nchains: Optional[int]=None) -> None:
'Create TensorBoard summaries for all items in `d`'
for (key, val) in d.items():
pre = (key if (prefix is None) else f'{prefix}/{key}')
if isinstance(val, dict):
log_dict(writer=writer, d=val, step=step, prefix=pre, nchains=nchains)
else:
log_item(writer=writer, val=val, step=step, tag=pre, nchains=nchains)
|
def log_dict_wandb(d: dict, step: Optional[int]=None, prefix: Optional[str]=None, commit: bool=True) -> None:
'Create WandB summaries for all items in `d`'
if ((prefix is not None) and (step is not None)):
d |= {'iter': step}
wandb.log((d if (prefix is None) else {f'{prefix}/{k}': v for (k, v) in d.items()}), commit=commit)
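# Behavior sketch: with a prefix and step, keys are namespaced and an explicit
# iteration counter is logged alongside them.
# log_dict_wandb({'loss': 0.1}, step=10, prefix='train')
# # logs {'train/loss': 0.1, 'train/iter': 10}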
|
def log_list(writer: SummaryWriter, x: list, prefix: str, step: Optional[int]=None, nchains: Optional[int]=None) -> None:
'Create TensorBoard summaries for all entries in `x`'
for t in x:
name = getattr(t, 'name', getattr(t, '__name__', None))
tag = (name if (prefix is None) else f'{prefix}/{name}')
assert (tag is not None)
log_item(writer=writer, val=t, step=step, tag=tag, nchains=nchains)
|
def log_step(tag: str, step: int, writer: SummaryWriter) -> None:
iter_tag = '/'.join(([tag.split('/')[0]] + ['iter']))
writer.add_scalar(tag=iter_tag, scalar_value=step, global_step=step)
|
def check_tag(tag: str) -> str:
tags = tag.split('/')
return ('/'.join(tags[1:]) if ((len(tags) > 2) and (tags[0] == tags[1])) else tag)
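# Dedup examples: a repeated leading component is collapsed once.
assert check_tag('train/train/loss') == 'train/loss'
assert check_tag('train/loss') == 'train/loss'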
|
def log_item(tag: str, val: (((((float | int) | bool) | list) | np.ndarray) | torch.Tensor), writer: SummaryWriter, step: Optional[int]=None, nchains: Optional[int]=None) -> None:
if (step is not None):
log_step(tag, step, writer)
tag = check_tag(tag)
if isinstance(val, (Tensor, Array)):
if ((nchains is not None) and (len(val.shape) > 0)):
val = val[:nchains]
if ((isinstance(val, Tensor) and torch.is_complex(val)) or (isinstance(val, Array) and np.iscomplexobj(val))):
log_item(tag=f'{tag}/real', val=val.real, writer=writer, step=step)
log_item(tag=f'{tag}/imag', val=val.imag, writer=writer, step=step)
elif (len(val.shape) > 0):
writer.add_scalar(f'{tag}/avg', val.mean(), global_step=step)
try:
writer.add_histogram(tag=tag, values=val, global_step=step)
except ValueError:
log.error(f'Error adding histogram for: {tag}')
else:
writer.add_scalar(tag, val, global_step=step)
elif isinstance(val, list):
log_list(writer=writer, x=val, step=step, prefix=tag)
elif (isinstance(val, (float, int, bool, np.floating)) or (len(getattr(val, 'shape', [0])) == 0)):
writer.add_scalar(tag=tag, scalar_value=val, global_step=step)
else:
log.warning(f'Unexpected type encountered for: {tag}')
log.warning(f'{tag}.type: {type(val)}')
|
def as_tensor(x: ((torch.Tensor | list) | None), grab: bool=False, nchains: Optional[int]=None) -> (((torch.Tensor | None) | np.ndarray) | Scalar):
if (x is None):
return x
if (nchains is not None):
try:
x = x[:nchains]
except Exception:
pass
if isinstance(x, torch.Tensor):
x = torch.nan_to_num(x)
if isinstance(x, list):
x = torch.stack(x)
return (grab_tensor(x) if grab else x)
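# Behavior sketch: the optional `nchains` slice is applied first, lists of
# tensors are stacked, and `grab=True` would return a NumPy array instead.
# as_tensor([torch.ones(4), torch.zeros(4)], nchains=1).shape  # torch.Size([1, 4])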
|
def log_params_and_grads(model: nn.Module, step: Optional[int]=None, with_grads: bool=True, nchains: Optional[int]=None) -> None:
if (wandb.run is None):
return
params = {f'params/{k}': as_tensor(v, nchains=nchains) for (k, v) in model.named_parameters()}
grads = {}
if with_grads:
grads = {f'grads/{k}/grad': as_tensor(v.grad, nchains=nchains) for (k, v) in model.named_parameters()}
if (step is not None):
step_ = torch.tensor(step)
params |= {'params/iter': step_}
grads |= {'grads/iter': step_}
wandb.log(params, commit=False)
try:
wandb.log(grads)
except Exception:
log.critical('Failed to `wandb.log(grads)` ')
|
def update_summaries(writer: SummaryWriter, step: Optional[int]=None, metrics: Optional[dict[(str, ArrayLike)]]=None, model: Optional[torch.nn.Module]=None, prefix: str='', with_grads: bool=True, use_tb: bool=True, use_wandb: bool=True, nchains: int=8, optimizer: Optional[torch.optim.Optimizer]=None) -> None:
if (metrics is not None):
if use_tb:
with StopWatch(iter=step, wbtag=f'tblogdict/{prefix}', msg=f'`log_dict(prefix={prefix}, nchains={nchains})`', prefix='TrackingTimers/', log_output=False):
log_dict(writer=writer, d=metrics, step=step, prefix=prefix, nchains=nchains)
if use_wandb:
with StopWatch(iter=step, wbtag=f'wblogdict/{prefix}', msg=f'`log_dict_wandb(prefix={prefix})`', prefix='TrackingTimers/', log_output=False):
metrics = ({f'{prefix}/wb/{k}': v for (k, v) in metrics.items()} if use_tb else {f'{prefix}/{k}': v for (k, v) in metrics.items()})
log_dict_wandb(metrics, step)
assert ((step is None) or isinstance(step, int))
if (model is not None):
if use_wandb:
with StopWatch(iter=step, wbtag=f'wblogwng/{prefix}', msg='`log_params_and_grads()`', prefix='TrackingTimers/', log_output=False):
log_params_and_grads(model=model, step=step, with_grads=with_grads, nchains=nchains)
if use_tb:
with StopWatch(iter=step, msg='`log_dict(grads)`', prefix='TrackingTimers/', wbtag=f'tblogwng/{prefix}', log_output=False):
params = {f'model/{k}': (as_tensor(v, grab=True, nchains=nchains) if v.requires_grad else None) for (k, v) in model.named_parameters()}
log_dict(writer=writer, d=params, step=step, nchains=nchains)
if with_grads:
grads = {f'grads-wb/{k}': (as_tensor(v.grad, grab=True, nchains=nchains) if v.requires_grad else None) for (k, v) in model.named_parameters()}
log_dict(writer=writer, d=grads, step=step, nchains=nchains)
|
def log_step(tag: str, step: int) -> None:
iter_tag = '/'.join(([tag.split('/')[0]] + ['iter']))
tf.summary.scalar(iter_tag, step, step=step)
|
def check_tag(tag: str) -> str:
tags = tag.split('/')
return ('/'.join(tags[1:]) if ((len(tags) > 2) and (tags[0] == tags[1])) else tag)
|
def log_item(tag: str, val: ((((((float | int) | bool) | list) | np.ndarray) | tf.Tensor) | None), step: Optional[int]=None):
if (val is None):
return
if (step is not None):
log_step(tag, step)
tag = check_tag(tag)
if isinstance(val, (Tensor, Array)):
if ((isinstance(val, Tensor) and (val.dtype in tfComplex)) or (isinstance(val, Array) and np.iscomplexobj(val))):
log_item(tag=f'{tag}.real', val=tf.math.real(val), step=step)
log_item(tag=f'{tag}.imag', val=tf.math.imag(val), step=step)
elif (hasattr(val, 'shape') and (len(getattr(val, 'shape', [])) > 0)):
tf.summary.scalar(f'{tag}/avg', tf.reduce_mean(val), step=step)
tf.summary.histogram(tag, val, step=step)
else:
tf.summary.scalar(tag, val, step=step)
elif isinstance(val, list):
log_list(val, step=step, prefix=tag)
elif (isinstance(val, (float, int, np.floating, np.integer, bool)) or (len(getattr(val, 'shape', [0])) == 0)):
tf.summary.scalar(tag, val, step=step)
|
def log_dict(d: ((dict | DefaultDict) | Mapping), step: int, prefix: Optional[str]=None):
'Create tensorboard summaries for all items in `d`'
for (key, val) in d.items():
pre = (key if (prefix is None) else f'{prefix}/{key}')
if isinstance(val, dict):
log_dict(val, step=step, prefix=pre)
elif isinstance(val, list):
log_list(val, step, prefix=pre)
else:
log_item(pre, val, step=step)
|
def log_list(x, step, prefix: Optional[str]=None):
for (idx, t) in enumerate(x):
name = getattr(t, 'name', getattr(t, '__name__', None))
if (name is None):
name = f'{idx}'
tag = (name if (prefix is None) else f'{prefix}/{name}')
assert (tag is not None)
log_item(tag, t, step=step)
|
def log_model_weights1(step: int, model: (tf.keras.Model | tf.keras.layers.Layer), prefix: Optional[str]=None):
prefix = (f'model/{prefix}' if (prefix is not None) else 'model')
name = getattr(model, 'name', None)
if (name is not None):
prefix += f'/{name}'
log_list(model.trainable_variables, step=step, prefix=f'{prefix}/trainable_vars')
layers = getattr(model, 'layers', [])
if (len(layers) > 0):
for layer in model.layers:
weights = layer.get_weights()
log_list(weights, step=step, prefix=f'{prefix}/{layer.name}.weights')
else:
weights = model.get_weights()
log_list(weights, step=step, prefix=f'{prefix}/{model.name}.weights')
|
def log_model_weights(step: int, model: (tf.keras.Model | tf.keras.layers.Layer), prefix: Optional[str]=None, sep: Optional[str]=None):
weights = model.weights
wdict = {w.name: w for w in weights}
if (sep is not None):
wdict.update({k.replace('/', sep): v for (k, v) in wdict.items()})
log_dict(wdict, step, prefix=prefix)
|
def format_weight_name(name: str) -> str:
return name.split(':', 1)[0].replace('kernel', 'weight')
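# Examples: TF appends ':0' to variable names and calls Dense kernels 'kernel'.
assert format_weight_name('xnet/dense/kernel:0') == 'xnet/dense/weight'
assert format_weight_name('xnet/dense/bias:0') == 'xnet/dense/bias'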
|
def update_summaries(step: int, metrics: Optional[dict[(str, Tensor)]]=None, model: Optional[Model]=None, optimizer: Optional[Optimizer]=None, prefix: Optional[str]=None) -> None:
if ((metrics is not None) and isinstance(metrics, dict)):
log_dict(metrics, step, prefix=prefix)
if (model is not None):
weights = {format_weight_name(w.name): w for w in model.weights}
assert isinstance(weights, (dict, DefaultDict, Mapping))
log_dict(flatten_dict(weights), step=step, prefix='model')
if (optimizer is not None):
ostr = 'optimizer'
opre = (ostr if (prefix is None) else '/'.join([ostr, prefix]))
lpre = ('lr' if (prefix is None) else '/'.join(['lr', prefix]))
tf.summary.scalar(lpre, K.get_value(optimizer.lr), step=step)
log_list(optimizer.variables(), step=step, prefix=opre)
|
def update_summaries1(step: int, metrics: Optional[dict]=None, model: Optional[(Model | Layer)]=None, weights: Optional[dict]=None, optimizer: Optional[Optimizer]=None, prefix: Optional[str]=None, job_type: Optional[str]=None, sep: Optional[str]=None):
'"Create summary objects.'
if (metrics is not None):
log_dict(metrics, step, prefix=job_type)
if (weights is not None):
log.info('Caught weights!')
if isinstance(weights, dict):
log_dict(weights, step=step, prefix=prefix)
if (model is not None):
weights = {w.name: w for w in model.weights}
if (weights != {}):
log_dict(weights, step, prefix='network')
if isinstance(model, Model):
log_model_weights(step=step, model=model, sep=sep, prefix=prefix)
if (optimizer is not None):
ostr = 'optimizer'
opre = (ostr if (prefix is None) else '/'.join([ostr, prefix]))
lpre = ('lr' if (prefix is None) else '/'.join(['lr', prefix]))
tf.summary.scalar(lpre, K.get_value(optimizer.lr), step=step)
log_list(optimizer.variables(), step=step, prefix=opre)
|
def HMC(experiment: Experiment, nsteps: int=10, beta: float=1.0, nlog: int=1, nprint: int=1, x: Optional[torch.Tensor]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None) -> tuple[(torch.Tensor, BaseHistory)]:
'Run HMC on `experiment`'
history_hmc = BaseHistory()
if (x is None):
state = experiment.trainer.dynamics.random_state(beta=beta)
x = state.x
for step in range(nsteps):
tic = time.perf_counter()
(x, metrics_) = experiment.trainer.hmc_step((x, beta), eps=eps, nleapfrog=nleapfrog)
toc = time.perf_counter()
metrics = {'hmc_step': step, 'dt': (toc - tic), **metrics_}
if (((step % nlog) == 0) or ((step % nprint) == 0)):
avgs = history_hmc.update(metrics)
if ((step % nprint) == 0):
summary = summarize_dict(avgs)
log.info(summary)
xhmc = experiment.trainer.dynamics.unflatten(x)
log.info(f'checkSU(x_hmc): {g.checkSU(xhmc)}')
history_hmc.plot_all(outdir=HMC_DIR)
return (xhmc, history_hmc)
|
def eval(experiment: Experiment, nsteps: int=10, beta: float=1.0, nlog: int=1, nprint: int=2, x: Optional[torch.Tensor]=None) -> tuple[(torch.Tensor, BaseHistory)]:
'Run eval on `experiment`'
history_eval = BaseHistory()
if (x is None):
state = experiment.trainer.dynamics.random_state(beta=beta)
x = state.x
for step in range(nsteps):
tic = time.perf_counter()
(x, metrics_) = experiment.trainer.eval_step((x, beta))
toc = time.perf_counter()
metrics = {'eval_step': step, 'dt': (toc - tic), **metrics_}
if (((step % nlog) == 0) or ((step % nprint) == 0)):
avgs = history_eval.update(metrics)
if ((step % nprint) == 0):
summary = summarize_dict(avgs)
log.info(summary)
xeval = experiment.trainer.dynamics.unflatten(x)
log.info(f'checkSU(x_eval): {g.checkSU(xeval)}')
history_eval.plot_all(outdir=EVAL_DIR)
return (xeval, history_eval)
|
def main() -> tuple[(torch.Tensor, dict[(str, BaseHistory)])]:
plt.style.use(opinionated.STYLES['opinionated_min'])
su3conf = Path('./conf/su3-min.yaml')
with su3conf.open('r') as stream:
conf = dict(yaml.safe_load(stream))
log.info(json.dumps(conf, indent=4))
overrides = dict_to_list_of_overrides(conf)
ptExpSU3 = get_experiment(overrides=[*overrides], build_networks=True)
state = ptExpSU3.trainer.dynamics.random_state(6.0)
assert isinstance(state.x, torch.Tensor)
assert isinstance(state.beta, torch.Tensor)
assert isinstance(ptExpSU3, Experiment)
(xhmc, history_hmc) = HMC(nsteps=10, experiment=ptExpSU3, beta=state.beta.item(), x=state.x, eps=0.1, nleapfrog=1, nlog=1, nprint=2)
(xeval, history_eval) = eval(nsteps=10, experiment=ptExpSU3, beta=6.0, x=state.x, nlog=1, nprint=1)
history_train = BaseHistory()
x = state.x
for step in range(50):
tic = time.perf_counter()
(x, metrics_) = ptExpSU3.trainer.train_step((x, state.beta))
toc = time.perf_counter()
metrics = {'train_step': step, 'dt': (toc - tic), **metrics_}
avgs = history_train.update(metrics)
summary = summarize_dict(avgs)
log.info(summary)
history_train.plot_all(outdir=TRAIN_DIR)
return (x, {'train': history_train, 'eval': history_eval, 'hmc': history_hmc})
|
def grab(x: Tensor) -> np.ndarray:
return x.detach().cpu().numpy()
|
def load_ds_config(fpath: os.PathLike) -> dict:
ds_config_path = Path(fpath)
log.info(f'Loading DeepSpeed Config from: {ds_config_path.as_posix()}')
if (ds_config_path.suffix == '.json'):
with ds_config_path.open('r') as f:
ds_config = json.load(f)
return ds_config
if (ds_config_path.suffix == '.yaml'):
import yaml
with ds_config_path.open('r') as stream:
ds_config = dict(yaml.safe_load(stream))
return ds_config
raise TypeError('Unexpected FileType')
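# Usage sketch (hypothetical path): '.json' and '.yaml' are the only suffixes
# handled; anything else raises TypeError.
# ds_config = load_ds_config('conf/ds_config.yaml')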
|
def box_header(header: str):
headerlen = (len(header) + 2)
log.info((('┏' + (headerlen * '━')) + '┓'))
log.info(f'┃ {header} ┃')
log.info((('┗' + (headerlen * '━')) + '┛'))
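# Example output: box_header('TRAIN') logs three lines:
# ┏━━━━━━━┓
# ┃ TRAIN ┃
# ┗━━━━━━━┛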
|
class Trainer(BaseTrainer):
def __init__(self, cfg: (DictConfig | ExperimentConfig), build_networks: bool=True, ckpt_dir: Optional[os.PathLike]=None, keep: Optional[(str | Sequence[str])]=None, skip: Optional[(str | Sequence[str])]=None) -> None:
super().__init__(cfg=cfg, keep=keep, skip=skip)
group = str(self.config.dynamics.group).upper()
assert (group in ['U1', 'SU3'])
if (group == 'U1'):
self.g = U1Phase()
elif (group == 'SU3'):
self.g = SU3()
else:
raise ValueError
self.config: ExperimentConfig = instantiate(cfg)
self.clip_norm = self.config.learning_rate.clip_norm
self._lr_warmup = torch.linspace(self.config.learning_rate.min_lr, self.config.learning_rate.lr_init, (2 * self.steps.nepoch))
self.dtype = PT_DTYPES.get(self.config.precision, None)
assert (self.dtype is not None)
dsetup: dict = setup_torch_distributed(self.config.backend)
self.size: int = dsetup['size']
self.rank: int = dsetup['rank']
self.local_rank: int = dsetup['local_rank']
self._is_orchestrator: bool = ((self.local_rank == 0) and (self.rank == 0))
self._with_cuda: bool = torch.cuda.is_available()
self._dtype = self.dtype
self.device: str = ('cuda' if torch.cuda.is_available() else 'cpu')
self.warning(f'Using {self.dtype} on {self.device}!')
self.lattice = self.build_lattice()
self.loss_fn = self.build_loss_fn()
self.dynamics: Dynamics = self.build_dynamics(build_networks=build_networks)
self.ckpt_dir: Path = (Path(CHECKPOINTS_DIR).joinpath('checkpoints') if (ckpt_dir is None) else Path(ckpt_dir).resolve())
self.ckpt_dir.mkdir(exist_ok=True, parents=True)
self._fstep = 0
self._bstep = 0
self._gstep = 0
self._estep = 0
self._hstep = 0
if self.config.restore:
output: dict = self.load_ckpt()
self.dynamics: Dynamics = output['dynamics']
ckpt: dict = output['ckpt']
self._gstep: int = ckpt.get('gstep', ckpt.get('step', 0))
if self._is_orchestrator:
self.warning(f'Restoring global step from ckpt! self._gstep: {self._gstep}')
else:
self._gstep: int = 0
self.warning('Using `torch.optim.Adam` optimizer')
self._optimizer = torch.optim.Adam(self.dynamics.parameters(), lr=self.config.learning_rate.lr_init)
self.num_params = self.count_parameters(self.dynamics)
self.autocast_context_train = torch.autocast(dtype=self._dtype, device_type=self.device, enabled=((self._dtype != torch.float64) and (self.device != 'cpu')))
self.ds_config = {}
self.grad_scaler = None
self.dynamics_engine = None
if (self.config.backend == 'DDP'):
from torch.nn.parallel import DistributedDataParallel as DDP
self.optimizer = self._optimizer
find_unused_parameters = (str(self.config.dynamics.group).lower() == 'su3')
self.dynamics_engine = DDP(self.dynamics, find_unused_parameters=find_unused_parameters)
if (self._dtype != torch.float64):
self.grad_scaler = GradScaler()
elif (self.config.backend.lower() in ['ds', 'deepspeed']):
self._setup_deepspeed()
elif (self.config.backend.lower() in ['hvd', 'horovod']):
self._setup_horovod()
else:
self.optimizer = self._optimizer
logfreq = self.config.steps.log
log.warning(f'logging with freq {logfreq} for wandb.watch')
if (self.config.use_wandb and (wandb.run is not None)):
wandb.run.watch(self.dynamics, log='all', log_freq=logfreq)
assert (isinstance(self.dynamics, Dynamics) and isinstance(self.dynamics, nn.Module) and (str(self.config.dynamics.group).upper() in {'U1', 'SU3'}))
def count_parameters(self, model: Optional[nn.Module]=None) -> int:
'Count the total number of parameters in `model`.'
model = (self.dynamics if (model is None) else model)
num_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
log.info(f'num_params in model: {num_params}')
if (self.config.init_wandb and (wandb.run is not None)):
wandb.run.config['NUM_PARAMS'] = num_params
return num_params
def _setup_deepspeed(self) -> None:
self.ds_config = self.prepare_ds_config()
if (self.dtype == torch.bfloat16):
log.warning('Using `bf16` in DeepSpeed config...')
self.ds_config |= {'bf16': {'enabled': True}}
self.dynamics = self.dynamics.to(torch.bfloat16)
if (self.dtype == torch.float16):
log.warning('Using `fp16` in DeepSpeed config...')
self.ds_config |= {'fp16': {'enabled': True}}
self.dynamics = self.dynamics.to(torch.float16)
if (self.rank == 0):
print_json(json.dumps(self.ds_config, indent=4))
if ('optimizer' in self.ds_config):
(engine, optimizer, *_) = deepspeed.initialize(model=self.dynamics, config=self.ds_config, model_parameters=self.dynamics.parameters())
else:
(engine, optimizer, *_) = deepspeed.initialize(model=self.dynamics, config=self.ds_config, optimizer=self._optimizer, model_parameters=self.dynamics.parameters())
assert (engine is not None)
assert (optimizer is not None)
self.dynamics_engine = engine
self.optimizer = optimizer
self.device = self.dynamics_engine.local_rank
def _setup_horovod(self) -> None:
import horovod.torch as hvd
compression = (hvd.Compression.fp16 if (self.dtype in {*BF16_SYNONYMS, *FP16_SYNONYMS}) else hvd.Compression.none)
self.optimizer = hvd.DistributedOptimizer(self._optimizer, named_parameters=self.dynamics.named_parameters(), compression=compression)
hvd.broadcast_parameters(self.dynamics.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.optimizer, root_rank=0)
def prepare_ds_config(self) -> dict:
if (self.config.backend.lower() not in ['ds', 'deepspeed']):
return {}
ds_config = {}
assert (self.config.ds_config_path is not None)
ds_config = load_ds_config(self.config.ds_config_path)
self.info(f'Loaded DeepSpeed config from: {self.config.ds_config_path}')
pname = 'l2hmc-qcd'
if self.config.debug_mode:
pname += '-debug'
if self.config.init_wandb:
ds_config.setdefault('wandb', {}).update({'enabled': True, 'project': pname, 'group': f'{self.config.framework}/{self.config.backend}'})
else:
ds_config['wandb'] = {}
opath = Path(os.getcwd()).joinpath('ds_outputs').resolve()
ds_config['csv_monitor'] = {'enabled': True, 'output_path': opath.joinpath('ds_csv_monitor').as_posix()}
ds_config.update({'gradient_accumulation_steps': 1, 'train_micro_batch_size_per_gpu': 1})
ds_config['train_batch_size'] = ((self.size * ds_config['gradient_accumulation_steps']) * ds_config['train_micro_batch_size_per_gpu'])
scheduler = ds_config.get('scheduler', None)
if (scheduler is not None):
sparams = scheduler.get('params', None)
if (sparams is not None):
ds_config['scheduler']['params'].update({'warmup_num_steps': self.config.steps.nepoch, 'total_num_steps': (self.config.steps.nera * self.config.steps.nepoch)})
zero_opt_config = ds_config.get('zero_optimization', None)
if (zero_opt_config is not None):
hostname = str(socket.gethostbyaddr(socket.gethostname())[0]).lower()
if hostname.startswith('thetagpu'):
nvme_path = Path('/raid/scratch/').resolve()
else:
nvme_path = Path('/local/scratch').resolve()
if nvme_path.exists():
nvme_path = nvme_path.as_posix()
self.info(f'[{hostname}] Setting NVMe path to: {nvme_path}')
zero_opt_config['offload_param']['nvme_path'] = nvme_path
zero_opt_config['offload_optimizer']['nvme_path'] = nvme_path
ds_config['zero_optimization'] = zero_opt_config
self.config.set_ds_config(ds_config)
self.ds_config = ds_config
return ds_config
def warning(self, s: str) -> None:
if self._is_orchestrator:
log.warning(s)
def info(self, s: str) -> None:
if self._is_orchestrator:
log.info(s)
def draw_x(self):
return self.g.random(list(self.config.dynamics.xshape)).flatten(1)
def draw_v(self):
return self.g.random_momentum(list(self.config.dynamics.xshape))
def reset_optimizer(self):
if self._is_orchestrator:
import horovod.torch as hvd
self.warning('Resetting optimizer state!')
self.optimizer.state = defaultdict(dict)
hvd.broadcast_optimizer_state(self.optimizer, root_rank=0)
def build_lattice(self):
group = str(self.config.dynamics.group).upper()
kwargs = {'nchains': self.config.dynamics.nchains, 'shape': list(self.config.dynamics.latvolume)}
if (group == 'U1'):
return LatticeU1(**kwargs)
if (group == 'SU3'):
c1 = (self.config.c1 if (self.config.c1 is not None) else 0.0)
return LatticeSU3(c1=c1, **kwargs)
raise ValueError('Unexpected value in `config.dynamics.group`')
def build_loss_fn(self) -> Callable:
assert isinstance(self.lattice, (LatticeU1, LatticeSU3))
return LatticeLoss(lattice=self.lattice, loss_config=self.config.loss)
def build_optimizer(self, dynamics: Optional[Dynamics]=None, build_networks: bool=True) -> torch.optim.Optimizer:
if (dynamics is None):
dynamics = self.build_dynamics(build_networks=build_networks)
assert (dynamics is not None)
return (torch.optim.Adam(dynamics.parameters(), lr=self.config.learning_rate.lr_init) if (self.config.dynamics.group == 'U1') else torch.optim.SGD(dynamics.parameters(), lr=self.config.learning_rate.lr_init))
def build_dynamics(self, build_networks: bool=True) -> Dynamics:
input_spec = self.get_input_spec()
net_factory = None
if build_networks:
net_factory = NetworkFactory(input_spec=input_spec, conv_config=self.config.conv, network_config=self.config.network, net_weights=self.config.net_weights)
dynamics = Dynamics(config=self.config.dynamics, potential_fn=self.lattice.action, network_factory=net_factory)
if torch.cuda.is_available():
dynamics.cuda()
return dynamics
def get_lr(self, step: int) -> float:
return self.config.learning_rate.lr_init
def build_lr_schedule(self):
return LambdaLR(optimizer=self.optimizer, lr_lambda=(lambda step: self.get_lr(step)))
def save_ckpt(self, era: int, epoch: int, metrics: Optional[dict]=None, run: Optional[Any]=None) -> None:
if ((self.rank != 0) or (not self.config.save)):
return
tstamp = get_timestamp('%Y-%m-%d-%H%M%S')
step = self._gstep
ckpt_file = self.ckpt_dir.joinpath(f'ckpt-{era}-{epoch}-{step}-{tstamp}.tar')
assert isinstance(self.dynamics.xeps, nn.ParameterList)
assert isinstance(self.dynamics.veps, nn.ParameterList)
xeps = [e.detach().cpu().numpy() for e in self.dynamics.xeps]
veps = [e.detach().cpu().numpy() for e in self.dynamics.veps]
ckpt = {'era': era, 'epoch': epoch, 'xeps': xeps, 'veps': veps, 'gstep': self._gstep, 'model_state_dict': self.dynamics.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}
if (metrics is not None):
ckpt.update(metrics)
torch.save(ckpt, ckpt_file)
modelfile = self.ckpt_dir.joinpath(f'model-{era}-{epoch}-{step}-{tstamp}.pth')
torch.save(self.dynamics.state_dict(), modelfile)
self.info(f'Saving checkpoint to: {ckpt_file.as_posix()}')
self.info(f'Saving modelfile to: {modelfile.as_posix()}')
if ((wandb.run is not None) and self.config.init_wandb):
artifact = wandb.Artifact('model', type='model')
artifact.add_file(modelfile.as_posix())
wandb.run.log_artifact(artifact)
def load_ckpt(self, dynamics: Optional[Dynamics]=None, optimizer: Optional[torch.optim.Optimizer]=None, build_networks: bool=True, era: Optional[int]=None, epoch: Optional[int]=None) -> dict:
if (dynamics is None):
dynamics = (self.dynamics if (self.dynamics is not None) else self.build_dynamics(build_networks=build_networks))
if (optimizer is None):
optimizer = (self.optimizer if (self.optimizer is not None) else self.build_optimizer())
output = {'dynamics': dynamics, 'optimizer': optimizer, 'ckpt': {}}
ckpts = [Path(self.ckpt_dir).joinpath(i) for i in os.listdir(self.ckpt_dir) if i.endswith('.tar')]
modelfiles = [Path(self.ckpt_dir).joinpath(i) for i in os.listdir(self.ckpt_dir) if i.endswith('.pth')]
self.info(f'''Looking for checkpoints in:
{self.ckpt_dir}''')
if (not ckpts):
self.warning('No checkpoints found to load from')
return output
ckpt_file = None
modelfile = None
if (era is not None):
cmatch = f'ckpt-{era}'
mmatch = f'model-{era}'
if (epoch is not None):
cmatch += f'-{epoch}'
mmatch += f'-{epoch}'
for ckpt in ckpts:
if (cmatch in ckpt.as_posix()):
ckpt_file = ckpt
for mfile in modelfiles:
if (mmatch in mfile.as_posix()):
modelfile = mfile
else:
ckpts = sorted(ckpts, key=(lambda t: os.stat(t).st_mtime))
mfiles = sorted(modelfiles, key=(lambda t: os.stat(t).st_mtime))
ckpt_file = ckpts[(- 1)]
modelfile = mfiles[(- 1)]
if (modelfile is not None):
self.info(f'Loading model from: {modelfile}')
dynamics.load_state_dict(torch.load(modelfile))
output['modelfile'] = modelfile
if (ckpt_file is not None):
ckpt_file = Path(self.ckpt_dir).joinpath(ckpt_file)
self.info(f'Loading checkpoint from: {ckpt_file}')
ckpt = torch.load(ckpt_file)
output['ckpt'] = ckpt
output['ckpt_file'] = ckpt_file
return output
def should_log(self, epoch):
return (((epoch % self.steps.log) == 0) and self._is_orchestrator)
def should_print(self, epoch):
return (((epoch % self.steps.print) == 0) and self._is_orchestrator)
def should_emit(self, epoch: int, nepoch: int) -> bool:
nprint = min(getattr(self.steps, 'print', int((nepoch // 10))), int((nepoch // 5)))
nlog = min(getattr(self.steps, 'log', int((nepoch // 4))), int((nepoch // 4)))
emit = (((epoch % nprint) == 0) or ((epoch % nlog) == 0))
return (self._is_orchestrator and emit)
def record_metrics(self, metrics: dict, job_type: str, step: Optional[int]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, model: Optional[(nn.Module | Dynamics)]=None, optimizer: Optional[Any]=None) -> tuple[(dict[(str, ScalarLike)], str)]:
assert (job_type in {'train', 'eval', 'hmc'})
if (step is None):
timer = self.timers.get(job_type, None)
if isinstance(timer, StepTimer):
step = timer.iterations
if (step is not None):
metrics[f'{job_type[0]}step'] = step
if ((job_type == 'train') and (step is not None)):
metrics['lr'] = self.get_lr(step)
if ((job_type == 'eval') and ('eps' in metrics)):
_ = metrics.pop('eps', None)
metrics.update(self.metrics_to_numpy(metrics))
avgs = self.histories[job_type].update(metrics)
summary = summarize_dict(avgs)
metrics |= {'xeps': torch.stack([e.detach() for e in self.dynamics.xeps]), 'veps': torch.stack([e.detach() for e in self.dynamics.veps])}
metrics |= {f'{k}/avg': v for (k, v) in avgs.items()}
if ((step is not None) and (writer is not None)):
assert (step is not None)
update_summaries(step=step, writer=writer, metrics=metrics, prefix=job_type, optimizer=optimizer, use_tb=self.config.use_tb, use_wandb=(self.config.use_wandb and self.config.init_wandb))
writer.flush()
if (self.config.init_aim or self.config.init_wandb):
self.track_metrics(record=metrics, avgs=avgs, job_type=job_type, step=step, run=run, arun=arun)
return (avgs, summary)
def track_metrics(self, record: dict[(str, (((torch.Tensor | np.ndarray) | list) | ScalarLike))], avgs: dict[(str, ScalarLike)], job_type: str, step: Optional[int], run: Optional[Any]=None, arun: Optional[Any]=None) -> None:
if (self.local_rank != 0):
return
dQdict = None
dQint = record.get('dQint', None)
if (dQint is not None):
dQdict = {f'dQint/{job_type}': {'val': dQint, 'step': step, 'avg': dQint.mean()}}
if ((wandb.run is not None) and self.config.init_wandb):
wandb.run.log(dQdict, commit=False)
if (arun is not None):
from aim import Distribution
kwargs = {'step': step, 'job_type': job_type, 'arun': arun}
try:
self.aim_track(avgs, prefix='avgs', **kwargs)
self.aim_track(record, prefix='record', **kwargs)
if (dQdict is not None):
self.aim_track({'dQint': dQint}, prefix='dQ', **kwargs)
except ValueError:
self.warning('Unable to track record with aim, skipping!')
def profile_step(self, inputs: tuple[(Tensor, Tensor)]) -> tuple[(Tensor, dict)]:
(xinit, beta) = inputs
assert isinstance(self.dynamics, Dynamics)
assert isinstance(self.config, ExperimentConfig)
try:
self.optimizer.zero_grad()
except Exception:
pass
xinit = self.g.compat_proj(xinit)
if (self.dynamics_engine is not None):
(xout, metrics) = self.dynamics_engine((xinit, beta))
else:
(xout, metrics) = self.dynamics((xinit, beta))
xout = self.g.compat_proj(xout)
xprop = self.g.compat_proj(metrics.pop('mc_states').proposed.x)
loss = self.loss_fn(xinit, x_prop=xprop, acc=metrics['acc'])
loss.backward()
if (self.config.learning_rate.clip_norm > 0.0):
torch.nn.utils.clip_grad_norm_(self.dynamics.parameters(), self.config.learning_rate.clip_norm)
self.optimizer.step()
return (xout.detach(), metrics)
def profile(self, nsteps: int=5) -> dict:
assert isinstance(self.dynamics, Dynamics)
self.dynamics.train()
x = self.draw_x()
beta = torch.tensor(1.0)
metrics = {}
for _ in range(nsteps):
(x, metrics) = self.profile_step((x, beta))
return metrics
def hmc_step(self, inputs: tuple[(Tensor, (float | Tensor))], eps: Optional[float]=None, nleapfrog: Optional[int]=None) -> tuple[(Tensor, dict)]:
self.dynamics.eval()
(xi, beta) = inputs
beta = (torch.tensor(beta) if isinstance(beta, float) else beta)
assert isinstance(beta, Tensor)
beta = beta.to(self.device)
xi = self.g.compat_proj(self.dynamics.unflatten(xi.to(self.device)))
(xo, metrics) = self.dynamics.apply_transition_hmc((xi, beta), eps=eps, nleapfrog=nleapfrog)
xp = metrics.pop('mc_states').proposed.x
loss = self.loss_fn(x_init=xi, x_prop=xp, acc=metrics['acc'])
if self.config.dynamics.verbose:
lmetrics = self.loss_fn.lattice_metrics(xinit=xi, xout=xo)
metrics.update(lmetrics)
metrics.update({'loss': loss.item()})
self.dynamics.train()
self._hstep += 1
return (xo.detach(), metrics)
def eval_step(self, inputs: tuple[(Tensor, float)]) -> tuple[(Tensor, dict)]:
self.dynamics.eval()
(xinit, beta) = inputs
beta = torch.tensor(beta).to(self.device)
xinit = self.g.compat_proj(self.dynamics.unflatten(xinit.to(self.device)))
(xout, metrics) = self.dynamics((xinit, beta))
xprop = metrics.pop('mc_states').proposed.x
loss = self.loss_fn(x_init=xinit, x_prop=xprop, acc=metrics['acc'])
if self.config.dynamics.verbose:
lmetrics = self.loss_fn.lattice_metrics(xinit=xinit, xout=xout)
metrics.update(lmetrics)
metrics.update({'loss': loss.item()})
self.dynamics.train()
self._estep += 1
return (xout.detach(), metrics)
def get_context_manager(self, renderable: ConsoleRenderable) -> (Live | nullcontext):
return nullcontext()
def get_printer(self, job_type: str) -> None:
return None
def _setup_eval(self, beta: Optional[float]=None, eval_steps: Optional[int]=None, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, run: Optional[Any]=None, job_type: Optional[str]='eval', nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, nprint: Optional[int]=None) -> dict:
assert (job_type in ['eval', 'hmc'])
if isinstance(skip, str):
skip = [skip]
if (beta is None):
beta = self.config.annealing_schedule.beta_final
if ((nleapfrog is None) and (str(job_type).lower() == 'hmc')):
nleapfrog = int(self.config.dynamics.nleapfrog)
if self.config.dynamics.merge_directions:
nleapfrog *= 2
if ((eps is None) and (str(job_type).lower() == 'hmc')):
eps = self.config.dynamics.eps_hmc
self.warning(f'Step size `eps` not specified for HMC! Using default: {eps:.4f} for generic HMC')
if (x is None):
x = self.lattice.random()
self.warning(f'x.shape (original): {x.shape}')
if (nchains is not None):
if (isinstance(nchains, int) and (nchains > 0)):
x = x[:nchains]
assert isinstance(x, Tensor)
if (nchains is not None):
self.warning(f'x[:nchains].shape: {x.shape}')
table = Table(row_styles=['dim', 'none'], expand=True)
eval_steps = (self.steps.test if (eval_steps is None) else eval_steps)
assert isinstance(eval_steps, int)
nprint = (max(1, min(50, (eval_steps // 50))) if (nprint is None) else nprint)
nlog = min(10, max(1, (eval_steps // 100)))
if (run is not None):
run.config.update({job_type: {'beta': beta, 'xshape': x.shape}})
assert isinstance(x, Tensor)
assert isinstance(beta, float)
assert isinstance(nlog, int)
assert isinstance(nprint, int)
assert isinstance(eval_steps, int)
output = {'x': x, 'eps': eps, 'beta': beta, 'nlog': nlog, 'table': table, 'nprint': nprint, 'eval_steps': eval_steps, 'nleapfrog': nleapfrog}
log.info('\n'.join([f'{k}={v}' for (k, v) in output.items() if (k != 'x')]))
return output
def eval(self, beta: Optional[float]=None, eval_steps: Optional[int]=None, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, job_type: Optional[str]='eval', nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, dynamic_step_size: Optional[bool]=None, nprint: Optional[int]=None, make_plots: bool=True) -> dict:
'Evaluate dynamics.'
assert (job_type in ['eval', 'hmc'])
tables = {}
summaries = []
patience = 5
stuck_counter = 0
setup = self._setup_eval(x=x, run=run, beta=beta, eps=eps, nleapfrog=nleapfrog, skip=skip, nchains=nchains, job_type=job_type, eval_steps=eval_steps, nprint=nprint)
x = setup['x']
eps = setup['eps']
beta = setup['beta']
table = setup['table']
panel = Panel(table)
ctx = self.get_context_manager(panel)
nleapfrog = setup['nleapfrog']
eval_steps = setup['eval_steps']
assert ((x is not None) and (beta is not None))
nlog = setup.get('nlog', self.config.steps.log)
nprint = setup.get('nprint', self.config.steps.print)
timer = self.timers[job_type]
history = self.histories[job_type]
assert ((eval_steps is not None) and (timer is not None) and (history is not None) and (nlog is not None) and (nprint is not None))
device_type = ('cuda' if WITH_CUDA else 'cpu')
if (device_type == 'cuda'):
fpctx = torch.autocast(device_type=device_type)
else:
fpctx = nullcontext()
def eval_fn(z):
with fpctx:
if (job_type == 'hmc'):
assert (eps is not None)
return self.hmc_step(z, eps=eps, nleapfrog=nleapfrog)
return self.eval_step(z)
def refresh_view():
if isinstance(ctx, Live):
ctx.refresh()
def _should_emit(step):
return (((step % nlog) == 0) or ((step % nprint) == 0))
plots = None
if (is_interactive() and make_plots):
plots = plotter.init_plots()
self.dynamics.eval()
with ctx:
x = self.warmup(beta=beta, x=x)
for step in range(eval_steps):
timer.start()
(x, metrics) = eval_fn((x, beta))
dt = timer.stop()
if _should_emit(step):
record = {f'{job_type[0]}step': step, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None)}
record.update(metrics)
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=step, writer=writer, metrics=record, job_type=job_type)
summaries.append(summary)
table = self.update_table(table=table, step=step, avgs=avgs)
if ((step % nprint) == 0):
log.info(summary)
refresh_view()
if (avgs.get('acc', 1.0) < 1e-05):
if (stuck_counter < patience):
stuck_counter += 1
else:
self.warning('Chains are stuck! Redrawing x')
x = self.lattice.random()
stuck_counter = 0
if ((job_type == 'hmc') and dynamic_step_size):
acc = record.get('acc_mask', None)
record['eps'] = eps
if ((acc is not None) and (eps is not None)):
acc_avg = acc.mean()
if (acc_avg < 0.66):
eps -= (eps / 10.0)
else:
eps += (eps / 10.0)
if (is_interactive() and self._is_orchestrator and (plots is not None)):
if (len(self.histories[job_type].history.keys()) == 0):
plotter.update_plots(history=metrics, plots=plots, logging_steps=nlog)
else:
plotter.update_plots(history=self.histories[job_type].history, plots=plots, logging_steps=nlog)
if isinstance(ctx, Live):
ctx.console.clear_live()
tables[str(0)] = setup['table']
self.dynamics.train()
return {'timer': timer, 'history': history, 'summaries': summaries, 'tables': tables}
def calc_loss(self, xinit: torch.Tensor, xprop: torch.Tensor, acc: torch.Tensor) -> torch.Tensor:
return self.loss_fn(xinit, xprop, acc)
def forward_step(self, x: torch.Tensor, beta: torch.Tensor) -> tuple[(torch.Tensor, dict)]:
x.requires_grad_(True)
try:
self.optimizer.zero_grad()
except Exception:
pass
with self.autocast_context_train:
if (self.dynamics_engine is not None):
(xout, metrics) = self.dynamics_engine((x, beta))
else:
(xout, metrics) = self.dynamics((x, beta))
self._fstep += 1
return (xout, metrics)
def backward_step(self, loss: torch.Tensor) -> torch.Tensor:
'Backpropagate gradients.'
if ((self.config.backend.lower() in ['ds', 'deepspeed']) and (self.dynamics_engine is not None)):
self.dynamics_engine.backward(loss)
self.dynamics_engine.step()
elif (self.grad_scaler is None):
loss.backward()
if (self.config.learning_rate.clip_norm > 0.0):
torch.nn.utils.clip_grad_norm_(parameters=self.dynamics.parameters(), max_norm=self.clip_norm)
self.optimizer.step()
else:
self.grad_scaler.scale(loss).backward()
self.grad_scaler.unscale_(self.optimizer)
if (self.config.learning_rate.clip_norm > 0):
torch.nn.utils.clip_grad_norm_(parameters=self.dynamics.parameters(), max_norm=self.config.learning_rate.clip_norm)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
self._bstep += 1
return loss
def train_step(self, inputs: tuple[(Tensor, (Tensor | float))]) -> tuple[(Tensor, dict)]:
'Logic for performing a single training step'
(xinit, beta) = inputs
xinit = self.g.compat_proj(xinit.reshape(self.xshape))
beta = (torch.tensor(beta) if isinstance(beta, float) else beta)
assert isinstance(beta, Tensor)
(xout, metrics) = self.forward_step(x=xinit, beta=beta)
xprop = metrics.pop('mc_states').proposed.x
loss = self.calc_loss(xinit=xinit, xprop=xprop, acc=metrics['acc'])
aux_loss = 0.0
if ((aw := self.config.loss.aux_weight) > 0):
yinit = self.dynamics.unflatten(self.g.random(xinit.shape).to(self.device))
(_, metrics_) = self.forward_step(x=yinit, beta=beta)
yprop = metrics_.pop('mc_states').proposed.x
aux_loss = (aw * self.calc_loss(xinit=yinit, xprop=yprop, acc=metrics_['acc']))
loss_tot = (loss + aux_loss)
loss = self.backward_step(loss_tot)
if isinstance(loss_tot, Tensor):
loss_tot = loss_tot.item()
metrics['loss'] = loss_tot
if self.config.dynamics.verbose:
with torch.no_grad():
lmetrics = self.loss_fn.lattice_metrics(xinit=xinit, xout=xout)
metrics.update(lmetrics)
self._gstep += 1
return (xout.detach(), metrics)
def train_step_detailed(self, x: Optional[Tensor]=None, beta: Optional[(Tensor | float)]=None, era: int=0, epoch: int=0, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, rows: Optional[dict]=None, summaries: Optional[list]=None, verbose: bool=True) -> tuple[(Tensor, dict)]:
'Logic for performing a single training step'
if (x is None):
x = self.dynamics.lattice.random()
if (beta is None):
beta = self.config.annealing_schedule.beta_init
if isinstance(beta, float):
beta = torch.tensor(beta).to(self.device)
self.timers['train'].start()
(xout, metrics) = self.train_step((x, beta))
dt = self.timers['train'].stop()
record = {'era': era, 'epoch': epoch, 'tstep': self._gstep, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None), **metrics}
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=self._gstep, writer=writer, metrics=record, job_type='train', model=self.dynamics, optimizer=self.optimizer)
if (rows is not None):
rows[self._gstep] = avgs
if (summaries is not None):
summaries.append(summary)
if verbose:
log.info(summary)
self._gstep += 1
return (xout.detach(), record)
def eval_step_detailed(self, job_type: str, x: Optional[Tensor]=None, beta: Optional[float]=None, verbose: bool=True) -> tuple[(Tensor, dict)]:
if (x is None):
x = self.dynamics.lattice.random()
if (beta is None):
beta = self.config.annealing_schedule.beta_init
self.timers[job_type].start()
if (job_type == 'eval'):
(xout, metrics) = self.eval_step((x, beta))
elif (job_type == 'hmc'):
(xout, metrics) = self.hmc_step((x, beta))
else:
raise ValueError(f'Job type should be eval or hmc, got: {job_type}')
dt = self.timers[job_type].stop()
record = {'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None), **metrics}
(_, summary) = self.record_metrics(step=self._gstep, metrics=record, job_type=job_type)
if verbose:
log.info(summary)
self._estep += 1
return (xout, record)
def train_epoch(self, x: Tensor, beta: (float | Tensor), era: Optional[int]=None, run: Optional[Any]=None, arun: Optional[Any]=None, nepoch: Optional[int]=None, writer: Optional[Any]=None, extend: int=1, nprint: Optional[int]=None, nlog: Optional[int]=None, warmup: bool=True, plots: Optional[Any]=None) -> tuple[(Tensor, dict)]:
self.dynamics.train()
rows = {}
summaries = []
extend = (1 if (extend is None) else extend)
table = Table(expand=True, box=box.HORIZONTALS, row_styles=['dim', 'none'])
panel = Panel(table)
nepoch = (self.steps.nepoch if (nepoch is None) else nepoch)
assert isinstance(nepoch, int)
nepoch *= extend
losses = []
ctx = self.get_context_manager(panel)
log_freq = (self.steps.log if (nlog is None) else nlog)
print_freq = (self.steps.print if (nprint is None) else nprint)
assert ((log_freq is not None) and isinstance(log_freq, int))
assert ((print_freq is not None) and isinstance(print_freq, int))
def should_print(epoch):
return (self._is_orchestrator and ((epoch % print_freq) == 0))
def should_log(epoch):
return (self._is_orchestrator and ((epoch % log_freq) == 0))
def refresh_view():
if isinstance(ctx, Live):
ctx.refresh()
patience = 10
stuck_iters = 0
with ctx:
if warmup:
wt0 = time.perf_counter()
x = self.warmup(beta=beta, x=x)
self.info(f'Thermalizing configs @ {beta:.2f} took {(time.perf_counter() - wt0):.4f} s')
summary = ''
for epoch in range(nepoch):
self.timers['train'].start()
(x, metrics) = self.train_step((x, beta))
dt = self.timers['train'].stop()
losses.append(metrics['loss'])
if should_log(epoch):
record = {'era': era, 'epoch': epoch, 'tstep': self._gstep, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None)}
record.update(metrics)
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=self._gstep, writer=writer, metrics=record, job_type='train', model=self.dynamics, optimizer=self.optimizer)
rows[self._gstep] = avgs
summaries.append(summary)
table = self.update_table(table=table, avgs=avgs, step=epoch)
if (avgs.get('acc', 1.0) < 1e-05):
if (stuck_iters < patience):
stuck_iters += 1
else:
self.warning('Chains are stuck! Redrawing x')
x = self.lattice.random()
stuck_iters = 0
refresh_view()
if (is_interactive() and self._is_orchestrator and (plots is not None)):
if (len(self.histories['train'].history.keys()) == 0):
plotter.update_plots(metrics, plots, logging_steps=log_freq)
else:
plotter.update_plots(self.histories['train'].history, plots=plots, logging_steps=log_freq)
if should_print(epoch):
refresh_view()
log.info(summary)
if isinstance(ctx, Live):
ctx.console.clear_live()
data = {'rows': rows, 'table': table, 'losses': losses, 'summaries': summaries}
return (x, data)
def _setup_training(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None) -> dict:
skip = ([skip] if isinstance(skip, str) else skip)
train_dir = (Path(os.getcwd()).joinpath(self._created, 'train') if (train_dir is None) else Path(train_dir))
train_dir.mkdir(exist_ok=True, parents=True)
if (x is None):
x = self.g.random(list(self.xshape)).flatten(1)
nera = (self.config.steps.nera if (nera is None) else nera)
nepoch = (self.config.steps.nepoch if (nepoch is None) else nepoch)
assert ((nera is not None) and isinstance(nera, int))
assert ((nepoch is not None) and isinstance(nepoch, int))
if (beta is None):
betas = self.config.annealing_schedule.setup(nera=nera, nepoch=nepoch)
elif isinstance(beta, (list, np.ndarray)):
nera = len(beta)
betas = {f'{i}': b for (i, b) in zip(range(nera), beta)}
elif isinstance(beta, (int, float)):
betas = {f'{i}': b for (i, b) in zip(range(nera), (nera * [beta]))}
elif isinstance(beta, dict):
nera = len(list(beta.keys()))
betas = {f'{i}': b for (i, b) in beta.items()}
else:
raise TypeError(f'Expected `beta` to be one of: `float, list, dict`, received: {type(beta)}')
beta_final = list(betas.values())[(- 1)]
assert ((beta_final is not None) and isinstance(beta_final, float))
return {'x': x, 'nera': nera, 'nepoch': nepoch, 'betas': betas, 'train_dir': train_dir, 'beta_final': beta_final}
def warmup(self, beta: (float | torch.Tensor), nsteps: int=100, tol: float=1e-05, x: Optional[Tensor]=None, nchains: Optional[int]=None) -> Tensor:
'Thermalize configs'
self.dynamics.eval()
if (x is None):
x = self.dynamics.lattice.random().to(self.device)
if (nchains is not None):
x = x[:nchains]
if isinstance(beta, float):
beta = torch.tensor(beta).to(self.device)
pexact = (plaq_exact(beta).to(self.device).to(self._dtype) if (self.config.dynamics.group == 'U1') else None)
for step in range(nsteps):
(x, metrics) = self.hmc_step((x, beta))
plaqs = metrics.get('plaqs', None)
assert ((x is not None) and isinstance(x, Tensor))
if ((plaqs is not None) and (pexact is not None)):
pdiff = (plaqs - pexact).abs().sum()
if (pdiff < tol):
log.warning(f'Chains thermalized! step: {step}, plaq_diff: {pdiff:.4f}')
return x
if ((plaqs is not None) and (nsteps > 100) and ((step % 10) == 0) and self._is_orchestrator):
log.info(f'(warm-up) step: {step}, plaqs: {plaqs.mean():.4f}')
self.dynamics.train()
return x
def train(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, nprint: Optional[int]=None, nlog: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None, warmup: bool=True, make_plots: bool=True) -> dict:
'Perform training and return dictionary of results.'
self.dynamics.train()
setup = self._setup_training(x=x, skip=skip, train_dir=train_dir, nera=nera, nepoch=nepoch, beta=beta)
era = 0
epoch = 0
extend = 1
x = setup['x']
nera = setup['nera']
betas = setup['betas']
nepoch = setup['nepoch']
train_dir = setup['train_dir']
beta_final: float = setup['beta_final']
assert ((x is not None) and isinstance(x, Tensor))
assert (nera is not None)
assert (train_dir is not None)
plots = (plotter.init_plots() if (is_interactive() and make_plots) else None)
for era in range(nera):
b = torch.tensor(betas.get(str(era), beta_final))
if ((era == (nera - 1)) and (self.steps.extend_last_era is not None)):
extend = int(self.steps.extend_last_era)
if self._is_orchestrator:
if ((era > 1) and (str((era - 1)) in self.summaries['train'])):
esummary = self.histories['train'].era_summary(f'{(era - 1)}')
log.info(f'''Avgs over last era:
{esummary}
''')
box_header(f'ERA: {era} / {nera}, BETA: {b:.3f}')
epoch_start = time.time()
(x, edata) = self.train_epoch(x=x, beta=b, era=era, run=run, arun=arun, writer=writer, extend=extend, nepoch=nepoch, nprint=nprint, nlog=nlog, warmup=warmup, plots=plots)
self.rows['train'][str(era)] = edata['rows']
self.tables['train'][str(era)] = edata['table']
self.summaries['train'][str(era)] = edata['summaries']
losses = torch.Tensor(list(edata['losses'][1:]))
if self.config.annealing_schedule.dynamic:
dy_avg = (losses[1:] - losses[:(- 1)]).mean().item()
if (dy_avg > 0):
b -= (b / 10.0)
else:
b += (b / 10.0)
if (self._is_orchestrator and self.config.save):
st0 = time.time()
self.save_ckpt(era, epoch, run=run)
log.info(f'Saving took: {(time.time() - st0):<5g}s')
log.info(f'Era {era} took: {(time.time() - epoch_start):<5g}s')
return {'timer': self.timers['train'], 'rows': self.rows['train'], 'summaries': self.summaries['train'], 'history': self.histories['train'], 'tables': self.tables['train']}
def train_dynamic(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None) -> dict:
'Perform training and return dictionary of results.'
self.dynamics.train()
setup = self._setup_training(x=x, skip=skip, train_dir=train_dir, nera=nera, nepoch=nepoch, beta=beta)
era = 0
epoch = 0
extend = 1
x = setup['x']
nera = setup['nera']
betas = setup['betas']
nepoch = setup['nepoch']
train_dir = setup['train_dir']
beta_final = setup['beta_final']
b = torch.tensor(betas.get(str(era), beta_final))
assert (x is not None)
assert (nera is not None)
assert (train_dir is not None)
while (b < beta_final):
if ((era == (nera - 1)) and (self.steps.extend_last_era is not None)):
extend = int(self.steps.extend_last_era)
if self._is_orchestrator:
if ((era > 1) and (str((era - 1)) in self.summaries['train'])):
esummary = self.histories['train'].era_summary(f'{(era - 1)}')
log.info(f'''Avgs over last era:
{esummary}
''')
box_header(f'ERA: {era} / {nera}, BETA: {b:.3f}')
epoch_start = time.time()
(x, edata) = self.train_epoch(x=x, beta=b, era=era, run=run, arun=arun, writer=writer, extend=extend, nepoch=nepoch)
st0 = time.time()
losses = torch.tensor(edata['losses'][1:])
if self.config.annealing_schedule.dynamic:
dy_avg = (losses[1:] - losses[:(- 1)]).mean().item()
if (dy_avg > 0):
b -= (b / 10.0)
else:
b += (b / 10.0)
self.rows['train'][str(era)] = edata['rows']
self.tables['train'][str(era)] = edata['table']
self.summaries['train'][str(era)] = edata['summaries']
if (((era + 1) == nera) or ((((era + 1) % 5) == 0) and self.config.save)):
self.save_ckpt(era, epoch, run=run)
if self._is_orchestrator:
log.info(f'Saving took: {(time.time() - st0):<5g}s')
log.info(f'Era {era} took: {(time.time() - epoch_start):<5g}s')
era += 1
return {'timer': self.timers['train'], 'rows': self.rows['train'], 'summaries': self.summaries['train'], 'history': self.histories['train'], 'tables': self.tables['train']}
def metric_to_numpy(self, metric: ((((Tensor | list) | np.ndarray) | float) | None)) -> np.ndarray:
if isinstance(metric, float):
return np.array(metric)
if isinstance(metric, list):
if isinstance(metric[0], Tensor):
metric = torch.stack(metric)
elif isinstance(metric[0], np.ndarray):
metric = np.stack(metric)
else:
raise ValueError(f'Unexpected value encountered: {type(metric)}')
if (not isinstance(metric, Tensor)):
try:
metric = torch.Tensor(metric)
except TypeError:
metric = torch.tensor(0.0)
return metric.to(torch.float32).detach().cpu().numpy()
def aim_track(self, metrics: dict, step: int, job_type: str, arun: aim.Run, prefix: Optional[str]=None) -> None:
context = {'subset': job_type}
for (key, val) in metrics.items():
name = (f'{prefix}/{key}' if (prefix is not None) else f'{key}')
if isinstance(val, dict):
for (k, v) in val.items():
self.aim_track(v, step=step, arun=arun, job_type=job_type, prefix=f'{name}/{k}')
continue
if isinstance(val, (Tensor, np.ndarray)):
if (len(val.shape) > 1):
dist = Distribution(val)
arun.track(dist, step=step, name=name, context=context)
arun.track(val.mean(), step=step, name=f'{name}/avg', context=context)
else:
arun.track(val, name=name, step=step, context=context)
def print_weights(self, grab: bool=True):
_ = print_dict(dict(self.dynamics.named_parameters()), grab=grab)
def print_grads(self, grab: bool=True):
_ = print_dict({k: v.grad for (k, v) in self.dynamics.named_parameters()}, grab=grab)
def print_grads_and_weights(self, grab: bool=True):
log.info((80 * '-'))
log.info('GRADS:')
self.print_grads(grab=grab)
log.info((80 * '-'))
log.info('WEIGHTS:')
self.print_weights(grab=grab)
log.info((80 * '-'))
|
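The backward_step above interleaves three paths: DeepSpeed-managed backward, plain backward with gradient clipping, and AMP backward through a GradScaler. A minimal, self-contained sketch of the AMP branch (all names here are illustrative stand-ins, not the trainer's own attributes):

import torch

# Sketch of the scale -> backward -> unscale -> clip -> step -> update pattern.
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
use_cuda = torch.cuda.is_available()
scaler = torch.cuda.amp.GradScaler(enabled=use_cuda)

x, y = torch.randn(4, 8), torch.randn(4, 1)
with torch.autocast(device_type='cuda' if use_cuda else 'cpu'):
    loss = torch.nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()    # backward on the scaled loss
scaler.unscale_(optimizer)       # unscale so clipping sees true gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
scaler.step(optimizer)           # skipped automatically if grads overflowed
scaler.update()
optimizer.zero_grad()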
class BaseTrainer(ABC):
def __init__(self, cfg: (DictConfig | ExperimentConfig), keep: Optional[(str | Sequence[str])]=None, skip: Optional[(str | Sequence[str])]=None):
self._created = get_timestamp()
if isinstance(cfg, DictConfig):
self.config: ExperimentConfig = instantiate(cfg)
else:
self.config: ExperimentConfig = cfg
assert (self.config.framework in ['pt', 'tf', 'torch', 'pytorch', 'tensorflow'])
self._is_built = False
self.loss_fn = None
self.lattice = None
self.dynamics = None
self.schedule = None
self.optimizer = None
self.lr_schedule = None
self.steps = self.config.steps
self.xshape = self.config.dynamics.xshape
self.keep = ([keep] if isinstance(keep, str) else keep)
self.skip = ([skip] if isinstance(skip, str) else skip)
self.rows = {'train': {}, 'eval': {}, 'hmc': {}}
self.tables = {'train': {}, 'eval': {}, 'hmc': {}}
self.summaries = {'train': {}, 'eval': {}, 'hmc': {}}
self.histories = {'train': BaseHistory(), 'eval': BaseHistory(), 'hmc': BaseHistory()}
self._nlf = self.config.dynamics.nleapfrog
if self.config.dynamics.merge_directions:
self._nlf *= 2
self.timers = {'train': StepTimer(evals_per_step=self._nlf), 'eval': StepTimer(evals_per_step=self._nlf), 'hmc': StepTimer(evals_per_step=self._nlf)}
@abstractmethod
def warning(self, s: str) -> None:
pass
@abstractmethod
def info(self, s: str) -> None:
pass
@abstractmethod
def draw_x(self):
pass
@abstractmethod
def reset_optimizer(self):
pass
@abstractmethod
def build_lattice(self):
pass
@abstractmethod
def build_loss_fn(self):
pass
@abstractmethod
def build_dynamics(self, build_networks: bool=True):
pass
@abstractmethod
def build_optimizer(self):
pass
@abstractmethod
def save_ckpt(self, era: int, epoch: int, run: Optional[Any]=None) -> None:
pass
@abstractmethod
def should_log(self, epoch):
pass
@abstractmethod
def should_print(self, epoch):
pass
@abstractmethod
def record_metrics(self, metrics: dict, job_type: str, step: Optional[int]=None, record: Optional[dict]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, model: Optional[Any]=None, optimizer: Optional[Any]=None):
pass
@abstractmethod
def hmc_step(self, inputs: tuple[(Any, float)], eps: float, nleapfrog: Optional[int]=None):
pass
@abstractmethod
def eval_step(self, inputs: tuple[(Any, float)]):
pass
@abstractmethod
def eval(self, beta: Optional[float]=None, x: Optional[Any]=None, skip: Optional[(str | list[str])]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, job_type: Optional[str]='eval', nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None) -> dict:
pass
@abstractmethod
def train_step(self, inputs: tuple[(Any, float)]):
pass
@abstractmethod
def train_epoch(self, inputs: tuple[(Any, float)]):
pass
@abstractmethod
def train(self):
pass
@abstractmethod
def metric_to_numpy(self, metric: Any):
pass
def update_table(self, table: Table, step: int, avgs: dict) -> Table:
if (step == 0):
table = add_columns(avgs, table)
else:
table.add_row(*[f'{v:5}' for (_, v) in avgs.items()])
return table
def metrics_to_numpy(self, metrics: dict[(str, Any)]) -> dict[(str, list[np.ndarray])]:
m = {}
for (key, val) in metrics.items():
if (val is None):
m[key] = np.nan
continue
if isinstance(val, dict):
for (k, v) in val.items():
m[f'{key}/{k}'] = self.metric_to_numpy(v)
elif isinstance(val, (float, int, bool, np.floating)):
m[key] = val
else:
try:
m[key] = self.metric_to_numpy(val)
except ValueError as e:
log.exception(e)
log.error(f'Error converting metrics[{key}] to numpy. Skipping!')
continue
return m
@abstractmethod
def aim_track(self, metrics: dict, step: int, job_type: str, arun: aim.Run, prefix: Optional[str]=None) -> None:
pass
def get_input_spec(self) -> InputSpec:
xshape = self.config.dynamics.xshape
if (self.config.dynamics.group == 'U1'):
xdim = self.config.dynamics.xdim
input_dims = {'xnet': {'x': [xdim, 2], 'v': [xdim]}, 'vnet': {'x': [xdim], 'v': [xdim]}}
elif (self.config.dynamics.group == 'SU3'):
xdim = (np.cumprod(xshape[1:(- 2)])[(- 1)] * 8)
input_dims = {'xnet': {'x': [xdim], 'v': [xdim]}, 'vnet': {'x': [xdim], 'v': [xdim]}}
else:
raise ValueError('Unexpected value for `config.dynamics.group`')
return InputSpec(xshape=tuple(xshape), **input_dims)
|
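For the U1 branch of get_input_spec, a concrete (hypothetical) configuration makes the shapes explicit; the numbers below are illustrative, and InputSpec is the dataclass already used in this module:

# Hypothetical U1 setup: 64 chains on a 2 x 16 x 16 link lattice.
xshape = (64, 2, 16, 16)
xdim = 2 * 16 * 16  # flattened links per configuration (presumed xdim)
spec = InputSpec(
    xshape=xshape,
    xnet={'x': [xdim, 2], 'v': [xdim]},  # x enters as two components per link
    vnet={'x': [xdim], 'v': [xdim]},
)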
def setup_tensorflow(precision: Optional[str]=None, ngpus: Optional[int]=None) -> int:
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
import horovod.tensorflow as hvd
(hvd.init() if (not hvd.is_initialized()) else None)
if (precision is None):
precision = 'float32'
if (precision in ['fp16', 'float16', 'half', '16', 'mixed_float16', 'mixed_bfloat16']):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
else:
tf.keras.backend.set_floatx(precision)
TF_FLOAT = tf.keras.backend.floatx()
eager_mode = os.environ.get('TF_EAGER', None)
if (eager_mode is not None):
log.warning('Detected `TF_EAGER` from env. Running eagerly.')
tf.config.run_functions_eagerly(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
cpus = tf.config.experimental.list_physical_devices('CPU')
if gpus:
try:
if (ngpus is not None):
gpus = gpus[(- ngpus):]
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
if (hvd.rank() == 0):
log.info(f'{len(gpus)} Physical GPUs, {len(logical_gpus)} Logical GPUs')
except RuntimeError as e:
print(e)
elif cpus:
try:
logical_cpus = tf.config.experimental.list_logical_devices('CPU')
log.info(f'{len(cpus)} Physical CPUs, {len(logical_cpus)} Logical CPUs')
except RuntimeError as e:
print(e)
RANK = hvd.rank()
WORLD_SIZE = hvd.size()
LOCAL_RANK = hvd.local_rank()
os.environ['RANK'] = str(RANK)
os.environ['WORLD_SIZE'] = str(WORLD_SIZE)
os.environ['LOCAL_RANK'] = str(LOCAL_RANK)
log.warning(f'Using: {TF_FLOAT} precision')
log.info(f'RANK: {hvd.rank()}, LOCAL_RANK: {hvd.local_rank()}')
return RANK
|
def init_deepspeed():
import deepspeed
try:
deepspeed.init_distributed(dist_backend='nccl')
except Exception:
try:
deepspeed.init_distributed(dist_backend='mpi')
except RuntimeError:
deepspeed.init_distributed(dist_backend='gloo')
|
def init_process_group(rank: (int | str), world_size: (int | str), backend: Optional[str]=None) -> None:
import torch
import torch.distributed as dist
if torch.cuda.is_available():
backend = ('nccl' if (backend is None) else str(backend))
else:
backend = ('gloo' if (backend is None) else str(backend))
if (not dist.is_initialized()):
dist.init_process_group(backend=backend, rank=int(rank), world_size=int(world_size), init_method='env://')
|
def run_ddp(fn: Callable, world_size: int) -> None:
import torch.multiprocessing as mp
mp.spawn(fn, args=(world_size,), nprocs=world_size, join=True)
|
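run_ddp spawns world_size copies of a worker that must set up and tear down the process group itself. A minimal sketch, assuming single-node CPU training with the gloo backend (the address and port below are placeholders):

import os
import torch.distributed as dist

def _worker(rank: int, world_size: int) -> None:
    # mp.spawn passes the rank first, then the args tuple.
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '2345')
    init_process_group(rank=rank, world_size=world_size, backend='gloo')
    dist.barrier()  # stand-in for actual training work
    dist.destroy_process_group()

if __name__ == '__main__':
    run_ddp(_worker, world_size=2)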
def get_rank() -> int:
return int(MPI.COMM_WORLD.Get_rank())
|
def get_world_size() -> int:
return int(MPI.COMM_WORLD.Get_size())
|
def get_local_rank() -> int:
return int(os.environ.get('PMI_LOCAL_RANK', os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK', os.environ.get('LOCAL_RANK', '0'))))
|
def query_environment() -> dict[(str, int)]:
'Query environment variables for info about distributed setup'
ws = os.environ.get('WORLD_SIZE', None)
r = os.environ.get('RANK', None)
lr = os.environ.get('LOCAL_RANK', None)
if ((ws is not None) and (r is not None) and (lr is not None)):
return {'world_size': int(ws), 'rank': int(r), 'local_rank': int(lr)}
return {'world_size': int(get_world_size()), 'rank': int(get_rank()), 'local_rank': int(get_local_rank())}
|
def setup_torch_DDP(port: str='2345') -> dict[(str, int)]:
import torch
import socket
size = int(get_world_size())
rank = int(get_rank())
local_rank = int(get_local_rank())
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(size)
master_addr = (socket.gethostname() if (rank == 0) else None)
master_addr = MPI.COMM_WORLD.bcast(master_addr, root=0)
os.environ['MASTER_ADDR'] = master_addr
if ((eport := os.environ.get('MASTER_PORT', None)) is None):
os.environ['MASTER_PORT'] = port
else:
os.environ['MASTER_PORT'] = eport
log.info(f'Caught MASTER_PORT:{eport} from environment!')
init_process_group(rank=rank, world_size=size, backend=('nccl' if torch.cuda.is_available() else 'gloo'))
return {'world_size': size, 'rank': rank, 'local_rank': local_rank}
|
def setup_torch_distributed(backend: str, port: str='2345') -> dict:
import torch
rank = os.environ.get('RANK', None)
size = os.environ.get('WORLD_SIZE', None)
local_rank = os.environ.get('PMI_LOCAL_RANK', os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK', None))
be = backend.lower()
assert (be in BACKENDS)
if ((str(rank) == '0') and (str(local_rank) == '0')):
log.info(f'Using {backend} for distributed training')
if (be in {'ddp', 'DDP'}):
dsetup = setup_torch_DDP(port)
size = dsetup['world_size']
rank = dsetup['rank']
local_rank = dsetup['local_rank']
elif (be in {'deepspeed', 'ds'}):
init_deepspeed()
size = get_world_size()
rank = get_rank()
local_rank = get_local_rank()
elif (be in {'horovod', 'hvd'}):
import horovod.torch as hvd
_ = (None if hvd.is_initialized() else hvd.init())
rank = hvd.rank()
size = hvd.size()
local_rank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(hvd.local_rank())
else:
raise ValueError
os.environ['SIZE'] = str(size)
os.environ['RANK'] = str(rank)
os.environ['LOCAL_RANK'] = str(local_rank)
return {'size': size, 'rank': rank, 'local_rank': local_rank}
|
def setup_torch(seed: int, backend: str='horovod', port: str='2345', precision: Optional[str]=None) -> int:
import torch
from l2hmc.common import seed_everything
dtypes = {'float16': torch.float16, 'float32': torch.float32, 'float64': torch.float64}
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.use_deterministic_algorithms(True)
dsetup = setup_torch_distributed(backend=backend, port=port)
rank = dsetup['rank']
size = dsetup['size']
local_rank = dsetup['local_rank']
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(size)
nthreads = os.environ.get('OMP_NUM_THREADS', None)
if (nthreads is not None):
torch.set_num_threads(int(nthreads))
if (precision is not None):
log.warning(f'Setting default dtype: {precision}')
torch.set_default_dtype(dtypes.get(precision, torch.float32))
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
log.info(f'Global Rank: {rank} / {(size - 1)}')
seed_everything(((seed * (rank + 1)) * (local_rank + 1)))
return rank
|
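A hedged usage sketch for setup_torch; it assumes the process was launched under an MPI-aware launcher (e.g. mpiexec) so the rank queries above succeed:

# One call per process; the returned value is the global rank.
rank = setup_torch(seed=1234, backend='DDP', precision='float32')
if rank == 0:
    print('orchestrator process ready')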
def cleanup() -> None:
import torch.distributed as tdist
tdist.destroy_process_group()
|
@dataclass
class PlotObject():
ax: plt.Axes
line: list[plt.Line2D]
|
@dataclass
class LivePlotData():
data: Any
plot_obj: PlotObject
|
def moving_average(x: ArrayLike, window: int=10):
xarr = np.array(x)
if ((len(xarr.shape) > 0) and (xarr.shape[0] < window)):
return np.mean(xarr, keepdims=True)
return (np.convolve(xarr, np.ones(window), 'valid') / window)
|
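moving_average degrades gracefully when the series is shorter than the window; a quick worked example:

import numpy as np

print(moving_average([1.0, 2.0, 3.0], window=10))           # [2.] (short series: plain mean)
print(moving_average(np.arange(5, dtype=float), window=2))  # [0.5 1.5 2.5 3.5]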
def init_plots(title: Optional[str]=None, ylabels: Optional[Sequence[str]]=None, keys: Optional[Sequence[str]]=None, xlabel: str='Step', **kwargs):
set_plot_style()
plots = {}
if plt.isinteractive():
if ((keys is not None) and (len(keys) > 0)):
for key in keys:
plots[key] = init_live_plot(title=title, ylabel=key, xlabel=xlabel, **kwargs)
c0 = ['C0', 'C1']
if (ylabels is None):
ylabels = ['loss', 'dQint']
plots['loss'] = init_live_joint_plots(ylabels=ylabels, xlabel=xlabel, colors=c0, **kwargs)
return plots
|
def update_plot(y: (np.ndarray | list), ax: plt.Axes, line: list[plt.Line2D], display_id: DisplayHandle, logging_steps: int=1, fig: Optional[plt.Figure]=None) -> None:
if (not is_interactive()):
return
if isinstance(y, list):
yarr: np.ndarray = (np.stack(y) if isinstance(y[0], np.ndarray) else np.array(y))
else:
yarr = y
if (len(yarr.shape) == 2):
yarr = yarr.mean((- 1))
assert isinstance(yarr, np.ndarray)
x = np.arange(yarr.shape[0])
line[0].set_ydata(yarr)
line[0].set_xdata((logging_steps * x))
ax.relim()
ax.autoscale_view()
if (fig is not None):
fig.canvas.draw()
display_id.update(fig)
plt.show()
|
def update_joint_plots(plot_data1: LivePlotData, plot_data2: LivePlotData, display_id: DisplayHandle, logging_steps: int=1, fig: Optional[(plt.Figure | plt.FigureBase)]=None):
if (not is_interactive()):
return
plot_obj1 = plot_data1.plot_obj
plot_obj2 = plot_data2.plot_obj
y1 = np.array(plot_data1.data)
y2 = np.array(plot_data2.data)
if (len(y1.shape) == 2):
y1 = np.mean(y1, (- 1))
if (len(y2.shape) == 2):
y2 = np.mean(y2, (- 1))
x1 = np.arange(y1.shape[0])
x2 = np.arange(y2.shape[0])
plot_obj1.line[0].set_ydata(y1)
plot_obj1.line[0].set_xdata((logging_steps * x1))
plot_obj2.line[0].set_ydata(y2)
plot_obj2.line[0].set_xdata((logging_steps * x2))
plot_obj1.ax.relim()
plot_obj2.ax.relim()
plot_obj1.ax.autoscale_view()
plot_obj2.ax.autoscale_view()
if (fig is not None):
fig.canvas.draw()
display_id.update(fig)
|
def init_live_plot(dpi: int=400, figsize: Optional[tuple[(int, int)]]=None, xlabel: Optional[str]=None, ylabel: Optional[str]=None, title: Optional[str]=None, **kwargs):
color = kwargs.pop('color', '#0096FF')
xlabel = ('Step' if (xlabel is None) else xlabel)
if (figsize is None):
dpi = 125
figsize = (9, 3)
(fig, ax) = plt.subplots(nrows=1, ncols=1, dpi=dpi, figsize=figsize, constrained_layout=True)
assert isinstance(ax, plt.Axes)
(line,) = ax.plot([0], [0], c=color, animated=True, **kwargs)
if ((title is not None) and (len(title) > 0)):
if isinstance(title, list):
fig.suptitle('\n'.join(title))
else:
fig.suptitle(title)
if (ylabel is not None):
ax.set_ylabel(ylabel, color=color)
ax.tick_params(axis='y', labelcolor=color)
ax.autoscale(True, axis='y')
display_id = display(fig, display_id=True)
return {'ax': ax, 'line': line, 'display_id': display_id}
|
def init_live_joint_plots(ylabels: Sequence[str], dpi: int=120, figsize: Optional[tuple[(int, int)]]=None, xlabel: Optional[str]=None, colors: Optional[Sequence[str]]=None, title: Optional[str]=None, fig: Optional[(plt.Figure | plt.FigureBase)]=None, ax: Optional[plt.Axes]=None):
if (colors is None):
n = np.random.randint(10, size=2)
colors = [f'C{n[0]}', f'C{n[1]}']
if (figsize is None):
dpi = 125
figsize = (9, 3)
if (fig is None):
(fig, ax0) = plt.subplots(1, 1, dpi=dpi, figsize=figsize, constrained_layout=True)
ax = (ax0 if isinstance(ax0, plt.Axes) else ax0[0])
else:
fig = plt.gcf()
if (ax is None):
ax = plt.gca()
assert ((ax is not None) and isinstance(ax, plt.Axes))
ax1 = ax.twinx()
line0 = ax.plot([0], [0], alpha=0.9, c=colors[0], animated=True)
line1 = ax1.plot([0], [0], alpha=0.9, c=colors[1], animated=True)
ax.set_ylabel(ylabels[0], color=colors[0])
ax1.set_ylabel(ylabels[1], color=colors[1])
ax.tick_params(axis='y', labelcolor=colors[0])
ax1.tick_params(axis='y', labelcolor=colors[1])
ax.grid(False)
ax1.grid(False)
ax.set_xlabel(('Step' if (xlabel is None) else xlabel))
if (title is not None):
if (fig is not None):
fig.suptitle(title, fontsize='small')
display_id = display(fig, display_id=True)
plot_obj1 = PlotObject(ax, line0)
plot_obj2 = PlotObject(ax1, line1)
return {'fig': fig, 'ax0': ax, 'ax1': ax1, 'plot_obj1': plot_obj1, 'plot_obj2': plot_obj2, 'display_id': display_id}
|
def update_plots(history: dict, plots: dict, logging_steps: int=1):
lpdata = LivePlotData(history['loss'], plots['loss']['plot_obj1'])
bpdata = LivePlotData(history['dQint'], plots['loss']['plot_obj2'])
fig_loss = plots['loss']['fig']
id_loss = plots['loss']['display_id']
update_joint_plots(lpdata, bpdata, fig=fig_loss, display_id=id_loss, logging_steps=logging_steps)
for (key, val) in history.items():
if ((key in plots) and (key != 'loss')):
_ = update_plot(y=val, logging_steps=logging_steps, **plots[key])
|
def get_summary_writer(cfg: DictConfig, job_type: str):
'Returns SummaryWriter object for tracking summaries.'
outdir = Path(cfg.get('outdir', os.getcwd()))
jobdir = outdir.joinpath(job_type)
summary_dir = jobdir.joinpath('summaries')
summary_dir.mkdir(exist_ok=True, parents=True)
return SummaryWriter(summary_dir.as_posix())
|
def load_from_ckpt(dynamics: Dynamics, optimizer: torch.optim.Optimizer, cfg: DictConfig) -> tuple[(torch.nn.Module, torch.optim.Optimizer, dict)]:
outdir = Path(cfg.get('outdir', os.getcwd()))
if (not (ckpts := list(outdir.joinpath('train', 'checkpoints').rglob('*.tar')))):
raise FileNotFoundError(f'No checkpoints found in {outdir}')
latest = max(ckpts, key=(lambda p: p.stat().st_ctime))
if (not latest.is_file()):
raise FileNotFoundError(f'No checkpoints found in {outdir}')
log.info(f'Loading from checkpoint: {latest}')
ckpt = torch.load(latest)
dynamics.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
dynamics.assign_eps({'xeps': ckpt['xeps'], 'veps': ckpt['veps']})
return (dynamics, optimizer, ckpt)
|
def get_console(**kwargs) -> Console:
interactive = is_interactive()
from rich.theme import Theme
theme = Theme(STYLES)
return Console(force_jupyter=interactive, log_path=False, theme=theme, soft_wrap=True, **kwargs)
|
def is_interactive() -> bool:
from IPython.core.getipython import get_ipython
from_env = (os.environ.get('INTERACTIVE', None) is not None)
from_ipython = (get_ipython() is not None)
return (from_env or from_ipython)
|
def get_width():
width = os.environ.get('COLUMNS', os.environ.get('WIDTH', None))
if (width is not None):
return int(width)
size = shutil.get_terminal_size()
os.environ['COLUMNS'] = str(size.columns)
return size.columns
|
def make_layout(ratio: int=4, visible: bool=True) -> Layout:
'Define the layout.'
layout = Layout(name='root', visible=visible)
layout.split_row(Layout(name='main', ratio=ratio, visible=visible), Layout(name='footer', visible=visible))
return layout
|
def build_layout(steps: Any, visible: bool=True, job_type: Optional[str]='train') -> dict:
job_progress = Progress('{task.description}', SpinnerColumn('dots'), BarColumn(), TimeElapsedColumn(), TextColumn('[progress.percentage]{task.percentage:>3.0f}%'), TimeRemainingColumn())
tasks = {}
border_style = 'white'
if (job_type == 'train'):
border_style = 'green'
tasks['step'] = job_progress.add_task('[blue]Total', total=(steps.nera * steps.nepoch))
tasks['epoch'] = job_progress.add_task('[cyan]Epoch', total=steps.nepoch)
elif (job_type == 'eval'):
border_style = 'green'
tasks['step'] = job_progress.add_task('[green]Eval', total=steps.test)
elif (job_type == 'hmc'):
border_style = 'yellow'
tasks['step'] = job_progress.add_task('[green]HMC', total=steps.test)
else:
raise ValueError(f'''Expected job_type to be one of train, eval, or HMC,
Received: {job_type}''')
progress_table = Table.grid(expand=True)
progress_table.add_row(Panel.fit(job_progress, title=f'[b]{job_type}', border_style=border_style))
layout = make_layout(visible=visible)
if visible:
layout['root']['footer'].update(progress_table)
return {'layout': layout, 'tasks': tasks, 'progress_table': progress_table, 'job_progress': job_progress}
|
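A usage sketch for build_layout with Rich's Live display; the steps object below is a hypothetical stand-in carrying only the fields the function reads:

import time
from types import SimpleNamespace
from rich.live import Live

steps = SimpleNamespace(nera=2, nepoch=5, test=0)  # hypothetical steps config
setup = build_layout(steps, job_type='train')
with Live(setup['layout'], refresh_per_second=4):
    for _ in range(steps.nera * steps.nepoch):
        setup['job_progress'].advance(setup['tasks']['step'])
        time.sleep(0.05)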
def add_columns(avgs: dict, table: Table, skip: Optional[(str | list[str])]=None, keep: Optional[(str | list[str])]=None) -> Table:
for key in avgs:
if ((skip is not None) and (key in skip)):
continue
if ((keep is not None) and (key not in keep)):
continue
if (key == 'loss'):
table.add_column(str(key), justify='center', style='green')
elif (key == 'dt'):
table.add_column(str(key), justify='center', style='red')
elif (key == 'acc'):
table.add_column(str(key), justify='center', style='magenta')
elif (key == 'dQint'):
table.add_column(str(key), justify='center', style='cyan')
elif (key == 'dQsin'):
table.add_column(str(key), justify='center', style='yellow')
else:
table.add_column(str(key), justify='center')
return table
|
def flatten_dict(d) -> dict:
res = {}
if isinstance(d, dict):
for k in d:
if (k == '_target_'):
continue
dflat = flatten_dict(d[k])
for (key, val) in dflat.items():
key = list(key)
key.insert(0, k)
res[tuple(key)] = val
else:
res[()] = d
return res
|
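flatten_dict keys every leaf by its tuple path and silently drops Hydra's _target_ entries:

cfg = {'steps': {'nera': 2, 'nepoch': 10}, 'framework': 'pt', '_target_': 'ignored'}
print(flatten_dict(cfg))
# {('steps', 'nera'): 2, ('steps', 'nepoch'): 10, ('framework',): 'pt'}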
def nested_dict_to_df(d):
dflat = flatten_dict(d)
df = pd.DataFrame.from_dict(dflat, orient='index')
df.index = pd.MultiIndex.from_tuples(df.index)
df = df.unstack(level=(- 1))
df.columns = df.columns.map('{0[1]}'.format)
return df
|
def print_config(config: DictConfig, resolve: bool=True) -> None:
'Prints content of DictConfig using Rich library and its tree structure.\n\n    Args:\n        config (DictConfig): Configuration composed by Hydra.\n        resolve (bool, optional): Whether to resolve reference fields of\n            DictConfig.\n    '
from l2hmc.configs import OUTPUTS_DIR
tree = rich.tree.Tree('CONFIG')
quee = []
for f in config:
if (f not in quee):
quee.append(f)
dconfig = {}
for f in quee:
branch = tree.add(f)
config_group = config[f]
if isinstance(config_group, DictConfig):
branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
cfg = OmegaConf.to_container(config_group, resolve=resolve)
else:
branch_content = str(config_group)
cfg = str(config_group)
dconfig[f] = cfg
branch.add(rich.syntax.Syntax(branch_content, 'yaml'))
outfile = Path(os.getcwd()).joinpath('config_tree.log')
with outfile.open('wt') as f:
console = rich.console.Console(file=f)
console.print(tree)
with open('config.json', 'w') as f:
f.write(json.dumps(dconfig))
cfgfile = Path('config.yaml')
OmegaConf.save(config, cfgfile, resolve=True)
cfgdict = OmegaConf.to_object(config)
logdir = Path(os.getcwd()).resolve().as_posix()
if (not config.get('debug_mode', False)):
dbfpath = Path(OUTPUTS_DIR).joinpath('logdirs.csv')
else:
dbfpath = Path(OUTPUTS_DIR).joinpath('logdirs-debug.csv')
if dbfpath.is_file():
mode = 'a'
header = False
else:
mode = 'w'
header = True
df = pd.DataFrame({logdir: cfgdict})
df.T.to_csv(dbfpath.resolve().as_posix(), mode=mode, header=header)
os.environ['LOGDIR'] = logdir
|
@dataclass
class CustomLogging():
version: int = 1
formatters: dict[(str, Any)] = field(default_factory=(lambda : {'simple': {'format': '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'}}))
handlers: dict[(str, Any)] = field(default_factory=(lambda : {'console': {'class': 'rich.logging.RichHandler', 'formatter': 'simple', 'rich_tracebacks': 'true'}, 'file': {'class': 'logging.FileHandler', 'formatter': 'simple', 'filename': '${hydra.job.name}.log'}}))
root: dict[(str, Any)] = field(default_factory=(lambda : {'level': 'INFO', 'handlers': ['console', 'file']}))
disable_existing_loggers: bool = False
|
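CustomLogging mirrors the schema logging.config.dictConfig expects; outside Hydra the ${hydra.job.name} interpolation in the file handler must be resolved by hand. A sketch under that assumption:

import logging
import logging.config
from dataclasses import asdict

cfg = asdict(CustomLogging())
cfg['handlers']['file']['filename'] = 'run.log'  # replace the Hydra interpolation
logging.config.dictConfig(cfg)
logging.getLogger(__name__).info('logging configured')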
def printarr(*arrs, float_width=6):
'\n Print a pretty table giving name, shape, dtype, type, and content\n information for input tensors or scalars.\n\n Call like: printarr(my_arr, some_other_arr, maybe_a_scalar). Accepts a\n variable number of arguments.\n\n Inputs can be:\n - Numpy tensor arrays\n - Pytorch tensor arrays\n - Jax tensor arrays\n - Python ints / floats\n - None\n\n It may also work with other array-like types, but they have not been tested\n\n Use the `float_width` option specify the precision to which floating point\n types are printed.\n\n Author: Nicholas Sharp (nmwsharp.com)\n Canonical source:\n https://gist.github.com/nmwsharp/54d04af87872a4988809f128e1a1d233\n License: This snippet may be used under an MIT license, and it is also\n released into the public domain. Please retain this docstring as a\n reference.\n '
import inspect
frame_ = inspect.currentframe()
assert (frame_ is not None)
frame = frame_.f_back
default_name = '[temporary]'
def name_from_outer_scope(a):
if (a is None):
return '[None]'
name = default_name
if (frame is not None):
for (k, v) in frame.f_locals.items():
if (v is a):
name = k
break
return name
def dtype_str(a):
if (a is None):
return 'None'
if isinstance(a, int):
return 'int'
return ('float' if isinstance(a, float) else str(a.dtype))
def shape_str(a):
if (a is None):
return 'N/A'
if isinstance(a, int):
return 'scalar'
return ('scalar' if isinstance(a, float) else str(list(a.shape)))
def type_str(a):
return str(type(a))[8:(- 2)]
def device_str(a):
if hasattr(a, 'device'):
device_str = str(a.device)
if (len(device_str) < 10):
return device_str
return ''
def format_float(x):
return f'{x:{float_width}g}'
def minmaxmean_str(a):
if (a is None):
return ('N/A', 'N/A', 'N/A')
if isinstance(a, (int, float)):
return (format_float(a), format_float(a), format_float(a))
min_str = 'N/A'
try:
min_str = format_float(a.min())
except Exception:
pass
max_str = 'N/A'
try:
max_str = format_float(a.max())
except Exception:
pass
mean_str = 'N/A'
try:
mean_str = format_float(a.mean())
except Exception:
pass
return (min_str, max_str, mean_str)
try:
props = ['name', 'dtype', 'shape', 'type', 'device', 'min', 'max', 'mean']
str_props = []
for a in arrs:
minmaxmean = minmaxmean_str(a)
str_props.append({'name': name_from_outer_scope(a), 'dtype': dtype_str(a), 'shape': shape_str(a), 'type': type_str(a), 'device': device_str(a), 'min': minmaxmean[0], 'max': minmaxmean[1], 'mean': minmaxmean[2]})
maxlen = {}
for p in props:
maxlen[p] = 0
for sp in str_props:
for p in props:
maxlen[p] = max(maxlen[p], len(sp[p]))
props = [p for p in props if (maxlen[p] > 0)]
header_str = ''
for p in props:
prefix = ('' if (p == 'name') else ' | ')
fmt_key = ('>' if (p == 'name') else '<')
header_str += f'{prefix}{p:{fmt_key}{maxlen[p]}}'
print(header_str)
print(('-' * len(header_str)))
for strp in str_props:
for p in props:
prefix = ('' if (p == 'name') else ' | ')
fmt_key = ('>' if (p == 'name') else '<')
print(f'{prefix}{strp[p]:{fmt_key}{maxlen[p]}}', end='')
print('')
finally:
del frame
import time
|
@contextmanager
def beat(length: int=1) -> Generator:
with console:
(yield)
time.sleep((length * BEAT_TIME))
|
class DataFramePrettify():
'Create animated and pretty Pandas DataFrame.\n\n Modified from: https://github.com/khuyentran1401/rich-dataframe\n\n Parameters\n ----------\n df : pd.DataFrame\n The data you want to prettify\n row_limit : int, optional\n Number of rows to show, by default 20\n col_limit : int, optional\n Number of columns to show, by default 10\n first_rows : bool, optional\n Whether to show first n rows or last n rows, by default True.\n If this is set to False, show last n rows.\n first_cols : bool, optional\n Whether to show first n columns or last n columns, by default True.\n If this is set to False, show last n rows.\n delay_time : int, optional\n How fast is the animation, by default 5.\n Increase this to have slower animation.\n clear_console: bool, optional\n Clear the console before printing the table, by default True.\n If this is set to False the previous console\n input/output is maintained\n '
def __init__(self, df: pd.DataFrame, row_limit: int=20, col_limit: int=10, first_rows: bool=True, first_cols: bool=True, delay_time: int=5, clear_console: bool=True) -> None:
self.df = df.reset_index().rename(columns={'index': ''})
self.table = Table(show_footer=False)
self.table_centered = Columns((self.table,), align='center', expand=True)
self.num_colors = len(COLORS)
self.delay_time = delay_time
self.row_limit = row_limit
self.first_rows = first_rows
self.col_limit = col_limit
self.first_cols = first_cols
self.clear_console = clear_console
if first_cols:
self.columns = self.df.columns[:col_limit]
else:
self.columns = list(self.df.columns[(- col_limit):])
self.columns.insert(0, 'index')
if first_rows:
self.rows = self.df.values[:row_limit]
else:
self.rows = self.df.values[(- row_limit):]
if self.clear_console:
console.clear()
def _add_columns(self):
for col in self.columns:
with beat(self.delay_time):
self.table.add_column(str(col))
def _add_rows(self):
for row in self.rows:
with beat(self.delay_time):
row = (row[:self.col_limit] if self.first_cols else row[(- self.col_limit):])
row = [str(item) for item in row]
self.table.add_row(*list(row))
def _move_text_to_right(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].justify = 'right'
def _add_random_color(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].header_style = COLORS[(i % self.num_colors)]
def _add_style(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].style = ('bold ' + COLORS[(i % self.num_colors)])
def _adjust_box(self):
for box in [SIMPLE_HEAD, SIMPLE, MINIMAL, SQUARE]:
with beat(self.delay_time):
self.table.box = box
def _dim_row(self):
with beat(self.delay_time):
self.table.row_styles = ['none', 'dim']
def _adjust_border_color(self):
with beat(self.delay_time):
self.table.border_style = 'bright_yellow'
def _change_width(self):
original_width = Measurement.get(console=console, options=console.options, renderable=self.table).maximum
width_ranges = [[original_width, console.width, 2], [console.width, original_width, (- 2)], [original_width, 90, (- 2)], [90, (original_width + 1), 2]]
for width_range in width_ranges:
for width in range(*width_range):
with beat(self.delay_time):
self.table.width = width
with beat(self.delay_time):
self.table.width = None
def _add_caption(self):
row_text = ('first' if self.first_rows else 'last')
col_text = ('first' if self.first_cols else 'last')
with beat(self.delay_time):
self.table.caption = f'Only the {row_text} {self.row_limit} rows and the {col_text} {self.col_limit} columns are shown here.'
with beat(self.delay_time):
self.table.caption = f'Only the [bold green]{row_text} {self.row_limit} rows[/bold green] and the [bold red]{col_text} {self.col_limit} columns[/bold red] are shown here.'
with beat(self.delay_time):
self.table.caption = f'Only the [bold magenta not dim]{row_text} {self.row_limit} rows[/bold magenta not dim] and the [bold green not dim]{col_text} {self.col_limit} columns[/bold green not dim] are shown here.'
def prettify(self):
with Live(self.table_centered, console=console, refresh_per_second=self.delay_time, vertical_overflow='ellipsis'):
self._add_columns()
self._add_rows()
self._move_text_to_right()
self._add_random_color()
self._add_style()
self._adjust_border_color()
self._add_caption()
return self.table
|
def prettify(df: pd.DataFrame, row_limit: int=20, col_limit: int=10, first_rows: bool=True, first_cols: bool=True, delay_time: int=5, clear_console: bool=True):
'Create animated and pretty Pandas DataFrame\n\n Parameters\n ----------\n df : pd.DataFrame\n The data you want to prettify\n row_limit : int, optional\n Number of rows to show, by default 20\n col_limit : int, optional\n Number of columns to show, by default 10\n first_rows : bool, optional\n Whether to show first n rows or last n rows, by default True. If this is set to False, show last n rows.\n first_cols : bool, optional\n Whether to show first n columns or last n columns, by default True. If this is set to False, show last n rows.\n delay_time : int, optional\n How fast is the animation, by default 5. Increase this to have slower animation.\n clear_console: bool, optional\n Clear the console before printing the table, by default True. If this is set to false the previous console input/output is maintained\n '
if isinstance(df, pd.DataFrame):
DataFramePrettify(df, row_limit, col_limit, first_rows, first_cols, delay_time, clear_console).prettify()
else:
print(df)
|
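A quick usage sketch (like the class above, this assumes a module-level rich console object exists):

import pandas as pd

df = pd.DataFrame({'loss': [0.91, 0.87, 0.83], 'acc': [0.42, 0.48, 0.55]})
prettify(df, row_limit=3, col_limit=3, delay_time=1)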
def log_execution_and_time(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
NOW = get_timestamp()
start = time.time()
log.info(f'{NOW} - Start execution of: {function.__name__}')
result = function(*args, **kwargs)
end = time.time()
log.info(f'{function.__name__} took {(end - start):.4f} seconds')
return result
return wrapper
|
class BaseTimer():
def __init__(self, name: str='BaseTimer', desc: Optional[str]=None):
self.name = name
self.desc = desc
self.data: list = []
self.iterations: int = 0
self.started: float = time.time()
self._created: float = time.time()
def start(self) -> None:
self.started = time.time()
def stop(self) -> float:
dt = (time.time() - self.started)
self.data.append(dt)
self.iterations += 1
return dt
|
class TrainTimer():
def __init__(self) -> None:
self.step_timer = StepTimer()
self.epoch_timer = StepTimer()
|
class StepTimer():
def __init__(self, evals_per_step: int=1) -> None:
self.data = []
self.t = time.time()
self.iterations = 0
self.evals_per_step = evals_per_step
def start(self) -> None:
self.t = time.time()
def stop(self) -> float:
dt = (time.time() - self.t)
self.data.append(dt)
self.iterations += 1
return dt
def get_eval_rate(self, evals_per_step: Optional[int]=None) -> dict:
if (evals_per_step is None):
evals_per_step = self.evals_per_step
elapsed = np.sum(self.data)
num_evals = (evals_per_step * len(self.data))
eval_rate = (num_evals / elapsed)
return {'eval_rate': eval_rate, 'elapsed': elapsed, 'num_evals': num_evals, 'num_steps': len(self.data), 'evals_per_step': evals_per_step}
def write_eval_rate(self, outdir: os.PathLike, mode: str='a', evals_per_step: Optional[int]=None) -> dict:
eval_rate = self.get_eval_rate(evals_per_step)
outfile = Path(outdir).joinpath('step_timer_output.json')
with open(outfile, mode) as f:
json.dump(eval_rate, f)
return eval_rate
def save_data(self, outfile: os.PathLike, mode: str='a') -> pd.DataFrame:
df = pd.DataFrame(self.data)
fpath = Path(outfile).resolve()
fpath.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(fpath.as_posix(), mode=mode)
return df
def save_and_write(self, outdir: os.PathLike, mode: str='a', fname: Optional[str]=None, evals_per_step: Optional[int]=None) -> dict:
fname = ('step_timer' if (fname is None) else fname)
outfile = Path(outdir).joinpath(f'{fname}.csv')
df = self.save_data(outfile=outfile, mode=mode)
data = self.write_eval_rate(outdir=outdir, evals_per_step=evals_per_step)
data.update({'df': df})
return data
|
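StepTimer accumulates per-step wall times, and get_eval_rate reports evals_per_step * num_steps / elapsed. A small sketch:

import time

timer = StepTimer(evals_per_step=4)  # e.g. four leapfrog evaluations per step
for _ in range(3):
    timer.start()
    time.sleep(0.01)  # stand-in for one training step
    timer.stop()
print(timer.get_eval_rate())  # eval_rate is roughly 4 * 3 / total elapsed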
class History(BaseHistory):
def update(self, metrics: dict) -> dict:
avgs = {}
era = metrics.get('era', 0)
for (key, val) in metrics.items():
avg = None
if isinstance(val, (float, int)):
avg = val
elif isinstance(val, dict):
for (k, v) in val.items():
subkey = f'{key}/{k}'
try:
sub_avg = self._update(key=subkey, val=v)
except tf.errors.InvalidArgumentError:
continue
if (sub_avg is not None):
avgs[subkey] = sub_avg
try:
self.era_metrics[str(era)][subkey].append(sub_avg)
except KeyError:
self.era_metrics[str(era)][subkey] = [sub_avg]
continue
else:
avg = self._update(key=key, val=val)
if (avg is not None):
avgs[key] = avg
try:
self.era_metrics[str(era)][key].append(avg)
except KeyError:
self.era_metrics[str(era)][key] = [avg]
return avgs
def _update(self, key: str, val: Any) -> float:
if (val is None):
raise ValueError(f'None encountered: {key}: {val}')
if isinstance(val, list):
val = np.array(val)
try:
self.history[key].append(val)
except KeyError:
self.history[key] = [val]
if isinstance(val, (float, int)):
return val
try:
return tf.reduce_mean(val)
except Exception:
return val
|
def get_summary_writer(cfg: DictConfig, job_type: str):
'Returns SummaryWriter object for tracking summaries.'
outdir = Path(cfg.get('outdir', os.getcwd()))
jobdir = outdir.joinpath(job_type)
sdir = jobdir.joinpath('summaries')
sdir.mkdir(exist_ok=True, parents=True)
return tf.summary.create_file_writer(sdir.as_posix())
|
def evaluate(cfg: DictConfig, trainer: Trainer, job_type: str, run: Optional[Any]=None, nchains: Optional[int]=10, eps: Optional[TensorLike]=None) -> dict:
assert isinstance(nchains, int)
assert (job_type in {'eval', 'hmc'})
therm_frac = cfg.get('therm_frac', 0.2)
jobdir = get_jobdir(cfg, job_type=job_type)
writer = get_summary_writer(cfg, job_type=job_type)
if (writer is not None):
writer.set_as_default()
output = trainer.eval(run=run, writer=writer, nchains=nchains, job_type=job_type, eps=eps)
dataset = output['history'].get_dataset(therm_frac=therm_frac)
if (run is not None):
dQint = dataset.data_vars.get('dQint').values
drop = int((0.1 * len(dQint)))
dQint = dQint[drop:]
run.summary[f'dQint_{job_type}'] = dQint
run.summary[f'dQint_{job_type}.mean'] = dQint.mean()
_ = analyze_dataset(dataset, run=run, save=True, outdir=jobdir, nchains=nchains, job_type=job_type, title=f'{job_type}: TensorFlow')
if (not is_interactive()):
edir = jobdir.joinpath('logs')
edir.mkdir(exist_ok=True, parents=True)
log.info(f'Saving {job_type} logs to: {edir.as_posix()}')
save_logs(run=run, logdir=edir, job_type=job_type, tables=output['tables'], summaries=output['summaries'])
if (writer is not None):
writer.close()
return output
|
def train(cfg: DictConfig, trainer: Trainer, run: Optional[Any]=None, nchains: Optional[int]=None, **kwargs) -> dict:
nchains = (16 if (nchains is None) else nchains)
jobdir = get_jobdir(cfg, job_type='train')
writer = get_summary_writer(cfg, job_type='train')
if (writer is not None):
writer.set_as_default()
output = trainer.train(run=run, writer=writer, train_dir=jobdir, **kwargs)
if (RANK == 0):
dset = output['history'].get_dataset()
_ = analyze_dataset(dset, run=run, save=True, outdir=jobdir, nchains=nchains, job_type='train', title='Training: TensorFlow')
if (not is_interactive()):
tdir = jobdir.joinpath('logs')
tdir.mkdir(exist_ok=True, parents=True)
log.info(f'Saving train logs to: {tdir.as_posix()}')
save_logs(run=run, logdir=tdir, job_type='train', tables=output['tables'], summaries=output['summaries'])
if (writer is not None):
writer.close()
return output
|
def GetCOCOCatNames():
'Return the 80 COCO category names keyed by contiguous index.'
names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle',
'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
return dict(enumerate(names))
|
@app.route('/')
def home():
return render_template('index.html')
|
@app.route('/predict', methods=['POST'])
def predict():
'\n For rendering results on HTML GUI\n '
st = [str(x) for x in request.form.values()]
prediction = recommend(st[0])
pr = f'1) {prediction[0][0]} // 2) {prediction[0][1]} // 3) {prediction[0][2]}'
return render_template('index.html', recommended='Recommended Titles: {}'.format(pr))
|
@app.route('/predict_api', methods=['POST'])
def predict_api():
'\n For direct API calls through requests\n '
data = request.get_json(force=True)
prediction = recommend(data)
output = prediction[0][0]
return jsonify(output)
|
def recommend(abstract: str):
from simpletransformers.t5 import T5Model
model_args = {'reprocess_input_data': True, 'overwrite_output_dir': True, 'max_seq_length': 256, 'eval_batch_size': 128, 'num_train_epochs': 1, 'save_eval_checkpoints': False, 'use_multiprocessing': False, 'num_beams': None, 'do_sample': True, 'max_length': 50, 'top_k': 50, 'top_p': 0.95, 'num_return_sequences': 3}
model = T5Model('t5', './checkpoint_15000_1', args=model_args, use_cuda=False)
abss = [('summarize: ' + abstract)]
predicted_title = model.predict(abss)
return predicted_title
|
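A client-side sketch for the /predict_api route; it assumes the Flask app is running locally on the default port 5000 (recommend prepends the 'summarize:' prefix itself, so the payload is just the abstract text):

import requests

resp = requests.post(
    'http://127.0.0.1:5000/predict_api',
    json='We propose a generative model for sampling lattice field configurations...',
)
print(resp.json())  # the first generated title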
def getMetadata(path_to_json):
with open(path_to_json, 'r') as f:
for line in f:
(yield line)
|
def json2list(path_to_json):
metadata = getMetadata(path_to_json)
generator_iter = next(metadata)
keys = json.loads(generator_iter)  # first line is consumed as the key schema (not used further)
abstracts = []
titles = []
years = []
categories = []
authors = []
authors_parsed = []
for paper in metadata:
paper_dict = json.loads(paper)
try:
a_year = int(paper_dict.get('journal-ref')[(- 4):])
if ((a_year > 1950) and (a_year < 2020)):
years.append(a_year)
titles.append(paper_dict.get('title'))
abstracts.append(paper_dict.get('abstract'))
categories.append(paper_dict.get('categories'))
authors.append(paper_dict.get('authors'))
authors_parsed.append(paper_dict.get('authors_parsed'))
else:
pass
except (TypeError, ValueError):
pass
data = {'years': years, 'titles': titles, 'abstracts': abstracts, 'categories': categories, 'authors': authors, 'authors_parsed': authors_parsed}
print('Abstracts and titles are parsed...')
return data
|
def json2csv(path_to_json):
data = json2list(path_to_json)
df_all = pd.DataFrame({'Title': data['titles'], 'Abstract': data['abstracts'], 'Parsed Authors': data['authors_parsed'], 'Authors': data['authors'], 'Year': data['years'], 'Category': data['categories']})
df_all.to_csv('../data/raw.csv', index=False)
return df_all
|
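Usage sketch for the pipeline above (the path is a placeholder for the arXiv metadata JSON-lines dump; note that json2list consumes the first line as a key schema):

df = json2csv('../data/arxiv-metadata-oai-snapshot.json')  # hypothetical path
print(df.shape)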