code stringlengths 17 6.64M |
|---|
def make_dataset(metrics: dict) -> xr.Dataset:
    """Convert a dict of metric histories into an `xarray.Dataset`.

    Each value is coerced to a `np.ndarray` of rank 1, 2 or 3 and wrapped in
    an `xr.DataArray` whose dims are drawn from ('chain', 'leapfrog', 'draw').
    Rank-2/3 arrays are transposed so 'chain' is the leading dimension.
    """
    dset = {}
    for key, val in metrics.items():
        if isinstance(val, list):
            import torch
            if isinstance(val[0], torch.Tensor):
                val = grab_tensor(torch.stack(val))
            elif isinstance(val[0], np.ndarray):
                # BUG FIX: original tested `val` (a list) instead of `val[0]`,
                # so lists of np.ndarrays were never stacked and failed the
                # assert below.
                val = np.stack(val)
            else:
                import tensorflow as tf
                if isinstance(val[0], tf.Tensor):
                    # BUG FIX: original tested `val` (a list) here as well.
                    val = grab_tensor(tf.stack(val))
        assert isinstance(val, np.ndarray)
        assert len(val.shape) in {1, 2, 3}
        if len(val.shape) == 1:
            ndraws = val.shape[0]
            dims = ('draw',)
            # BUG FIX: coords must align with dims; the original assigned a
            # bare np.arange which `tuple(...)` exploded into scalars.
            coords = {'draw': np.arange(ndraws)}
        elif len(val.shape) == 2:
            val = val.T
            nchains, ndraws = val.shape
            dims = ('chain', 'draw')
            coords = {'chain': np.arange(nchains), 'draw': np.arange(ndraws)}
        elif len(val.shape) == 3:
            val = val.T
            nchains, nlf, ndraws = val.shape
            dims = ('chain', 'leapfrog', 'draw')
            coords = {
                'chain': np.arange(nchains),
                'leapfrog': np.arange(nlf),
                'draw': np.arange(ndraws),
            }
        else:
            # Unreachable after the assert above; kept as defensive guard.
            print(f'val.shape: {val.shape}')
            raise ValueError('Invalid shape encountered')
        dset[key] = xr.DataArray(val, dims=dims, coords=coords)
    return xr.Dataset(dset)
|
def plot_dataset(dataset: xr.Dataset, nchains: Optional[int]=10, logfreq: Optional[int]=None, outdir: Optional[os.PathLike]=None, title: Optional[str]=None, job_type: Optional[str]=None, save_plots: bool=True) -> None:
    """Make ridgeplots plus one figure per data variable of `dataset`."""
    tstamp = get_timestamp()
    if outdir is None:
        outdir = Path(os.getcwd()).joinpath(f'{tstamp}')
    else:
        outdir = Path(outdir)
    outdir.mkdir(exist_ok=True, parents=True)
    if job_type is None:
        job_type = f'job-{tstamp}'
    _ = make_ridgeplots(dataset, outdir=outdir, drop_nans=True, drop_zeros=False, num_chains=nchains, cmap='viridis')
    for name, darr in dataset.data_vars.items():
        # 'x' holds raw configurations; skip plotting it.
        if name == 'x':
            continue
        fig, _, _ = plot_dataArray(darr, key=name, logfreq=logfreq, outdir=outdir, title=title, line_labels=False, num_chains=nchains, save_plot=save_plots)
        if save_plots:
            _ = save_figure(fig=fig, key=name, outdir=outdir)
|
def analyze_dataset(dataset: xr.Dataset, outdir: os.PathLike, save: bool=True, use_hdf5: bool=True, nchains: Optional[int]=None, title: Optional[str]=None, logfreq: Optional[int]=None, job_type: Optional[str]=None, run: Optional[Any]=None, arun: Optional[Any]=None) -> xr.Dataset:
    'Save plot and analyze resultant `xarray.Dataset`.'
    job_type = job_type if job_type is not None else f'job-{get_timestamp()}'
    dirs = make_subdirs(outdir)
    if nchains is not None and nchains > 1000:
        nchains_ = nchains // 4
        log.warning(f'Reducing `nchains` from: {nchains} -> {nchains_} for plotting')
        # BUG FIX: the reduced value was computed but never applied.
        nchains = nchains_
    plot_dataset(dataset, nchains=nchains, logfreq=logfreq, title=title, job_type=job_type, outdir=dirs['plots'])
    # BUG FIX: `datafile` was referenced below even when `save` was False,
    # raising NameError; initialize it unconditionally.
    datafile = None
    if save:
        try:
            datafile = save_dataset(dataset, use_hdf5=use_hdf5, outdir=dirs['data'], job_type=job_type)
        except ValueError:
            datafile = None
        # Also dump each variable individually so a partial failure does not
        # lose everything.
        for key, val in dataset.data_vars.items():
            fout = Path(dirs['data']).joinpath(f'{key}.z')
            try:
                joblib.dump(val.values, fout)
            except Exception:
                log.error(f'Unable to `joblib.dump` {key}, skipping!')
    artifact = None
    if job_type is not None:
        pngdir = Path(dirs['plots']).joinpath('pngs')
        if pngdir.is_dir():
            # Upload plots (and data, when saved) to W&B as an artifact.
            if run is not None:
                name = f'{job_type}-{run.id}'
                artifact = wandb.Artifact(name=name, type='result')
                artifact.add_dir(pngdir.as_posix(), name=f'{job_type}/plots')
                if datafile is not None:
                    artifact.add_file(datafile.as_posix(), name=f'{job_type}/data')
                run.log_artifact(artifact)
            # Track individual PNGs in Aim.
            if arun is not None:
                from aim import Image
                for f in list(pngdir.rglob('*.png')):
                    aimage = Image(Path(f).as_posix(), format='png', quality=100)
                    arun.track(aimage, name=f'images/{f.stem}', context={'subset': job_type})
    return dataset
|
def save_and_analyze_data(dataset: xr.Dataset, outdir: os.PathLike, nchains: Optional[int]=None, logfreq: Optional[int]=None, run: Optional[Any]=None, arun: Optional[Any]=None, job_type: Optional[str]=None, rank: Optional[int]=None, framework: Optional[str]=None, summaries: Optional[list[str]]=None, tables: Optional[dict[(str, Table)]]=None, save_data: bool=True) -> xr.Dataset:
    """Analyze `dataset`, then (outside interactive sessions) persist logs."""
    label = f'{job_type}'
    if framework is None:
        title = label
    else:
        title = ': '.join([label, f'{framework}'])
    dataset = analyze_dataset(dataset, run=run, arun=arun, save=save_data, outdir=outdir, nchains=nchains, logfreq=logfreq, job_type=job_type, title=title)
    if is_interactive():
        return dataset
    logdir = Path(outdir).joinpath('logs')
    logdir.mkdir(exist_ok=True, parents=True)
    log.info(f'Saving {job_type} logs to: {logdir.as_posix()}')
    save_logs(run=run, rank=rank, logdir=logdir, job_type=job_type, tables=tables, summaries=summaries)
    return dataset
|
def avg_diff(y: list[float], x: Optional[list[float]]=None, *, drop: Optional[(float | int)]=None) -> float:
if (x is not None):
assert (len(y) == len(x))
if (drop is not None):
if (isinstance(drop, int) and (drop > 1.0)):
n = drop
elif (isinstance(drop, float) and (drop < 1.0)):
n = int((drop * len(y)))
else:
raise ValueError('`drop` must either be an `int > 1` or `float < 1.`')
y = y[n:]
if (x is not None):
x = x[n:]
dy = np.subtract(y[1:], y[:(- 1)]).mean()
return (dy if (x is None) else (dy / np.subtract(x[1:], x[:(- 1)]).mean()))
|
def dict_to_list_of_overrides(d: dict):
    """Flatten nested dict `d` with '.'-joined keys and render each entry
    as a 'key=value' override string (the form consumed by `get_config`)."""
    flat = flatten_dict(d, sep='.')
    return [f'{key}={value}' for key, value in flat.items()]
|
def flatten_dict(d: dict, sep: str='/', pre='') -> dict:
    """Recursively flatten a nested dict, joining key paths with `sep`.

    A non-dict `d` terminates the recursion as `{pre: d}`; `pre` is the
    accumulated key prefix (empty at the top level).
    """
    if not isinstance(d, dict):
        return {pre: d}
    flat = {}
    for outer_key, inner in d.items():
        for leaf_key, leaf_val in flatten_dict(inner, sep, outer_key).items():
            flat[(pre + sep + leaf_key) if pre else leaf_key] = leaf_val
    return flat
|
def add_to_outdirs_file(outdir: os.PathLike):
    """Append the fully-resolved `outdir` as a new line in OUTDIRS_FILE."""
    with open(OUTDIRS_FILE, 'a') as f:
        # BUG FIX: original wrote `Path(outdir).resolve.as_posix()` -- the
        # `.resolve` method was never called, raising AttributeError.
        f.write(Path(outdir).resolve().as_posix() + '\n')
|
def get_jobdir(cfg: DictConfig, job_type: str) -> Path:
    """Create (if needed) and record the output directory for `job_type`."""
    base = Path(cfg.get('outdir', os.getcwd()))
    jobdir = base.joinpath(job_type)
    jobdir.mkdir(exist_ok=True, parents=True)
    assert jobdir is not None
    add_to_outdirs_file(jobdir)
    return jobdir
|
def list_to_str(x: list) -> str:
    """Join the elements of `x` with '-', formatting by the type of x[0]:
    ints verbatim, floats as '{:2.1f}', anything else via str()."""
    head = x[0]
    if isinstance(head, int):
        parts = (str(int(item)) for item in x)
    elif isinstance(head, float):
        parts = (f'{item:2.1f}' for item in x)
    else:
        parts = (str(item) for item in x)
    return '-'.join(parts)
|
@dataclass
class State():
    """Sampler state triple: configuration `x`, conjugate `v`, and `beta`."""
    x: Any     # configuration (presumably lattice links -- TODO confirm)
    v: Any     # conjugate momentum -- presumably; TODO confirm
    beta: Any  # inverse coupling / annealing parameter (cf. AnnealingSchedule)
|
@dataclass
@rich.repr.auto
class BaseConfig(ABC):
    """Abstract base for config dataclasses: JSON/dict (de)serialization helpers."""

    @abstractmethod
    def to_str(self) -> str:
        """Return a short string identifier for this config."""
        pass

    def to_json(self) -> str:
        """Serialize this config's attribute dict to a JSON string."""
        return json.dumps(self.__dict__)

    def get_config(self) -> dict:
        """Return this config as a recursively-converted dict (dataclasses.asdict)."""
        return asdict(self)

    def asdict(self) -> dict:
        """Alias for `get_config` (calls module-level `dataclasses.asdict`)."""
        return asdict(self)

    def to_dict(self) -> dict:
        """Return a deep copy of this config's attribute dict."""
        return deepcopy(self.__dict__)

    def to_file(self, fpath: os.PathLike) -> None:
        """Write this config to `fpath` as JSON.

        BUG FIX: the original wrote `json.dump(self.to_json(), f)`, which
        double-encodes (the file contained one big quoted string) and could
        never be read back by `from_file`.
        """
        with open(fpath, 'w') as f:
            json.dump(self.get_config(), f, indent=4)

    def from_file(self, fpath: os.PathLike) -> None:
        """Re-initialize this config from the JSON file at `fpath`.

        BUG FIX: the original opened `fpath` in 'w' mode first, truncating
        the file before reading it back.
        """
        with open(fpath, 'r') as f:
            config = json.load(f)
        self.__init__(**config)

    def __getitem__(self, key):
        # Allow dict-style access to attributes: cfg['x'] == cfg.x
        return super().__getattribute__(key)
|
@dataclass
class Charges():
    """Pair of topological-charge measurements (cf. LatticeMetrics.asdict)."""
    intQ: Any  # integer-valued charge -- presumably; TODO confirm
    sinQ: Any  # sin-based (continuous) charge -- presumably; TODO confirm
|
@dataclass
class LatticeMetrics():
    """Per-step lattice observables."""
    plaqs: Any        # plaquette values
    charges: Charges  # topological charge pair (intQ, sinQ)
    p4x4: Any         # 4x4 Wilson-loop values -- presumably; TODO confirm
    def asdict(self) -> dict:
        """Flatten into one dict, splitting `charges` into 'sinQ'/'intQ'."""
        return {'plaqs': self.plaqs, 'sinQ': self.charges.sinQ, 'intQ': self.charges.intQ, 'p4x4': self.p4x4}
|
@dataclass
class EnvConfig():
    """Runtime environment snapshot: distributed ranks, host machine, env vars."""

    def __post_init__(self):
        import socket
        dist_env = udist.query_environment()
        self.rank = dist_env['rank']
        self.local_rank = dist_env['local_rank']
        self.world_size = dist_env['world_size']
        try:
            self.hostname = socket.gethostname()
            self.addr = socket.gethostbyaddr(self.hostname)[0]
        except Exception:
            # BUG FIX: the original fallback called gethostbyaddr() again
            # unguarded, so a resolver failure still propagated out of the
            # except block.
            self.hostname = 'localhost'
            try:
                self.addr = socket.gethostbyaddr(self.hostname)[0]
            except Exception:
                self.addr = 'localhost'
        # Identify known machines from the resolved address prefix.
        if self.addr.startswith('x3'):
            self.machine = 'Polaris'
            self.nodefile = os.environ.get('PBS_NODEFILE', None)
        elif self.addr.startswith('x1'):
            self.machine = 'Sunspot'
            self.nodefile = os.environ.get('PBS_NODEFILE', None)
        elif self.addr.startswith('thetagpu'):
            self.machine = 'ThetaGPU'
            self.nodefile = os.environ.get('COBALT_NODEFILE', None)
        else:
            self.machine = self.addr
            self.nodefile = None
        # Snapshot os.environ, dropping filtered / module-system noise.
        self.env = {k: v for k, v in dict(os.environ).items() if (k not in ENV_FILTERS and not k.startswith('_ModuleTable') and not k.startswith('BASH_FUNC_'))}
|
@dataclass
class wandbSetup(BaseConfig):
    """Arguments forwarded to `wandb.init` (see `BaseExperiment._init_wandb`)."""
    id: Optional[str] = None
    group: Optional[str] = None
    save_code: Optional[bool] = True
    sync_tensorboard: Optional[bool] = True
    tags: Optional[Sequence[str]] = None
    mode: Optional[str] = 'online'
    resume: Optional[str] = 'allow'
    entity: Optional[str] = 'l2hmc-qcd'
    project: Optional[str] = 'l2hmc-qcd'
    settings: Optional[dict] = field(default_factory=dict)

    def __post_init__(self):
        # BUG FIX: default_factory=dict makes `settings` `{}` (never None),
        # so the original `is None` check meant the intended default
        # start_method was never applied.  Treat empty/None as "use default".
        if not self.settings:
            self.settings = {'start_method': 'thread'}

    def to_str(self) -> str:
        # No short identifier for wandb setup; required by BaseConfig.
        return ''
|
@dataclass
class wandbConfig(BaseConfig):
    """Wrapper config holding the `wandb.init` arguments in `setup`."""
    setup: wandbSetup  # arguments forwarded to `wandb.init`
    def to_str(self) -> str:
        """Return the JSON serialization of this config."""
        return self.to_json()
|
@dataclass
class NetWeight(BaseConfig):
    """Object for selectively scaling different components of learned fns.

    Explicitly,
     - s: scales the v (x) scaling function in the v (x) updates
     - t: scales the translation function in the update
     - q: scales the force (v) transformation function in the v (x) updates
    """
    s: float = field(default=1.0)
    t: float = field(default=1.0)
    q: float = field(default=1.0)

    def to_dict(self):
        """Return the three weights as a plain dict."""
        return {'s': self.s, 't': self.t, 'q': self.q}

    def to_str(self):
        # BUG FIX: the original formatted `self.t` into the `q` slot
        # (f'...q{self.t:2.1f}'), so `q` never appeared in the string.
        return f's{self.s:2.1f}t{self.t:2.1f}q{self.q:2.1f}'
|
@dataclass
class NetWeights(BaseConfig):
    'Object for selectively scaling different components of x, v networks.'
    # BUG FIX: the original used single shared `NetWeight(...)` instances as
    # class-level defaults, so mutating the default of one NetWeights was
    # visible in every other.  Use default_factory for fresh instances.
    x: NetWeight = field(default_factory=lambda: NetWeight(1.0, 1.0, 1.0))
    v: NetWeight = field(default_factory=lambda: NetWeight(1.0, 1.0, 1.0))

    def to_str(self):
        """Short identifier combining the x- and v-network weights."""
        return f'nwx-{self.x.to_str()}-nwv-{self.v.to_str()}'

    def to_dict(self):
        """Nested dict of the x/v weights."""
        return {'x': self.x.to_dict(), 'v': self.v.to_dict()}

    def __post_init__(self):
        # Allow dicts (e.g. loaded from JSON/Hydra) in place of NetWeight.
        if not isinstance(self.x, NetWeight):
            self.x = NetWeight(**self.x)
        if not isinstance(self.v, NetWeight):
            self.v = NetWeight(**self.v)
|
@dataclass
class LearningRateConfig(BaseConfig):
    'Learning rate configuration object.'
    lr_init: float = 0.001      # initial learning rate
    mode: str = 'auto'
    monitor: str = 'loss'       # quantity monitored (presumably for decay -- TODO confirm)
    patience: int = 5
    cooldown: int = 0
    warmup: int = 1000          # warmup steps -- presumably; TODO confirm
    verbose: bool = True
    min_lr: float = 1e-06
    factor: float = 0.98        # multiplicative decay factor
    min_delta: float = 0.0001
    clip_norm: float = 2.0      # gradient clipping norm -- presumably; TODO confirm
    def to_str(self):
        # NOTE(review): '{:3.2f}' renders the default 0.001 as 'lr-0.00',
        # losing the actual value -- confirm whether this is intentional.
        return f'lr-{self.lr_init:3.2f}'
|
@dataclass
class Steps(BaseConfig):
    """Training schedule: `nera` eras of `nepoch` epochs plus logging cadence."""
    nera: int
    nepoch: int
    test: int                 # number of test/eval steps -- presumably; TODO confirm
    log: int = 100            # logging interval
    print: int = 200          # printing interval; NOTE: field name shadows builtin `print`
    extend_last_era: Optional[int] = None
    def __post_init__(self):
        if (self.extend_last_era is None):
            self.extend_last_era = 1
        # Total number of training steps (does not include any last-era extension).
        self.total = (self.nera * self.nepoch)
        # Fallback cadence: ~20 log/print events per era when not specified.
        freq = int((self.nepoch // 20))
        self.log = (max(1, freq) if (self.log is None) else self.log)
        self.print = (max(1, freq) if (self.print is None) else self.print)
        assert isinstance(self.log, int)
        assert isinstance(self.print, int)
    def to_str(self) -> str:
        """Short identifier used in output paths."""
        return f'nera-{self.nera}_nepoch-{self.nepoch}'
    def update(self, nera: Optional[int]=None, nepoch: Optional[int]=None, test: Optional[int]=None, log: Optional[int]=None, print: Optional[int]=None, extend_last_era: Optional[int]=None) -> Steps:
        """Return a new Steps with any provided fields replaced."""
        return Steps(nera=(self.nera if (nera is None) else nera), nepoch=(self.nepoch if (nepoch is None) else nepoch), test=(self.test if (test is None) else test), log=(self.log if (log is None) else log), print=(self.print if (print is None) else print), extend_last_era=(self.extend_last_era if (extend_last_era is None) else extend_last_era))
|
@dataclass
class ConvolutionConfig(BaseConfig):
    """Optional convolutional front-end: per-layer filters, kernel sizes, pooling."""
    filters: Optional[Sequence[int]] = None  # output channels per conv layer; None disables conv
    sizes: Optional[Sequence[int]] = None    # kernel size per layer
    pool: Optional[Sequence[int]] = None     # pooling size per layer
    def __post_init__(self):
        # No filters -> convolution disabled; nothing to validate.
        if (self.filters is None):
            return
        if (self.sizes is None):
            logger.warning('Using default filter size of 2')
            self.sizes = list((len(self.filters) * [2]))
        if (self.pool is None):
            logger.warning('Using default pooling size of 2')
            self.pool = (len(self.filters) * [2])
        # All three sequences must be parallel (one entry per layer).
        assert (len(self.filters) == len(self.sizes))
        assert (len(self.filters) == len(self.pool))
        assert (self.pool is not None)
    def to_str(self) -> str:
        """Short identifier: 'conv-<filters>_<sizes>_<pool>' (or 'conv-None')."""
        if (self.filters is None):
            return 'conv-None'
        if (len(self.filters) > 0):
            outstr = [list_to_str(list(self.filters))]
            if (self.sizes is not None):
                outstr.append(list_to_str(list(self.sizes)))
            if (self.pool is not None):
                outstr.append(list_to_str(list(self.pool)))
            return '-'.join(['conv', '_'.join(outstr)])
        return ''
|
@dataclass
class NetworkConfig(BaseConfig):
    """Dense-network hyperparameters for the learned update functions."""
    units: Sequence[int]       # hidden-layer widths
    activation_fn: str         # activation function name
    dropout_prob: float        # dropout probability
    use_batch_norm: bool = True
    def to_str(self):
        """Short identifier: 'net-<units>_dp-<dropout>_bn-<batchnorm>'."""
        ustr = '-'.join([str(int(i)) for i in self.units])
        dstr = f'dp-{self.dropout_prob:2.1f}'
        bstr = f'bn-{self.use_batch_norm}'
        return '-'.join(['net', '_'.join([ustr, dstr, bstr])])
|
@dataclass
class DynamicsConfig(BaseConfig):
    """L2HMC dynamics configuration for U1 or SU3 lattice gauge theory.

    `__post_init__` derives the full state shapes (`xshape`, `vshape`) and
    flattened dimension (`xdim`) from `group`, `nchains` and `latvolume`.
    """
    nchains: int              # number of parallel Markov chains
    group: str                # gauge group: 'U1' or 'SU3'
    latvolume: List[int]      # lattice volume: [nt, nx] (U1) or [nt, nx, ny, nz] (SU3)
    nleapfrog: int            # leapfrog steps per trajectory
    eps: float = 0.01         # leapfrog step size
    eps_hmc: float = 0.01     # step size for plain HMC comparison runs
    use_ncp: bool = True
    verbose: bool = True
    eps_fixed: bool = False
    use_split_xnets: bool = True
    use_separate_networks: bool = True
    merge_directions: bool = True
    def to_str(self) -> str:
        """Path-style identifier: '<group>/<lattice>/<nlf>/<flags>'."""
        latstr = '-'.join([str(i) for i in self.xshape[1:]])
        lfstr = f'nlf-{self.nleapfrog}'
        splitstr = f'xsplit-{self.use_split_xnets}'
        sepstr = f'sepnets-{self.use_separate_networks}'
        mrgstr = f'merge-{self.merge_directions}'
        return '/'.join([self.group, latstr, lfstr, splitstr, sepstr, mrgstr])
    def __post_init__(self):
        assert (self.group.upper() in ['U1', 'SU3'])
        # `eps_hmc` may be explicitly passed as None; default to 1/nleapfrog.
        if (self.eps_hmc is None):
            self.eps_hmc = (1.0 / self.nleapfrog)
        if (self.group.upper() == 'U1'):
            # 2D U(1): links are angles; x and v share the same shape.
            self.dim = 2
            (self.nt, self.nx) = self.latvolume
            self.xshape = (self.nchains, self.dim, *self.latvolume)
            self.vshape = (self.nchains, self.dim, *self.latvolume)
            assert (len(self.xshape) == 4)
            assert (len(self.latvolume) == 2)
            self.xdim = int(np.cumprod(self.xshape[1:])[(- 1)])
        elif (self.group.upper() == 'SU3'):
            # 4D SU(3): links are 3x3 matrices; momenta live in the
            # 8-dimensional Lie algebra.
            self.dim = 4
            self.link_shape = (3, 3)
            self.vec_shape = 8
            (self.nt, self.nx, self.ny, self.nz) = self.latvolume
            self.xshape = (self.nchains, self.dim, *self.latvolume, *self.link_shape)
            self.vshape = (self.nchains, self.dim, *self.latvolume, self.vec_shape)
            assert (len(self.xshape) == 8)
            assert (len(self.vshape) == 7)
            assert (len(self.latvolume) == 4)
            self.xdim = int(np.cumprod(self.xshape[1:])[(- 1)])
        else:
            # Unreachable after the assert above; kept as defensive guard.
            raise ValueError('Expected `group` to be one of `"U1", "SU3"`')
|
@dataclass
class LossConfig(BaseConfig):
    """Relative weights of the loss terms."""
    use_mixed_loss: bool = False
    charge_weight: float = 0.01  # topological-charge term weight
    rmse_weight: float = 0.0
    plaq_weight: float = 0.0     # plaquette term weight
    aux_weight: float = 0.0      # auxiliary (v) term weight -- presumably; TODO confirm
    def to_str(self) -> str:
        """Short identifier encoding all weights."""
        return '_'.join([f'qw-{self.charge_weight:2.1f}', f'pw-{self.plaq_weight:2.1f}', f'rw-{self.rmse_weight:2.1f}', f'aw-{self.aux_weight:2.1f}', f'mixed-{self.use_mixed_loss}'])
|
@dataclass
class InputSpec(BaseConfig):
    """Input shapes for the x- and v-networks, derived from `xshape`."""
    xshape: Sequence[int]  # full state shape, leading dim = batch/chains
    xnet: Optional[Dict[(str, (int | Sequence[int]))]] = None  # per-input shapes for the x-network
    vnet: Optional[Dict[(str, (int | Sequence[int]))]] = None  # per-input shapes for the v-network
    def to_str(self):
        """'-'-joined rendering of `xshape`."""
        return '-'.join([str(i) for i in self.xshape])
    def __post_init__(self):
        if (len(self.xshape) == 2):
            # Flat (batch, dim) input: v mirrors x exactly.
            self.xdim = self.xshape[(- 1)]
            self.vshape = self.xshape
            self.vdim = self.xshape[(- 1)]
        elif (len(self.xshape) > 2):
            # Structured input: last axis n is a matrix dimension; momenta
            # use n^2 - 1 components (Lie-algebra size -- presumably; TODO confirm).
            self.xdim: int = np.cumprod(self.xshape[1:])[(- 1)]
            lat_shape = self.xshape[:(- 2)]
            vd = ((self.xshape[(- 1)] ** 2) - 1)
            self.vshape: Sequence[int] = (*lat_shape, vd)
            self.vdim: int = np.cumprod(self.vshape[1:])[(- 1)]
        else:
            raise ValueError(f'Invalid `xshape`: {self.xshape}')
        # Default both networks to receiving (x, v)-shaped inputs.
        if (self.xnet is None):
            self.xnet = {'x': self.xshape, 'v': self.xshape}
        if (self.vnet is None):
            self.vnet = {'x': self.xshape, 'v': self.xshape}
|
@dataclass
class FlopsProfiler():
    """FLOPs-profiler settings (mirrors DeepSpeed's flops_profiler block -- presumably; TODO confirm)."""
    enabled: bool = False
    profile_step: int = 1
    module_depth: int = (- 1)  # -1: profile all module depths
    top_modules: int = 1
    detailed: bool = True
    output_file: Optional[((os.PathLike | str) | Path)] = None
    def __post_init__(self):
        # No derived state; present for interface uniformity.
        pass
|
@dataclass
class OptimizerConfig():
    """Optimizer selection plus free-form keyword parameters."""
    type: str  # optimizer name; NOTE: field name shadows builtin `type`
    params: Optional[dict] = field(default_factory=dict)
|
@dataclass
class fp16Config():
    """fp16 mixed-precision settings (mirrors DeepSpeed's fp16 block -- presumably; TODO confirm)."""
    enabled: bool
    auto_cast: bool = True
    fp16_master_weights_and_grads: bool = False
    min_loss_scale: float = 0.0
|
@dataclass
class CommsLogger():
    """Communication-logging settings (mirrors DeepSpeed's comms_logger block -- presumably; TODO confirm)."""
    enabled: bool
    verbose: bool = True
    prof_all: bool = True
    debug: bool = False
|
@dataclass
class AutoTuning():
    """Autotuning settings (mirrors DeepSpeed's autotuning block -- presumably; TODO confirm)."""
    enabled: bool
    arg_mappings: Optional[dict] = field(default_factory=dict)
|
@dataclass
class ZeroOptimization():
    """ZeRO optimizer-sharding stage (mirrors DeepSpeed's zero_optimization block -- presumably; TODO confirm)."""
    stage: int
|
@dataclass
class ExperimentConfig(BaseConfig):
    """Top-level experiment configuration composed from the sub-configs."""
    wandb: Any
    steps: Steps
    framework: str
    loss: LossConfig
    network: NetworkConfig
    conv: ConvolutionConfig
    net_weights: NetWeights
    dynamics: DynamicsConfig
    learning_rate: LearningRateConfig
    annealing_schedule: AnnealingSchedule
    gradient_accumulation_steps: int = 1
    restore: bool = True
    save: bool = True
    c1: float = 0.0
    port: str = '2345'
    compile: bool = True
    profile: bool = False
    init_aim: bool = True
    init_wandb: bool = True
    use_wandb: bool = True
    use_tb: bool = False
    debug_mode: bool = False
    default_mode: bool = True
    print_config: bool = True
    precision: str = 'float32'
    ignore_warnings: bool = True
    backend: str = 'hvd'
    seed: Optional[int] = None
    ds_config_path: Optional[Any] = None
    # BUG FIX: `name` was declared twice in the original; keep one declaration.
    name: Optional[str] = None
    width: Optional[int] = None
    nchains: Optional[int] = None
    compression: Optional[str] = None

    def __post_init__(self):
        self.env_config = EnvConfig()
        if self.seed is None:
            import numpy as np
            # BUG FIX: `np.random.randint(0)` raises ValueError (empty
            # range [0, 0)); draw an actual random seed instead.
            self.seed = int(np.random.randint(0, 2 ** 31 - 1))
            logger.warning(f'No seed specified, using random seed: {self.seed}')
        # Reuse the EnvConfig built above instead of probing the
        # environment a second time (original created two instances).
        self.env = self.env_config
        self.ds_config = {}
        self.xdim = self.dynamics.xdim
        self.xshape = self.dynamics.xshape
        self.micro_batch_size = self.dynamics.nchains
        self.global_batch_size = (self.env.world_size * self.micro_batch_size) * self.gradient_accumulation_steps
        if self.ds_config_path is None:
            fpath = Path(CONF_DIR).joinpath('ds_config.yaml')
            self.ds_config_path = fpath.resolve().as_posix()
        # Normalize precision synonyms to canonical names.
        if self.precision in FP16_SYNONYMS:
            self.precision = 'fp16'
        elif self.precision in BF16_SYNONYMS:
            self.precision = 'bf16'
        elif self.precision in FP32_SYNONYMS:
            self.precision = 'float32'
        elif self.precision in FP64_SYNONYMS:
            self.precision = 'float64'
        w = int(os.environ.get('COLUMNS', 200))
        self.width = w if self.width is None else self.width
        # Pick / validate the distributed backend per framework.
        if self.framework in SYNONYMS['tensorflow']:
            self.backend = 'hvd'
        elif self.framework in SYNONYMS['pytorch']:
            if self.backend is None:
                logger.warning('Backend not specified, using DDP')
                self.backend = 'DDP'
            assert self.backend.lower() in ['hvd', 'horovod', 'ddp', 'ds', 'deepspeed']
        else:
            raise ValueError(f'Unexpected value for framework: {self.framework}')
        if self.debug_mode:
            self.compile = False
        self.annealing_schedule.setup(nera=self.steps.nera, nepoch=self.steps.nepoch)

    def load_ds_config(self, fpath: Optional[os.PathLike]=None) -> dict:
        """Load the DeepSpeed config (JSON or YAML) from `fpath` or the default path."""
        fname = self.ds_config_path if fpath is None else fpath
        assert fname is not None
        ds_config_path = Path(fname)
        logger.info(f'Loading DeepSpeed Config from: {ds_config_path.as_posix()}')
        if ds_config_path.suffix == '.json':
            with ds_config_path.open('r') as f:
                ds_config = json.load(f)
            return ds_config
        if ds_config_path.suffix == '.yaml':
            import yaml
            with ds_config_path.open('r') as stream:
                ds_config = dict(yaml.safe_load(stream))
            return ds_config
        raise TypeError('Unexpected FileType')

    def set_ds_config(self, ds_config: dict) -> None:
        """Store a (pre-loaded) DeepSpeed config dict."""
        self.ds_config = ds_config

    def to_str(self) -> str:
        """Path-style identifier combining the sub-config identifiers."""
        dynstr = self.dynamics.to_str()
        constr = self.conv.to_str()
        netstr = self.network.to_str()
        return '/'.join([dynstr, constr, netstr, self.framework])

    def get_checkpoint_dir(self) -> Path:
        """Checkpoint directory derived from this config's identifier."""
        return Path(CHECKPOINTS_DIR).joinpath(self.to_str())

    def rank(self):
        """Return this process's distributed rank for the active framework/backend.

        NOTE(review): returns None implicitly when framework/backend fall
        through every branch (e.g. the pytorch 'DDP' key lookup) -- confirm
        callers tolerate that.
        """
        if self.framework in SYNONYMS['pytorch']:
            if self.backend.lower() in SYNONYMS['horovod']:
                import horovod.torch as hvd
                if not hvd.is_initialized():
                    hvd.init()
                return hvd.rank()
            elif self.backend.lower() in SYNONYMS['DDP']:
                return int(os.environ.get('RANK', 0))
            elif self.backend.lower() in SYNONYMS['deepspeed']:
                import torch.distributed as dist
                return dist.get_rank()
        elif self.framework in SYNONYMS['tensorflow']:
            import horovod.tensorflow as hvd
            if not hvd.is_initialized():
                hvd.init()
            return hvd.rank()
|
@dataclass
class AnnealingSchedule(BaseConfig):
    """Linear annealing schedule for beta over training eras."""
    beta_init: float                     # starting beta
    beta_final: Optional[float] = 1.0    # ending beta (clamped to >= beta_init)
    dynamic: bool = False                # allow dynamic adjustment (cf. Annealear)
    def to_str(self) -> str:
        """Short identifier: 'bi-<beta_init>_bf-<beta_final>'."""
        return f'bi-{self.beta_init}_bf-{self.beta_final}'
    def __post_init__(self):
        # Clamp: beta_final must be >= beta_init (annealing increases beta).
        if ((self.beta_final is None) or (self.beta_final < self.beta_init)):
            logger.warning(f'''AnnealingSchedule.beta_final must be >= {self.beta_init}, but received: {self.beta_final}.
Setting self.beta_final to {self.beta_init}''')
            self.beta_final = float(self.beta_init)
        assert (isinstance(self.beta_final, float) and (self.beta_final >= self.beta_init))
    def update(self, beta_init: Optional[float]=None, beta_final: Optional[float]=None):
        """Overwrite the schedule endpoints (logged loudly)."""
        logger.warning('Updating annealing schedule!')
        if (beta_init is not None):
            logger.warning(f'annealing_schedule.beta_init = {beta_init:.3f}')
            self.beta_init = beta_init
        if (beta_final is not None):
            logger.warning(f'annealing_schedule.beta_final = {beta_final:.3f}')
            self.beta_final = beta_final
    def setup(self, nera: Optional[int]=None, nepoch: Optional[int]=None, steps: Optional[Steps]=None, beta_init: Optional[float]=None, beta_final: Optional[float]=None) -> dict:
        """Materialize the per-era beta values; returns {era (str): beta}.

        Sets `self.betas`, `self._dbeta` (per-step increment) and
        `self.beta_dict` as side effects.
        """
        if (nera is None):
            assert (steps is not None)
            nera = steps.nera
        if (nepoch is None):
            assert (steps is not None)
            nepoch = steps.nepoch
        if (beta_init is None):
            beta_init = self.beta_init
        if (beta_final is None):
            beta_final = (self.beta_final if (self.beta_final is not None) else self.beta_init)
        self.betas = np.linspace(beta_init, beta_final, nera)
        # NOTE(review): without `steps`, total=1 makes _dbeta the whole
        # beta range per step -- confirm that is intended.
        total = (steps.total if (steps is not None) else 1)
        self._dbeta = ((beta_final - beta_init) / total)
        self.beta_dict = {str(era): self.betas[era] for era in range(nera)}
        return self.beta_dict
|
@dataclass
class Annealear():
    'Dynamically adjust annealing schedule during training.'
    schedule: AnnealingSchedule
    patience: int                      # max times a beta may repeat before backing off
    min_delta: Optional[float] = None

    def __post_init__(self):
        self.wait = 0
        self.best = np.inf             # np.inf: np.Inf was removed in numpy 2
        self._current_era = 0
        self._current_beta = self.schedule.beta_init
        self._epoch = 0
        self._count = 0
        self.betas = []                # beta used at each epoch
        self.loss = []                 # per-update loss history
        self.losses = {}               # str(era) -> {'beta': ..., 'loss': [...]}
        self._reset()

    def _reset(self):
        self.wait = 0

    def update(self, loss: float):
        """Record one loss observation."""
        self._epoch += 1
        self.loss.append(loss)

    @staticmethod
    def avg_diff(y: list[float], x: Optional[list[float]]=None, *, drop: Optional[(int | float)]=None) -> float:
        """Average finite difference: mean(dy), or mean(dy)/mean(dx) when `x` given.

        `drop` discards leading entries first: an `int > 1` drops that many,
        a `float < 1` drops that fraction of `len(y)` (matching the
        module-level `avg_diff`).
        """
        if x is not None:
            assert len(x) == len(y)
        if drop is not None:
            if isinstance(drop, int):
                if drop <= 1:
                    raise ValueError('Expected `drop` to be an int > 1')
                y = y[drop:]
                if x is not None:
                    x = x[drop:]
            elif isinstance(drop, float):
                # BUG FIX: original required drop > 1. for a *fraction* and
                # then sliced with a float (`y[frac:]` -> TypeError).
                if drop >= 1.0:
                    raise ValueError('Expected `drop` to be a float < 1.')
                frac = int(drop * len(y))
                y = y[frac:]
                if x is not None:
                    x = x[frac:]
            else:
                raise ValueError('Expected drop to be one of `int` or `float`.')
        dyavg = np.subtract(y[1:], y[:(- 1)]).mean()
        if x is not None:
            dxavg = np.subtract(x[1:], x[:(- 1)]).mean()
            return (dyavg / dxavg)
        return dyavg

    def start_epoch(self, era: int, beta: float):
        """Begin a new era at `beta`; remember previous beta and best loss."""
        self.losses[f'{era}'] = {'beta': beta, 'loss': []}
        # BUG FIX: original indexed self.betas[-1] unconditionally, which
        # raised IndexError on the very first call.
        self._prev_beta = self.betas[(- 1)] if self.betas else beta
        self._current_era = era
        self._current_beta = beta
        self.betas.append(beta)
        self._prev_best = np.inf
        # BUG FIX: keys of self.losses are strings (f'{era}'), so the
        # original int membership test `(era - 1) in self.losses` never hit.
        if str(era - 1) in self.losses:
            self._prev_best = np.min(self.losses[str((era - 1))]['loss'])

    def end_epoch(self, losses: list[float], era: Optional[int]=None, beta: Optional[float]=None, drop: Optional[(int | float)]=None) -> float:
        """Choose the next beta: advance if improving, back off after
        `patience` repeats, otherwise hold."""
        current_era = (self._current_era if (era is None) else era)
        current_beta = (self._current_beta if (beta is None) else beta)
        prev_beta = self._prev_beta
        new_beta = (current_beta + self.schedule._dbeta)
        self.losses[f'{current_era}'] = {'beta': current_beta, 'loss': losses}
        new_best = np.min(losses)
        avg_slope = self.avg_diff(losses, drop=drop)
        # Improving (new best, or loss trending down): advance beta.
        if (new_best < self._prev_best) or (avg_slope < 0):
            return new_beta
        # Stalled too long at this beta: back off to the previous one.
        current_beta_count = Counter(self.betas).get(current_beta)
        if (current_beta_count is not None) and isinstance(current_beta_count, int) and (current_beta_count > self.patience):
            return prev_beta
        return current_beta
|
def get_config(overrides: Optional[list[str]]=None):
    """Compose and return the Hydra config, applying any `overrides`."""
    from hydra import initialize_config_dir, compose
    from hydra.core.global_hydra import GlobalHydra
    # Hydra keeps global state between calls; clear it so repeated
    # composition works (e.g. in notebooks).
    GlobalHydra.instance().clear()
    if overrides is None:
        overrides = []
    with initialize_config_dir(CONF_DIR.absolute().as_posix(), version_base=None):
        cfg = compose('config', overrides=overrides)
    return cfg
|
def get_experiment(overrides: Optional[list[str]]=None, build_networks: bool=True, keep: Optional[(str | list[str])]=None, skip: Optional[(str | list[str])]=None):
    """Compose the config and construct the framework-specific Experiment."""
    cfg = get_config(overrides)
    framework = cfg.framework
    if framework == 'pytorch':
        from l2hmc.experiment.pytorch.experiment import Experiment
    elif framework == 'tensorflow':
        from l2hmc.experiment.tensorflow.experiment import Experiment
    else:
        raise ValueError(f'Unexpected value for `cfg.framework: {cfg.framework}')
    return Experiment(cfg, keep=keep, skip=skip, build_networks=build_networks)
|
@dataclass
class DiffusionConfig():
    """Configuration for diffusion-model-assisted MCMC sampling.

    Args:
        log_likelihood_fn: Log-likelihood function to be sampled, defined
            over a 1D parameter array `x` with `dim` dimensions.
        dim: Number of dimensions of the likelihood function.
        low_bound: Per-parameter lower bounds.
        high_bound: Per-parameter upper bounds.
        initial_samples: Starting samples; for multi-modal targets supply
            several samples from each mode so the diffusion model can jump
            between them.
        train_iters: Number of times to retrain the diffusion model; more
            retrains improve performance at the cost of runtime.
        samples_per_retrain: Diffusion samples generated between retrains
            (total samples = retrains * samples_per_retrain).
        outdir: Where to save results.
        nsteps: Noising steps in the forward / reverse diffusion process.
            Increase if the model fails to closely reproduce the target;
            decrease if training is too slow.
        sigma: Width of the pure Metropolis-Hastings Gaussian proposal.
            Increase if stuck in local minima or under-exploring.
        diffusion_prob: Probability of drawing from the diffusion model;
            higher values help jump between modes of multi-modal targets.
        bins: Histogram bins per parameter used for the Q proposal-function
            weights; increase for finer resolution if the diffusion
            acceptance rate stays very low (at the cost of retrain time).

    NOTE(review): the original docstring also documented `retrains`,
    `noise_width`, `beta_1`, `beta_2` and `plot_initial`, which are not
    fields here -- presumably handled elsewhere; confirm against callers.
    """
    log_likelihood_fn: Callable[([torch.Tensor], torch.Tensor)]
    dim: int
    low_bound: torch.Tensor
    high_bound: torch.Tensor
    initial_samples: torch.Tensor
    train_iters: int
    samples_per_retrain: int
    outdir: os.PathLike
    nsteps: int = 20
    sigma: float = 0.3
    diffusion_prob: float = 0.5
    bins: int = 20
|
class BaseExperiment(ABC):
'Convenience class for running framework independent experiments.'
def __init__(self, cfg: DictConfig) -> None:
super().__init__()
self._created = get_timestamp('%Y-%m-%d-%H%M%S')
self.cfg = cfg
self.config: ExperimentConfig = instantiate(cfg)
assert (self.config.framework.lower() in ['pt', 'tf', 'pytorch', 'torch', 'tensorflow'])
self._is_built = False
self.run = None
self.arun = None
self.trainer: BaseTrainer
(self._outdir, self._jobdirs) = self.get_outdirs()
@abstractmethod
def visualize_model(self, x: Optional[Any]=None) -> None:
pass
@abstractmethod
def train(self) -> dict:
pass
@abstractmethod
def evaluate(self, job_type: str) -> dict:
pass
@abstractmethod
def build_trainer(self, dynamics, loss_fn):
pass
@abstractmethod
def get_summary_writer(self):
pass
@abstractmethod
def update_wandb_config(self, run_id: Optional[str]=None) -> None:
'Must Be overridden to specify uniquie run_id for W&B run'
pass
@abstractmethod
def init_wandb(self):
pass
def init_aim(self) -> aim.Run:
return self._init_aim()
def _init_aim(self) -> aim.Run:
from aim import Run
run = Run(repo=AIM_DIR.as_posix(), experiment='l2hmc-qcd', log_system_params=True)
run['config'] = OmegaConf.to_container(self.cfg, resolve=True, throw_on_missing=True)
run['outdir'] = str(self._outdir.as_posix())
run['hostname'] = str(os.environ.get('HOST', 'localhost'))
return run
def _update_wandb_config(self, device: str, run_id: Optional[str]=None) -> None:
if (run_id is not None):
self.config.wandb.setup.update({'id': run_id})
latstr = 'x'.join([str(i) for i in self.config.dynamics.latvolume])
self.config.wandb.setup.update({'tags': [f'{self.config.framework}', f'{self.config.backend}', f'nlf-{self.config.dynamics.nleapfrog}', f'beta_final-{self.config.annealing_schedule.beta_final}', f'{latstr}', f'{self.config.dynamics.group}']})
def _init_wandb(self):
if ((self.run is not None) and (self.run is wandb.run)):
raise ValueError('WandB already initialized!')
from wandb.util import generate_id
run_id = generate_id()
self.update_wandb_config(run_id=run_id)
wandb.tensorboard.patch(root_logdir=os.getcwd())
run = wandb.init(dir=os.getcwd(), **self.config.wandb.setup)
assert ((run is not None) and (run is wandb.run))
if (wandb.run is not None):
log.critical(f'π {wandb.run.name}')
log.critical(f'π {wandb.run.url}')
log.critical(f'π/: {wandb.run.dir}')
assert ((run is wandb.run) and (run is not None))
wandb.define_metric('dQint_eval', summary='mean')
run.log_code(HERE.as_posix())
cfg_dict = OmegaConf.to_container(self.cfg, resolve=True, throw_on_missing=True)
run.config.update(cfg_dict)
now = datetime.datetime.now()
dstr = now.strftime('%Y-%m-%d')
tstr = now.strftime('%H:%M:%S')
nstr = now.strftime('%Y-%m-%d-%H%M%S')
run.config.update({'DATE': dstr, 'TIME': tstr, 'TSTAMP': nstr})
env = {k: v for (k, v) in dict(os.environ).items() if (not k.startswith('_ModuleTable'))}
for key in (ENV_FILTERS + ['LS_COLORS', 'LSCOLORS', 'PS1']):
_ = env.pop(key, None)
run.config.update({'env': env})
exec = os.environ.get('EXEC', None)
if (exec is not None):
run.config['exec'] = exec
hostfile = os.environ.get('COBALT_NODEFILE', os.environ.get('PBS_NODEFILE', None))
if (hostfile is not None):
if (hpath := Path(hostfile).resolve()).is_file():
hosts = []
with hpath.open('r') as f:
hosts.extend((f.readline().rstrip('\n') for _ in f))
run.config['hosts'] = hosts
try:
hostname = socket.gethostbyaddr(socket.gethostname())[0].lower()
except socket.herror:
log.critical('Error getting hostname! Using `localhost`')
hostname = 'localhost'
run.config['hostname'] = hostname
machine = os.environ.get('MACHINE', None)
if (machine is not None):
run.config['machine'] = machine
elif ('thetagpu' in hostname):
run.config['machine'] = 'ThetaGPU'
elif ('x3' in hostname):
run.config['machine'] = 'Polaris'
elif ('x1' in hostname):
run.config['machine'] = 'Sunspot'
elif ('nid' in hostname):
run.config['machine'] = 'Perlmutter'
else:
run.config['machine'] = hostname
return run
def get_outdirs(self) -> tuple[(Path, dict[(str, Path)])]:
    """Resolve the experiment's root output directory and create the
    per-job subdirectories.

    Returns:
        (outdir, jobdirs): root output dir, and a mapping from job type
        ('train' / 'eval' / 'hmc') to its (created) directory.
    """
    outdir = self.cfg.get('outdir', None)
    if (outdir is None):
        outdir = Path(os.getcwd())
        if is_interactive():
            # Interactive sessions nest outputs under a per-creation,
            # per-framework subdirectory.
            framework = self.cfg.get('framework', None)
            # NOTE(review): if `framework` is None this joinpath will raise;
            # presumably cfg always carries a framework here — confirm.
            outdir = outdir.joinpath('outputs', self._created, framework)
    jobdirs = {'train': Path(outdir).joinpath('train'), 'eval': Path(outdir).joinpath('eval'), 'hmc': Path(outdir).joinpath('hmc')}
    for val in jobdirs.values():
        val.mkdir(exist_ok=True, parents=True)
    return (outdir, jobdirs)
def get_jobdir(self, job_type: str) -> Path:
    """Create (if needed) and return the output directory for `job_type`.

    Also records the directory as `self.<job_type>_dir`, mirrors it into the
    active wandb run config (when one exists), and appends it to OUTDIRS_FILE.
    """
    jobdir = self._outdir.joinpath(job_type)
    jobdir.mkdir(exist_ok=True, parents=True)
    assert (jobdir is not None)
    setattr(self, f'{job_type}_dir', jobdir)
    if (hasattr(self, 'run') and (getattr(self, 'run', None) is not None)):
        assert ((self.run is not None) and (self.run is wandb.run))
        self.run.config[f'{job_type}_dir'] = jobdir
    with open(OUTDIRS_FILE, 'a') as f:
        # BUG FIX: terminate the entry with a newline — the original appended
        # bare paths, so successive calls glued paths into one unreadable line.
        f.write(Path(jobdir).resolve().as_posix() + '\n')
    return jobdir
def _get_summary_dir(self, job_type: str) -> str:
    """Return (creating it if necessary) the `summaries/` directory for
    `job_type`, as a POSIX path string."""
    summary_dir = self.get_jobdir(job_type=job_type).joinpath('summaries')
    summary_dir.mkdir(exist_ok=True, parents=True)
    return summary_dir.as_posix()
def save_timers(self, job_type: str, outdir: Optional[os.PathLike]=None) -> None:
    """Persist the StepTimer data for `job_type` under `<outdir>/timers/`.

    Silently does nothing when the trainer has no timer for this job type.
    """
    # Fall back to the per-job directory recorded at setup time.
    outdir = (self._jobdirs.get(job_type, None) if (outdir is None) else outdir)
    assert (outdir is not None)
    timerdir = Path(outdir).joinpath('timers')
    timerdir.mkdir(exist_ok=True, parents=True)
    timers = getattr(self.trainer, 'timers', None)
    if (timers is not None):
        timer = timers.get(job_type, None)
        if (timer is not None):
            global_rank = getattr(self.trainer, 'global_rank', 0)
            rank = getattr(self.trainer, 'rank', 0)
            assert isinstance(timer, StepTimer)
            # One file per (job, global_rank, rank) so distributed runs don't collide.
            fname = f'step-timer-{job_type}-{global_rank}-{rank}'
            timer.save_and_write(outdir=timerdir, fname=fname)
def save_summaries(self, summaries: list[str], job_type: str) -> None:
    """Append `summaries` (one entry per line) to `<jobdir>/summaries.txt`."""
    outdir = self.get_jobdir(job_type)
    outfile = outdir.joinpath('summaries.txt')
    if not summaries:
        # Nothing to write; avoid emitting a stray blank line.
        return
    with open(outfile.as_posix(), 'a') as f:
        # BUG FIX: terminate with a newline so consecutive appends do not glue
        # the last line of one call onto the first line of the next.
        f.write('\n'.join(summaries) + '\n')
def save_dataset(self, job_type: str, save_data: bool=True, dset: Optional[xr.Dataset]=None, tables: Optional[dict]=None, nchains: Optional[int]=None, outdir: Optional[os.PathLike]=None, therm_frac: Optional[float]=None, logfreq: int=1) -> xr.Dataset:
    """Build the dataset for `job_type` from the trainer's history, persist and
    analyze it, and push dQint summaries to WandB / Aim.

    Raises:
        ValueError: when the trainer has no history for `job_type`.
    """
    summary = self.trainer.summaries.get(job_type, None)
    history = self.trainer.histories.get(job_type, None)
    summaries = []
    if (summary is not None):
        summaries = [f'{k} {v}' for (k, v) in summary.items()]
        self.save_summaries(summaries, job_type=job_type)
    if (history is None):
        raise ValueError(f'Unable to recover history for {job_type}')
    assert (history is not None)
    # NOTE: any `dset` passed in is ignored — it is always rebuilt from history.
    dset = history.get_dataset(therm_frac=therm_frac)
    assert isinstance(dset, xr.Dataset)
    # Plot at most nchains chains; default caps at max(64, nchains // 8).
    chains_to_plot = int(min(self.cfg.dynamics.nchains, max(64, (self.cfg.dynamics.nchains // 8))))
    chains_to_plot = (nchains if (nchains is not None) else chains_to_plot)
    outdir = (self._outdir if (outdir is None) else outdir)
    assert (outdir is not None)
    self.save_timers(job_type=job_type, outdir=outdir)
    import l2hmc.configs as configs
    assert isinstance(self.config, (configs.ExperimentConfig, ExperimentConfig))
    _ = save_and_analyze_data(dset, run=self.run, arun=self.arun, logfreq=logfreq, rank=self.config.env.rank, outdir=outdir, tables=tables, summaries=summaries, nchains=chains_to_plot, job_type=job_type, save_data=save_data, framework=self.config.framework)
    log.info('Done saving and analyzing data.')
    log.info('Creating summaries for WandB, Aim')
    dQint = dset.data_vars.get('dQint', None)
    if (dQint is not None):
        dQint = dQint.values
        # Replace NaNs so the mean / distribution stay finite.
        dQint = np.where(np.isnan(dQint), np.zeros_like(dQint), dQint)
        if (self.run is not None):
            import wandb
            assert (self.run is wandb.run)
            # BUG FIX: the original assigned the raw array and then immediately
            # overwrote it under the same key with the mean; keep the surviving
            # value (the mean) and drop the dead assignment.
            self.run.summary[f'dQint_{job_type}'] = dQint.mean()
        if (self.arun is not None):
            from aim import Distribution
            assert isinstance(self.arun, aim.Run)
            dQdist = Distribution(dQint)
            # BUG FIX: the original tracked the identical distribution twice
            # back-to-back; once is enough.
            self.arun.track(dQdist, name='dQint', context={'subset': job_type})
    return dset
|
def train_step(x: torch.Tensor, beta: torch.Tensor, trainer: Trainer) -> tuple[(torch.Tensor, dict)]:
    """Run one MD trajectory + gradient update through `trainer`.

    Returns:
        (xout, metrics): the (detached) updated configuration and the
        per-step metrics (including 'loss').
    """
    (xout, metrics) = trainer.dynamics_engine((x, beta))
    mcstates = metrics.pop('mc_states')
    loss = trainer.calc_loss(xinit=mcstates.init.x, xprop=mcstates.proposed.x, acc=metrics['acc'])
    # Scrub NaNs from the incoming gradient so one bad proposal
    # cannot poison the whole update.
    loss.register_hook((lambda grad: grad.nan_to_num()))
    trainer.optimizer.zero_grad()
    loss.backward()
    # BUG FIX: `torch.nn.utils.clip_grad.clip_grad_norm` is the long-deprecated
    # alias; use the supported in-place `clip_grad_norm_`.
    torch.nn.utils.clip_grad_norm_(trainer.dynamics.parameters(), max_norm=0.1)
    trainer.optimizer.step()
    metrics |= {'loss': loss.item()}
    print_dict(metrics, grab=False)
    return (xout.detach(), metrics)
|
def train(nsteps: int, trainer: Trainer, beta: (float | torch.Tensor), nlog: int=1, nprint: int=1, x: Optional[torch.Tensor]=None, grab: Optional[bool]=None) -> tuple[(torch.Tensor, dict)]:
    """Run `nsteps` of training with `trainer` at inverse coupling `beta`.

    Metrics are printed every `nprint` steps and accumulated into a
    per-key history every `nlog` steps (step 0 is skipped for both).

    Returns:
        (x, history): final configuration and {metric: [values]} history.
    """
    beta = (torch.tensor(beta) if isinstance(beta, float) else beta)
    history: dict = {}
    if (x is None):
        # BUG FIX: draw the initial state from the *passed-in* trainer; the
        # original referenced a global `exp`, raising NameError when x is None.
        state = trainer.dynamics.random_state(beta)
        x = state.x
    assert (x is not None)
    for step in range(nsteps):
        log.info(f'STEP: {step}')
        (x, metrics) = train_step(x, beta=beta, trainer=trainer)
        if ((step > 0) and ((step % nprint) == 0)):
            print_dict(metrics, grab=grab)
        if ((step > 0) and ((step % nlog) == 0)):
            for (key, val) in metrics.items():
                # setdefault replaces the original try/except KeyError dance.
                history.setdefault(key, []).append(val)
    return (x, history)
|
def evaluate(nsteps: int, exp: Experiment, beta: (float | torch.Tensor), nlog: int=1, nprint: int=1, job_type: str='eval', eps: Optional[float]=None, nleapfrog: Optional[int]=None, x: Optional[torch.Tensor]=None, grab: Optional[bool]=None) -> tuple[(torch.Tensor, BaseHistory)]:
    """Run `nsteps` of evaluation ('eval') or HMC ('hmc') with `exp.trainer`.

    Returns:
        (x, history): final configuration and the accumulated BaseHistory.

    Raises:
        ValueError: if `job_type` is neither 'eval' nor 'hmc'.
    """
    history = BaseHistory()
    beta_ = (beta.item() if isinstance(beta, torch.Tensor) else beta)
    if (x is None):
        state = exp.trainer.dynamics.random_state(beta_)
        x = state.x
    assert (x is not None)
    log.info(f'Running {nsteps} steps of {job_type} at beta={beta_:.4f}')
    # BUG FIX: the original compared the *bound method* `job_type.lower` to
    # 'hmc' (always False); call it.  Also guard the eps format — eps may be
    # None, in which case hmc_step falls back to its own default.
    if ((job_type.lower() == 'hmc') and (eps is not None)):
        log.info(f'Using nleapfrog={nleapfrog} steps w/ eps={eps:.4f}')
    for step in range(nsteps):
        log.info(f'STEP: {step}')
        if (job_type.lower() == 'eval'):
            (x, metrics) = exp.trainer.eval_step((x, beta_))
        elif (job_type.lower() == 'hmc'):
            (x, metrics) = exp.trainer.hmc_step((x, beta), eps=eps, nleapfrog=nleapfrog)
        else:
            raise ValueError('Expected `job_type` to be one of [`eval`, `hmc`]')
        if ((step > 0) and ((step % nprint) == 0)):
            print_dict(metrics, grab=grab)
        if ((step > 0) and ((step % nlog) == 0)):
            history.update(metrics)
    return (x, history)
|
class Experiment(BaseExperiment):
    """PyTorch implementation of the l2hmc Experiment: wires up distributed
    setup, WandB/Aim tracking (rank 0 only), the Trainer, and exposes
    train / evaluate / visualization entry points."""

    def __init__(self, cfg: DictConfig, build_networks: bool=True, keep: Optional[(str | list[str])]=None, skip: Optional[(str | list[str])]=None) -> None:
        super().__init__(cfg=cfg)
        if (not isinstance(self.config, ExperimentConfig)):
            self.config = instantiate(cfg)
        assert isinstance(self.config, ExperimentConfig)
        self.ckpt_dir = self.config.get_checkpoint_dir()
        # Distributed topology (size / rank / local_rank) from the backend.
        dsetup = setup_torch_distributed(self.config.backend)
        self._size = dsetup['size']
        self._rank = dsetup['rank']
        self._local_rank = dsetup['local_rank']
        run = None
        arun = None
        # Only rank 0 talks to the experiment trackers.
        if ((self._rank == 0) and self.config.init_wandb):
            import wandb
            log.warning(f'Initialize WandB from {self._rank}:{self._local_rank}')
            # Reuse an already-active wandb run when one exists.
            run = (super()._init_wandb() if (wandb.run is None) else wandb.run)
            run.config['SIZE'] = self._size
        if ((self._rank == 0) and self.config.init_aim):
            log.warning(f'Initializing Aim from {self._rank}:{self._local_rank}')
            arun = self.init_aim()
            arun['SIZE'] = self._size
        if (arun is not None):
            if torch.cuda.is_available():
                arun['ngpus'] = self._size
            else:
                arun['ncpus'] = self._size
        self.trainer: Trainer = self.build_trainer(keep=keep, skip=skip, build_networks=build_networks, ckpt_dir=self.ckpt_dir)
        self.run = run
        self.arun = arun
        self._is_built = True
        assert callable(self.trainer.loss_fn)
        assert isinstance(self.trainer, Trainer)
        assert isinstance(self.trainer.dynamics, (ptDynamics, Dynamics))
        assert isinstance(self.trainer.lattice, (LatticeU1, LatticeSU3))

    def set_net_weights(self, net_weights: NetWeights):
        """Push `net_weights` into every leapfrog layer (both x-nets and the v-net)."""
        from l2hmc.network.pytorch.network import LeapfrogLayer
        for step in range(self.config.dynamics.nleapfrog):
            xnet0 = self.trainer.dynamics._get_xnet(step, first=True)
            xnet1 = self.trainer.dynamics._get_xnet(step, first=False)
            vnet = self.trainer.dynamics._get_vnet(step)
            assert isinstance(xnet0, LeapfrogLayer)
            assert isinstance(xnet1, LeapfrogLayer)
            assert isinstance(vnet, LeapfrogLayer)
            xnet0.set_net_weight(net_weights.x)
            xnet1.set_net_weight(net_weights.x)
            vnet.set_net_weight(net_weights.v)

    def visualize_model(self, x: Optional[Tensor]=None):
        """Render graphviz diagrams of the step-0 x/v sub-networks into
        `<outdir>/network_diagrams/`.

        NOTE(review): the `x` argument is ignored — a fresh random state is
        always drawn; confirm whether callers expect `x` to be used.
        """
        from torchviz import make_dot
        device = self.trainer.device
        state = self.trainer.dynamics.random_state(1.0)
        (m, _) = self.trainer.dynamics._get_mask(0)
        beta = state.beta.to(device)
        m = m.to(device)
        x = state.x.to(device)
        v = state.v.to(device)
        outdir = Path(self._outdir).joinpath('network_diagrams')
        outdir.mkdir(exist_ok=True, parents=True)
        vnet = self.trainer.dynamics._get_vnet(0)
        xnet = self.trainer.dynamics._get_xnet(0, first=True)
        with torch.autocast(device_type=('cuda' if torch.cuda.is_available() else 'cpu')):
            # One forward pass through each sub-network to build the graphs.
            force = self.trainer.dynamics.grad_potential(x, beta)
            (sv, tv, qv) = self.trainer.dynamics._call_vnet(0, (x, force))
            xm = self.trainer.dynamics.unflatten((m * self.trainer.dynamics.flatten(x)))
            (sx, tx, qx) = self.trainer.dynamics._call_xnet(0, (xm, v), first=True)
        outputs = {'v': {'scale': sv, 'transl': tv, 'transf': qv}, 'x': {'scale': sx, 'transl': tx, 'transf': qx}}
        for (key, val) in outputs.items():
            for (kk, vv) in val.items():
                net = (xnet if (key == 'x') else vnet)
                net = net.to(vv.dtype)
                _ = make_dot(vv, params=dict(net.named_parameters()), show_attrs=True, show_saved=True).save(outdir.joinpath(f'{key}-{kk}.gv').as_posix())

    def update_wandb_config(self, run_id: Optional[str]=None) -> None:
        """Record device type ('gpu'/'cpu') and optional run id in the wandb config."""
        device = ('gpu' if torch.cuda.is_available() else 'cpu')
        self._update_wandb_config(device=device, run_id=run_id)

    def build_trainer(self, build_networks: bool=True, keep: Optional[(str | list[str])]=None, skip: Optional[(str | list[str])]=None, ckpt_dir: Optional[PathLike]=None) -> Trainer:
        """Construct the Trainer, defaulting to this experiment's checkpoint dir."""
        ckpt_dir = (self.ckpt_dir if (ckpt_dir is None) else ckpt_dir)
        return Trainer(self.cfg, build_networks=build_networks, skip=skip, keep=keep, ckpt_dir=ckpt_dir)

    def init_wandb(self):
        return super()._init_wandb()

    def get_summary_writer(self):
        # TensorBoard writer rooted at the experiment output dir.
        return SummaryWriter(self._outdir)

    def train(self, nchains: Optional[int]=None, x: Optional[Tensor]=None, skip: Optional[(str | list[str])]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, nprint: Optional[int]=None, nlog: Optional[int]=None, beta: Optional[((float | list[float]) | dict[(str, float)])]=None, save_data: bool=True):
        """Run training (dynamic or fixed annealing schedule) and, on the
        orchestrator rank, save/analyze the resulting dataset.

        NOTE(review): the `writer` argument is ignored — it is unconditionally
        rebuilt here (rank 0 only); confirm callers don't rely on passing one.
        """
        jobdir = self.get_jobdir(job_type='train')
        writer = (self.get_summary_writer() if (self._rank == 0) else None)
        tstart = time.time()
        if self.config.annealing_schedule.dynamic:
            output = self.trainer.train_dynamic(x=x, nera=nera, nepoch=nepoch, run=self.run, arun=self.arun, writer=writer, train_dir=jobdir, skip=skip, beta=beta)
        else:
            output = self.trainer.train(x=x, nera=nera, nepoch=nepoch, run=self.run, arun=self.arun, writer=writer, train_dir=jobdir, skip=skip, beta=beta, nprint=nprint, nlog=nlog)
        log.info(f'Training took: {(time.time() - tstart):.4f}')
        if self.trainer._is_orchestrator:
            dset = self.save_dataset(nchains=nchains, save_data=save_data, job_type='train', outdir=jobdir, tables=output.get('tables', None))
            output['dataset'] = dset
        if (writer is not None):
            writer.close()
        return output

    def evaluate(self, job_type: str, therm_frac: float=0.1, beta: Optional[float]=None, nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, eval_steps: Optional[int]=None, nprint: Optional[int]=None, x: Optional[torch.Tensor]=None) -> (dict | None):
        """Evaluate the model ('eval' or 'hmc'); returns None on non-orchestrator ranks."""
        if (not self.trainer._is_orchestrator):
            return None
        assert (job_type in {'eval', 'hmc'})
        jobdir = self.get_jobdir(job_type)
        writer = self.get_summary_writer()
        # NOTE(review): `console` is never used below — dead local; confirm and remove.
        console = get_console(record=True)
        output = self.trainer.eval(beta=beta, x=x, run=self.run, arun=self.arun, writer=writer, nchains=nchains, job_type=job_type, eps=eps, nleapfrog=nleapfrog, eval_steps=eval_steps, nprint=nprint)
        output['dataset'] = self.save_dataset(job_type=job_type, outdir=jobdir, tables=output.get('tables', None), therm_frac=therm_frac)
        if (writer is not None):
            writer.close()
        return output
|
class Experiment(BaseExperiment):
    """TensorFlow (Horovod) implementation of the l2hmc Experiment: wires up
    rank info, WandB/Aim tracking (rank 0 only), the Trainer, and exposes
    train / evaluate / visualization entry points."""

    def __init__(self, cfg: DictConfig, build_networks: bool=True, keep: Optional[(str | Sequence[str])]=None, skip: Optional[(str | Sequence[str])]=None) -> None:
        super().__init__(cfg=cfg)
        assert isinstance(self.config, (ExperimentConfig, configs.ExperimentConfig))
        # Horovod supplies the distributed topology.
        self._rank: int = hvd.rank()
        self._local_rank: int = hvd.local_rank()
        self.ckpt_dir = self.config.get_checkpoint_dir()
        self.trainer: Trainer = self.build_trainer(keep=keep, skip=skip, build_networks=build_networks, ckpt_dir=self.ckpt_dir)
        run = None
        arun = None
        # Only rank 0 talks to the experiment trackers.
        if ((self._rank == 0) and self.config.init_wandb):
            log.warning(f'Initialize WandB from {self._rank}:{self._local_rank}')
            run = super()._init_wandb()
            run.config['SIZE'] = hvd.size()
        if ((self._rank == 0) and self.config.init_aim):
            log.warning(f'Initializing Aim from {self._rank}:{self._local_rank}')
            arun = self.init_aim()
            arun['SIZE'] = hvd.size()
        self.run = run
        self.arun = arun
        self._is_built = True
        assert callable(self.trainer.loss_fn)
        assert isinstance(self.trainer, Trainer)
        assert isinstance(self.trainer.lattice, (LatticeU1, LatticeSU3))

    def visualize_model(self) -> None:
        """Render PNG diagrams of the step-0 x/v sub-networks into
        `<outdir>/network_diagrams/` via keras' model_to_dot."""
        assert ((self.trainer is not None) and isinstance(self.trainer, Trainer))
        state = self.trainer.dynamics.random_state(1.0)
        force = self.trainer.dynamics.grad_potential(state.x, state.beta)
        vnet = self.trainer.dynamics._get_vnet(0)
        xnet = self.trainer.dynamics._get_xnet(0, first=True)
        # One forward pass through each sub-network to build the graphs.
        (sv, tv, qv) = self.trainer.dynamics._call_vnet(0, (state.x, force), training=True)
        (sx, tx, qx) = self.trainer.dynamics._call_xnet(0, (state.x, state.v), first=True, training=True)
        outputs = {'v': {'scale': sv, 'transl': tv, 'transf': qv}, 'x': {'scale': sx, 'transl': tx, 'transf': qx}}
        outdir = Path(self._outdir).joinpath('network_diagrams')
        outdir.mkdir(exist_ok=True, parents=True)
        for (key, val) in outputs.items():
            for (k, v) in val.items():
                net = (xnet if (key == 'x') else vnet)
                dot = tf.keras.utils.model_to_dot(net, show_shapes=True, expand_nested=True, show_layer_activations=True)
                fout = outdir.joinpath(f'{key}-{k}.png').resolve().as_posix()
                log.info(f'Saving model visualizations to: {fout}')
                dot.write_png(fout)

    def update_wandb_config(self, run_id: Optional[str]=None):
        """Record device type ('gpu'/'cpu') and optional run id in the wandb config."""
        device = ('gpu' if (len(tf.config.list_physical_devices('GPU')) > 0) else 'cpu')
        self._update_wandb_config(device=device, run_id=run_id)

    def build_trainer(self, build_networks: bool=True, keep: Optional[(str | Sequence[str])]=None, skip: Optional[(str | Sequence[str])]=None, ckpt_dir: Optional[os.PathLike]=None) -> Trainer:
        """Construct the (TensorFlow) Trainer for this experiment."""
        return Trainer(self.cfg, skip=skip, keep=keep, ckpt_dir=ckpt_dir, build_networks=build_networks)

    def init_wandb(self):
        return super()._init_wandb()

    def init_aim(self):
        return super()._init_aim()

    def get_summary_writer(self, outdir: Optional[os.PathLike]=None):
        """TF summary writer rooted at `outdir` (default: the experiment outdir)."""
        outdir = (self._outdir if (outdir is None) else outdir)
        return tf.summary.create_file_writer(Path(outdir).as_posix())

    def train(self, nchains: Optional[int]=None, x: Optional[tf.Tensor]=None, skip: Optional[(str | Sequence[str])]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, nprint: Optional[int]=None, nlog: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None, save_data: bool=True) -> dict:
        """Run training (dynamic or fixed annealing schedule) and, on the
        orchestrator rank, save/analyze the resulting dataset.

        NOTE(review): the `writer` argument is ignored — it is unconditionally
        rebuilt here (global RANK 0 only); confirm callers don't rely on it.
        """
        jobdir = self.get_jobdir(job_type='train')
        writer = None
        if (RANK == 0):
            writer = self.get_summary_writer()
            writer.set_as_default()
        if self.config.annealing_schedule.dynamic:
            output = self.trainer.train_dynamic(x=x, nera=nera, nepoch=nepoch, arun=self.arun, writer=writer, train_dir=jobdir, skip=skip, beta=beta)
        else:
            output = self.trainer.train(x=x, nera=nera, nepoch=nepoch, arun=self.arun, writer=writer, train_dir=jobdir, skip=skip, beta=beta, nprint=nprint, nlog=nlog)
        if self.trainer._is_orchestrator:
            output['dataset'] = self.save_dataset(save_data=save_data, nchains=nchains, job_type='train', outdir=jobdir, tables=output.get('tables', None))
        if (writer is not None):
            writer.close()
        return output

    def evaluate(self, job_type: str, therm_frac: float=0.1, beta: Optional[float]=None, nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, eval_steps: Optional[int]=None, nprint: Optional[int]=None) -> dict:
        """Evaluate the model ('eval' or 'hmc'); returns {} on non-orchestrator ranks."""
        assert (job_type in {'eval', 'hmc'})
        if (not self.trainer._is_orchestrator):
            return {}
        jobdir = self.get_jobdir(job_type=job_type)
        writer = self.get_summary_writer()
        writer.set_as_default()
        output = self.trainer.eval(arun=self.arun, writer=writer, nchains=nchains, beta=beta, job_type=job_type, eps=eps, nleapfrog=nleapfrog, eval_steps=eval_steps, nprint=nprint)
        output['dataset'] = self.save_dataset(job_type=job_type, outdir=jobdir, therm_frac=therm_frac, tables=output.get('tables', None))
        if (writer is not None):
            writer.close()
        return output
|
class Group(ABC):
    """Gauge group represented as matrices in the last two dimensions.

    Concrete subclasses (e.g. U(1), SU(3)) supply the group operations;
    this base only stores the descriptive metadata.
    """

    def __init__(self, dim: int, shape: Sequence[int], dtype: Any, name: Optional[str]=None) -> None:
        # Basic group metadata; `_name` is only set when one is provided.
        self._dim = dim
        self._shape = shape
        self._dtype = dtype
        if name is not None:
            self._name = name

    @abstractmethod
    def exp(self, x: Any) -> Any:
        """Exponential map from the algebra into the group."""

    @abstractmethod
    def mul(self, a: Any, b: Any, adjoint_a: bool=False, adjoint_b: bool=False) -> Any:
        """Matrix product a·b, optionally taking the adjoint of either factor."""

    @abstractmethod
    def update_gauge(self, x: Any, p: Any) -> Any:
        """Advance the gauge link x using momentum p."""

    @abstractmethod
    def adjoint(self, x: Any) -> Any:
        """Adjoint (conjugate transpose) of x."""

    @abstractmethod
    def trace(self, x: Any) -> Any:
        """Trace over the trailing matrix dimensions."""

    @abstractmethod
    def compat_proj(self, x: Any) -> Any:
        """Project an arbitrary matrix back onto the group/algebra."""

    @abstractmethod
    def random(self, shape: list[int]) -> Any:
        """Draw random group elements of the given shape."""

    @abstractmethod
    def random_momentum(self, shape: list[int]) -> Any:
        """Draw random algebra-valued momenta of the given shape."""

    @abstractmethod
    def kinetic_energy(self, p: Any) -> Any:
        """Kinetic energy of momentum p."""
|
class SU3(Group):
    """SU(3) gauge group (PyTorch backend): matrix operations, projections
    onto the group / algebra, and algebra <-> 8-vector conversions for
    3x3 special-unitary link variables."""

    def __init__(self) -> None:
        super().__init__(dim=4, shape=[3, 3], dtype=torch.complex128, name='SU3')

    def update_gauge(self, x: Tensor, p: Tensor) -> Tensor:
        # Left-multiply the link x by the exponential of the momentum p.
        return (torch.matrix_exp(p) @ x)

    def checkSU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        # Average / max deviation of x from SU(3) (unitarity + unit det).
        return checkSU(x)

    def checkU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        # Average / max deviation of x from U(3) (unitarity only).
        return checkU(x)

    def mul(self, a: Tensor, b: Tensor, adjoint_a: bool=False, adjoint_b: bool=False) -> Tensor:
        """Matrix product a·b with optional adjoints of either factor."""
        if (adjoint_a and adjoint_b):
            return (a.adjoint() @ b.adjoint())
        if adjoint_a:
            return (a.adjoint() @ b)
        if adjoint_b:
            return (a @ b.adjoint())
        return (a @ b)

    def adjoint(self, x: Tensor) -> Tensor:
        return x.adjoint()

    def trace(self, x: Tensor) -> Tensor:
        # Batched trace over the trailing two dimensions.
        return torch.diagonal(x, dim1=(- 2), dim2=(- 1)).sum((- 1))

    def diff_trace(self, x: Tensor) -> Tensor:
        # Placeholder: not implemented yet (logs an error, returns input unchanged).
        log.error('TODO')
        return x

    def diff2trace(self, x: Tensor) -> Tensor:
        # Placeholder: not implemented yet (logs an error, returns input unchanged).
        log.error('TODO')
        return x

    def exp(self, x: Tensor) -> Tensor:
        return torch.linalg.matrix_exp(x)

    def projectTAH(self, x: Tensor) -> Tensor:
        """Project onto the traceless anti-Hermitian (TA) algebra:
        R = (X - X†)/2 - tr(X - X†)/(2 nc) · I
        """
        nc = torch.tensor(x.shape[(- 1)])
        assert (nc == 3)
        r = (0.5 * (x - x.adjoint()))
        d = (torch.diagonal(r, dim1=(- 2), dim2=(- 1)).sum((- 1)) / nc)
        r = (r - (d.reshape((d.shape + (1, 1))) * eyeOf(x)))
        return r

    def compat_proj(self, x: Tensor) -> Tensor:
        """Project an arbitrary matrix back onto SU(3)."""
        return self.projectSU(x)

    def random(self, shape: Sequence[int]) -> Tensor:
        'Returns (batched) random SU(3) matrices.'
        # Complex Gaussian, projected onto SU(3) (projection without grad).
        r = torch.randn(*shape, requires_grad=True, device=DEVICE)
        i = torch.randn(*shape, requires_grad=True, device=DEVICE)
        with torch.no_grad():
            x = projectSU(torch.complex(r, i)).to(DEVICE)
        return x

    def random_momentum(self, shape: Sequence[int]) -> Tensor:
        'Returns (batched) Traceless Anti-Hermitian matrices'
        # Trailing two (matrix) dims are generated by randTAH3 itself.
        return randTAH3(shape[:(- 2)])

    def kinetic_energy(self, p: Tensor) -> Tensor:
        # 8.0 subtracts the per-site normalization for the 8 generators.
        return (0.5 * (norm2(p) - 8.0).flatten(1).sum(1))

    def vec_to_group(self, x: Tensor) -> Tensor:
        """Map 8 real coefficients X^a to an SU(3) matrix via X = X^a T^a
        (convention tr{T^a T^b} = -1/2 δ^{ab}), then re-project onto SU(3)."""
        return self.compat_proj(vec_to_su3(x))

    def group_to_vec(self, x: Tensor) -> Tensor:
        """Inverse of `vec_to_group`: 8 real coefficients X^a = -2 tr[T^a X]."""
        return su3_to_vec(self.compat_proj(x))

    def compat_proju(self, u: Tensor, x: Tensor) -> Tensor:
        """Project u^{-1} x onto the traceless skew-Hermitian algebra:
        B = (C - C†)/2, then B -= tr(B)/n · I."""
        (_, n, _) = x.shape
        # NOTE(review): `torch.linalg.solve` returns the solution tensor
        # directly; the trailing `[0]` selects batch element 0 (it looks like a
        # leftover from the old tuple-returning `torch.solve`) and would break
        # the batched einsum below — confirm before relying on batched input.
        algebra_elem = torch.linalg.solve(u, x)[0]
        B = ((algebra_elem - algebra_elem.conj().transpose((- 2), (- 1))) / 2.0)
        trace = torch.einsum('bii->b', B)
        B = (B - (((1 / n) * trace.unsqueeze((- 1)).unsqueeze((- 1))) * torch.eye(n).repeat(x.shape[0], 1, 1)))
        assert (torch.abs(torch.mean(torch.einsum('bii->b', B))) < 1e-06)
        return B

    @staticmethod
    def rsqrtPHM3f(tr: Tensor, p2: Tensor, det: Tensor) -> tuple[(Tensor, Tensor, Tensor)]:
        """Cayley-Hamilton coefficients (c0, c1, c2) with X^{-1/2} = c0 I + c1 X + c2 X²,
        computed from the invariants (trace, tr(X²), det) via the eigenvalues."""
        (e0, e1, e2) = eigs3x3(tr, p2, det)
        se0 = e0.abs().sqrt()
        se1 = e1.abs().sqrt()
        se2 = e2.abs().sqrt()
        u = ((se0 + se1) + se2)
        w = ((se0 * se1) * se2)
        d = (((w * (se0 + se1)) * (se0 + se2)) * (se1 + se2))
        di = (1.0 / d)
        c0 = (di * (((((w * u) * u) + ((e0 * se0) * (e1 + e2))) + ((e1 * se1) * (e0 + e2))) + ((e2 * se2) * (e0 + e1))))
        c1 = ((- ((tr * u) + w)) * di)
        c2 = (u * di)
        return (c0, c1, c2)

    def rsqrtPHM3(self, x: Tensor) -> Tensor:
        """x^{-1/2} for (batched) 3x3 positive-Hermitian x, via Cayley-Hamilton."""
        tr = torch.diagonal(x, dim1=(- 2), dim2=(- 1)).sum((- 1)).real
        x2 = (x @ x)
        p2 = torch.diagonal(x2, dim1=(- 2), dim2=(- 1)).sum((- 1)).real
        det = x.det().real
        (c0_, c1_, c2_) = self.rsqrtPHM3f(tr, p2, det)
        # Broadcast the scalar coefficients over the trailing matrix dims.
        c0 = c0_.reshape((c0_.shape + (1, 1))).to(x.dtype)
        c1 = c1_.reshape((c1_.shape + (1, 1))).to(x.dtype)
        c2 = c2_.reshape((c2_.shape + (1, 1))).to(x.dtype)
        return (((c0 * eyeOf(x)) + (c1 * x)) + (c2 * x2))

    def projectU(self, x: Tensor) -> Tensor:
        # Unitarize: x · (x†x)^{-1/2}.
        t = (x.mH @ x)
        t2 = self.rsqrtPHM3(t)
        return (x @ t2)

    def projectSU(self, x: Tensor) -> Tensor:
        return projectSU(x)

    def norm2(self, x: Tensor, axis: Sequence[int]=[(- 2), (- 1)], exclude: Optional[Sequence[int]]=None) -> Tensor:
        'No reduction if axis is empty'
        # NOTE(review): mutable default list for `axis`; also `n.sum(*axis)`
        # passes the second axis positionally into `keepdim` — confirm callers
        # only hit the empty-axis / exclude paths or pass a single axis.
        if (x.dtype in [torch.complex64, torch.complex128]):
            x = x.abs()
        n = x.square()
        if (exclude is None):
            if (len(axis) == 0):
                return n
            return n.sum(*axis)
        return n.sum([i for i in range(len(n.shape)) if (i not in exclude)])
|
class SUN():
    """SU(N) matrix-manifold utilities (PyTorch backend): retraction,
    inverse retraction, and tangent-space projection."""

    def __init__(self) -> None:
        super(SUN, self).__init__()

    def exp(self, x: Tensor, u: Tensor) -> Tensor:
        """Retraction: move from the point x along tangent u, x · expm(x† u)."""
        return (x @ expm((x.conj().transpose((- 2), (- 1)) @ u)))

    def log(self, x: Tensor, y: Tensor) -> Tensor:
        """Inverse retraction x · log(x† y); SU(3) only (uses the 3x3 log)."""
        (_, n, _) = x.shape
        assert (n == 3), 'Operation supported only for SU(3)'
        return (x @ log3x3((x.conj().transpose((- 2), (- 1)) @ y)))

    def proju(self, x: Tensor, u: Tensor) -> Tensor:
        """Project u^{-1} x onto the traceless skew-Hermitian algebra:
        B = (C - C†)/2, then B -= tr(B)/n · I (batched over the leading dim)."""
        (_, n, _) = x.shape
        # BUG FIX: `torch.linalg.solve` returns the solution tensor directly
        # (unlike the removed tuple-returning `torch.solve`); the old trailing
        # `[0]` selected batch element 0 and broke the batched einsum below.
        algebra_elem = torch.linalg.solve(u, x)
        B = ((algebra_elem - algebra_elem.conj().transpose((- 2), (- 1))) / 2)
        trace = torch.einsum('bii->b', B)
        B = (B - (((1 / n) * trace.unsqueeze((- 1)).unsqueeze((- 1))) * torch.eye(n).repeat(x.shape[0], 1, 1)))
        # Sanity check: the projection really is traceless (up to rounding).
        assert (torch.abs(torch.mean(torch.einsum('bii->b', B))) < 1e-06)
        return B
|
class SU3(Group):
    """SU(3) gauge group (TensorFlow backend): matrix operations, projections
    onto the group / algebra, and algebra <-> 8-vector conversions."""

    def __init__(self):
        # nc: number of colors; 8 free parameters = dim su(3).
        self._nc = 3
        self._free_params = 8
        super().__init__(dim=4, shape=[3, 3], dtype=tf.complex128)

    def update_gauge(self, x: Tensor, p: Tensor) -> Tensor:
        # Left-multiply the link x by exp(p).
        return tf.matmul(tf.linalg.expm(p), x)

    def checkSU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        """Average and maximum deviation of x from SU(3)
        (unitarity of X†X plus det(x) = 1)."""
        return checkSU(x)

    def checkU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        """Average and maximum deviation of X†X from the identity."""
        return checkU(x)

    def mul(self, a: Tensor, b: Tensor, adjoint_a: bool=False, adjoint_b: bool=False) -> Tensor:
        """Matrix product a·b with optional adjoints of either factor."""
        return tf.linalg.matmul(a, b, adjoint_a=adjoint_a, adjoint_b=adjoint_b)

    def adjoint(self, x: Tensor) -> Tensor:
        return tf.linalg.adjoint(x)

    def trace(self, x: Tensor) -> Tensor:
        return tf.linalg.trace(x)

    def diff_trace(self, x: Tensor):
        # Placeholder: not implemented yet (logs an error, returns input unchanged).
        log.error('TODO')
        return x

    def diff2Trace(self, x: Tensor):
        # Placeholder; NOTE(review): camelCase here vs `diff2trace` in the
        # PyTorch twin — confirm which spelling callers expect.
        log.error('TODO')
        return x

    def exp(self, x: Tensor) -> Tensor:
        return tf.linalg.expm(x)

    def projectTAH(self, x: Tensor) -> Tensor:
        """Project onto the traceless anti-Hermitian algebra:
        R = (X - X†)/2 - tr(X - X†)/(2 N) · I"""
        return projectTAH(x)

    def compat_proj(self, x: Tensor) -> Tensor:
        """Project an arbitrary matrix back onto SU(3)."""
        return projectSU(x)

    def random(self, shape: list[int]) -> Tensor:
        # Complex Gaussian, projected onto SU(3).
        r = tf.random.normal(shape, dtype=TF_FLOAT)
        i = tf.random.normal(shape, dtype=TF_FLOAT)
        return projectSU(tf.complex(r, i))

    def random_momentum(self, shape: list[int]) -> Tensor:
        # Trailing two (matrix) dims are generated by randTAH3 itself.
        return randTAH3(shape[:(- 2)])

    def kinetic_energy(self, p: Tensor) -> Tensor:
        # 8.0 subtracts the per-site normalization for the 8 generators.
        p2 = (norm2(p) - tf.constant(8.0, dtype=TF_FLOAT))
        return (0.5 * tf.math.reduce_sum(tf.reshape(p2, [p.shape[0], (- 1)]), axis=1))

    def vec_to_group(self, x: Tensor) -> Tensor:
        """Map 8 real coefficients X^a to an SU(3) matrix via X = X^a T^a
        (convention tr{T^a T^b} = -1/2 δ^{ab}), then re-project onto SU(3)."""
        return self.compat_proj(vec_to_su3(x))

    def group_to_vec(self, x: Tensor) -> Tensor:
        """Inverse of `vec_to_group`: 8 real coefficients X^a = -2 tr[T^a X]."""
        return su3_to_vec(self.compat_proj(x))
|
def conjT(x: TensorLike) -> TensorLike:
    """Conjugate transpose over the trailing two axes (i.e. x†)."""
    conjugated = conj(x)
    return transpose(conjugated, (-2, -1))
|
class SUN():
    """SU(N) matrix-manifold utilities (TensorFlow backend): retraction,
    inverse retraction, and tangent-space projection."""

    def __init__(self) -> None:
        super(SUN, self).__init__()

    def exp(self, x: TensorLike, u: TensorLike) -> TensorLike:
        # Retraction: x · expm(x† u).
        return (x @ expm((conjT(x) @ u)))

    def log(self, x: TensorLike, y: TensorLike) -> TensorLike:
        # Inverse retraction x · log(x† y); SU(3) only (uses the 3x3 log).
        (_, n, _) = x.shape
        assert (n == 3), 'Operation only supported for SU(3)'
        return (x @ log3x3((conjT(x) @ y)))

    def proju(self, x: TensorLike, u: TensorLike) -> TensorLike:
        """Project u^{-1} x onto the traceless skew-Hermitian algebra:
        B = (C - C†)/2, then B -= tr(B)/n · I."""
        (_, n, _) = x.shape
        # NOTE(review): `tf.linalg.solve` returns the solution tensor, so the
        # trailing `[0]` selects batch element 0 — it looks like a leftover
        # from an API that returned a tuple; confirm for batched input.
        algebra_elem = tf.linalg.solve(u, x)[0]
        B = ((algebra_elem - conjT(algebra_elem)) / 2)
        trace = tf.linalg.einsum('bii->b', B)
        # NOTE(review): `tf.repeat(tf.eye(n), (x.shape[0], 1, 1))` is not the
        # `tf.tile`-style batching it appears to be — verify this produces the
        # intended (batch, n, n) identity.
        B = (B - (((1 / n) * tf.expand_dims(tf.expand_dims(trace, (- 1)), (- 1))) * tf.repeat(tf.eye(n), (x.shape[0], 1, 1))))
        assert (tf.math.abs(tf.reduce_mean(tf.linalg.einsum('bii->b', B))) < 1e-06)
        return B
|
def norm2(x: Tensor, axis=None) -> Tensor:
    """Squared magnitude |x|² = Re(conj(x)·x), reduced over `axis`.

    No reduction is performed when `axis` is an empty list; the default
    reduces over the trailing two (matrix) dimensions.
    """
    # BUG FIX: the original used a mutable default argument ([-2, -1]);
    # use a None sentinel instead (behavior unchanged for all callers).
    if axis is None:
        axis = [(- 2), (- 1)]
    n = tf.math.real(tf.math.multiply(tf.math.conj(x), x))
    if (len(axis) == 0):
        return n
    return tf.math.reduce_sum(n, axis=axis)
|
def norm2_new(x: Tensor, axis: Optional[list[int]]=None, exclude: Optional[list[int]]=None) -> Tensor:
    """Squared magnitude of `x`, reduced over `axis` (default: the trailing
    two dims), or over every dim NOT listed in `exclude` when given.
    No reduction if `axis` is empty."""
    if axis is None:
        axis = [-2, -1]
    if x.dtype in [tf.complex64, tf.complex128]:
        x = tf.abs(x)
    squared = tf.math.square(x)
    if exclude is not None:
        reduce_dims = [d for d in range(len(squared.shape)) if d not in exclude]
        return tf.math.reduce_sum(squared, axis=reduce_dims)
    if not axis:
        return squared
    return tf.math.reduce_sum(squared, axis=axis)
|
def randTAH3(shape: list[int]):
    """Draw random 3x3 traceless anti-Hermitian matrices with one matrix per
    entry of `shape` (the 3x3 dims are appended by the final stacks).

    The tf.random.normal draw order is kept exactly as before so seeded
    runs reproduce the same samples.
    """
    s2 = 0.7071067811865476  # 1/sqrt(2)
    s3 = 0.5773502691896257  # 1/sqrt(3)
    # Diagonal (Cartan) degrees of freedom.
    r3 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    r8 = s2 * s3 * tf.random.normal(shape, dtype=TF_FLOAT)
    zero = tf.cast(0.0, TF_FLOAT)
    d00 = tf.dtypes.complex(zero, r8 + r3)
    d11 = tf.dtypes.complex(zero, r8 - r3)
    d22 = tf.dtypes.complex(zero, (-2) * r8)
    # Off-diagonal real and imaginary parts.
    r01 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    r02 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    r12 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    i01 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    i02 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    i12 = s2 * tf.random.normal(shape, dtype=TF_FLOAT)
    # Anti-Hermitian pairing: m[j][i] = -conj(m[i][j]).
    o01 = tf.dtypes.complex(r01, i01)
    o10 = tf.dtypes.complex(-r01, i01)
    o02 = tf.dtypes.complex(r02, i02)
    o20 = tf.dtypes.complex(-r02, i02)
    o12 = tf.dtypes.complex(r12, i12)
    o21 = tf.dtypes.complex(-r12, i12)
    col0 = tf.stack([d00, o10, o20], axis=-1)
    col1 = tf.stack([o01, d11, o21], axis=-1)
    col2 = tf.stack([o02, o12, d22], axis=-1)
    return tf.stack([col0, col1, col2], axis=-1)
|
def eigs3(tr, p2, det):
    """Eigenvalues of a 3x3 Hermitian matrix from its invariants
    (trace, tr(X²), det) via the trigonometric (Cardano) solution of the
    characteristic cubic, with clamping for numerical safety.

    Returns:
        (e0, e1, e2): the three real eigenvalues, elementwise over the batch.
    """
    tr3 = ((1.0 / 3.0) * tr)
    p23 = ((1.0 / 3.0) * p2)
    tr32 = (tr3 * tr3)
    q = tf.math.abs((0.5 * (p23 - tr32)))
    r = (((0.25 * tr3) * ((5 * tr32) - p2)) - (0.5 * det))
    sq = tf.math.sqrt(q)
    sq3 = (q * sq)
    isq3 = (1.0 / sq3)
    # Clamp 1/q^{3/2} to avoid overflow when q ~ 0 (near-degenerate eigenvalues).
    maxv = tf.constant(3e+38, shape=isq3.shape, dtype=isq3.dtype)
    minv = tf.constant((- 3e+38), shape=isq3.shape, dtype=isq3.dtype)
    isq3c = tf.math.minimum(maxv, tf.math.maximum(minv, isq3))
    rsq3c = (r * isq3c)
    # Clamp the acos argument into [-1, 1].
    maxv = tf.constant(1, shape=isq3.shape, dtype=isq3.dtype)
    minv = tf.constant((- 1), shape=isq3.shape, dtype=isq3.dtype)
    rsq3 = tf.math.minimum(maxv, tf.math.maximum(minv, rsq3c))
    t = ((1.0 / 3.0) * tf.math.acos(rsq3))
    st = tf.math.sin(t)
    ct = tf.math.cos(t)
    sqc = (sq * ct)
    sqs = ((1.7320508075688772 * sq) * st)  # sqrt(3) · sq · sin(t)
    ll = (tr3 + sqc)
    e0 = (tr3 - (2 * sqc))
    e1 = (ll + sqs)
    e2 = (ll - sqs)
    return (e0, e1, e2)
|
def rsqrtPHM3f(tr: Tensor, p2: Tensor, det: Tensor) -> tuple[(Tensor, Tensor, Tensor)]:
    """Cayley-Hamilton coefficients (c0, c1, c2) such that
    X^{-1/2} = c0·I + c1·X + c2·X² for a 3x3 positive-Hermitian X with
    trace `tr`, tr(X²) = `p2` and determinant `det`.

    The expression groupings match the original exactly so float rounding
    is unchanged.
    """
    (l0, l1, l2) = eigs3(tr, p2, det)
    s0 = tf.math.sqrt(tf.math.abs(l0))
    s1 = tf.math.sqrt(tf.math.abs(l1))
    s2 = tf.math.sqrt(tf.math.abs(l2))
    u = s0 + s1 + s2
    w = s0 * s1 * s2
    d = w * (s0 + s1) * (s0 + s2) * (s1 + s2)
    di = 1.0 / d
    c0 = (w * u * u + l0 * s0 * (l1 + l2) + l1 * s1 * (l0 + l2) + l2 * s2 * (l0 + l1)) * di
    c1 = -(tr * u + w) * di
    c2 = u * di
    return (c0, c1, c2)
|
def rsqrtPHM3(x: Tensor) -> Tensor:
    """Reciprocal matrix square root x^{-1/2} of a (batched) 3x3
    positive-Hermitian matrix, via the Cayley-Hamilton expansion
    x^{-1/2} = c0·I + c1·x + c2·x²."""
    trace = tf.math.real(tf.linalg.trace(x))
    xsq = tf.linalg.matmul(x, x)
    p2 = tf.math.real(tf.linalg.trace(xsq))
    det = tf.math.real(tf.linalg.det(x))
    (a, b, c) = rsqrtPHM3f(trace, p2, det)
    # Broadcast the scalar coefficients over the trailing matrix dims.
    c0 = tf.cast(tf.reshape(a, (a.shape + [1, 1])), x.dtype)
    c1 = tf.cast(tf.reshape(b, (b.shape + [1, 1])), x.dtype)
    c2 = tf.cast(tf.reshape(c, (c.shape + [1, 1])), x.dtype)
    return (c0 * eyeOf(x)) + tf.math.multiply(c1, x) + (c2 * xsq)
|
def projectU(x: Tensor) -> Tensor:
    "x (x'x)^{-1/2}"
    # Polar-decomposition style unitarization.
    gram = tf.linalg.adjoint(x) @ x
    return tf.linalg.matmul(x, rsqrtPHM3(gram))
|
def projectSU(x: Tensor) -> Tensor:
    """Project onto SU(nc): unitarize via projectU, then rotate the
    determinant phase back to 1 (spread evenly over the diagonal)."""
    nc = tf.constant(x.shape[(- 1)], TF_FLOAT)
    m = projectU(x)
    d = tf.linalg.det(m)
    # Phase angle of det(m), divided by -nc.
    angle = tf.cast(tf.math.atan2(tf.math.imag(d), tf.math.real(d)), TF_FLOAT)
    p = (1.0 / (- nc)) * angle
    twist = tf.complex(tf.math.cos(p), tf.math.sin(p))
    return tf.math.multiply(m, tf.cast(tf.reshape(twist, (p.shape + [1, 1])), m.dtype))
|
def projectTAH(x: Tensor) -> Tensor:
    """Project onto the traceless anti-Hermitian algebra:
    R = (X - X†)/2 - tr(X - X†)/(2 N) · I
      = -T^a tr[T^a (X - X†)]
    """
    nc = tf.constant(x.shape[(- 1)], dtype=x.dtype)
    anti = 0.5 * (x - tf.linalg.adjoint(x))
    mean_trace = tf.linalg.trace(anti) / nc
    return anti - (tf.reshape(mean_trace, (mean_trace.shape + [1, 1])) * eyeOf(x))
|
def checkU(x: Tensor) -> tuple[(Tensor, Tensor)]:
    """Average and maximum (per batch element) of the normalized deviation of
    X†X from the identity."""
    nc = tf.constant(x.shape[(- 1)], dtype=x.dtype)
    dev = norm2(tf.linalg.matmul(x, x, adjoint_a=True) - eyeOf(x))
    batch_axes = range(1, len(dev.shape))
    avg = tf.math.reduce_mean(dev, axis=batch_axes)
    top = tf.math.reduce_max(dev, axis=batch_axes)
    scale = 2 * ((nc * nc) + 1)
    return (tf.math.sqrt(avg / scale), tf.math.sqrt(top / scale))
|
def checkSU(x: Tensor) -> tuple[(Tensor, Tensor)]:
    """Average and maximum (per batch element) of the normalized deviation of
    X†X from the identity plus det(x) from 1."""
    nc = tf.constant(x.shape[(- 1)], dtype=TF_FLOAT)
    dev = norm2(tf.linalg.matmul(x, x, adjoint_a=True) - eyeOf(x))
    # det deviation is a scalar per matrix, so no axis reduction here.
    dev += norm2((- 1) + tf.linalg.det(x), axis=[])
    batch_axes = range(1, len(dev.shape))
    avg = tf.cast(tf.math.reduce_mean(dev, axis=batch_axes), TF_FLOAT)
    top = tf.cast(tf.math.reduce_max(dev, axis=batch_axes), TF_FLOAT)
    scale = tf.cast(2.0 * ((nc * nc) + 1), TF_FLOAT)
    return (tf.math.sqrt(tf.math.divide(avg, scale)), tf.math.sqrt(tf.math.divide(top, scale)))
|
def su3_to_vec(x: Tensor) -> Tensor:
    """Map a 3x3 anti-Hermitian (su(3)) matrix to its 8 real coefficients.

    Returns X^a with X^a T^a = X - 1/3 tr(X), using the convention
    tr{T^a T^a} = -1/2, so X^a = -2 tr[T^a X].
    """
    c = (- 2)
    # Independent entries of the anti-Hermitian matrix (upper triangle
    # plus diagonal imaginary parts).
    x00 = x[(..., 0, 0)]
    x01 = x[(..., 0, 1)]
    x11 = x[(..., 1, 1)]
    x02 = x[(..., 0, 2)]
    x12 = x[(..., 1, 2)]
    x22 = x[(..., 2, 2)]
    # Coefficients in the Gell-Mann-derived basis; last entry carries 1/√3.
    return tf.stack([(c * tf.math.imag(x01)), (c * tf.math.real(x01)), (tf.math.imag(x11) - tf.math.imag(x00)), (c * tf.math.imag(x02)), (c * tf.math.real(x02)), (c * tf.math.imag(x12)), (c * tf.math.real(x12)), (SQRT1by3 * (((2 * tf.math.imag(x22)) - tf.math.imag(x11)) - tf.math.imag(x00)))], axis=(- 1))
|
def vec_to_su3(v: Tensor) -> Tensor:
    """Map 8 real coefficients to a 3x3 anti-Hermitian (su(3)) matrix.

    X = X^a T^a
    tr{X T^b} = X^a tr{T^a T^b} = X^a (-1/2) δ^ab = -1/2 X^b
    X^a = -2 X_{ij} T^a_{ji}
    """
    s3 = 0.5773502691896257  # 1/sqrt(3)
    c = (- 0.5)
    assert (len(v.shape) > 1)
    # Put the 8-component axis first so vT[a] selects coefficient a.
    vT = tf.transpose(v)
    # NOTE(review): `.T` on a tf.Tensor requires TF's NumPy behavior to be
    # enabled (tf.experimental.numpy) — confirm the importing module does so.
    v0 = vT[0].T
    v1 = vT[1].T
    v2 = vT[2].T
    v3 = vT[3].T
    v4 = vT[4].T
    v5 = vT[5].T
    v6 = vT[6].T
    v7 = vT[7].T
    zero = tf.zeros(v0.shape, dtype=v0.dtype)
    # Off-diagonal entries (upper triangle).
    x01 = (c * tf.dtypes.complex(v1, v0))
    x02 = (c * tf.dtypes.complex(v4, v3))
    x12 = (c * tf.dtypes.complex(v6, v5))
    # Imaginary parts of the diagonal.
    x2i = (s3 * v7)
    x0i = (c * (x2i + v2))
    x1i = (c * (x2i - v2))
    def neg_conj(x: Tensor) -> Tensor:
        # -conj(x): fills in the lower triangle of an anti-Hermitian matrix.
        return tf.math.negative(tf.math.conj(x))
    # Rows of the result (v1..v3 are intentionally reused as row tensors).
    v1 = tf.stack([tf.dtypes.complex(zero, x0i), neg_conj(x01), neg_conj(x02)], axis=(- 1))
    v2 = tf.stack([x01, tf.dtypes.complex(zero, x1i), neg_conj(x12)], axis=(- 1))
    v3 = tf.stack([x02, x12, tf.dtypes.complex(zero, x2i)], axis=(- 1))
    return tf.stack([v1, v2, v3])
|
def eyeOf(m):
    """Identity matrix matching m's trailing (n, n) block and dtype, with
    singleton batch dims so it broadcasts against m."""
    n_batch = (len(m.shape) - 2)
    return tf.eye(*m.shape[(- 2):], batch_shape=([1] * n_batch), dtype=m.dtype)
|
def exp(m: Tensor, order: int=12):
    """Matrix exponential via a truncated Taylor series.

    Computes sum_{k=0}^{order} m^k / k! with a Horner-style recursion
    x ← I + (m x)/i, folding in one factor per step from highest order down.
    """
    eye = eyeOf(m)
    x = (eye + (m / tf.constant(order)))
    for i in tf.range((order - 1), 0, (- 1)):
        x = (eye + (tf.linalg.matmul(m, x) / tf.constant(tf.cast(i, m.dtype))))
    return x
|
def su3fabc(v: tf.Tensor) -> Tensor:
    """Contract the su(3) structure constants with a vector.

    Returns the antisymmetric matrix f^{abc} v[..., c], where
    [T^a, T^b] = f^{abc} T^c.  The f-values are module-level constants.
    """
    vT = tf.transpose(v)
    # Independent upper-triangular entries a_{ab} = f^{abc} v^c.
    # (The original had this first assignment duplicated on two lines;
    # the redundant copy is removed.)
    a01 = ((+ f012) * vT[2])
    a02 = ((- f012) * vT[1])
    a03 = ((+ f036) * vT[6])
    a04 = ((+ f045) * vT[5])
    a05 = ((- f045) * vT[4])
    a06 = ((- f036) * vT[3])
    a12 = ((+ f012) * vT[0])
    a13 = ((+ f135) * vT[5])
    a14 = ((+ f146) * vT[6])
    a15 = ((- f135) * vT[3])
    a16 = ((- f146) * vT[4])
    a23 = ((+ f234) * vT[4])
    a24 = ((- f234) * vT[3])
    a25 = ((+ f256) * vT[6])
    a26 = ((- f256) * vT[5])
    a34 = (((+ f347) * vT[7]) + (f234 * vT[2]))
    a35 = ((+ f135) * vT[1])
    a36 = ((+ f036) * vT[0])
    a37 = ((- f347) * vT[4])
    a45 = ((+ f045) * vT[0])
    a46 = ((+ f146) * vT[1])
    a47 = ((+ f347) * vT[3])
    a56 = (((+ f567) * vT[7]) + (f256 * vT[2]))
    a57 = ((- f567) * vT[6])
    a67 = ((+ f567) * vT[5])
    zii = tf.zeros(vT[0].shape, dtype=vT[0].dtype)
    # Assemble the full antisymmetric 8x8 matrix; the trailing `.T` relies
    # on TF's NumPy behavior being enabled.
    return tf.stack([tf.stack([(+ zii), (- a01), (- a02), (- a03), (- a04), (- a05), (- a06), (+ zii)], (- 1)), tf.stack([(+ a01), (+ zii), (- a12), (- a13), (- a14), (- a15), (- a16), (+ zii)], (- 1)), tf.stack([(+ a02), (+ a12), (+ zii), (- a23), (- a24), (- a25), (- a26), (+ zii)], (- 1)), tf.stack([(+ a03), (+ a13), (+ a23), (+ zii), (- a34), (- a35), (- a36), (- a37)], (- 1)), tf.stack([(+ a04), (+ a14), (+ a24), (+ a34), (+ zii), (- a45), (- a46), (- a47)], (- 1)), tf.stack([(+ a05), (+ a15), (+ a25), (+ a35), (+ a45), (+ zii), (- a56), (- a57)], (- 1)), tf.stack([(+ a06), (+ a16), (+ a26), (+ a36), (+ a46), (+ a56), (+ zii), (- a67)], (- 1)), tf.stack([(+ zii), (+ zii), (+ zii), (+ a37), (+ a47), (+ a57), (+ a67), (+ zii)], (- 1))], (- 1)).T
|
def su3dabc(v: Tensor) -> Tensor:
    """Contract the symmetric su(3) constants with a vector.

    Returns the symmetric matrix d^{abc} v[..., c], where
    {T^a, T^b} = -1/3 δ^ab + i d^{abc} T^c.  The d-values are
    module-level constants.
    """
    vT = tf.transpose(v)
    # Independent entries a_{ab} = d^{abc} v^c (the matrix is symmetric).
    a00 = (d007 * vT[7])
    a03 = (d035 * vT[5])
    a04 = (d046 * vT[6])
    a05 = (d035 * vT[3])
    a06 = (d046 * vT[4])
    a07 = (d007 * vT[0])
    a11 = (d117 * vT[7])
    a13 = (d136 * vT[6])
    a14 = (d145 * vT[5])
    a15 = (d145 * vT[4])
    a16 = (d136 * vT[3])
    a17 = (d117 * vT[1])
    a22 = (d227 * vT[7])
    a23 = (d233 * vT[3])
    a24 = (d244 * vT[4])
    a25 = (d255 * vT[5])
    a26 = (d266 * vT[6])
    a27 = (d227 * vT[2])
    a33 = ((d337 * vT[7]) + (d233 * vT[2]))
    a35 = (d035 * vT[0])
    a36 = (d136 * vT[1])
    a37 = (d337 * vT[3])
    a44 = ((d447 * vT[7]) + (d244 * vT[2]))
    a45 = (d145 * vT[1])
    a46 = (d046 * vT[0])
    a47 = (d447 * vT[4])
    a55 = ((d557 * vT[7]) + (d255 * vT[2]))
    a57 = (d557 * vT[5])
    a66 = ((d667 * vT[7]) + (d266 * vT[2]))
    a67 = (d667 * vT[6])
    a77 = (d777 * vT[7])
    zii = tf.zeros(vT[0].shape, dtype=vT[0].dtype)
    # Assemble the symmetric 8x8 matrix row by row.
    return tf.stack([tf.stack([a00, zii, zii, a03, a04, a05, a06, a07], (- 1)), tf.stack([zii, a11, zii, a13, a14, a15, a16, a17], (- 1)), tf.stack([zii, zii, a22, a23, a24, a25, a26, a27], (- 1)), tf.stack([a03, a13, a23, a33, zii, a35, a36, a37], (- 1)), tf.stack([a04, a14, a24, zii, a44, a45, a46, a47], (- 1)), tf.stack([a05, a15, a25, a35, a45, a55, zii, a57], (- 1)), tf.stack([a06, a16, a26, a36, a46, zii, a66, a67], (- 1)), tf.stack([a07, a17, a27, a37, a47, a57, a67, a77], (- 1))], axis=(- 1))
|
def SU3Ad(x: Tensor) -> Tensor:
    """Adjoint representation of a group element x in SU(3).

    X T^c X† = AdX T^c = T^b AdX^bc, with
    AdX^bc = -2 tr[T^b X T^c X†] = -2 tr[T^c X† T^b X].
    """
    xb = tf.expand_dims(x, (- 3))  # broadcast x over the 8 generators
    rotated = tf.linalg.matmul(su3gen(), xb)
    return su3_to_vec(tf.linalg.matmul(xb, rotated, adjoint_a=True))
|
def su3ad(x: Tensor) -> Tensor:
    """Adjoint action matrix of x in su(3): adX^{ab} = -f^{abc} X^c."""
    coeffs = su3_to_vec(x)
    return su3fabc(tf.negative(coeffs))
|
def su3adapply(adx: Tensor, y: Tensor) -> Tensor:
    """Apply an adjoint matrix: returns [X, Y] given adX and Y.

    adX(Y) = T^a adX^{ab} Y^b with Y^b = -2 tr{T^b Y}.
    """
    yvec = su3_to_vec(y)
    return vec_to_su3(tf.linalg.matvec(adx, yvec))
|
def gellMann() -> Tensor:
    """Return the eight 3x3 Gell-Mann matrices stacked along axis 0.

    Entries are complex128; the eighth matrix carries the 1/sqrt(3) factor.
    """
    s3 = 0.5773502691896257  # 1/sqrt(3)
    zero3 = tf.zeros([3, 3], dtype=tf.float64)
    return tf.stack([tf.dtypes.complex(tf.reshape(tf.constant([0, 1, 0, 1, 0, 0, 0, 0, 0], dtype=tf.float64), [3, 3]), zero3), tf.dtypes.complex(zero3, tf.reshape(tf.constant([0, (- 1), 0, 1, 0, 0, 0, 0, 0], dtype=tf.float64), [3, 3])), tf.dtypes.complex(tf.reshape(tf.constant([1, 0, 0, 0, (- 1), 0, 0, 0, 0], dtype=tf.float64), [3, 3]), zero3), tf.dtypes.complex(tf.reshape(tf.constant([0, 0, 1, 0, 0, 0, 1, 0, 0], dtype=tf.float64), [3, 3]), zero3), tf.dtypes.complex(zero3, tf.reshape(tf.constant([0, 0, (- 1), 0, 0, 0, 1, 0, 0], dtype=tf.float64), [3, 3])), tf.dtypes.complex(tf.reshape(tf.constant([0, 0, 0, 0, 0, 1, 0, 1, 0], dtype=tf.float64), [3, 3]), zero3), tf.dtypes.complex(zero3, tf.reshape(tf.constant([0, 0, 0, 0, 0, (- 1), 0, 1, 0], dtype=tf.float64), [3, 3])), (s3 * tf.dtypes.complex(tf.reshape(tf.constant([1, 0, 0, 0, 1, 0, 0, 0, (- 2)], dtype=tf.float64), [3, 3]), zero3))])
|
def su3gen() -> Tensor:
    """Anti-Hermitian su(3) generators T^a = (-i/2) λ^a (λ = Gell-Mann).

    T[a, i, j] = T^a_ij, normalized so tr{T^a T^a} = -1/2.
    The stack is computed once and memoized in a module-level global.
    """
    global _su3gen_private_global_cache_
    if _su3gen_private_global_cache_ is None:
        minus_half_i = tf.dtypes.complex(
            tf.constant(0, dtype=tf.float64),
            tf.constant((- 0.5), dtype=tf.float64),
        )
        _su3gen_private_global_cache_ = minus_half_i * gellMann()
    return _su3gen_private_global_cache_
|
def diffprojectTAH(m: Tensor, p: Optional[Tensor]=None) -> Tensor:
    """Derivative of projectTAH.

    Returns ∂_c p^a = ∂_c projectTAH(m)^a = -tr[T^a (T^c M + M† T^c)]:

        P^a = -2 tr[T^a {-T^d tr[T^d (M - M†)]}]
            = -tr[T^a (M - M†)]
        ∂_c P^a = -tr[T^a (T^c M + M† T^c)]
                = -1/2 { d^acb tr[T^b i(M+M†)] - 1/3 δ^ac tr(M+M†) + adF^ac }

    Note: T^a T^b = 1/2 {(f^abc + i d^abc) T^c - 1/3 δ^ab}.
    """
    if (p is None):
        p = projectTAH(m)
    # -1/2 adP term.
    mhalfadP = su3ad((tf.constant((- 0.5)) * p))
    # Hermitian part M + M† and its (normalized) real trace.
    Ms = (m + tf.linalg.adjoint(m))
    trMs = (tf.math.real(tf.linalg.trace(Ms)) / 6.0)
    eye = tf.dtypes.complex(tf.constant(0, dtype=tf.float64), tf.constant(1, dtype=tf.float64))
    return ((su3dabc((tf.constant(0.25) * su3_to_vec((eye * Ms)))) + (tf.reshape(trMs, (trMs.shape + [1, 1])) * eyeOf(mhalfadP))) + mhalfadP)
|
def diffprojectTAHCross(m: Tensor, x: Optional[Tensor]=None, Adx: Optional[Tensor]=None, p: Optional[Tensor]=None) -> Tensor:
    """Cross derivative of projectTAH(X Y), rotated into the Y frame.

    With M = X Y, returns R^ac = (∂ P)^{ab} AdX^{bc}, where the derivative
    acts on Y.  Either `x` or its precomputed adjoint `Adx` must be given;
    `p` optionally supplies a precomputed projectTAH(m).
    """
    if (Adx is None):
        if (x is None):
            raise ValueError('diffprojectTAHCross must either provide x or Adx.')
        Adx = SU3Ad(x)
    dproj = diffprojectTAH(m, p)
    return tf.linalg.matmul(dproj, Adx)
|
def diffexp(adX: Tensor, order: int=13) -> Tensor:
    """J(X) = (1 - exp(-adX)) / adX = Σ_{k=0}^{order} (-adX)^k / (k+1)!

    Evaluated with a Horner-style recursion from the highest order down.
    This J satisfies exp(-X) ∂_t exp(X) = J(adX) ∂_t X.
    """
    neg = tf.negative(adX)
    eye = eyeOf(neg)
    acc = (eye + ((1.0 / (order + 1.0)) * neg))
    for k in tf.range(order, 1, (- 1)):
        acc = (eye + ((1.0 / tf.cast(k, neg.dtype)) * tf.linalg.matmul(neg, acc)))
    return acc
|
def SU3GradientTF(f: Callable[([Tensor], Tensor)], x: Tensor) -> tuple[(Tensor, Tensor)]:
    """Gradient of a real scalar function on SU(3) via tf.GradientTape.

    Uses real vector derivatives: differentiates t ↦ f(exp(T^a t^a) x)
    at t = 0, so D^a = ∂_a f(x).

    Returns:
        (f(x), D) with D the 8-component real gradient vector.
    """
    # Watch an 8-vector of zeros; the perturbation enters through expm.
    zeros = tf.zeros(8)
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(zeros)
        y = f(tf.linalg.matmul(tf.linalg.expm(vec_to_su3(zeros)), x))
    d = tape.gradient(y, zeros)
    return (y, d)
|
def SU3GradientTFMat(f: Callable, x: Tensor) -> tuple[(Tensor, Tensor)]:
    """Gradient of a real scalar f via GradientTape with matrix derivatives.

    Returns (f(x), D) where D = T^a D^a = T^a ∂_a f(x), computed as
    1/2 projectTAH((∂f/∂x) x†).

    Note TensorFlow's complex-gradient convention:
    ∂_z f = (∂_z f + ∂_z f†)† = 2 [∂_z Re(f)]† = 2 ∂_z† Re(f),
    which the 1/2 factor compensates for.
    """
    with tf.GradientTape(watch_accessed_variables=False) as t:
        t.watch(x)
        r = f(x)
    d = (tf.constant(0.5) * projectTAH((t.gradient(r, x) @ tf.linalg.adjoint(x))))
    return (r, d)
|
def SU3JacobianTF(f: Callable, x: Tensor, is_SU3: bool=True) -> tuple[(Tensor, Tensor)]:
    """Jacobian of f on SU(3) via GradientTape with real vector derivatives.

    Differentiates t ↦ f(exp(T^a t^a) x) at t = 0.  To keep gradient
    information, the output is always projected to su(3): if `is_SU3` the
    result Z is first multiplied by its (stop-gradient) adjoint; otherwise
    Z is assumed to already lie in su(3) and is projected directly.
    The input x must be in SU(3).

    Returns:
        (f(x), Jacobian in the adjoint representation).
    """
    v = tf.zeros(8, dtype=tf.float64)
    # persistent=True: the tape is queried for a full Jacobian below.
    with tf.GradientTape(watch_accessed_variables=False, persistent=True) as t:
        t.watch(v)
        Z = f((tf.linalg.expm(vec_to_su3(v)) @ x))
        if is_SU3:
            # Pull back to the algebra; stop_gradient keeps d(Z†)=0.
            z = (Z @ tf.linalg.adjoint(tf.stop_gradient(Z)))
        else:
            z = Z
        z = su3_to_vec(z)
    tj = t.jacobian(z, v, experimental_use_pfor=False)
    return (Z, tj)
|
def SU3JacobianTFMat(f: Callable, x: Tensor, is_SU3: bool=True) -> tuple[(Tensor, Tensor)]:
    """Jacobian of f on SU(3) via GradientTape with matrix derivatives.

    To keep gradient information, the output is always projected to su(3):
    if `is_SU3` the result Z is first multiplied by its (stop-gradient)
    adjoint; otherwise Z is assumed to already lie in su(3).  The input x
    must be in SU(3).

    Note TensorFlow's complex-gradient convention ∂_z f = (∂_z f + ∂_z f†)†,
    accounted for in the final einsum contraction with conj(jzx).

    Returns:
        (f(x), Jacobian in the adjoint representation).
    """
    with tf.GradientTape(watch_accessed_variables=False, persistent=True) as t:
        t.watch(x)
        Z = f(x)
        if is_SU3:
            z = tf.linalg.matmul(Z, tf.stop_gradient(Z), adjoint_b=True)
        else:
            z = Z
        z = tf.cast(su3_to_vec(z), TF_COMPLEX)
    jzx = t.jacobian(z, x, experimental_use_pfor=False)
    # Contract with T^a x to express the Jacobian in the generator basis.
    tj = tf.math.real(tf.einsum('aik,kj,bij->ba', su3gen(), x, tf.math.conj(jzx)))
    return (Z, tj)
|
def rand_unif(shape: Sequence[int], a: float, b: float, requires_grad: bool=True):
    """Return a tensor of shape `shape` drawn uniformly between a and b.

    >>> x = rand_unif([1, 2, 3], -1, 1)
    >>> tuple(x.shape)
    (1, 2, 3)
    """
    draw = torch.rand(tuple(shape))
    # Affine map of U(0, 1): b at draw=0, approaching a as draw -> 1.
    sample = ((a - b) * draw) + b
    return sample.clone().detach().requires_grad_(requires_grad)
|
def random_angle(shape: Sequence[int], requires_grad: bool=True) -> Tensor:
    """Uniform random angles in (-pi, pi) with the given shape."""
    return rand_unif(shape, -PI, PI, requires_grad=requires_grad)
|
def eyeOf(x: torch.Tensor) -> torch.Tensor:
    # Build a broadcastable "identity" for x on the same device.
    # NOTE(review): the buffer has shape [1, ..., 1, n] (same rank as x),
    # and the assignment below writes an (n, n) torch.eye into a
    # [..., n]-shaped slice — this only broadcasts when n == 1.  Looks
    # intended for the U(1) case (trailing dim 1); confirm before reusing
    # with n > 1.
    batch_dims = ([1] * (len(x.shape) - 1))
    eye = torch.zeros((batch_dims + [*x.shape[(- 1):]])).to(x.device)
    eye[(- 1):] = torch.eye(x.shape[(- 1)])
    return eye
|
class U1Phase(Group):
    """PyTorch U(1) gauge group represented by phase angles.

    Group elements are real angles φ; multiplication is addition of
    phases, the adjoint is negation, and exp maps to cos φ + i sin φ.
    """

    def __init__(self) -> None:
        dim = 2        # spacetime dimension of the target lattice
        shape = [1]    # a link is a single angle
        dtype = PT_FLOAT
        super().__init__(dim=dim, shape=shape, dtype=dtype)

    def phase_to_coords(self, phi: Tensor) -> Tensor:
        'Convert complex to Cartesian.\n\n exp(i Ο) --> [cos Ο, sin Ο]\n\n >>> g = U1Phase()\n >>> g.phase_to_coords(torch.tensor([1.0, 0.0]))\n tensor([0.5403, 1.0000, 0.8415, 0.0000])\n '
        return torch.cat([phi.cos(), phi.sin()], (- 1))

    def coords_to_phase(self, x: Tensor) -> Tensor:
        'Convert Cartesian to phase.\n\n [cos Ο, sin Ο] --> atan(sin Ο / cos Ο)\n '
        assert (x.shape[(- 1)] == 2)
        return torch.atan2(x[(..., (- 1))], x[(..., (- 2))])

    @staticmethod
    def group_to_vec(x: Tensor) -> Tensor:
        # Same map as phase_to_coords but concatenating along dim=1.
        return torch.cat([x.cos(), x.sin()], dim=1)

    @staticmethod
    def vec_to_group(x: Tensor) -> Tensor:
        # Accepts either a complex tensor or [cos, sin] pairs on the last axis.
        if x.is_complex():
            return torch.atan2(x.imag, x.real)
        return torch.atan2(x[(..., (- 1))], x[(..., (- 2))])

    def exp(self, x: Tensor) -> Tensor:
        # exp(i x) as an explicit complex tensor.
        return torch.complex(x.cos(), x.sin())

    def update_gauge(self, x: Tensor, p: Tensor) -> Tensor:
        # Group multiplication = phase addition.
        return (x + p)

    def mul(self, a: Tensor, b: Tensor, adjoint_a: Optional[bool]=None, adjoint_b: Optional[bool]=None) -> Tensor:
        # Multiplication with optional adjoints; adjoint of a phase is its
        # negation, so each flag flips the sign of its operand.
        if (adjoint_a and adjoint_b):
            return ((- a) - b)
        elif adjoint_a:
            return ((- a) + b)
        elif adjoint_b:
            return (a - b)
        else:
            return (a + b)

    def adjoint(self, x: Tensor) -> Tensor:
        return (- x)

    def trace(self, x: Tensor) -> Tensor:
        # Re tr exp(i x) for a 1x1 "matrix" is simply cos x.
        return torch.cos(x)

    def diff_trace(self, x: Tensor) -> Tensor:
        # d/dx trace(x)
        return (- torch.sin(x))

    def diff2trace(self, x: Tensor) -> Tensor:
        # d²/dx² trace(x)
        return (- torch.cos(x))

    @torch.no_grad()
    def floormod(self, x: (Tensor | float), y: (Tensor | float)) -> Tensor:
        return (x - (torch.floor_divide(x, y) * y))

    def compat_proj(self, x: Tensor) -> Tensor:
        # Wrap angles into [-pi, pi).
        return (((x + PI) % TWO_PI) - PI)

    def projectTAH(self, x: Tensor) -> Tensor:
        'Returns\n r = (1/2) * (x - x.H) - j Im[ Tr(x) ] / Nc\n '
        # For U(1) phases the projection is the identity map.
        return x

    def projectSU(self, x):
        return self.compat_proj(x)

    def random(self, shape: Sequence[int]) -> Tensor:
        # Uniform angles, wrapped into the canonical interval.
        return self.compat_proj((TWO_PI * torch.rand(*shape)))

    def random_momentum(self, shape: Sequence[int]) -> Tensor:
        # Gaussian momenta, flattened per chain.
        return torch.randn(*shape).reshape(shape[0], (- 1))

    def kinetic_energy(self, p: Tensor) -> Tensor:
        # (1/2) Σ p² per chain.
        return (0.5 * p.flatten(1).square().sum((- 1)))
|
class U1Phase(Group):
    """TensorFlow U(1) gauge group represented by phase angles.

    NOTE(review): shares its name with the torch `U1Phase` defined earlier
    in this file; if both really live in one module the later definition
    wins at import time — confirm they come from separate source files.
    """

    def __init__(self):
        super(U1Phase, self).__init__(dim=2, shape=[1], dtype=TF_FLOAT)

    def phase_to_coords(self, phi: Tensor) -> Tensor:
        'Convert complex to Cartesian.\n\n exp(i Ο) --> [cos Ο, sin Ο]\n '
        coords = [tf.math.cos(phi), tf.math.sin(phi)]
        return tf.convert_to_tensor(tf.concat(coords, axis=(- 1)))

    def coords_to_phase(self, x: Tensor) -> Tensor:
        'Convert Cartesian to phase.\n\n [cos Ο, sin Ο] --> atan(sin Ο / cos Ο)\n '
        assert (x.shape[(- 1)] == 2)
        return tf.convert_to_tensor(tf.math.atan2(x[(..., (- 1))], x[(..., (- 2))]))

    def exp(self, x: Tensor) -> Tensor:
        # exp(i x) as an explicit complex tensor.
        return tf.complex(tf.math.cos(x), tf.math.sin(x))

    def update_gauge(self, x: Tensor, p: Tensor) -> Tensor:
        # Group multiplication = phase addition.
        return tf.add(x, p)

    def mul(self, a: Tensor, b: Tensor, adjoint_a: bool=False, adjoint_b: bool=False) -> Tensor:
        # Each adjoint flag flips the sign of its operand (phase negation).
        if (adjoint_a and adjoint_b):
            return tf.subtract(tf.math.negative(a), b)
        elif adjoint_a:
            return tf.add(tf.math.negative(a), b)
        elif adjoint_b:
            return tf.subtract(a, b)
        else:
            return tf.add(a, b)

    @staticmethod
    def to_tensor(x: Any) -> Tensor:
        return tf.convert_to_tensor(x)

    def adjoint(self, x: Tensor) -> Tensor:
        return self.to_tensor(tf.math.negative(x))

    def trace(self, x: Tensor) -> Tensor:
        # Re tr exp(i x) for U(1) is cos x.
        return self.to_tensor(tf.math.cos(x))

    def diff_trace(self, x: Tensor) -> Tensor:
        # d/dx trace(x)
        return self.to_tensor(tf.math.negative(tf.math.sin(x)))

    def diff2trace(self, x: Tensor) -> Tensor:
        # d²/dx² trace(x)
        return self.to_tensor(tf.math.negative(tf.math.cos(x)))

    def floormod(self, x: Tensor, y: Tensor) -> Tensor:
        return self.to_tensor(tf.subtract(x, tf.multiply(y, tf.math.floordiv(x, y))))

    @staticmethod
    def group_to_vec(x: Tensor) -> Tensor:
        return tf.convert_to_tensor(tf.concat([tf.math.cos(x), tf.math.sin(x)], axis=1))

    @staticmethod
    def vec_to_group(x: Tensor) -> Tensor:
        # Accepts either a complex tensor or [cos, sin] pairs on the last axis.
        if (x.dtype in [tf.complex64, tf.complex128]):
            return tf.convert_to_tensor(tf.math.atan2(tf.math.imag(x), tf.math.real(x)))
        return tf.math.atan2(x[(..., (- 1))], x[(..., (- 2))])

    def compat_proj(self, x: Tensor) -> Tensor:
        # Wrap angles into [-pi, pi).
        # NOTE(review): adds the dtype-cast `pi` but subtracts the Python
        # constant PI — harmless if dtypes agree; confirm.
        pi = tf.constant(PI, dtype=x.dtype)
        return (((x + pi) % TWO_PI) - PI)

    def random(self, shape: list[int]) -> Tensor:
        # Uniform angles, wrapped into the canonical interval.
        return self.compat_proj((TWO_PI * tf.random.uniform(shape, dtype=TF_FLOAT)))

    def random_momentum(self, shape: list[int]) -> Tensor:
        return tf.random.normal(shape, dtype=TF_FLOAT)

    def kinetic_energy(self, p: Tensor) -> Tensor:
        # (1/2) Σ p² per chain.
        return (0.5 * tf.reduce_sum(tf.square(tf.reshape(p, [p.shape[0], (- 1)])), axis=(- 1)))
|
class Lattice(ABC):
    """Abstract base class for a gauge lattice over a group `g`.

    Concrete subclasses supply the action, Wilson loops, and observable
    computations; this base provides shape bookkeeping and the generic
    dispatch from `x` to `wloops`.
    """

    def __init__(self, group: Group, nchains: int, shape: list[int]) -> None:
        self.g = group
        self.link_shape = self.g._shape
        # Per-configuration shape: (dim, *lattice, [*link_shape for matrix groups]).
        self.xshape = [self.g._dim, *shape]
        if (len(self.g._shape) > 1):
            self.xshape.extend(self.g._shape)
        self.dim = self.g._dim
        # Batched shape: (nchains, *xshape).
        self._shape = [nchains, *self.xshape]
        self.volume = np.cumprod(self._shape[1:(- len(self.g._shape))])[(- 1)]
        self.nchains = nchains
        self._lattice_shape = shape
        # NOTE(review): this overwrites the `self.volume` computed above
        # with the product of the lattice shape alone — confirm which
        # definition downstream code expects.
        self.volume = np.cumprod(shape)[(- 1)]

    def draw_batch(self) -> Any:
        # Random group elements with the trailing two dims dropped.
        return self.g.random(list(self._shape[:(- 2)]))

    def update_link(self, x: Any, p: Any) -> Any:
        # x -> exp(p) * x (gauge update along the momentum direction).
        return self.g.mul(self.g.exp(p), x)

    def random(self) -> Any:
        return self.g.random(list(self._shape))

    def random_momentum(self) -> Any:
        return self.g.random_momentum(list(self._shape))

    @abstractmethod
    def _action(self, wloops: Any, beta: Any) -> Any:
        'Compute the action using wloops, at inverse coupling beta.'
        pass

    def action(self, x: Any, beta: Any) -> Any:
        'Compute the action directly from x, at inverse coupling beta.'
        wloops = self.wilson_loops(x)
        assert (wloops is not None)
        return self._action(wloops, beta)

    @abstractmethod
    def kinetic_energy(self, v: Any) -> Any:
        pass

    def potential_energy(self, x: Any, beta: Any) -> Any:
        # Potential energy is the gauge action.
        return self.action(x, beta)

    @abstractmethod
    def wilson_loops(self, x: Any) -> Any:
        pass

    @abstractmethod
    def _plaqs(self, wloops: Any) -> Any:
        pass

    def plaqs(self, x: Optional[Any]=None, wloops: Optional[Any]=None) -> Any:
        # Accepts either raw links or precomputed Wilson loops.
        if (wloops is None):
            assert (x is not None)
            wloops = (self.wilson_loops(x) if (wloops is None) else wloops)
        assert (wloops is not None)
        return self._plaqs(wloops)

    def charges(self, x: Optional[Any]=None, wloops: Optional[Any]=None) -> Charges:
        if (wloops is None):
            assert (x is not None)
            wloops = self.wilson_loops(x)
        return self._charges(wloops=wloops)

    @abstractmethod
    def _charges(self, wloops: Any) -> Charges:
        pass

    def sin_charges(self, x: Optional[Any]=None, wloops: Optional[Any]=None) -> Any:
        if (wloops is None):
            assert (x is not None)
            wloops = self.wilson_loops(x)
        return self._sin_charges(wloops)

    @abstractmethod
    def _sin_charges(self, wloops: Any) -> Any:
        pass

    def int_charges(self, x: Optional[Any]=None, wloops: Optional[Any]=None) -> Any:
        if (wloops is None):
            assert (x is not None)
            wloops = self.wilson_loops(x)
        return self._int_charges(wloops)

    @abstractmethod
    def _int_charges(self, wloops: Any) -> Any:
        pass

    def unnormalized_log_prob(self, x: Any, beta: Any) -> Any:
        return self.action(x=x, beta=beta)

    @abstractmethod
    def grad_action(self, x: Any, beta: Any) -> Any:
        pass

    @abstractmethod
    def action_with_grad(self, x: Any, beta: Any) -> tuple[(Any, Any)]:
        pass

    # NOTE(review): declared abstract but carries a full default body;
    # subclasses must override, but may delegate back via super().
    @abstractmethod
    def calc_metrics(self, x: Any, beta: Optional[Any]=None) -> dict[(str, Any)]:
        wloops = self.wilson_loops(x)
        plaqs = self.plaqs(wloops=wloops)
        charges = self.charges(wloops=wloops)
        metrics = {'plaqs': plaqs, 'intQ': charges.intQ, 'sinQ': charges.sinQ}
        if (beta is not None):
            (s, ds) = self.action_with_grad(x, beta)
            metrics['action'] = s
            metrics['grad_action'] = ds
        return metrics

    # Abstract with a partial body: normalizes the wloops arguments so an
    # overriding implementation can reuse it via super().
    @abstractmethod
    def plaq_loss(self, acc: Any, x1: Optional[Any]=None, x2: Optional[Any]=None, wloops1: Optional[Any]=None, wloops2: Optional[Any]=None) -> Any:
        if (wloops1 is None):
            assert (x1 is not None)
            wloops1 = self.wilson_loops(x1)
        if (wloops2 is None):
            assert (x2 is not None)
            wloops2 = self.wilson_loops(x2)
        pass

    @abstractmethod
    def charge_loss(self, acc: Any, x1: Optional[Any]=None, x2: Optional[Any]=None, wloops1: Optional[Any]=None, wloops2: Optional[Any]=None) -> Any:
        if (wloops1 is None):
            assert (x1 is not None)
            wloops1 = self.wilson_loops(x1)
        if (wloops2 is None):
            assert (x2 is not None)
            wloops2 = self.wilson_loops(x2)
|
@dataclass
class Charges():
    """Topological charge in both representations."""
    intQ: Array  # integer-valued charge
    sinQ: Array  # real-valued (sine-projected) charge

    def asdict(self):
        """Return both charge representations as a plain dict."""
        return {name: getattr(self, name) for name in ('intQ', 'sinQ')}
|
@dataclass
class LatticeMetrics():
    """Per-batch lattice observables bundled together."""
    plaqs: Array      # average 1x1 plaquettes
    charges: Charges  # topological charges (both representations)
    p4x4: Array       # average 4x4 Wilson loops

    def asdict(self):
        """Flatten the observables (incl. both charge reps) into one dict."""
        return {
            'plaqs': self.plaqs,
            'p4x4': self.p4x4,
            'sinQ': self.charges.sinQ,
            'intQ': self.charges.intQ,
        }
|
def area_law(beta: float, nplaqs: int):
    """Exact area-law expectation: (I1(beta)/I0(beta)) ** nplaqs."""
    ratio = i1(beta) / i0(beta)
    return ratio ** nplaqs
|
def plaq_exact(beta: float):
    """Exact single-plaquette expectation value at inverse coupling beta."""
    return area_law(beta, nplaqs=1)
|
def project_angle(x: Array) -> Array:
    """Wrap angles into the canonical interval [-pi, pi)."""
    return x - TWO_PI * np.floor((x + PI) / TWO_PI)
|
class BaseLatticeU1():
    """Pure-NumPy 2D U(1) lattice gauge theory with periodic boundaries."""

    def __init__(self, nchains: int, shape: tuple[(int, int)]):
        self.nchains = nchains
        self._dim = 2
        assert (len(shape) == 2)
        (self.nt, self.nx) = shape
        # Single-configuration shape (dim, nt, nx); batch prepends nchains.
        self.xshape = (self._dim, *shape)
        self._shape = (nchains, *self.xshape)
        self.nplaqs = (self.nt * self.nx)
        self.nlinks = (self.nplaqs * self._dim)

    def draw_uniform_batch(self):
        """Draw one uniform configuration with angles in (-pi, pi)."""
        # FIX: the shape tuple was passed positionally as `low`; it must be
        # the `size` keyword for np.random.uniform.
        unif = np.random.uniform(size=self.xshape)
        return ((TWO_PI * unif) - PI)

    def unnormalized_log_prob(self, x: Array) -> Array:
        return self.action(x=x)

    def action(self, x: Optional[Array]=None, wloops: Optional[Array]=None) -> Array:
        'Calculate the Wilson gauge action for a batch of lattices.'
        wloops = (self._get_wloops(x) if (wloops is None) else wloops)
        return (1.0 - np.cos(wloops)).sum((1, 2))

    def calc_metrics(self, x: Array) -> dict[(str, Array)]:
        """Plaquettes and both topological-charge observables."""
        wloops = self.wilson_loops(x)
        plaqs = self.plaqs(wloops=wloops)
        charges = self.charges(wloops=wloops)
        return {'plaqs': plaqs, 'intQ': charges.intQ, 'sinQ': charges.sinQ}

    def observables(self, x: Array) -> LatticeMetrics:
        """All observables bundled in a LatticeMetrics record."""
        wloops = self.wilson_loops(x)
        return LatticeMetrics(p4x4=self.plaqs4x4(x=x), plaqs=self.plaqs(wloops=wloops), charges=self.charges(wloops=wloops))

    def wilson_loops(self, x: Array) -> Array:
        'Calculate the Wilson loops by summing links in CCW direction.'
        (x0, x1) = x.reshape((- 1), *self.xshape).transpose(1, 2, 3, 0)
        return (((x0 + np.roll(x1, (- 1), axis=0)) - np.roll(x0, (- 1), axis=1)) - x1).T

    def wilson_loops4x4(self, x: Array) -> Array:
        'Calculate the 4x4 Wilson loops'
        # FIX: the original used torch-style `arr.roll(shift, dims=...)`
        # methods on NumPy arrays (AttributeError at runtime); translated
        # term-for-term to np.roll with identical shift/axis arguments.
        (x0, x1) = x.reshape((- 1), *self.xshape).transpose(1, 0, 2, 3)
        return (
            x0
            + np.roll(x0, (- 1), axis=2)
            + np.roll(x0, (- 2), axis=2)
            + np.roll(x0, (- 3), axis=2)
            + np.roll(x0, (- 4), axis=2)
            + np.roll(x1, ((- 4), (- 1)), axis=(2, 1))
            + np.roll(x1, ((- 4), (- 2)), axis=(2, 1))
            + np.roll(x1, ((- 4), (- 3)), axis=(2, 1))
            - np.roll(x0, ((- 3), (- 4)), axis=(2, 1))
            - np.roll(x0, ((- 2), (- 4)), axis=(2, 1))
            - np.roll(x0, ((- 1), (- 4)), axis=(2, 1))
            - np.roll(x1, (- 4), axis=1)
            - np.roll(x1, (- 3), axis=1)
            - np.roll(x1, (- 2), axis=1)
            - np.roll(x1, (- 1), axis=1)
            - x1
        ).T

    def plaqs(self, x: Optional[Array]=None, wloops: Optional[Array]=None) -> Array:
        'Calculate the average plaquettes for a batch of lattices.'
        if (wloops is None):
            if (x is None):
                raise ValueError('One of `x` or `wloops` must be specified.')
            wloops = self.wilson_loops(x)
        return np.cos(wloops).mean((1, 2))

    def _plaqs4x4(self, wloops4x4: Array) -> Array:
        return np.cos(wloops4x4).mean((1, 2))

    def plaqs4x4(self, x: Optional[Array]=None, wloops4x4: Optional[Array]=None) -> Array:
        'Calculate the 4x4 Wilson loops for a batch of lattices.'
        if (wloops4x4 is None):
            if (x is None):
                raise ValueError('One of `x` or `wloops` must be specified.')
            wloops4x4 = self.wilson_loops4x4(x)
        return self._plaqs4x4(wloops4x4)

    def _sin_charges(self, wloops: Array) -> Array:
        'Calculate sinQ from Wilson loops.'
        return (np.sin(wloops).sum((1, 2)) / TWO_PI)

    def _int_charges(self, wloops: Array) -> Array:
        'Calculate intQ from Wilson loops.'
        return (project_angle(wloops).sum((1, 2)) / TWO_PI)

    def _get_wloops(self, x: Optional[Array]=None) -> Array:
        if (x is None):
            raise ValueError('One of `x` or `wloops` must be specified.')
        return self.wilson_loops(x)

    def sin_charges(self, x: Optional[Array]=None, wloops: Optional[Array]=None) -> Array:
        'Calculate the real-valued charge approximation, sin(Q).'
        wloops = (self._get_wloops(x) if (wloops is None) else wloops)
        return self._sin_charges(wloops)

    def int_charges(self, x: Optional[Array]=None, wloops: Optional[Array]=None) -> Array:
        'Calculate the integer valued topological charge, int(Q).'
        wloops = (self._get_wloops(x) if (wloops is None) else wloops)
        return self._int_charges(wloops)

    def charges(self, x: Optional[Array]=None, wloops: Optional[Array]=None) -> Charges:
        'Calculate both charge representations and return as single object'
        wloops = (self._get_wloops(x) if (wloops is None) else wloops)
        sinQ = self._sin_charges(wloops)
        intQ = self._int_charges(wloops)
        return Charges(intQ=intQ, sinQ=sinQ)
|
def rate(step, model_size, factor, warmup):
    """Noam learning-rate schedule ("Attention Is All You Need", Eq. 3).

    lr = factor * model_size^{-1/2} * min(step^{-1/2}, step * warmup^{-3/2}).
    Step 0 is treated as step 1 to avoid 0 ** -0.5.
    """
    effective = 1 if step == 0 else step
    decay_term = effective ** (- 0.5)
    warmup_term = effective * (warmup ** (- 1.5))
    return (factor * ((model_size ** (- 0.5)) * min(decay_term, warmup_term)))
|
def lr_schedule(model_size, factor, warmup, optimizer) -> LambdaLR:
    """Wrap the Noam `rate` schedule in a torch LambdaLR scheduler."""
    def lr_lambda(step):
        # Delegate the schedule itself to the module-level `rate`.
        return rate(step=step, model_size=model_size, factor=factor, warmup=warmup)
    return LambdaLR(optimizer=optimizer, lr_lambda=lr_lambda)
|
class NoamOpt():
    """Optimizer wrapper implementing the Noam learning-rate schedule."""

    def __init__(self, model_size, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0          # number of .step() calls so far
        self.warmup = warmup    # warmup steps
        self.model_size = model_size
        self._rate = 0          # most recently applied learning rate

    def state_dict(self):
        """Scheduler state as a dict: every attribute except the optimizer."""
        state = dict(self.__dict__)
        state.pop('optimizer', None)
        return state

    def load_state_dict(self, state_dict):
        """Restore scheduler state from a `state_dict()` snapshot."""
        self.__dict__.update(state_dict)

    def step(self):
        """Advance one step: push the new LR into every param group, then
        run the wrapped optimizer's step."""
        self._step += 1
        new_rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate
        self.optimizer.step()

    def rate(self, step=None):
        """Learning rate at `step` (defaults to the current step count)."""
        if step is None:
            step = self._step
        decay_term = step ** (- 0.5)
        warmup_term = step * (self.warmup ** (- 1.5))
        return ((self.model_size ** (- 0.5)) * min(decay_term, warmup_term))
|
def moving_average(x: np.ndarray, n: int=1000):
    """Simple moving average of `x` over a window of `n` samples,
    computed via cumulative sums (output length: len(x) - n + 1)."""
    csum = np.cumsum(x, dtype=np.float32)
    # Window sums: note the RHS is evaluated fully before assignment,
    # which matters because the two slices overlap.
    csum[n:] = (csum[n:] - csum[:(- n)])
    return csum[(n - 1):] / n
|
class ReduceLROnPlateau(Callback):
"Reduce learning rate when a metric has stopped improving.\n Models often benefit from reducing the learning rate by a factor\n of 2-10 once learning stagnates. This callback monitors a\n quantity and if no improvement is seen for a 'patience' number\n of epochs, the learning rate is reduced.\n Example:\n ```python\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=5, min_lr=0.001)\n model.fit(X_train, Y_train, callbacks=[reduce_lr])\n ```\n Arguments:\n monitor: quantity to be monitored.\n factor: factor by which the learning rate will be reduced.\n `new_lr = lr * factor`.\n patience: number of epochs with no improvement after which learning\n rate will be reduced.\n verbose: int. 0: quiet, 1: update messages.\n mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode, the learning\n rate will be reduced when the quantity monitored has stopped\n decreasing; in `'max'` mode it will be reduced when the quantity\n monitored has stopped increasing; in `'auto'` mode, the direction\n is automatically inferred from the name of the monitored quantity.\n min_delta: threshold for measuring the new optimum, to only focus on\n significant changes.\n cooldown: number of epochs to wait before resuming normal operation\n after lr has been reduced.\n min_lr: lower bound on the learning rate.\n "
def __init__(self, lr_config: LearningRateConfig, **kwargs):
super(ReduceLROnPlateau, self).__init__()
self.cfg = lr_config
self.monitor = self.cfg.monitor
self.factor = self.cfg.factor
self.patience = self.cfg.patience
self.mode = self.cfg.mode
self.warmup_steps = self.cfg.warmup
self.min_delta = self.cfg.min_delta
self.cooldown = self.cfg.cooldown
self.min_lr = self.cfg.min_lr
self.verbose = self.cfg.verbose
if (self.factor >= 1.0):
raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
if ('epsilon' in kwargs):
self.min_delta = kwargs.pop('epsilon')
log.warning('`epsilon` argument is deprecated and will be removed, use `min_delta` instead.')
if (self.mode not in ['auto', 'min', 'max']):
log.warning(f'Learning Rate Plateau Reducing mode {self.mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
self.wait = 0
self.best = 0
self.cooldown_counter = 0
self._reset()
def monitor_op(self, current: float, best: float) -> bool:
m = self.mode
if ((m == 'min') or ((m == 'auto') and ('acc' not in self.monitor))):
return np.less(current, (best - self.min_delta))
else:
return np.greater(current, (best + self.min_delta))
def _reset(self):
'Resets wait counter and cooldown counter.'
if (self.mode not in ['auto', 'min', 'max']):
log.warning(f'Learning Rate Plateau Reducing mode {self.mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
m = self.mode
if ((m == 'min') or ((m == 'auto') and ('acc' not in self.monitor))):
self.best = np.Inf
else:
self.best = (- np.Inf)
self.wait = 0
self.cooldown_counter = 0
def on_train_begin(self, logs=None):
self._reset()
def set_optimizer(self, optimizer: Optimizer):
self.optimizer = optimizer
def on_epoch_end(self, step, logs=None):
if (step < self.warmup_steps):
return
logs = (logs or {})
current = logs.get(self.monitor)
if (current is None):
log.info(f"ReduceLROnPlateau conditioned on metric {self.monitor} which is not available. Available metrics are: {','.join(list(logs.keys()))}")
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif (not self.in_cooldown()):
self.wait += 1
if (self.wait >= self.patience):
step = self.optimizer.iterations
old_lr = K.get_value(self.optimizer.lr)
if (old_lr > self.min_lr):
new_lr = (old_lr * self.factor)
new_lr = max(new_lr, self.min_lr)
K.set_value(self.optimizer.lr, new_lr)
if (self.verbose > 0):
log.warning(f'ReduceLROnPlateau (step {step}): Reducing learning rate from: {old_lr} to {new_lr}.')
print(f'current: {current}, best: {self.best}')
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
    """Report whether a cooldown window (following an LR reduction) is active."""
    return self.cooldown_counter > 0
|
def mixed_loss(loss: float, weight: float) -> float:
    """Scale-free 'mixed' loss: weight/loss - loss/weight.

    Positive while loss < weight, zero at loss == weight, negative beyond.
    """
    forward = weight / loss
    backward = loss / weight
    return forward - backward
|
class BaseLoss:
    """Aggregate several named loss terms into one (optionally 'mixed') scalar.

    Parameters
    ----------
    config : LossConfig
        Must expose a boolean ``use_mixed_loss`` attribute.
    metrics_fn : Callable
        Maps a configuration to a dict of metrics.
    loss_fns : dict[str, Callable]
        Mapping of term name -> ``loss_fn(xin, xprop, acc)``.
    loss_weights : dict[str, float], optional
        Per-term weights; defaults to 1.0 for every registered term.
    """

    def __init__(self, config: LossConfig, metrics_fn: Callable, loss_fns: dict[str, Callable], loss_weights: Optional[dict[str, float]] = None) -> None:
        self.config = config
        self.loss_fns = loss_fns
        self.metrics_fn = metrics_fn
        assert callable(self.metrics_fn)
        if loss_weights is None:
            # Every term gets unit weight unless told otherwise.
            loss_weights = {key: 1.0 for key in self.loss_fns.keys()}
        self.loss_weights = loss_weights

    def __call__(self, xin, xprop, acc) -> float:
        """Alias for :meth:`calc_loss`."""
        return self.calc_loss(xin, xprop, acc)

    def metrics(self, xin: Any, xout: Optional[Any] = None) -> dict:
        """Calculate metrics for ``xin``; with ``xout``, also the deltas ``d<key>``."""
        metrics = {}
        metrics.update(self.metrics_fn(xin))
        if xout is not None:
            metrics_out = self.metrics_fn(xout)
            # BUGFIX: iterate over a snapshot -- inserting the d<key> entries
            # while iterating metrics.items() raised
            # "RuntimeError: dictionary changed size during iteration".
            for key, val in list(metrics.items()):
                metrics[f'd{key}'] = metrics_out[key] - val
        return metrics

    def calc_losses(self, xin: Any, xprop: Any, acc: Any) -> tuple[float, dict[str, float]]:
        """Evaluate every loss term; return ``(weighted_total, per_term_dict)``."""
        losses = {}
        total = 0.0
        for key, loss_fn in self.loss_fns.items():
            loss = loss_fn(xin, xprop, acc)
            weight = self.loss_weights[key]
            if self.config.use_mixed_loss:
                total += mixed_loss(loss, weight)
            else:
                total += loss / weight
            losses[key] = loss
        return total, losses

    def calc_loss(self, xin: Any, xprop: Any, acc: Any) -> float:
        """Weighted total over all registered loss terms."""
        total, _ = self.calc_losses(xin, xprop, acc)
        return total
|
class LatticeLoss:
    """Plaquette / topological-charge / RMSE loss for PyTorch lattices.

    Each term is an acceptance-weighted measure of how far a proposed
    configuration ``x_prop`` moved from ``x_init``; plain losses are negated
    (larger moves => lower loss) and scaled by the weights from
    ``loss_config``. The 'mixed' variant uses ``weight/loss - loss/weight``.
    """

    def __init__(self, lattice: LatticeU1 | LatticeSU3, loss_config: LossConfig):
        self.lattice = lattice
        self.config = loss_config
        self.xshape = self.lattice.xshape
        self.plaq_weight = torch.tensor(self.config.plaq_weight, dtype=torch.float)
        self.charge_weight = torch.tensor(self.config.charge_weight, dtype=torch.float)
        self.rmse_weight = torch.tensor(self.config.rmse_weight, dtype=torch.float)
        if isinstance(self.lattice, LatticeU1):
            self.g = U1Phase()
        elif isinstance(self.lattice, LatticeSU3):
            self.g = SU3()
        else:
            # BUGFIX: the old message interpolated `self.g`, which is unbound
            # on this branch and raised AttributeError instead of ValueError.
            raise ValueError(f'Unexpected type for `lattice`: {type(self.lattice)}')

    def __call__(self, x_init: Tensor, x_prop: Tensor, acc: Tensor) -> Tensor:
        """Alias for :meth:`calc_loss`."""
        return self.calc_loss(x_init=x_init, x_prop=x_prop, acc=acc)

    @staticmethod
    def mixed_loss(loss: Tensor, weight: Tensor) -> Tensor:
        """Scale-free loss: ``weight / loss - loss / weight``."""
        return (weight / loss) - (loss / weight)

    def _use_mixed(self, use_mixed_loss: Optional[bool]) -> bool:
        """Resolve a per-call mixed-loss override against the config default."""
        return self.config.use_mixed_loss if use_mixed_loss is None else use_mixed_loss

    def _plaq_loss(self, w1: Tensor, w2: Tensor, acc: Tensor, use_mixed_loss: Optional[bool] = None) -> Tensor:
        """Acceptance-weighted squared difference of summed plaquette real parts."""
        p1 = w1.real.sum(list(range(2, len(w1.shape))))
        p2 = w2.real.sum(list(range(2, len(w2.shape))))
        ploss = acc * ((p2 - p1) ** 2)
        # BUGFIX (consistency): a None override now falls back to
        # config.use_mixed_loss -- matching _charge_loss and rmse_loss --
        # instead of being silently treated as False.
        if self._use_mixed(use_mixed_loss):
            ploss = ploss + 0.0001  # keep mixed_loss away from division by ~0
            return self.mixed_loss(ploss, self.plaq_weight).mean()
        return ((-ploss) / self.plaq_weight).mean()

    def _charge_loss(self, w1: Tensor, w2: Tensor, acc: Tensor, use_mixed_loss: Optional[bool] = None) -> Tensor:
        """Acceptance-weighted squared difference of sin-valued charges."""
        q1 = self.lattice._sin_charges(wloops=w1)
        q2 = self.lattice._sin_charges(wloops=w2)
        qloss = acc * ((q2 - q1) ** 2)
        if self._use_mixed(use_mixed_loss):
            qloss = qloss + 0.0001  # keep mixed_loss away from division by ~0
            return self.mixed_loss(qloss, self.charge_weight).mean()
        return ((-qloss) / self.charge_weight).mean()

    def lattice_metrics(self, xinit: Tensor, xout: Optional[Tensor] = None) -> dict[str, Tensor]:
        """Lattice metrics for ``xinit``; with ``xout``, add |dQint| and |dQsin|."""
        metrics = self.lattice.calc_metrics(x=xinit)
        if xout is not None:
            wloops = self.lattice.wilson_loops(x=xout)
            qint = self.lattice._int_charges(wloops=wloops)
            qsin = self.lattice._sin_charges(wloops=wloops)
            metrics.update({
                'dQint': (qint - metrics['intQ']).abs(),
                'dQsin': (qsin - metrics['sinQ']).abs(),
            })
        return metrics

    def plaq_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor, use_mixed_loss: Optional[bool] = None) -> Tensor:
        """Plaquette loss computed directly from configurations."""
        wl_init = self.lattice.wilson_loops(x=x_init)
        wl_prop = self.lattice.wilson_loops(x=x_prop)
        return self._plaq_loss(w1=wl_init, w2=wl_prop, acc=acc, use_mixed_loss=use_mixed_loss)

    def rmse_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor, use_mixed_loss: Optional[bool] = None) -> Tensor:
        """Acceptance-weighted mean squared displacement |x_prop - x_init|^2."""
        dx = x_prop - x_init
        # NOTE(review): dx.imag requires complex inputs -- confirm callers
        # always pass complex tensors here.
        dx2 = ((dx.real ** 2) + (dx.imag ** 2)).flatten(1)
        rmse = acc * dx2.mean(1)
        if self._use_mixed(use_mixed_loss):
            rmse = rmse + 0.0001  # keep mixed_loss away from division by ~0
            return self.mixed_loss(rmse, self.rmse_weight).mean()
        return ((-rmse) / self.rmse_weight).mean()

    def charge_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor, use_mixed_loss: Optional[bool] = None) -> Tensor:
        """Topological-charge loss computed directly from configurations."""
        wl_init = self.lattice.wilson_loops(x=x_init)
        wl_prop = self.lattice.wilson_loops(x=x_prop)
        return self._charge_loss(w1=wl_init, w2=wl_prop, acc=acc, use_mixed_loss=use_mixed_loss)

    def general_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor, plaq_weight: Optional[float] = None, charge_weight: Optional[float] = None, use_mixed_loss: Optional[bool] = None) -> Tensor | float:
        """Weighted plaquette + charge loss; a weight <= 0 skips that term."""
        wl_init = self.lattice.wilson_loops(x=x_init)
        wl_prop = self.lattice.wilson_loops(x=x_prop)
        pw = self.plaq_weight if plaq_weight is None else plaq_weight
        qw = self.charge_weight if charge_weight is None else charge_weight
        loss = 0.0
        if pw > 0:
            loss += pw * self._plaq_loss(w1=wl_init, w2=wl_prop, acc=acc, use_mixed_loss=use_mixed_loss)
        if qw > 0:
            loss += qw * self._charge_loss(w1=wl_init, w2=wl_prop, acc=acc, use_mixed_loss=use_mixed_loss)
        return loss

    def calc_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor) -> Tensor:
        """Total loss: rmse + plaquette + charge, each only when its weight > 0."""
        wl_init = self.lattice.wilson_loops(x=x_init)
        wl_prop = self.lattice.wilson_loops(x=x_prop)
        zero = torch.tensor(0.0).to(x_init.real.dtype).to(x_init.device)
        rmse_loss = zero
        if self.rmse_weight > 0:
            rmse_loss = self.rmse_loss(x_init=x_init, x_prop=x_prop, acc=acc)
        plaq_loss = zero
        if self.plaq_weight > 0:
            plaq_loss = self._plaq_loss(w1=wl_init, w2=wl_prop, acc=acc)
        charge_loss = zero
        if self.charge_weight > 0:
            charge_loss = self._charge_loss(w1=wl_init, w2=wl_prop, acc=acc)
        return plaq_loss + charge_loss + rmse_loss
|
class LatticeLoss:
    """TensorFlow plaquette/charge loss for U(1) and SU(3) lattices.

    Mirrors the PyTorch implementation: each term is an acceptance-weighted
    measure of the move from ``x_init`` to ``x_prop``, negated (larger moves
    => lower loss) and scaled by the weights from ``loss_config``.
    """

    def __init__(self, lattice: LatticeU1 | LatticeSU3, loss_config: LossConfig):
        self.lattice = lattice
        self.config = loss_config
        self.plaq_weight = tf.constant(self.config.plaq_weight, dtype=TF_FLOAT)
        self.charge_weight = tf.constant(self.config.charge_weight, dtype=TF_FLOAT)
        if isinstance(self.lattice, LatticeU1):
            self._group = 'U1'
            self.g = U1Phase()
        elif isinstance(self.lattice, LatticeSU3):
            self._group = 'SU3'
            self.g = SU3()
        else:
            # BUGFIX: the old message interpolated `self.g`, which is unbound
            # on this branch and raised AttributeError instead of ValueError.
            raise ValueError(f'Unexpected type for `lattice`: {type(self.lattice)}')

    def __call__(self, x_init: Tensor, x_prop: Tensor, acc: Tensor) -> Tensor:
        """Alias for :meth:`calc_loss`."""
        return self.calc_loss(x_init, x_prop, acc)

    @staticmethod
    def mixed_loss(loss: Tensor, weight: float) -> Tensor:
        """Scale-free loss: ``weight / loss - loss / weight``."""
        w = tf.constant(weight, dtype=loss.dtype)
        return (w / loss) - (loss / w)

    def plaq_loss(self, x1: Tensor, x2: Tensor, acc: Tensor) -> Tensor:
        """Plaquette loss computed directly from configurations."""
        w1 = self.lattice.wilson_loops(x=x1)
        w2 = self.lattice.wilson_loops(x=x2)
        return self._plaq_loss(w1, w2, acc)

    def _plaq_loss(self, w1: Tensor, w2: Tensor, acc: Tensor) -> Tensor:
        """Dispatch to the group-specific plaquette loss."""
        if self._group == 'U1':
            return self._plaq_loss_u1(w1, w2, acc)
        if self._group == 'SU3':
            return self._plaq_loss_su3(w1, w2, acc)
        raise AttributeError(f'Unexpected value for self._group: {self._group}')

    def _plaq_loss_su3(self, w1: Tensor, w2: Tensor, acc: Tensor) -> Tensor:
        """SU(3): acceptance-weighted squared difference of summed Re(plaquettes)."""
        p1 = tf.reduce_sum(tf.math.real(w1), axis=range(2, len(w1.shape)))
        # BUGFIX: reduce w2 over its *own* trailing axes (was len(w1.shape)).
        p2 = tf.reduce_sum(tf.math.real(w2), axis=range(2, len(w2.shape)))
        dp = tf.math.square(tf.subtract(p2, p1))
        ploss = tf.multiply(acc, dp)
        if self.config.use_mixed_loss:
            ploss += 0.0001  # keep mixed_loss away from division by ~0
            return tf.reduce_mean(self.mixed_loss(ploss, self.plaq_weight))
        return -tf.reduce_mean(ploss / self.plaq_weight)

    def _plaq_loss_u1(self, w1: Tensor, w2: Tensor, acc: Tensor) -> Tensor:
        """U(1): acceptance-weighted 2*(1 - cos(dw)) summed over the lattice."""
        dw = tf.subtract(w2, w1)
        dwloops = 2.0 * (tf.ones_like(w1) - tf.math.cos(dw))
        dwsum = tf.reduce_sum(dwloops, axis=tuple(range(1, len(dwloops.shape))))
        ploss = tf.multiply(acc, dwsum)
        if self.config.use_mixed_loss:
            ploss += 0.0001  # keep mixed_loss away from division by ~0
            # BUGFIX: the mixed-loss value was computed but never returned,
            # so the plain (non-mixed) loss was always used.
            return tf.reduce_mean(self.mixed_loss(ploss, self.plaq_weight))
        return -tf.reduce_mean(ploss / self.plaq_weight)

    def charge_loss(self, x1: Tensor, x2: Tensor, acc: Tensor) -> Tensor:
        """Topological-charge loss computed directly from configurations."""
        w1 = self.lattice.wilson_loops(x=x1)
        w2 = self.lattice.wilson_loops(x=x2)
        return self._charge_loss(w1=w1, w2=w2, acc=acc)

    def _charge_loss(self, w1: Tensor, w2: Tensor, acc: Tensor) -> Tensor:
        """Acceptance-weighted squared difference of sin-valued charges."""
        dq2 = tf.math.square(tf.subtract(self.lattice._sin_charges(wloops=w2), self.lattice._sin_charges(wloops=w1)))
        qloss = tf.multiply(acc, dq2)
        if self.config.use_mixed_loss:
            qloss += 0.0001  # keep mixed_loss away from division by ~0
            return tf.reduce_mean(self.mixed_loss(qloss, self.charge_weight))
        return -tf.reduce_mean(qloss / self.charge_weight)

    def lattice_metrics(self, xinit: Tensor, xout: Optional[Tensor] = None) -> dict[str, Tensor]:
        """Lattice metrics for ``xinit``; with ``xout``, add |dQint| and |dQsin|."""
        metrics = self.lattice.calc_metrics(x=xinit)
        if xout is not None:
            wloops = self.lattice.wilson_loops(x=xout)
            qint = self.lattice._int_charges(wloops=wloops)
            qsin = self.lattice._sin_charges(wloops=wloops)
            metrics.update({
                'dQint': tf.math.abs(tf.subtract(qint, metrics['intQ'])),
                'dQsin': tf.math.abs(tf.subtract(qsin, metrics['sinQ'])),
            })
        return metrics

    def calc_loss(self, x_init: Tensor, x_prop: Tensor, acc: Tensor) -> Tensor:
        """Total loss: plaquette + charge, each only when its weight > 0."""
        wl_init = self.lattice.wilson_loops(x=x_init)
        wl_prop = self.lattice.wilson_loops(x=x_prop)
        plaq_loss = tf.constant(0.0, dtype=TF_FLOAT)
        if self.plaq_weight > 0:
            plaq_loss = self._plaq_loss(w1=wl_init, w2=wl_prop, acc=acc)
        charge_loss = tf.constant(0.0, dtype=TF_FLOAT)
        if self.charge_weight > 0:
            charge_loss = self._charge_loss(w1=wl_init, w2=wl_prop, acc=acc)
        return tf.add(charge_loss, plaq_loss)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.